diff --git a/.gitattributes b/.gitattributes index 389e0c4d1f1c9c814681704fddd6e869085137b1..e727bb460688cf96d8923b228a79264add57746d 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1349,3 +1349,11 @@ data/2025/2504_04xxx/2504.04471/030e3422-55fa-4f44-818a-0358dd425a44_origin.pdf data/2025/2504_04xxx/2504.04517/8164d78d-023a-42dd-b6ef-ce944834edfa_origin.pdf filter=lfs diff=lfs merge=lfs -text data/2025/2504_04xxx/2504.04519/147c4ff8-2cf6-4e0e-b2f8-ed59851df0d2_origin.pdf filter=lfs diff=lfs merge=lfs -text data/2025/2504_04xxx/2504.04633/22c2bbd5-4c84-49d9-943a-b4159ee93d1f_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_04xxx/2504.04310/968affa1-c14c-4643-a77b-b08b870e8c9e_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_04xxx/2504.04315/bc01e0c9-6b51-4b3b-9b99-9ed47940a83c_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_04xxx/2504.04377/3cb1148d-2625-44e8-a64a-225c0e814138_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_04xxx/2504.04383/fd4cbf72-3c61-47c5-a7ef-ba8037d47f6a_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_04xxx/2504.04385/0a01c7e3-29d7-49b8-ab11-a052914cd3dc_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_04xxx/2504.04416/4ab48d87-f77d-4021-9081-0dbea7a7ea19_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_05xxx/2504.05339/506e32fa-2397-46f0-a31a-fdf0b6768185_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_07xxx/2504.07983/c79e9722-5ec3-4bab-a91d-f1c817c5af43_origin.pdf filter=lfs diff=lfs merge=lfs -text diff --git a/data/2025/2504_04xxx/2504.04310/968affa1-c14c-4643-a77b-b08b870e8c9e_content_list.json b/data/2025/2504_04xxx/2504.04310/968affa1-c14c-4643-a77b-b08b870e8c9e_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..041af9a366bc94cdad18e4793a01a11e1d5be21a --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/968affa1-c14c-4643-a77b-b08b870e8c9e_content_list.json @@ -0,0 +1,3070 @@ +[ + 
{ + "type": "text", + "text": "CO-Bench: Benchmarking Language Model Agents in Algorithm Search for Combinatorial Optimization", + "text_level": 1, + "bbox": [ + 225, + 119, + 774, + 164 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Weiwei Sun* Shengyu Feng* Shanda Li Yiming Yang", + "bbox": [ + 254, + 176, + 750, + 196 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Carnegie Mellon University", + "bbox": [ + 405, + 200, + 591, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{weiweis, shengyuf, shandal, yiming}@cs.cmu.edu", + "bbox": [ + 281, + 215, + 712, + 229 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 248, + 273, + 313, + 286 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Although LLM-based agents have attracted significant attention in domains such as software engineering and machine learning research, their role in advancing combinatorial optimization (CO) remains relatively underexplored. This gap underscores the need for a deeper understanding of their potential in tackling structured, constraint-intensive problems—a pursuit currently limited by the absence of comprehensive benchmarks for systematic investigation. To address this, we introduce CO-Bench, a benchmark suite featuring 36 real-world CO problems drawn from a broad range of domains and complexity levels. CO-Bench includes structured problem formulations and curated data to support rigorous investigation of LLM agents. We evaluate multiple agentic frameworks against established human-designed algorithms, revealing the strengths and limitations of existing LLM agents and identifying promising directions for future research. 
CO-Bench is publicly available at https://github.com/sunnweiwei/CO-Bench.", + "bbox": [ + 98, + 294, + 464, + 510 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Introduction", + "text_level": 1, + "bbox": [ + 225, + 527, + 336, + 542 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Combinatorial Optimization (CO) is a foundational problem class in computer science and operation research, focused on finding optimal solutions in discrete, structured, and constraint-rich domains. It underpins a wide range of real-world applications, including logistics (Vogiatzis and Pardalos 2013), production planning (Crama 1997), bioinformatics (Gusfield 1997), etc. Many CO problems are computationally intractable and classified as NP-hard, making exact solutions impractical at scale. As a result, developing effective algorithms often demands significant domain expertise and manual effort—posing a long-standing challenge in both academic research and industrial applications.", + "bbox": [ + 81, + 547, + 482, + 714 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent advances in Large Language Models (LLMs) (OpenAI 2024b; DeepSeek-AI 2025a) have positioned LLM-based agents as increasingly promising tools for a variety of prediction and decision-making tasks (Jimenez et al. 2023; Chan et al. 2024; Gottweis et al. 2025). In particular, there is growing interest in applying LLMs to CO problems. Initial investigations have largely focused on solution correctness, evaluated on small-scale test instances (Ramamonjison et al. 2023; Yang et al. 2025a; Xiao et al. 2024a), and are often geared towards solving problems posed by general users. More recent works have begun to explore autonomous LLMs as a new approach.", + "bbox": [ + 81, + 713, + 482, + 867 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/86195873cb96386557e499b2d62386ffe100f9c92ca0b6fd4133962c06d1ff1b.jpg", + "image_caption": [ + "Figure 1: Overview of CO-Bench. 
CO-Bench includes 36 problems from 8 categories, and aims to evaluate LLM agents' ability to develop effective and efficient algorithms for solving real-world combinatorial optimization problems." + ], + "image_footnote": [], + "bbox": [ + 519, + 270, + 910, + 460 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "agents capable of conducting research and designing more efficient algorithms for complex scientific and industrial challenges. For example, FunSearch (Romera-Paredes et al. 2023) combines LLM prompting with evolutionary search to discover heuristics that outperform human-designed counterparts in the Cap Set and Bin Packing problems. Subsequent methods (Liu et al. 2024; Ye et al. 2024; Novikov et al. 2025) further improve computational efficiency and broaden applicability to domains such as routing and scheduling.", + "bbox": [ + 514, + 555, + 915, + 681 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Despite these advancements, most existing efforts focus on narrow components (e.g., priority functions) within established algorithms, across a limited set of tasks (typically 4-7 problems), and often rely on heavily handcrafted, problem-specific prompts and templates (Romera-Paredes et al. 2023; Ye et al. 2024). Furthermore, there remains a lack of systematic evaluation of how these agents perform across a broader and more diverse collection of real-world CO problems.", + "bbox": [ + 514, + 681, + 915, + 791 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To address this gap, we introduce CO-Bench, a comprehensive benchmark designed to evaluate LLM agents in the context of efficient CO algorithm development. CO-Bench comprises real-world CO problems spanning a wide range of domains and complexities. Figure 1 illustrates the problem categories and examples, while Table 1 compares CO-Bench with existing CO benchmarks. 
Compared to prior bench", + "bbox": [ + 514, + 791, + 915, + 888 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.04310v3 [cs.CL] 22 Aug 2025", + "bbox": [ + 22, + 276, + 60, + 724 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*These authors contributed equally.", + "bbox": [ + 104, + 875, + 316, + 888 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "marks, CO-Bench offers broader problem coverage, and emphasizes end-to-end evaluation of LLM-based research agents, focusing on their ability to design efficient, potentially novel algorithms from abstract problem descriptions. This design enables reproducible and scalable evaluation of agent performance, including comparisons with human-designed classical CO solver under equivalent time constraints. By doing so, CO-Bench introduces new challenges for LLM agent development, such as the discovery of algorithms that extend beyond current human knowledge of CO.", + "bbox": [ + 81, + 68, + 480, + 205 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Using CO-Bench, we benchmark 15 LLMs and 9 agentic frameworks, comparing their performances against both human-designed classical algorithms and the best-known solutions reported in the literature. Our results show that reasoning models (e.g., o3-mini and Claude-3.7-sonnet) consistently outperform standard no-reasoning LLMs. When integrated into agentic frameworks like FunSearch, LLMs further improve through trial-and-error exploration. Notably, on 25 problems, LLM-generated algorithms outperformed classical solvers, and on 3 problems, they surpassed the best-known solutions. However, our analysis also reveals current limitations, such as limited algorithmic novelty and insufficient handling of feasibility constraints. 
These findings highlight both the promise and challenges of LLM-driven research in CO and suggest key directions for advancing autonomous algorithm design.", + "bbox": [ + 81, + 207, + 480, + 428 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In summary, this paper makes the following contributions:", + "bbox": [ + 98, + 428, + 480, + 443 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(i) We introduce CO-Bench, the first comprehensive benchmark to evaluate the capability of LLMs to develop algorithms for diverse and challenging real-world CO problems", + "(ii) We benchmark 15 LLMs and 9 agentic frameworks, analyzing their performance relative to expert-designed pipelines. Our results highlight the strengths of agent-generated algorithms, while also revealing limitations in planning, feasibility checking, and the generation of efficient solution." + ], + "bbox": [ + 76, + 446, + 480, + 585 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Preliminary", + "text_level": 1, + "bbox": [ + 228, + 599, + 334, + 616 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Combinatorial Optimization", + "text_level": 1, + "bbox": [ + 83, + 619, + 305, + 633 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "For each CO problem $c$ (for example, Traveling salesman problem), we follow Papadimitriou and Steiglitz (1982) to formulate it as a constrained optimization problem in the discrete space. 
Consider an instance $p$ , the optimization problem could be expressed as", + "bbox": [ + 81, + 636, + 480, + 705 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\\min _ {x \\in S _ {c} (p)} f _ {c} (x; p) + g _ {c} (x; p), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 189, + 710, + 478, + 734 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "where $S_{c}(p)$ represents the solution space, e.g., $\\mathbf{Z}^{m} \\times \\mathbb{R}^{n}$ for $d$ discrete variables and $n$ continuous variables, $f_{c}(x;p)$ corresponds to the objective function, and $g_{c}(x;p)$ stands for the constraint violation, which is 0 for feasible solutions and $+\\infty$ otherwise. To avoid the clutter, we simply denote $h_c(x;p) = f_c(x;p) + g_c(x;p)$ in the following text and omit $c$ if the context is clear.", + "bbox": [ + 81, + 739, + 478, + 835 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Given an algorithm set $\\mathcal{A}$ and a problem instance distribution $D$ , the algorithm search problem is defined as", + "bbox": [ + 81, + 837, + 480, + 864 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\\min _ {A \\in \\mathcal {A}} \\mathbb {E} _ {p \\sim D, x \\sim A (p)} [ h (x; p) ]. \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 187, + 869, + 478, + 892 + ], + "page_idx": 1 + }, + { + "type": "table", + "img_path": "images/4648f1b0a565d32fb21802117ec4365be6437a9799aec287ad8b5c78c80d0ed6.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
DatasetAlgorithm DevProblem NumInstance NumLargest Variables
NPHardEvalX990024
NL4OPTX52893
OptiBenchX460518
ComplexORX201009
ReEvo75971,000
CO-Bench366,48211,000
", + "bbox": [ + 517, + 65, + 913, + 200 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Table 1: Data statistics for CO-Bench and related CO benchmarks, including the indicator for algorithm development support, the number of problem types, the number of test-set problem instances, and the largest number of test-set variables (e.g., the number of nodes in the largest graph).", + "bbox": [ + 514, + 208, + 913, + 279 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In contrast to previous neural CO solvers (Bengio, Lodi, and Prouvost 2020) that directly parameterize $A$ with a neural network, we focus on symbolic searching space where $A$ consists of all algorithms that could be represented by a Python Program, with a maximum number of $d$ tokens, where $d$ is typically decided by the output length limit of an LLM. Considering the popularity of randomized algorithms (Motwani and Raghavan 2013) for CO, we treat the output of an algorithm $A(p)$ as a distribution of solutions, while deterministic algorithms would correspond to the point distributions.", + "bbox": [ + 514, + 306, + 913, + 446 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The main endeavor of this work is focused on the shaping of the algorithm set $\\mathcal{A}$ , the curation of the data distribution $D$ and the definition of $h$ on our collected CO problems.", + "bbox": [ + 516, + 446, + 911, + 488 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "LLM Agents", + "text_level": 1, + "bbox": [ + 517, + 500, + 619, + 516 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Given a CO problem $c$ , a candidate algorithm could be generated by an LLM as", + "bbox": [ + 516, + 518, + 913, + 547 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\nA \\sim M (\\text {t e x t i f y} (c); \\theta), \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 638, + 556, + 911, + 571 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "where $M$ 
denotes an LLM with parameters $\\theta$ . However, one-time generation usually leads to infeasible code or suboptimal algorithms (Madaan et al. 2023), and agentic frameworks address this by enabling iterative refinement through structured interactions with external tools (e.g., a coding environment). Formally, an agent performs reasoning-action iterations (Yao et al. 2022):", + "bbox": [ + 514, + 579, + 913, + 676 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\nr _ {t + 1} \\sim M \\left(\\operatorname {t e x t i f y} _ {r} \\left(c, A _ {t}, H _ {t}\\right); \\theta\\right), \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 599, + 685, + 913, + 700 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\na _ {t + 1} \\sim M \\left(\\text {t e x t i f y} _ {a} \\left(r _ {t + 1}, H _ {t}\\right); \\theta\\right), \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 601, + 703, + 913, + 719 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "where $r_t$ is the reasoning step, $a_t$ is the action step (e.g., executing code, evaluating results), and $H_t = (r_i, a_i, \\text{result}(a_i))_{i=1}^{t-1}$ maintains the interaction history. Thus, an LLM agent is formally defined as an LLM $M(\\cdot; \\theta)$ guided by a structured workflow specifying iterative external interactions to enhance its outputs.", + "bbox": [ + 514, + 727, + 913, + 813 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "CO-Bench", + "text_level": 1, + "bbox": [ + 668, + 825, + 761, + 840 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We introduce CO-Bench, a comprehensive benchmark designed to evaluate the algorithm development ability of LLM agents on combinatorial optimization (CO) problems. 
The", + "bbox": [ + 514, + 845, + 913, + 888 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "benchmark consists of 36 problems mainly sourced from OR-Library (Beasley 1990), an established archive containing datasets accumulated by researchers across over 30 years of operations research. These problems span a wide range of realistic CO challenges in academia and industrial applications.", + "bbox": [ + 81, + 68, + 480, + 154 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Data Curation", + "text_level": 1, + "bbox": [ + 83, + 165, + 200, + 179 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Problem Selection We first perform rigorous filtering and cleaning, and select 36 CO problems that cover diverse domains and complexities, including:", + "bbox": [ + 81, + 185, + 480, + 228 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Packing problems: Bin packing (Falkenauer 1996), Multi-Demand Multidimensional Knapsack problem (Cappanera and Trubian 2001), Multidimensional knapsack problem (Petersen 1967), Container loading (Bischoff and Ratcliff 1995; Ivancic 1988), Container loading with weight restrictions (Ratcliff and Bischoff 1998; Bischoff 2006), Packing unequal circles (López and Beasley 2016), Packing unequal rectangles and squares number / area (López and Beasley 2018).", + "- Cutting problems: Assortment problem (Beasley 1985a), Constrained / unconstrained guillotine cutting (Christofides and Whitlock 1977; Beasley 1985b), Constrained non-guillotine cutting (Beasley 1985c, 2004).", + "- Facility location problems: Capacitated / Uncapacitated warehouse location (Beasley 1988, 1993), Capacitated / Uncapacitated p-median problem (Beasley 1985d; Osman and Christofides 1994).", + "- Scheduling problems: Aircraft landing (Beasley et al. 
2000, 2004), Crew scheduling (Beasley and Cao 1996), Common due date scheduling (Biskup and Feldmann 2001), Flow shop scheduling (Taillard 1993), Hybrid Reentrant Shop Scheduling (Chakhlevitch and Glass 2009), Job shop scheduling (Taillard 1993), Open shop scheduling (Taillard 1993).", + "- Routing problems: Traveling salesman problem (Laporte 1992), Period vehicle routing problem (Christofides and Beasley 1984), Resource constrained shortest path (Beasley and Christofides 1989).", + "- Assignment problems: Constrained / unconstrained assignment (Osman 1995; and 1990).", + "- Tree problems: Euclidean Steiner (Beasley 1992), Corporate structuring (Anken and Beasley 2012)", + "- Graph and set problems: Maximal Independent Set (Erdos and Renyi 1984), Graph colouring (Fleurent and Ferland 1996), Equitable partitioning (Mingers and O'Brien 1995), Set partitioning (Chu and Beasley 1998), Set covering (Beasley and Jornsten 1992)." + ], + "bbox": [ + 102, + 232, + 478, + 703 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Data Annotation For each problem, we manually annotate the following components: (1) Problem description: a formal definition of the optimization problem in natural language, accompanied by a clearly specified solve function as the starter code; (2) Data loading function: a load_data function to load and preprocess raw data from the test files; (3) Evaluation function: an eval_func function that rigorously and robustly evaluates the quality of a solution. Additionally, each problem comprises a development set and a test set, each containing several problem instances.", + "bbox": [ + 81, + 712, + 480, + 852 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Evaluation Framework We develop a rigorous and efficient evaluation framework to assess the performance of", + "bbox": [ + 83, + 859, + 480, + 888 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "LLM agents in simulated, time-constrained competition scenarios (Chan et al. 
2024). Specifically, LLM agents operate within a sandbox environment with access to a Linux machine. For each problem, agents are provided with a problem description, development datasets, and an API endpoint for submitting their solutions (i.e. codebases) to receive evaluation feedback. An independent evaluation system, which is protected by built-in safeguards, scores the submitted solutions on the development set in parallel. After a limited number of research steps, the agent submits its final solution for evaluation on the test set. During the agent development process, both eval_func and test data are invisible. Figure 2 shows the evaluation pipeline in CO-Bench.", + "bbox": [ + 514, + 68, + 915, + 250 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Designing Classical Solver Baselines To investigate how existing LLM agents perform compared to classical solvers, we establish a classical solver baseline. Specifically, the authors of this paper—who have extensive experience in related areas and are familiar with the problems in CO-Bench—spent approximately 30 minutes per problem testing and selecting the most effective classical solvers (e.g., LKH for TSP, CPLEX for scheduling, Gurobi for MIS) and tuning their hyperparameters on the development set. 
This process ensures that the classical solver baseline is well-tuned and competitive for each problem in CO-Bench.", + "bbox": [ + 514, + 255, + 915, + 409 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Evaluation Metrics", + "text_level": 1, + "bbox": [ + 516, + 420, + 669, + 434 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Avg Score The main evaluation metric is similar to the Primal Gap (Berthold 2006), defined as the normalized score of the primal bound $h(x; p)$ against a pre-computed optimal (or best-known) objective value $h_p^*$ :", + "bbox": [ + 514, + 438, + 911, + 496 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\ns (x, p) = \\frac {\\operatorname* {m i n} \\left\\{\\left| h (x , p) \\right| , \\left| h _ {p} ^ {*} \\right| \\right\\}}{\\operatorname* {m a x} \\left\\{\\left| h (x , p) \\right| , \\left| h _ {p} ^ {*} \\right| \\right\\}}, \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 604, + 503, + 911, + 540 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "A higher value indicates better performance and a score of 1 signifies the performance identical to the optimal or best-known solution. Program errors or infeasible solutions lead to a score of 0.0. The score of a solver on a given problem is computed by averaging its scores across all test instances. The overall benchmark score is then obtained by averaging these problem-level scores across all 36 problems.", + "bbox": [ + 514, + 546, + 913, + 645 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Valid Solution We compute the percentage of problems for which the generated code is correct on all test instances. Any raised error—such as constraint violation or timeout—is treated as an invalid signal. 
If any test instance for a given problem results in an invalid signal, the entire solution for that problem is considered invalid, even if it produces valid results on other test instances.", + "bbox": [ + 514, + 651, + 911, + 747 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Above Classical Given the performance of classical solver, we calculate the portion of problems where the model outperforms the classical solver baseline.", + "bbox": [ + 514, + 755, + 913, + 797 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Survival Rate The survival rate measures that, for each problem, the percentage of test instances where the model's solution is above $99\\%$ of the reference score (reported optimal or best-known solution from literature). This serve as a challenge metric as the model can only get credit when it is very close or better than previous-best algorithm.", + "bbox": [ + 514, + 804, + 913, + 888 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/a9a3f71437619cfb208811e67ebfaeb37a592dd218250bc67a376effc29926bd.jpg", + "image_caption": [ + "Figure 2: CO-Bench is an evaluation environment for AI agents. Each problem has an associated description and a development dataset. Following the setup in Chan et al. (2024), the agent-generated code implements an algorithm design, which is further graded and compared against the best-known solution and human expert solution." 
+ ], + "image_footnote": [], + "bbox": [ + 127, + 65, + 872, + 238 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Experimental Setup", + "text_level": 1, + "bbox": [ + 194, + 315, + 366, + 333 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Benchmarked Methods", + "text_level": 1, + "bbox": [ + 83, + 334, + 267, + 348 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "On CO-Bench, we evaluate various LLMs combined with different agentic frameworks, and compare them with existing human-designed CO solvers.", + "bbox": [ + 81, + 351, + 480, + 395 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "LLMs We conduct experiments on 5 open-source models and 10 proprietary models. These include instruction-tuned models such as Llama-3.3-70B-Instruct (Meta 2024), Qwen-2.5-Code-32B-Instruct (Hui et al. 2024), DeepSeek-V3 (DeepSeek-AI 2024), and GPT-4o (OpenAI 2024a), as well as frontier reasoning models, including o3-mini (OpenAI 2025), Claude-3.7-Sonnet-Thinking (Anthropic 2025), DeepSeek-R1 (DeepSeek-AI 2025b), Grok-3-Thinking (xAI 2025), QwQ-32B (Qwen 2025), and Gemini 2.5 Pro (DeepMind 2025).", + "bbox": [ + 81, + 398, + 482, + 539 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Agentic frameworks For the aforementioned LLMs, we apply various agentic frameworks to evaluate their performance across different strategies. These range from simple approaches, such as direct generation, to more sophisticated frameworks that augment LLM with additional tools, workflows, and test-time compute:", + "bbox": [ + 81, + 544, + 482, + 628 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Direct Answer: The simplest approach, where the LLM directly generates a solution to the combinatorial optimization problem without further refinement.", + "- BestOfN Sampling (Chen et al. 
2021): Generate $N$ candidate solutions, evaluate each on a development set, and select the solution with the best performance.", + "- Chain of Experts (Xiao et al. 2024a): A multi-agent prompting framework where agents of different roles cooperate to debug and deliver one solution.", + "- Greedy Refinement (Shinn et al. 2023; Madaan et al. 2023): Iteratively prompt the LLM to refine the current best solution based on the evaluation results of the development set, repeating this refinement process for $N$ steps.", + "- FunSearch (Romera-Paredes et al. 2023): Prompt the LLM to either draft a new solution or refine an existing one, followed by employing an evolutionary algorithm to iteratively select and improve candidate solutions." + ], + "bbox": [ + 89, + 630, + 482, + 888 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- EoH (Liu et al. 2024): Evolve both thoughts and codes in an evolutionary search framework for generating high-performance heuristics.", + "- AIDE (Jiang et al. 2025): A representative method for machine learning engineering tasks, which stores existing solutions in a tree structure and selectively prompts the LLM to draft new solutions, debug or improve previously stored solutions.", + "- ReEvo (Ye et al. 2024): A recent evolutionary algorithm that incorporates short-term and long-term reflection modules, as well as a multi-agentic framework.", + "- MSTC-AHD (Zheng et al. 2025): A Monte Carlo Tree Search (MCTS)-based agentic pipeline that organizes all LLM-generated heuristics in a tree structure and uses the MCTS algorithm with progressive widening technique to guide the evolution of heuristics." 
+ ], + "bbox": [ + 524, + 316, + 915, + 547 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Implementation Details", + "text_level": 1, + "bbox": [ + 516, + 563, + 700, + 579 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "For benchmark evaluation, we limit the solving time of each test instance to 10 seconds on a single CPU, such that the exact solving of the problem (achieving the optimal solution) is impossible on most test instances. Test instances that result in a timeout or error receive a score of 0.", + "bbox": [ + 514, + 580, + 913, + 651 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "For agent implementation, we use o3-mini-medium as the default base model. Since the original implementations of these agents may use different evaluation setups, we adapt their approaches to our benchmark setting (i.e., end-to-end algorithm search) by adjusting the prompts and tools. For all agents, we set the number of iteration steps to 64. In each step, the agent generates a code block as a candidate algorithm and obtains its evaluation score on the development set. After 64 iterations, the agent produces 64 candidate algorithms, from which the best-performing solution on the development set is selected for final benchmark evaluation. All evaluations are conducted on a single CPU core of a dual AMD EPYC 7313 16-Core processor.", + "bbox": [ + 514, + 651, + 913, + 832 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Main Results", + "text_level": 1, + "bbox": [ + 516, + 842, + 624, + 856 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Figure 3 presents the results of LLMs and agents on the test set. We highlight the following key findings.", + "bbox": [ + 514, + 859, + 913, + 890 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/e28e2f58b1de465d6a56fd4bf5fc752436137e04e2ed0ea6c5ad22e5eae41853.jpg", + "image_caption": [ + "Figure 3: Overall Performance. LLM Agents are all based on o3-mini-medium. 
Avg Score refers to the average normalized objective scores across all problems. Valid Solution indicates the percentage of test-set problems for which the solutions are feasible. Above Classical represents the percentage of test instances where the model outperforms the classical solver baseline. Survival Rate measures the percentage of test instances where the model's score exceeds $99\\%$ of the reference score." + ], + "image_footnote": [], + "bbox": [ + 86, + 65, + 911, + 518 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Direct generation performance is limited. LLMs show significantly lower average scores compared to the classical solver. They often fail to generate valid solutions (i.e., bug-free code that satisfies all constraints within the time limit), rarely outperform the classical solver on individual instances, and often fail to produce optimal solutions. Reasoning-capable models tend to perform better than nonreasoning ones. The best-performing LLM for one-shot generation is Claude-3.7 Sonnet, with an average score of 0.65.", + "bbox": [ + 81, + 613, + 482, + 741 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Agentic systems substantially improve LLM performance. Compared to direct generation, the agentic pipeline achieves considerably higher scores across all metrics. Among the evaluated frameworks, FunSearch attains the highest average score of 0.842, outperforming the classical solver (0.797). It also surpasses the solver on over half the test instances (see \"Above Classical\" score) and achieves a higher survival rate. These results highlight the effectiveness of LLM-based agents in solving CO problems.", + "bbox": [ + 81, + 744, + 482, + 871 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Agent performance varies widely. 
Some advanced agentic", + "bbox": [ + 83, + 875, + 480, + 891 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "frameworks, such as AIDE, underperform compared to simpler strategies like BestOfN on most metrics, though they show higher valid solution rates—possibly due to their debugging capabilities. This indicates that current planning mechanisms in agents are still underdeveloped and may not reliably outperform random sampling.", + "bbox": [ + 514, + 613, + 915, + 698 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Valid solution rates still lag behind classical solvers. According to the Valid Solution metric, the best-performing agents achieve a success rate of 0.555—lower than that of the classical solver (0.611). This suggests that current agents often struggle with solution feasibility and reliability.", + "bbox": [ + 514, + 698, + 915, + 768 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Agents Error Analysis", + "text_level": 1, + "bbox": [ + 516, + 782, + 692, + 799 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To investigate why the agents' valid solution scores are low, Figure 4 shows the types of errors among invalid solutions for five agents. We observe that code errors (i.e., bugs that prevent compilation) are the least frequent issue. The dominant error type varies across agents: Greedy Refine and ReEvo exhibit more constraint violations, while FunSearch, AIDE, and", + "bbox": [ + 514, + 804, + 915, + 890 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/761b895f5325873705d7f6de307e584bd678aed22c73aa6d947d3748d99f59e0.jpg", + "image_caption": [ + "Figure 4: Agents Error Analysis. Distribution of three types of errors among invalid solutions for five agents." 
+ ], + "image_footnote": [], + "bbox": [ + 127, + 63, + 872, + 200 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/85dd609a586c94b65a492ef0a650c2e384cd798f6f13c90be03fb5f20d969b8d.jpg", + "image_caption": [ + "Figure 5: Avg Score vs. the number of iteration steps (in total 64 steps) during the algorithm development." + ], + "image_footnote": [], + "bbox": [ + 88, + 250, + 475, + 448 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "BoN encounter more timeout errors. This highlights agents' limitations in satisfying constraints and generating efficient algorithms within time limits.", + "bbox": [ + 81, + 518, + 480, + 561 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Performance over Iteration Steps", + "text_level": 1, + "bbox": [ + 83, + 574, + 341, + 590 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Figure 5 illustrates the performance of several representative LLM agents across different iteration steps. At each step, the agent generates a new algorithm and receives evaluation results on the development set. We also include the performance of the classical solver baseline for comparison.", + "bbox": [ + 81, + 595, + 478, + 666 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "All agents exhibit the ability to improve their performance with more iteration steps. FunSearch consistently achieves the best results, reaching a score of 0.8423 and converging after around 50 steps. Notably, both FunSearch and Refine discover algorithms that outperform the classical solver within approximately 10 steps. However, performance tends to saturate after 30 steps, with further search yielding diminishing returns. 
Enabling more consistent improvements under longer search budgets presents an interesting future direction.", + "bbox": [ + 81, + 666, + 480, + 791 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Figure 6 shows an example trajectory of algorithm development by Greedy Refinement (o3-mini) on TSP over multiple search steps. In the early stages, the agent enhances code efficiency by adopting vectorized data structures and utilizing a K-D tree. It then increases the number of search iterations and introduces perturbations to escape local optima. Finally, the agent integrates simulated annealing to balance exploration", + "bbox": [ + 81, + 791, + 480, + 888 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/954c0315897a14185b49ad98c139c33d7c283f5cff1989886d08731c96bbf2cf.jpg", + "image_caption": [ + "Figure 6: Trajectory of algorithm development for Greedy Refinement on TSP over 64 steps. The curve and highlighted dots indicate the best-ever score and the steps where improvements occurred. The algorithmic ideas behind each improvement step are summarized in corresponding boxes." + ], + "image_footnote": [], + "bbox": [ + 517, + 251, + 911, + 412 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "and exploitation and applies adaptive heuristics for different instance sizes. This example demonstrates that LLMs excel in applying established techniques to improve efficiency and implementation quality, but failing at algorithmic novelty.", + "bbox": [ + 514, + 518, + 913, + 575 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Comparison to Neural Solvers", + "text_level": 1, + "bbox": [ + 514, + 587, + 754, + 603 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 2 compares the performance of agents with representative neural solvers on TSP and MIS, two well-studied CO problems. We include DIMES (Qiu, Sun, and Yang 2022), DIFUSCO (Sun and Yang 2023), and T2T (Li et al. 2023) as neural baselines. 
For the method with multiple variants, we only include their best results on each dataset. We also consider a hybrid method, LEHD + ReEvo (Ye et al. 2024), which combines the neural solver with LLM-designed heuristics. We report both the objective values (the tour length for TSP and set size for MIS) and the solving time. The results show that the agents such as Greedy Refine and FunSearch achieve competitive performance on both problems, often outperforming existing neural solvers under similar time budget and approaching the best results achieved by previous solvers given extended search time.", + "bbox": [ + 514, + 606, + 915, + 815 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Solution Analysis", + "text_level": 1, + "bbox": [ + 514, + 827, + 656, + 842 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In Figure 7, we plot the percentage of algorithms developed by the Greedy Refinement agent for the 36 CO problems that utilize existing solvers (e.g., code importing ortools,", + "bbox": [ + 514, + 845, + 913, + 888 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/c60a3cdc429fa90622c07bf6619a002a061ab95eb3b5be46a9ea9e3efb2bd15f.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
TSP-500TSP-1000TSP-10000ER-SmallER-Large
Len ↓Time ↓Len ↓Time ↓Len ↓Time ↓Size ↑Time ↓Size ↑Time ↓
Gurobi16.5545.6h----41.3850.0m--
DIMES18.841.1m26.362.4m85.754.8m42.0612.0m332.8012.5m
DIFUSCO16.6511.5m23.4548.1m73.896.72h41.1226.6m--
T2T16.6116.0m23.3054.6m--41.3729.7m--
LEHD + ReEvo16.78-23.82-------
Greedy Refine (o3-mini)17.3719.1m24.4019.1m77.652.5m42.3520.1m354.002.5m
FunSearch (o3-mini)17.2019.1m25.3119.1m80.182.5m41.651.9m356.502.1m
", + "bbox": [ + 147, + 65, + 849, + 212 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 2: Objective values and solving time of different solvers on TSP and MIS, with varying data sizes.", + "bbox": [ + 153, + 222, + 839, + 238 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/1d43eb5705a42b8bfe01034dfd0a3d46833c0ca788e8039ac91c760f5fe8f163.jpg", + "image_caption": [ + "Figure 7: Percentage of algorithms developed by the Greedy Refinement agent that rely on existing solvers (e.g., code importing ortools, pulp) over 64 iteration steps. We observe an increasing use of existing solvers." + ], + "image_footnote": [], + "bbox": [ + 114, + 270, + 446, + 435 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "pulp). The percentages are shown across 64 iteration steps. We observe an increasing trend in the use of existing solvers in the agent's solutions. After 64 iterations, the final usage rate reaches $25\\%$ (i.e., solutions for 9 problems use existing solvers). The solvers used throughout all steps and problems are limited to three: ortools, pulp, and scipy.", + "bbox": [ + 81, + 530, + 478, + 613 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "This suggests that while existing LLM agents are capable of developing algorithms without relying on existing solvers for most problems, there is a growing tendency to do so over time. 
Moreover, the solvers used are basic general-purpose tools rather than state-of-the-art solvers specifically designed for each problem (e.g., LKH for TSP), indicating that the agent lacks the necessary knowledge to select the best-performing solver.", + "bbox": [ + 81, + 613, + 480, + 726 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Related Work", + "text_level": 1, + "bbox": [ + 220, + 738, + 341, + 753 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Automatic Algorithm Search for CO", + "text_level": 1, + "bbox": [ + 83, + 758, + 367, + 773 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Automating algorithm search for combinatorial optimization (CO) has emerged as a significant research direction in the machine learning community. Traditional machine learning solvers primarily parameterize CO algorithms as trainable neural networks (Bengio, Lodi, and Prouvost 2020; Cappart et al. 2023). Although effective in capturing data distributions, these neural approaches often struggle to generate feasible solutions, necessitating integration with human-", + "bbox": [ + 81, + 777, + 482, + 888 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "designed heuristics such as branch-and-bound (Gasse et al. 2019) and tree search (Böther et al. 2022). To address this limitation, Kuang et al. (2024a,b) propose to decompose CO algorithms into symbolic operators and conduct searches in the symbolic space. However, designing these unit symbolic operators demands substantial human expertise, limiting generalizability and comprehensive coverage of all algorithm types. Recent advances in Large Language Models (LLMs) and LLM-based agents have significantly mitigated this challenge by enabling symbolic searching in programming language formats (Romera-Paredes et al. 2023; Ye et al. 2024; Liu et al. 2024). 
Building on these developments, CO-Bench aims to extend the success of these methods to more real-world CO problems and facilitate further research in this domain.", + "bbox": [ + 514, + 263, + 915, + 472 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "CO Benchmarks for LLMs", + "text_level": 1, + "bbox": [ + 516, + 486, + 728, + 500 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Existing CO benchmarks can be roughly classified into two categories. The first type formulates CO problems as question-answering tasks (Fan et al. 2024; Tang et al. 2025). Although LLMs have the potential to solve CO problems via natural language reasoning, their excessive parameter size makes them inefficient CO solvers in general. Therefore, the second type of benchmarks evaluates the tool-using ability of LLMs, e.g., calling an existing CO solver, to address CO problems (Xiao et al. 2024b; Ahmaditeshnizi, Gao, and Udell 2024; Yang et al. 2025b). However, these benchmarks only evaluate the correctness of the generated algorithm on small-scale CO problems, whose problem parameters could be fully expressed in natural language. In contrast, CO-Bench targets scientific and industrial challenges, emphasizing the evaluation of algorithm efficiency on diverse, large-scale CO instances. This results in a more demanding benchmark, well-suited for assessing powerful reasoning models and agents.", + "bbox": [ + 514, + 505, + 915, + 743 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Conclusion", + "text_level": 1, + "bbox": [ + 665, + 757, + 764, + 771 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "This work introduces CO-Bench, the first benchmark designed to evaluate the ability of LLMs in the search of combinatorial optimization (CO) algorithms. 
Our systematic evaluation reveals that reasoning-focused LLMs, especially when paired with agentic frameworks, can automatically discover effective algorithms that rival or surpass the classical solvers designed by human experts, with competitive searching time. However, we also identify key limitations of current LLM", + "bbox": [ + 514, + 777, + 915, + 888 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "agents such as they struggle to understand the problem constraints. These shortcomings highlight the need for future research to enhance agents' problem comprehension and creative reasoning abilities in CO tasks, enabling more robust and autonomous scientific discovery.", + "bbox": [ + 83, + 68, + 480, + 138 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 233, + 152, + 330, + 167 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Ahmaditeshnizi, A.; Gao, W.; and Udell, M. 2024. OptiMUS: Scalable Optimization Modeling with (MI)LP Solvers and Large Language Models. In Salakhutdinov, R.; Kolter, Z.; Heller, K.; Weller, A.; Oliver, N.; Scarlett, J.; and Berkenkamp, F., eds., Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, 577-596. PMLR.", + "and, J. E. B. 1990. Linear Programming on Cray Supercomputers. Journal of the Operational Research Society, 41(2): 133-139.", + "Anken, F.; and Beasley, J. E. 2012. Corporate structure optimisation for multinational companies. Omega-international Journal of Management Science, 40: 230-243.", + "Anthropic. 2025. Claude Sonnet. https://www.anthropic.com/claude/sonnet. Accessed: 2025-03-24.", + "Beasley, J. E. 1985a. An algorithm for the two-dimensional assortment problem. European Journal of Operational Research, 19: 253-261.", + "Beasley, J. E. 1985b. Algorithms for Unconstrained Two-Dimensional Guillotine Cutting. 
Journal of the Operational Research Society, 36: 297-306.", + "Beasley, J. E. 1985c. An Exact Two-Dimensional Non-Guillotine Cutting Tree Search Procedure. Oper. Res., 33: 49-64.", + "Beasley, J. E. 1985d. A note on solving large p-median problems. European Journal of Operational Research, 21: 270-273.", + "Beasley, J. E. 1988. An algorithm for solving large capacitated warehouse location problems. European Journal of Operational Research, 33: 314-325.", + "Beasley, J. E. 1990. OR-Library: Distributing Test Problems by Electronic Mail. Journal of the Operational Research Society, 41: 1069-1072.", + "Beasley, J. E. 1992. A heuristic for Euclidean and rectilinear Steiner problems. European Journal of Operational Research, 58: 284-292.", + "Beasley, J. E. 1993. Lagrangean heuristics for location problems. European Journal of Operational Research, 65: 383-399.", + "Beasley, J. E. 2004. A population heuristic for constrained two-dimensional non-guillotine cutting. *Eur. J. Oper. Res.*, 156: 601-627.", + "Beasley, J. E.; and Cao, B. 1996. A tree search algorithm for the crew scheduling problem. European Journal of Operational Research, 94: 517-526.", + "Beasley, J. E.; and Christofides, N. 1989. An algorithm for the resource constrained shortest path problem. Networks, 19: 379-394." + ], + "bbox": [ + 83, + 171, + 480, + 887 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Beasley, J. E.; and Jornsten, K. 1992. Enhancing an algorithm for set covering problems. European Journal of Operational Research, 58: 293-300.", + "Beasley, J. E.; Krishnamoorthy, M.; Sharaiha, Y. M.; and Abramson, D. 2000. Scheduling Aircraft Landings - The Static Case. Transp. Sci., 34: 180-197.", + "Beasley, J. E.; Krishnamoorthy, M.; Sharaiha, Y. M.; and Abramson, D. 2004. Displacement problem and dynamically scheduling aircraft landings. Journal of the Operational Research Society, 55: 54-64.", + "Bengio, Y.; Lodi, A.; and Prouvost, A. 2020. 
Machine Learning for Combinatorial Optimization: a Methodological Tour d'Horizon. arXiv:1811.06128.", + "Berthold, T. 2006. Primal heuristics for mixed integer programs. Ph.D. thesis, Zuse Institute Berlin (ZIB).", + "Bischoff, E. E. 2006. Three-dimensional packing of items with limited load bearing strength. Eur. J. Oper. Res., 168: 952-966.", + "Bischoff, E. E.; and Ratcliff, M. S. W. 1995. Issues in the development of approaches to container loading. Omega-international Journal of Management Science, 23: 377-390.", + "Biskup, D.; and Feldmann, M. 2001. Benchmarks for scheduling on a single machine against restrictive and unrestricted common due dates. Comput. Oper. Res., 28: 787-801.", + "Böther, M.; Kißig, O.; Taraz, M.; Cohen, S.; Seidel, K.; and Friedrich, T. 2022. What's Wrong with Deep Learning in Tree Search for Combinatorial Optimization. In International Conference on Learning Representations.", + "Cappanera, P.; and Trubian, M. 2001. A Local-Search-Based Heuristic for the Demand-Constrained Multidimensional Knapsack Problem. INFORMS J. Comput., 17: 82-98.", + "Cappart, Q.; ChA©telat, D.; Khalil, E. B.; Lodi, A.; Morris, C.; and VeliAkoviA‡, P. 2023. Combinatorial Optimization and Reasoning with Graph Neural Networks. Journal of Machine Learning Research, 24(130): 1-61.", + "Chakhlevitch, K.; and Glass, C. A. 2009. Scheduling reentrant jobs on parallel machines with a remote server. Comput. Oper. Res., 36: 2580-2589.", + "Chan, J. S.; Chowdhury, N.; Jaffe, O.; Aung, J.; Sherburn, D.; Mays, E.; Starace, G.; Liu, K.; Maksin, L.; Patwardhan, T. A.; Weng, L.; and Mkadry, A. 2024. MLE-bench: Evaluating Machine Learning Agents on Machine Learning Engineering. 
ArXiv, abs/2410.07095.", + "Chen, M.; Tworek, J.; Jun, H.; Yuan, Q.; Ponde, H.; Kaplan, J.; Edwards, H.; Burda, Y.; Joseph, N.; Brockman, G.; Ray, A.; Puri, R.; Krueger, G.; Petrov, M.; Khlaaf, H.; Sastry, G.; Mishkin, P.; Chan, B.; Gray, S.; Ryder, N.; Pavlov, M.; Power, A.; Kaiser, L.; Bavarian, M.; Winter, C.; Tillet, P.; Such, F. P.; Cummings, D. W.; Plappert, M.; Chantzis, F.; Barnes, E.; Herbert-Voss, A.; Guss, W. H.; Nichol, A.; Babuschkin, I.; Balaji, S.; Jain, S.; Carr, A.; Leike, J.; Achiam, J.; Misra, V.; Morikawa, E.; Radford, A.; Knight, M. M.; Brundage, M.; Murati, M.; Mayer, K.; Welinder, P.; McGrew, B.; Amodei, D.; McCandlish, S.; Sutskever, I.; and Zaremba, W. 2021. Evaluating Large Language Models Trained on Code. ArXiv, abs/2107.03374." + ], + "bbox": [ + 517, + 68, + 911, + 887 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Christofides, N.; and Beasley, J. E. 1984. The period routing problem. Networks, 14: 237-256.", + "Christofides, N.; and Whitlock, C. 1977. An Algorithm for Two-Dimensional Cutting Problems. Oper. Res., 25: 30-44.", + "Chu, P. C.; and Beasley, J. E. 1998. Constraint Handling in Genetic Algorithms: The Set Partitioning Problem. Journal of Heuristics, 4: 323-357.", + "Crama, Y. 1997. Combinatorial optimization models for production scheduling in automated manufacturing systems. European Journal of Operational Research, 99(1): 136-153.", + "DeepMind, G. 2025. Flash Thinking: Behind the Scenes of Gemini. https://deepmind.google/technologies/gemini/flash-thinking/. Accessed: 2025-03-24.", + "DeepSeek-AI. 2024. DeepSeek-V3 Technical Report. ArXiv, abs/2412.19437.", + "DeepSeek-AI. 2025a. DeepSeek-R1: Incentivizing Reasoning Capability in LLMs via Reinforcement Learning. arXiv:2501.12948.", + "DeepSeek-AI. 2025b. DeepSeek-R1: Incentivizing Reasoning Capability in LLMs via Reinforcement Learning. ArXiv, abs/2501.12948.", + "Erdos, P. L.; and Rényi, A. 1984. 
On the evolution of random graphs. Transactions of the American Mathematical Society, 286: 257-257.", + "Falkenauer, E. 1996. A hybrid grouping genetic algorithm for bin packing. Journal of Heuristics, 2: 5-30.", + "Fan, L.; Hua, W.; Li, L.; Ling, H.; and Zhang, Y. 2024. NPHardEval: Dynamic Benchmark on Reasoning Ability of Large Language Models via Complexity Classes. In Ku, L.-W.; Martins, A.; and Srikumar, V., eds., Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 4092-4114. Bangkok, Thailand: Association for Computational Linguistics.", + "Fleurent, C.; and Ferland, J. A. 1996. Genetic and hybrid algorithms for graph coloring. Annals of Operations Research, 63: 437-461.", + "Gasse, M.; Chételat, D.; Ferroni, N.; Charlin, L.; and Lodi, A. 2019. Exact Combinatorial Optimization with Graph Convolutional Neural Networks. In Advances in Neural Information Processing Systems 32.", + "Gottweis, J.; Weng, W.-H.; Daryin, A.; Tu, T.; Palepu, A.; Sirkovic, P.; Myaskovsky, A.; Weissenberger, F.; Rong, K.; Tanno, R.; Saab, K.; Popovici, D.; Blum, J.; Zhang, F.; Chou, K.; Hassidim, A.; Gokturk, B.; Vahdat, A.; Kohli, P.; Matias, Y.; Carroll, A.; Kulkarni, K.; Tomaev, N.; Guan, Y.; Dhillon, V.; Vaishnav, E. D.; Lee, B.; Costa, T. R. D.; Penad'es, J. R.; Peltz, G.; Xu, Y.; Pawlosky, A.; Karthikesalingam, A.; and Natarajan, V. 2025. Towards an AI co-scientist. *ArXiv*, abs/2502.18864.", + "Gusfield, D. 1997. Algorithms on stings, trees, and sequences: Computer science and computational biology. *Acm Sigact News*, 28(4): 41-60.", + "Hui, B.; Yang, J.; Cui, Z.; Yang, J.; Liu, D.; Zhang, L.; Liu, T.; Zhang, J.; Yu, B.; Dang, K.; Yang, A.; Men, R.; Huang, F.; Quan, S.; Ren, X.; Ren, X.; Zhou, J.; and Lin, J. 2024. Qwen2.5-Coder Technical Report. ArXiv, abs/2409.12186." + ], + "bbox": [ + 83, + 68, + 480, + 888 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Ivancic, N. J. 
1988. An integer programming based heuristic approach to the three-dimensional packing problem.", + "Jiang, Z.; Schmidt, D.; Srikanth, D.; Xu, D.; Kaplan, I.; Jacenko, D.; and Wu, Y. 2025. AIDE: AI-Driven Exploration in the Space of Code. ArXiv, abs/2502.13138.", + "Jimenez, C. E.; Yang, J.; Wettig, A.; Yao, S.; Pei, K.; Press, O.; and Narasimhan, K. 2023. SWE-bench: Can Language Models Resolve Real-World GitHub Issues? ArXiv, abs/2310.06770.", + "Kuang, Y.; Wang, J.; Liu, H.; Zhu, F.; Li, X.; Zeng, J.; HAO, J.; Li, B.; and Wu, F. 2024a. Rethinking Branching on Exact Combinatorial Optimization Solver: The First Deep Symbolic Discovery Framework. In *The Twelfth International Conference on Learning Representations*.", + "Kuang, Y.; Wang, J.; Zhou, Y.; Li, X.; Zhu, F.; Hao, J.; and Wu, F. 2024b. Towards General Algorithm Discovery for Combinatorial Optimization: Learning Symbolic Branching Policy from Bipartite Graph. In Salakhutdinov, R.; Kolter, Z.; Heller, K.; Weller, A.; Oliver, N.; Scarlett, J.; and Berkenkamp, F., eds., Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, 25623-25641. PMLR.", + "Laporte, G. 1992. The traveling salesman problem: An overview of exact and approximate algorithms. European Journal of Operational Research, 59(2): 231-247.", + "Li, Y.; Guo, J.; Wang, R.; and Yan, J. 2023. From Distribution Learning in Training to Gradient Search in Testing for Combinatorial Optimization. In Neural Information Processing Systems.", + "Liu, F.; Tong, X.; Yuan, M.; Lin, X.; Luo, F.; Wang, Z.; Lu, Z.; and Zhang, Q. 2024. Evolution of Heuristics: Towards Efficient Automatic Algorithm Design Using Large Language Model. In ICML.", + "López, C. O.; and Beasley, J. E. 2016. A formulation space search heuristic for packing unequal circles in a fixed size circular container. Eur. J. Oper. Res., 251: 64-73.", + "López, C. O.; and Beasley, J. E. 2018. 
Packing unequal rectangles and squares in a fixed size circular container using formulation space search. Comput. Oper. Res., 94: 106-117.", + "Madaan, A.; Tandon, N.; Gupta, P.; Hallinan, S.; Gao, L.; Wegreffe, S.; Alon, U.; Dziri, N.; Prabhumoye, S.; Yang, Y.; Welleck, S.; Majumder, B. P.; Gupta, S.; Yazdanbakhsh, A.; and Clark, P. 2023. Self-Refine: Iterative Refinement with Self-Feedback. ArXiv, abs/2303.17651.", + "Meta. 2024. The Llama 3 Herd of Models. ArXiv, abs/2407.21783.", + "Mingers, J. C.; and O'Brien, F. A. 1995. Creating student groups with similar characteristics: A heuristic approach. Omega-international Journal of Management Science, 23: 313-321.", + "Motwani, R.; and Raghavan, P. 2013. Randomized Algorithms. USA: Cambridge University Press. ISBN 0511814070.", + "Novikov, A.; V~u, N.; Eisenberger, M.; Dupont, E.; Huang, P.-S.; Wagner, A. Z.; Shirobokov, S.; Kozlovskii, B. M.; Ruiz, F. J. R.; Mehrabian, A.; Kumar, M. P.; See, A.; Chaudhuri, S.;" + ], + "bbox": [ + 517, + 68, + 913, + 888 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Holland, G.; Davies, A.; Nowozin, S.; Kohli, P.; Balog, M.; and Deepmind, G. 2025. AlphaEvolve: A coding agent for scientific and algorithmic discovery. *ArXiv*, abs/2506.13131.", + "OpenAI. 2024a. GPT-4o System Card. ArXiv, abs/2410.21276.", + "OpenAI. 2024b. OpenAI o1 System Card. arXiv:2412.16720.", + "OpenAI. 2025. OpenAI o3-mini System Card.", + "Osman, I. H. 1995. Heuristics for the generalised assignment problem: simulated annealing and tabu search approaches. Operations-Research-Spektrum, 17: 211-225.", + "Osman, I. H.; and Christofides, N. 1994. Capacitated clustering problems by hybrid simulated annealing and tabu search. International Transactions in Operational Research, 1: 317-336.", + "Papadimitriou, C.; and Steiglitz, K. 1982. Combinatorial Optimization: Algorithms and Complexity, volume 32. Courier Corporation. ISBN 0-13-152462-3.", + "Petersen, C. C. 
1967. Computational Experience with Variants of the Balas Algorithm Applied to the Selection of R&D Projects. Management Science, 13: 736-750.", + "Qiu, R.; Sun, Z.; and Yang, Y. 2022. DIMES: A Differentiable Meta Solver for Combinatorial Optimization Problems. In Oh, A. H.; Agarwal, A.; Belgrave, D.; and Cho, K., eds., Advances in Neural Information Processing Systems.", + "Qwen. 2025. QwQ-32B: Embracing the Power of Reinforcement Learning. https://qwenlm.github.io/blog/qwq-32b/. Accessed: 2025-03-24.", + "Ramamonjison, R.; Yu, T. T.; Li, R.; Li, H.; Carenini, G.; Ghaddar, B.; He, S.; Mostajabdaveh, M.; Banitalebi-Dehkordi, A.; Zhou, Z.; and Zhang, Y. 2023. NL4Opt Competition: Formulating Optimization Problems Based on Their Natural Language Descriptions. In Neural Information Processing Systems.", + "Ratcliff, M. S. W.; and Bischoff, E. E. 1998. Allowing for weight considerations in container loading. Operations-Research-Spektrum, 20: 65-71.", + "Romera-Paredes, B.; Barekatain, M.; Novikov, A.; Balog, M.; Kumar, M. P.; Dupont, E.; Ruiz, F. J. R.; Ellenberg, J. S.; Wang, P.; Fawzi, O.; Kohli, P.; Fawzi, A.; Grochow, J.; Lodi, A.; Mouret, J.-B.; Ringer, T.; and Yu, T. 2023. Mathematical discoveries from program search with large language models. Nature, 625: 468 - 475.", + "Shinn, N.; Cassano, F.; Labash, B.; Gopinath, A.; Narasimhan, K.; and Yao, S. 2023. Reflexion: language agents with verbal reinforcement learning. In Neural Information Processing Systems.", + "Sun, Z.; and Yang, Y. 2023. DIFUSCO: Graph-based Diffusion Solvers for Combinatorial Optimization. ArXiv, abs/2302.08224.", + "Taillard, E. 1993. Benchmarks for basic scheduling problems. European Journal of Operational Research, 64(2): 278-285.", + "Tang, J.; Zhang, Q.; Li, Y.; Chen, N.; and Li, J. 2025. GraphArena: Evaluating and Improving Large Language Models on Graph Computation. In International Conference on Learning Representations." 
+ ], + "bbox": [ + 83, + 68, + 480, + 888 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Vogiatzis, C.; and Pardalos, P. 2013. Combinatorial optimization in transportation and logistics networks, volume 2-5, 673-722. Germany: Springer. ISBN 9781441979964. Publisher Copyright: $\\text{©}$ Springer Science+Business Media New York 2013. All rights are reserved.", + "xAI. 2025. Grok-3 and the Next Phase of xAI. https://x.ai/news/grok-3. Accessed: 2025-03-24.", + "Xiao, Z.; Zhang, D.; Wu, Y.; Xu, L.; Wang, Y. J.; Han, X.; Fu, X.; Zhong, T.; Zeng, J.; Song, M.; and Chen, G. 2024a. Chain-of-Experts: When LLMs Meet Complex Operations Research Problems. In International Conference on Learning Representations.", + "Xiao, Z.; Zhang, D.; Wu, Y.; Xu, L.; Wang, Y. J.; Han, X.; Fu, X.; Zhong, T.; Zeng, J.; Song, M.; and Chen, G. 2024b. Chain-of-Experts: When LLMs Meet Complex Operations Research Problems. In The Twelfth International Conference on Learning Representations.", + "Yang, Z.; Wang, Y.; Huang, Y.; Guo, Z.; Shi, W.; Han, X.; Feng, L.; Song, L.; Liang, X.; and Tang, J. 2025a. OptiBench Meets ReSocratic: Measure and Improve LLMs for Optimization Modeling. In The Thirteenth International Conference on Learning Representations.", + "Yang, Z.; Wang, Y.; Huang, Y.; Guo, Z.; Shi, W.; Han, X.; Feng, L.; Song, L.; Liang, X.; and Tang, J. 2025b. OptiBench Meets ReSocratic: Measure and Improve LLMs for Optimization Modeling. In The Thirteenth International Conference on Learning Representations.", + "Yao, S.; Zhao, J.; Yu, D.; Du, N.; Shafran, I.; Narasimhan, K.; and Cao, Y. 2022. ReAct: Synergizing Reasoning and Acting in Language Models. ArXiv, abs/2210.03629.", + "Ye, H.; Wang, J.; Cao, Z.; Berto, F.; Hua, C.; Kim, H.; Park, J.; and Song, G. 2024. ReEvo: Large Language Models as Hyper-Heuristics with Reflective Evolution. 
In The Thirty-eighth Annual Conference on Neural Information Processing Systems.", + "Zheng, Z.; Xie, Z.; Wang, Z.; and Hooi, B. 2025. Monte Carlo Tree Search for Comprehensive Exploration in LLM-Based Automatic Heuristic Design. ArXiv, abs/2501.08603." + ], + "bbox": [ + 517, + 68, + 915, + 619 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Problem Description and Scores", + "text_level": 1, + "bbox": [ + 143, + 66, + 418, + 85 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Aircraft landing", + "text_level": 1, + "bbox": [ + 83, + 88, + 212, + 106 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "The problem is to schedule landing times for a set of planes across one or more runways such that each landing occurs within its prescribed time window and all pairwise separation requirements are satisfied; specifically, if plane i lands at or before plane j on the same runway, then the gap between their landing times must be at least the specified separation time provided in the input. In a multiple-runway setting, each plane must also be assigned to one runway, and if planes land on different runways, the separation requirement (which may differ) is applied accordingly. Each plane has an earliest, target, and latest landing time, with penalties incurred proportionally for landing before (earliness) or after (lateness) its target time. The objective is to minimize the total penalty cost while ensuring that no constraints are violated—if any constraint is breached, the solution receives no score.", + "bbox": [ + 81, + 109, + 480, + 316 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/df632d4e73c3be9bc14ac0b13f0075642385e842b8c38ae2fbcc4eed3552d4e3.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodScore
Classical Solver0.5985295365478638
BestOfN0.8057479826999232
Refine0.7503157815146175
FunSearch0.9688863336568327
AIDE0.800637046201484
ReEvo0.9134454710810906
MCTS0.801655240273729
EoH0.8019818529389835
", + "bbox": [ + 84, + 330, + 377, + 473 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Assignment problem", + "text_level": 1, + "bbox": [ + 83, + 529, + 246, + 545 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "The Assignment Problem involves optimally assigning $n$ items to $n$ agents based on a provided $n$ imes $n$ cost matrix, where each entry $extcost\\_matrix[i][j]$ denotes the cost of assigning item $i + 1$ to agent $j + 1$ . The goal is to identify a permutation—each item assigned exactly one agent—that minimizes the total assignment cost. Formally, this is an optimization problem to find a permutation $\\pi$ of agents such that the total cost $\\sum i = 1^n extcost\\_matrix[i - 1][\\pi(i) - 1]$ is minimized. The solution returned includes both the minimal total cost and the corresponding optimal assignments.", + "bbox": [ + 81, + 549, + 480, + 690 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/80e0672adbc3bc370f8f401854e7171a4d6db329894679d039400745e48eb3cb.jpg", + "table_caption": [ + "Table 3: Aircraft landing" + ], + "table_footnote": [], + "table_body": "
MethodScore
Classical Solver1
BestOfN1
Refine1
FunSearch1
AIDE1
ReEvo1
MCTS1
EoH1
", + "bbox": [ + 84, + 703, + 272, + 845 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Assortment problem", + "text_level": 1, + "bbox": [ + 517, + 68, + 679, + 84 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "This optimization problem involves arranging a set of rectangular pieces within available stock rectangles to minimize the overall waste area percentage. Each stock rectangle has a defined area, and each piece—which may be rotated by $90^{\\circ}$ —must be fully contained within a stock without overlapping with other pieces. Additionally, each piece type has specific total minimum and maximum placement limits. You have access to an unlimited number of stocks for each type, but you may use at most two stock types. The objective is to achieve the lowest possible waste area percentage, defined as the ratio of unused area to the total stock area. Solutions must ensure efficient resource utilization while satisfying all geometric and quantity constraints. Any violation of these constraints results in no score.", + "bbox": [ + 514, + 89, + 913, + 282 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/1d7a5b5a0d918fa26138e139ee5f84229a4e985dacfb1f0643e8314d50d8e7f7.jpg", + "table_caption": [ + "Table 4: Assignment problem" + ], + "table_footnote": [], + "table_body": "
MethodScore
Classical Solver0.3222852468406736
BestOfN0.36161788534475603
Refine0.10475936163370339
FunSearch0.3622886282031154
AIDE0.1698107561339298
ReEvo0.24290833308629933
MCTS0.1757439194813797
EoH0.2519474328966603
", + "bbox": [ + 517, + 295, + 820, + 438 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Bin packing - one-dimensional", + "text_level": 1, + "bbox": [ + 516, + 497, + 754, + 513 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "The **one-dimensional bin packing problem** seeks to minimize the number of bins required to pack a given set of items while ensuring that the sum of item sizes within each bin does not exceed the specified bin capacity. Given a test case with an identifier ('id'), a fixed 'bin_capacity', and a list of 'num_items' with their respective sizes ('items'), the objective is to find a packing arrangement that uses the least number of bins. The solution is evaluated based on the total 'num_bins' used, with invalid solutions (e.g., missing or duplicated items, or bins exceeding capacity) incurring a inf heavy penalty. The output must include the number of bins used and a valid assignment of item indices to bins.", + "bbox": [ + 514, + 518, + 913, + 686 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/ee8ea23f151b8868e90fbe35bf9062dedafc6ecd15e07eb3ea88335107dfbc49.jpg", + "table_caption": [ + "Table 5: Assortment problem" + ], + "table_footnote": [], + "table_body": "
MethodScore
Classical Solver0.9628049317089281
BestOfN0.8933315064694979
Refine0.9870315022407082
FunSearch0.9557154223933677
AIDE0.8366913237780297
ReEvo0.9492158360156572
MCTS0.9396436307329097
EoH0.9693475618912389
", + "bbox": [ + 517, + 700, + 810, + 844 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Table 6: Bin packing - one-dimensional", + "bbox": [ + 583, + 852, + 846, + 868 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Capacitated warehouse location", + "text_level": 1, + "bbox": [ + 86, + 68, + 330, + 83 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "The Capacitated Warehouse Location Problem with Splittable Demand aims to determine which warehouses to open and how to allocate portions of customer demands among these warehouses in order to minimize total costs. Given a set of potential warehouse locations, each with a fixed opening cost and capacity limit, and a set of customers with individual demands and associated per-unit assignment costs to each warehouse, the objective is to decide which warehouses to open and how to distribute each customer's demand among these open warehouses. The allocation must satisfy the constraint that the sum of portions assigned to each customer equals their total demand, and that the total demand allocated to any warehouse does not exceed its capacity. The optimization seeks to minimize the sum of fixed warehouse opening costs and the total per-unit assignment costs. However, if any solution violates these constraints (i.e., a customer's demand is not fully satisfied or a warehouse's capacity is exceeded), then no score is provided.", + "bbox": [ + 86, + 85, + 478, + 335 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/34dc9e46e81271756af4bf590f60077ed6c6035553f204712a8b8d812acc3266.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodScore
Classical Solver0.6976400141361688
BestOfN0.0
Refine0.7518838886310322
FunSearch0.7196713948459038
AIDE0.6647355906610447
ReEvo0.6715266955394039
MCTS0.6891495773105485
EoH0.7502493181324346
", + "bbox": [ + 84, + 349, + 374, + 489 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Common due date scheduling", + "text_level": 1, + "bbox": [ + 86, + 545, + 313, + 561 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Given floor, where $h$ is a predefined fraction (defaulting to 0.6). The goal is to determine an optimal job sequence that minimizes the penalty, calculated as follows: for each job, if its completion time $C$ is earlier than $d$ , an earliness penalty of $aimes(d - C)$ is incurred; if $C$ exceeds $d$ , a tardiness penalty of $bimes(C - d)$ is applied; otherwise, no penalty is incurred. The problem requires finding a permutation of job indices (1-based) that minimizes the total penalty. The evaluation metric sums these penalties for a given schedule.", + "bbox": [ + 86, + 564, + 478, + 689 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/62a6f341da3b481b237dc66465655ab43268510b005cdc9b2fac8b1a88709108.jpg", + "table_caption": [ + "Table 7: Capacitated warehouse location" + ], + "table_footnote": [], + "table_body": "
MethodScore
Classical Solver0.9187662046144239
BestOfN0.97731110557282
Refine0.9776844987221935
FunSearch0.976604327923604
AIDE0.6291657473867996
ReEvo0.9743199070415761
MCTS0.8838457578182489
EoH0.9773286503168127
", + "bbox": [ + 84, + 703, + 374, + 844 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Constrained guillotine cutting", + "text_level": 1, + "bbox": [ + 519, + 68, + 750, + 83 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "The problem involves optimizing the guillotine feasible placement of a set of rectangular pieces on a given stock sheet to maximize total value. Each piece type is characterized by its length, width, an upper bound on the number of times it may appear in the final cutting pattern, and an assigned value. Orientation of the pieces is fixed (the edges of the pieces are parallel to the edges of the sheet). The task is to select and place pieces such that each lies completely within the boundaries of the stock sheet, no two pieces overlap, and the number of pieces of any type does not exceed its specified maximum. A set of placements is considered guillotine feasible if there exists at least one straight cut (vertical or horizontal) that does not slice through any rectangle, and the property holds recursively on the resulting subregions. Empty regions or regions exactly matching a placed piece are considered valid. The objective is to maximize the sum of the values of the placed pieces; however, if any spatial or count constraint is violated, the solution is deemed invalid. The output is defined as a dictionary reporting the total value and a list of placements, with each placement specified by the piece type index, x and y coordinates, placed dimensions, and orientation flag.", + "bbox": [ + 519, + 85, + 911, + 392 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/97261b48dee41841c44d566a07844b6ea9c1508689f3c76c552cbb394dd6373b.jpg", + "table_caption": [ + "Table 8: Common due date scheduling" + ], + "table_footnote": [], + "table_body": "
MethodScore
Classical Solver0.7844900098230463
BestOfN0.0
Refine0.981513704843915
FunSearch0.956424099109148
AIDE0.9102922923098641
ReEvo0.0
MCTS0.0
EoH0.0
", + "bbox": [ + 519, + 405, + 808, + 546 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Table 9: Constrained guillotine cutting", + "bbox": [ + 589, + 556, + 841, + 571 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Constrained non-guillotine cutting", + "text_level": 1, + "bbox": [ + 519, + 601, + 784, + 617 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "The constrained non-guillotine cutting problem involves optimally arranging rectangular pieces onto a single rectangular stock with fixed dimensions (stock_length and stock_width). Each piece type has defined length, width, value, and minimum and maximum usage constraints. The optimization goal is to maximize the total value of all placed pieces, subject to constraints that each piece is entirely within stock boundaries, pieces do not overlap, each piece type's usage falls within its specified [min, max] range, and pieces may optionally be rotated by $90^{\\circ}$ . The solution returns a set of placements indicating piece type, bottom-left coordinates $(\\mathrm{x},\\mathrm{y})$ , and rotation status. If any constraint is violated, the solution receives no score.", + "bbox": [ + 519, + 619, + 911, + 799 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Container loading", + "text_level": 1, + "bbox": [ + 519, + 813, + 658, + 829 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Solves a container loading problem: Given a 3D container of specified dimensions and multiple box types—each defined by dimensions, orientation constraints, and available quantity—the goal is to optimally place these boxes within", + "bbox": [ + 519, + 832, + 911, + 888 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/4cd0765a6348b5e88b1231416b40b1a5548d74c49891fb0f535b3a488639f904.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodScore
Classical Solver0.5585076432266227
BestOfN0.8760613343780126
Refine0.99138085452391
FunSearch0.9623447685846964
AIDE0.8555320134962818
ReEvo0.9264764236682984
MCTS0.7944732650186651
EoH0.9106930512513293
", + "bbox": [ + 84, + 65, + 374, + 205 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "the container to maximize the volume utilization ratio. Each box placement must respect orientation constraints (vertical alignment flags), fit entirely within container boundaries, and avoid overlaps. The solution returns precise coordinates and orientations for each box placement, quantified by a volume utilization score calculated as the total volume of placed boxes divided by the container volume. Invalid placements result in a score of 0.0.", + "bbox": [ + 81, + 265, + 478, + 377 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/0978960c07d8958bf634c685b86a78f0a537e3e3f819cabf1c8daefcc47b3df5.jpg", + "table_caption": [ + "Table 10: Constrained non-guillotine cutting" + ], + "table_footnote": [], + "table_body": "
MethodScore
Classical Solver0.09700224776623062
BestOfN0.8163545342051534
Refine0.18895711345505883
FunSearch0.23070987019597894
AIDE0.7592850816892841
ReEvo0.716081346719743
MCTS0.5451472798828618
EoH0.7795824394970114
", + "bbox": [ + 84, + 393, + 383, + 536 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Container loading with weight restrictions", + "text_level": 1, + "bbox": [ + 83, + 609, + 410, + 626 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The Container Loading with Weight Restrictions problem aims to maximize the utilization of a container's volume by selecting and strategically placing boxes inside it. Given a container with specified dimensions (length, width, height) and multiple types of boxes, each characterized by their dimensions, quantities, weights, and load-bearing constraints, the optimization goal is to determine the placement and orientation of these boxes (with each box allowed three possible orientations) that maximizes the ratio of total occupied box volume to container volume. The solution must strictly adhere to spatial constraints (boxes must fit entirely within the container without overlapping), load-bearing constraints (boxes must support the weight of boxes stacked above them according to given limits), and orientation restrictions. The optimization quality is evaluated by the achieved utilization metric, defined as the total volume of successfully placed boxes divided by the container volume; if any constraint is violated, the utilization score is zero.", + "bbox": [ + 81, + 638, + 480, + 888 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/428824232b4a92188e459544fa6dda1bd13bf81b70ba1a5b7ec4cc83d9e076f1.jpg", + "table_caption": [ + "Table 11: Container loading" + ], + "table_footnote": [], + "table_body": "
MethodScore
Classical Solver0.009225308452359507
BestOfN0.13669723873453465
Refine0.07941319051933145
FunSearch0.2919729304847129
AIDE0.12860429344072807
ReEvo0.1420943670465572
MCTS0.04806324649022297
EoH0.051972410039456414
", + "bbox": [ + 519, + 65, + 826, + 207 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Corporate structuring", + "text_level": 1, + "bbox": [ + 516, + 263, + 692, + 281 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Given N countries, each defined by: $\\bullet$ a tax code (1: Exemption, 2: Deduction, 3: Source-by-source Pooling, 4: Worldwide Pooling), $\\bullet$ a foreign income tax rate, $\\bullet$ a domestic income tax rate, and $\\bullet$ a profit, and a withholding tax matrix W (where W[i][j] is the rate on dividends from country i to j), construct a valid tree-structured corporate hierarchy (directed, acyclic, connected) rooted at a designated target (whose parent is 0) such that every country with profit $>0$ appears exactly once.", + "bbox": [ + 514, + 292, + 913, + 419 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "For each country i, define S as the set of nodes in its subtree (note the subtree includes itself) with a positive profit. Also consider the set of child nodes C_i. If i is not a root country but in the tree, it will send all its income (after tax) to its parent j. Denote this amount as F[i][j]. Assume the total income after domestic tax and withholding tax for country i is: domestic_iincome_i * (1 - domestic_rate_i) + (\\sum_{k \\in C_i} F[k][i] * (1 - W[k][i])) The extra foreign tax under different tax code is defined as follows: 1. No extra tax. 2. Foreign income tax from the child nodes: foreign_iincome_rate_i * (\\sum_{k \\in C_i} F[k][i] * (1 - W[k][i])) 3. Foreign income tax computed from the source nodes in each child's subtree: $\\sum_{k \\in C_i} \\max(0, F[k][i] * (1 - W[k][i]) - (1 - foreign_iincome_rate_i) * (\\sum_{s \\in S_k} domestic_iincome_s))$ 4. 
Foreign income tax from all source nodes in the subtree, excluding itself: $\\max(0, \\sum_{k \\in C_i} F[k][i] * (1 - W[k][i]) - (1 - foreign_iincome_rate_i) * (\\sum_{s \\in S_i} domestic_iincome_s) - domestic_iincome_i)$", + "bbox": [ + 514, + 421, + 913, + 678 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/72f88d648949a3f5f81a832559bd20ad7ec54bf2d97f4b2400f475d88ef4a36d.jpg", + "table_caption": [ + "Table 12: Container loading with weight restrictions" + ], + "table_footnote": [], + "table_body": "
MethodScore
Classical Solver0.9450572839481785
BestOfN0.9450572839481785
Refine0.9726337326585759
FunSearch0.777775452943618
AIDE0.9450572839481785
ReEvo0.5014939649568603
MCTS0.9844897288603699
EoH0.9431107030735252
", + "bbox": [ + 519, + 696, + 808, + 837 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Table 13: Corporate structuring", + "bbox": [ + 609, + 848, + 818, + 863 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Crew scheduling", + "text_level": 1, + "bbox": [ + 86, + 68, + 215, + 83 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "The Crew Scheduling Problem involves assigning each task—with defined start and finish times—to exactly one crew, aiming to minimize the total transition costs between consecutive tasks. Each crew's schedule must satisfy three constraints: tasks within a crew must not overlap; valid transitions (with associated costs) must exist between every consecutive pair of tasks; and the crew's total duty time (from the start of the first task to the finish of the last) cannot exceed a specified time limit. Additionally, no more than $\\mathrm{K}$ crews can be used to cover all tasks. Solutions violating any of these constraints are considered infeasible and receive no score. The optimization objective is therefore to determine assignments of tasks to no more than $\\mathrm{K}$ crews that minimize the sum of transition costs while strictly adhering to all constraints, yielding a feasible and cost-effective scheduling solution.", + "bbox": [ + 86, + 87, + 478, + 295 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/7544e093b87cf336b81774acd3b9ab621799e0e15a8d0ba485f081d12ce12f7e.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodScore
Classical Solver0.45498811952880935
BestOfN0.4483461488661745
Refine0.6690343590115082
FunSearch0.5536756258756895
AIDE0.44095505708697136
ReEvo0.45225267224663634
MCTS0.4446817469828879
EoH0.5864457661923881
", + "bbox": [ + 84, + 308, + 383, + 449 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Equitable partitioning problem", + "text_level": 1, + "bbox": [ + 86, + 503, + 326, + 520 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "The task is to partition a set of individuals—each characterized by multiple binary attributes—into exactly 8 groups such that the distribution of attribute values is as balanced as possible across these groups. For each attribute, count the number of individuals with a '1' in each group. The optimization objective is to minimize the total imbalance, which is defined as follows: for each attribute, calculate the absolute differences between the count in each group and the mean count across all groups, take the average of these differences, and then sum these averages over all attributes. The goal is to determine a group assignment for each individual that achieves the lowest possible total imbalance score.", + "bbox": [ + 86, + 523, + 478, + 689 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/3c12705cc741d7f0c767659c343b383b159ea76d63677ef4b52e1b95e57153c7.jpg", + "table_caption": [ + "Table 14: Crew scheduling" + ], + "table_footnote": [], + "table_body": "
MethodScore
Classical Solver1.0
BestOfN1.0
Refine1.0
FunSearch1.0
AIDE0.7777777777777778
ReEvo0.7777777777777778
MCTS1.0
EoH1.0
", + "bbox": [ + 84, + 703, + 374, + 843 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Euclidean Steiner problem", + "text_level": 1, + "bbox": [ + 519, + 68, + 725, + 83 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Given a set of 2D points (terminals), the goal of the Euclidean Steiner Problem is to compute a tree connecting all terminals with minimum total length. The total length is measured as the sum of Euclidean distances (where the Euclidean distance between two points $(x1, y1)$ and $(x2, y2)$ is $sqrt((x1 - x2)^2 + (y1 - y2)^2)$ ). Unlike a Minimum Spanning Tree (MST) computed solely on the given terminals, a Steiner tree may introduce extra points, called Steiner points, to reduce the overall length. In this formulation, it is assumed that the final candidate tree's total length is given by the MST computed on the union of the original terminals and the reported Steiner points. A lower ratio (candidate_tree_length/MST ORIGINAL_length) indicates a better solution.", + "bbox": [ + 519, + 85, + 913, + 279 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/287382a9ba0debd85980e8bc0ba5ea3829b4b560c6945e4ecea7dc2d6c560da7.jpg", + "table_caption": [ + "Table 15: Equitable partitioning problem" + ], + "table_footnote": [], + "table_body": "
MethodScore
Classical Solver0.9779703480188361
BestOfN0.6291391910535526
Refine0.688025642110573
FunSearch0.6968176110449371
AIDE0.04483890014026932
ReEvo0.5469067768233761
MCTS0.43093954323065975
EoH0.5917817000598826
", + "bbox": [ + 519, + 292, + 816, + 431 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Flow shop scheduling", + "text_level": 1, + "bbox": [ + 519, + 489, + 684, + 505 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Given $n$ jobs and $m$ machines, the goal of the flow shop scheduling problem is to determine the optimal job sequence that minimizes the makespan, i.e., the total time required to complete all jobs on all machines. Each job follows the same machine order, and the processing times are specified in an $n$ imes $m$ matrix. The output is a permutation of job indices representing the processing order. If the constraints are not satisfied (e.g., invalid job sequencing), the solution receives no score. The objective is to optimize the makespan using the classical flow shop recurrence.", + "bbox": [ + 519, + 508, + 911, + 647 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/0aa1e9e9a4cf46fb810595679d6adbf788c8a69f1e96077a330f035fb42abeff.jpg", + "table_caption": [ + "Table 16: Euclidean Steiner problem" + ], + "table_footnote": [], + "table_body": "
MethodScore
Classical Solver0.9222700445897257
BestOfN0.874217493803887
Refine0.8463439348165006
FunSearch0.8537338049420798
AIDE0.9144895115672386
ReEvo0.8424667927400846
MCTS0.9242143967817102
EoH0.940154419652199
", + "bbox": [ + 519, + 660, + 808, + 801 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Table 17: Flow shop scheduling", + "bbox": [ + 611, + 811, + 818, + 825 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Generalised assignment problem", + "text_level": 1, + "bbox": [ + 519, + 854, + 769, + 871 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Generalized Assignment Problem (GAP)", + "bbox": [ + 519, + 875, + 787, + 888 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "The Generalized Assignment Problem (GAP) involves assigning $n$ jobs to $m$ agents such that each job is assigned to exactly one agent, and the resource consumption for each agent does not exceed its capacity. The objective is to optimize the total cost based on the problem type. When formulated as a maximization problem, the goal is to maximize the total cost; when formulated as a minimization problem, the goal is to minimize the total cost. Given a cost matrix (representing the cost of assigning jobs to agents), a consumption matrix (indicating the resource usage per assignment), and capacities (the resource limits for each agent), the task is to find a valid assignment that meets the capacity constraints while optimizing the total cost as specified by the problem indicator.", + "bbox": [ + 81, + 68, + 480, + 263 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/4ae1a2d4fa564cca47a694e279dc0d48198b1277546173bf053c33584be0aa3d.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodScore
Classical Solver1.000509368510793
BestOfN1.000152715871272
Refine0.9997973477884884
FunSearch0.9360910283983461
AIDE1.000152715871272
ReEvo1.0002083856508814
MCTS1.0001026538510593
EoH0.9793902133221158
", + "bbox": [ + 84, + 272, + 375, + 414 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Graph colouring", + "text_level": 1, + "bbox": [ + 81, + 465, + 217, + 483 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Given a graph in DIMACS format with vertices, edges, and an adjacency list, the goal is to assign a positive integer color (1..n) to each vertex while ensuring that no two adjacent vertices share the same color. The objective is to minimize the number of distinct colors used. If any two adjacent vertices have the same color, the solution is invalid and receives no score. Otherwise, the score is equal to the number of distinct colors used, with a lower score being better.", + "bbox": [ + 81, + 484, + 480, + 597 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/fd302b72b8f21038db0e5d0441bbdf8d9a3865c61975063a5326b4986fc88f41.jpg", + "table_caption": [ + "Table 18: Generalised assignment problem" + ], + "table_footnote": [], + "table_body": "
MethodScore
Classical Solver0.8679121232535366
BestOfN0.7992347794550977
Refine0.9237393162393163
FunSearch0.8993461774953884
AIDE0.7992347794550977
ReEvo0.8119485901255648
MCTS0.8529682767415909
EoH0.804175457505431
", + "bbox": [ + 84, + 608, + 375, + 750 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Hybrid Reentrant Shop Scheduling", + "text_level": 1, + "bbox": [ + 83, + 800, + 359, + 816 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "The problem is a Hybrid Reentrant Shop Scheduling problem where each of $n$ jobs must sequentially undergo three operations: an initialization phase on one of $m$ identical primary machines, a setup phase on a single remote server, and a final main processing phase on the same primary machine used", + "bbox": [ + 81, + 818, + 480, + 891 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "for initialization. Jobs are initialized in a fixed natural order using list scheduling, while the setup phase is processed on the remote server in an order specified by a permutation decision variable. Additionally, each job is assigned to a primary machine for main processing via a batch_assignment, and on each machine, jobs are processed in natural (initialization) order. The objective is to minimize the makespan, defined as the time when the last job completes its main processing, while ensuring that no machine (primary or server) processes more than one job simultaneously and that all operational precedence constraints are satisfied.", + "bbox": [ + 514, + 68, + 915, + 223 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/5819c6cc93559036bfc18a4f34659127bd440c1958315f284088a1ce262e7485.jpg", + "table_caption": [ + "Table 19: Graph colouring" + ], + "table_footnote": [], + "table_body": "
MethodScore
Classical Solver0.9057971372430776
BestOfN0.9872450518587456
Refine0.9966666343001128
FunSearch1.0001780484032463
AIDE0.7457203947696327
ReEvo0.9820554515396009
MCTS0.9961239866411462
EoH0.9841146688046011
", + "bbox": [ + 519, + 234, + 808, + 377 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Job shop scheduling", + "text_level": 1, + "bbox": [ + 516, + 431, + 676, + 446 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "The job shop scheduling problem requires assigning nonnegative integer start times to a set of operations, structured into multiple jobs, each composed of sequential operations. Each operation is processed on a specific machine for a given processing time. The optimization goal is to minimize the makespan, defined as the maximum completion time across all jobs. Constraints include (i) sequential processing of operations within each job, meaning each operation cannot start before its preceding operation finishes, and (ii) nonoverlapping scheduling of operations on the same machine. If these constraints are violated, the solution receives no score.", + "bbox": [ + 514, + 450, + 915, + 604 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/428741c0122b2692348448a5b913a1bde056066776cbc270958894b812f42727.jpg", + "table_caption": [ + "Table 20: Hybrid Reentrant Shop Scheduling" + ], + "table_footnote": [], + "table_body": "
MethodScore
Classical Solver0.8202016779421567
BestOfN0.7060712883377539
Refine0.7696287350855926
FunSearch0.8192815531664928
AIDE0.6498336005961379
ReEvo0.7982807066317813
MCTS0.7293663754433233
EoH0.7770594374788831
", + "bbox": [ + 519, + 628, + 808, + 768 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Table 21: Job shop scheduling", + "bbox": [ + 612, + 779, + 816, + 795 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "MIS", + "text_level": 1, + "bbox": [ + 516, + 825, + 555, + 840 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "The Maximum Independent Set (MIS) problem is a fundamental NP-hard optimization problem in graph theory. Given an undirected graph $\\mathrm{G} = (\\mathrm{V},\\mathrm{E})$ , where $\\mathrm{V}$ is a set of vertices", + "bbox": [ + 514, + 845, + 913, + 890 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "and $\\mathbf{E}$ is a set of edges, the goal is to find the largest subset $S$ in $V$ such that no two vertices in $S$ are adjacent (i.e., connected by an edge).", + "bbox": [ + 81, + 68, + 482, + 112 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/bbbdc5ff70a880fb3b8dabef315652e54d5e5b9e7c636cf64ebd852fd9bdf4fb.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodScore
Classical Solver0.986
BestOfN0.8461150261004076
Refine0.9078324503859446
FunSearch0.9002038932676987
AIDE0.8425484500134511
ReEvo0.8342509729450779
MCTS0.8433127163177989
EoH0.8763795109859694
", + "bbox": [ + 84, + 125, + 377, + 267 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Multi-Demand Multidimensional Knapsack problem", + "text_level": 1, + "bbox": [ + 81, + 321, + 421, + 354 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "The Multi-Demand Multidimensional Knapsack Problem (MDMKP) is a binary optimization problem that extends the classical MKP by incorporating both upper-bound $(<=)$ and lower-bound $(>=)$ constraints. Formally, given n decision variables $x_{j} \\in \\{0,1\\}$ , the goal is to maximize $\\sum_{j=1}^{n} c_{j} x_{j}$ subject to $\\sum_{j=1}^{n} a_{ij} x_{j} \\leq b_{i} f o r i = 1, \\ldots, m$ and $\\sum_{j=1}^{n} a_{ij} x_{j} \\geq b_{i} f o r i = m+1, \\ldots, m+q$ . Instances are generated from standard MKP problems by varying the number of $>=$ constraints (with q taking values 1, m/2, or m) and by using two types of cost coefficients (positive and mixed), thereby producing six distinct variants per base instance. This formulation enables rigorous evaluation of algorithms in contexts where both resource limits and demand fulfillment must be simultaneously addressed.", + "bbox": [ + 81, + 358, + 482, + 559 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/48780e281cba669ba8e060d3cad28d70ccc1e98e257b2bbacbb65c00b55ff8d6.jpg", + "table_caption": [ + "Table 22: MIS" + ], + "table_footnote": [], + "table_body": "
MethodScore
Classical Solver0.8957822313136857
BestOfN0.7144432351611377
Refine0.8913402342031996
FunSearch0.8354799525874899
AIDE0.8805432369541204
ReEvo0.8920786376031828
MCTS0.8994648109682947
EoH0.9082814870567889
", + "bbox": [ + 84, + 571, + 375, + 714 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Multidimensional knapsack problem", + "text_level": 1, + "bbox": [ + 83, + 784, + 369, + 801 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "This problem is a multidimensional knapsack optimization where the objective is to maximize the total profit by selecting decision variables, each associated with a profit and resource consumption across multiple constraints. The decision variables must be chosen such that the sum of resource usage for each constraint does not exceed its corresponding capacity.", + "bbox": [ + 81, + 804, + 480, + 891 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Importantly, if any constraint is violated—that is, if the resource consumption for any constraint exceeds its allowed capacity—the solution is deemed infeasible and earns no score. The challenge lies in identifying the optimal combination of items that yields the highest total profit while strictly satisfying all resource constraints.", + "bbox": [ + 514, + 68, + 915, + 152 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/6f0f26aba14980dd6a4df69a7a8dfef8a18f1ea1fd60bff6626b4965f903c7cb.jpg", + "table_caption": [ + "Table 23: Multi-Demand Multidimensional Knapsack problem" + ], + "table_footnote": [], + "table_body": "
MethodScore
Classical Solver0.9903523477639424
BestOfN0.9401685100749627
Refine0.9947726903727786
FunSearch0.9773347714972982
AIDE0.925117898068383
ReEvo1.0018885951740353
MCTS1.0057751617808324
EoH1.0010112897238341
", + "bbox": [ + 517, + 165, + 810, + 308 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Open shop scheduling", + "text_level": 1, + "bbox": [ + 514, + 361, + 691, + 378 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "The Open Shop Scheduling Problem involves scheduling a set of jobs across a set of machines with the goal of minimizing the total completion time (makespan). Each job consists of several operations, where each operation must be processed on a specific machine for a given duration. Unlike other scheduling problems, the Open Shop variant has no predetermined order for processing the operations of a job—operations can be scheduled in any order, but a job can only be processed on one machine at a time, and a machine can only process one job at a time. This creates a complex combinatorial optimization challenge where the scheduler must determine both the sequence of operations for each job and the timing of each operation to minimize the overall completion time while ensuring no resource conflicts.", + "bbox": [ + 514, + 381, + 915, + 575 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/c930fd160520eca9fcd102ed9b95b22f35ef4831d26d034c735305a1a74f8924.jpg", + "table_caption": [ + "Table 24: Multidimensional knapsack problem" + ], + "table_footnote": [], + "table_body": "
MethodScore
Classical Solver0.7851209868863173
BestOfN0.9017764948703829
Refine0.9930284498507208
FunSearch0.9930284498507208
AIDE0.9156437907474381
ReEvo0.9825099803205837
MCTS0.8960699709846601
EoH0.9930284498507208
", + "bbox": [ + 517, + 589, + 810, + 731 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Table 25: Open shop scheduling", + "bbox": [ + 607, + 739, + 821, + 756 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Packing unequal circles", + "text_level": 1, + "bbox": [ + 514, + 785, + 702, + 801 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "The problem involves packing a subset of unequal circles into a fixed circular container with radius R_0 and center at the origin, where each circle i has a given radius R_i (sorted in non-decreasing order) and is associated with a binary decision variable alpha_i indicating whether it is packed. The goal is to maximize the number of circles packed—that is,", + "bbox": [ + 514, + 804, + 915, + 891 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "maximize $\\sum_{i=1}^{n} \\alpha_{i}$ —subject to two sets of nonlinear constraints: (1) each packed circle must lie entirely within the container, which is enforced by ensuring that the distance from its center to the container's center plus its radius does not exceed R_0; and (2) any two packed circles must not overlap, meaning the distance between their centers must be at least the sum of their radii.", + "bbox": [ + 81, + 66, + 480, + 167 + ], + "page_idx": 16 + }, + { + "type": "table", + "img_path": "images/1088a9dabd21f9bc4a7c6245c1d52571e7ad4220d2bd766e4e5b0fc595d60be6.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodScore
Classical Solver0.9075757575757577
BestOfN0.8939393939393939
Refine0.9803030303030303
FunSearch0.9719696969696969
AIDE0.8825757575757576
ReEvo0.8825757575757576
MCTS0.9522727272727273
EoH0.8825757575757576
", + "bbox": [ + 84, + 175, + 377, + 318 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Packing unequal circles area", + "text_level": 1, + "bbox": [ + 81, + 369, + 307, + 386 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "The problem involves packing a subset of unequal circles into a fixed circular container with radius $\\mathrm{R\\_0}$ and center at the origin, where each circle $\\mathrm{i}$ has a given radius $\\mathrm{R\\_i}$ (sorted in non-decreasing order) and is associated with a binary decision variable alpha_i indicating whether it is packed. The goal is to maximize the total area of all circles packed—that is, maximize $\\sum_{i=1}^{n} \\alpha_i * p_i * R_i^2$ —subject to two sets of nonlinear constraints: (1) each packed circle must lie entirely within the container, which is enforced by ensuring that the distance from its center to the container's center plus its radius does not exceed $\\mathrm{R\\_0}$ ; and (2) any two packed circles must not overlap, meaning the distance between their centers must be at least the sum of their radii.", + "bbox": [ + 81, + 388, + 482, + 570 + ], + "page_idx": 16 + }, + { + "type": "table", + "img_path": "images/afd684331587462588791f64a1fc570718250bb0ea8219f611f7236ac0250ac0.jpg", + "table_caption": [ + "Table 26: Packing unequal circles" + ], + "table_footnote": [], + "table_body": "
MethodScore
Classical Solver0.8767896840297265
BestOfN0.9923476599194556
Refine1.0226692239919217
FunSearch1.0404725950195108
AIDE0.5972138868724692
ReEvo0.9101821460280035
MCTS0.9617483396206504
EoH1.0056059827170811
", + "bbox": [ + 84, + 578, + 377, + 720 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Packing unequal rectangles and squares", + "text_level": 1, + "bbox": [ + 81, + 773, + 393, + 789 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "We are given a set of n unequal rectangles (or squares), each with specified dimensions, and a fixed circular container of radius R centered at the origin. The problem is to decide which rectangles to pack and where to position them—by choosing binary selection variables and continuous center coordinates—so that every packed rectangle is entirely contained within the circle and no two packed rectangles overlap.", + "bbox": [ + 81, + 791, + 480, + 891 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "For each rectangle, the four corners must lie inside the circle, and if an item is not packed it is forced to a dummy position. The objective is to maximize the number of packed items, i.e., maximize $\\sum_{i=1}^{n} \\text{alpha}_i$ (or a related sum when each alpha_i is binary). Note that the rotation of the rectangular (by 90 degrees) is sometimes allowed and your algorithm should take that into account.", + "bbox": [ + 514, + 68, + 915, + 167 + ], + "page_idx": 16 + }, + { + "type": "table", + "img_path": "images/16ae26d8657a16b70285bc50a759134f3021cbfb2790d4037a24eac8b541466d.jpg", + "table_caption": [ + "Table 27: Packing unequal circles area" + ], + "table_footnote": [], + "table_body": "
MethodScore
Classical Solver0.9134625513058007
BestOfN0.8337025039542202
Refine0.932172162950195
FunSearch0.9228828411608733
AIDE0.7950708457573447
ReEvo0.77954425754769
MCTS0.8028450160315149
EoH0.9228828411608733
", + "bbox": [ + 517, + 176, + 810, + 320 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Packing unequal rectangles and squares area", + "text_level": 1, + "bbox": [ + 514, + 375, + 864, + 392 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "We consider the problem of selecting and placing a subset of $n$ unequal rectangles (or squares) into a fixed-size circular container of radius $R$ so as to maximize the total area of the packed items. Each item $i$ has given dimensions $L_{i}$ and $W_{i}$ (with $L_{i} = W_{i}$ for squares) and an associated area $L_{i}W_{i}$ . The decision variables include a binary indicator $\\alpha_{i}$ for whether item $i$ is packed and continuous variables $(x_{i},y_{i})$ for the placement of its center, along with a rotation angle $heta_{i}$ when $90^{\\circ}$ rotations are allowed. The formulation enforces that for every packed item, all four of its rotated corners must lie within the circle, and that no two packed items overlap; if an item is not packed, it is fixed at a dummy position.", + "bbox": [ + 514, + 393, + 915, + 564 + ], + "page_idx": 16 + }, + { + "type": "table", + "img_path": "images/9bad5f71e7bc3d655bcfadd2bb30a745c49b42f1ff44dc5f582d7ab3bc36ef68.jpg", + "table_caption": [ + "Table 28: Packing unequal rectangles and squares" + ], + "table_footnote": [], + "table_body": "
MethodScore
Classical Solver0.8893527400499813
BestOfN0.9536806816195774
Refine1.0513451711752306
FunSearch1.0839011538182066
AIDE0.8100272732450019
ReEvo0.9435059488868657
MCTS0.995946490673633
EoH0.9566331174271511
", + "bbox": [ + 517, + 575, + 810, + 718 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Table 29: Packing unequal rectangles and squares area", + "bbox": [ + 532, + 726, + 893, + 742 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Resource constrained shortest path", + "text_level": 1, + "bbox": [ + 514, + 771, + 790, + 787 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "This problem involves finding the shortest path from vertex 1 to vertex $n$ in a directed graph while satisfying resource constraints. Specifically, each vertex and arc has associated resource consumptions, and the cumulative consumption for each resource must fall within the provided lowerBounds and upperBounds. The input includes the number of vertices (n), arcs (m), resource types (K), resource consumption at", + "bbox": [ + 514, + 790, + 913, + 891 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "each vertex, and a graph represented as a mapping from vertices to lists of arcs (each arc being a tuple of end vertex, cost, and arc resource consumptions). The optimization objective is to minimize the total arc cost of the path, with the condition that the path is valid—meaning it starts at vertex 1, ends at vertex $n$ , follows defined transitions in the graph, and respects all resource bounds; if any of these constraints are not met, the solution receives no score.", + "bbox": [ + 86, + 68, + 478, + 179 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/95920fac789b7820ecb730f44d27964ef594dfc7be07bcbcd6188d00f543279b.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodScore
Classical Solver0.7508899529136809
BestOfN0.7508899529136808
Refine0.7284494767232047
FunSearch0.7508899529136808
AIDE0.7508899529136808
ReEvo0.7508899529136808
MCTS0.7284494767232047
EoH0.7508899529136808
", + "bbox": [ + 84, + 193, + 374, + 334 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Set covering", + "text_level": 1, + "bbox": [ + 86, + 398, + 181, + 415 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Set Covering Problem. The goal is to select a subset of columns, each with an associated cost, such that every row is covered by at least one chosen column. For each row, the available covering columns are provided (as 1-indexed numbers). The objective is to minimize the total cost of the selected columns, and if even one row is left uncovered, then no score is awarded.", + "bbox": [ + 86, + 422, + 478, + 518 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/c7678567bee486687e7a3bfc0696818b6449fe61f6ca4dfa433cb8b9cc2d7d41.jpg", + "table_caption": [ + "Table 30: Resource constrained shortest path" + ], + "table_footnote": [], + "table_body": "
MethodScore
Classical Solver0.8883906244045974
BestOfN0.8213286754887226
Refine0.9056204467263304
FunSearch0.8887733963981322
AIDE0.8639998129016312
ReEvo0.9360686599803572
MCTS0.8672991644233662
EoH0.8843920544743958
", + "bbox": [ + 84, + 534, + 374, + 675 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Set partitioning", + "text_level": 1, + "bbox": [ + 86, + 739, + 207, + 756 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "This problem involves solving a set partitioning instance where the goal is to choose a subset of columns such that each row is covered exactly once while minimizing the total cost. Each column is associated with a cost and covers a specific set of rows. The optimization problem is defined by selecting columns from a given set so that every row is covered precisely once, and the sum of the selected columns' costs is minimized. If the solution fails to cover every row exactly once, then no score is awarded.", + "bbox": [ + 86, + 762, + 478, + 888 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/4f321ae8f77c6ec6e4c0c37679e475311aa04ba6f9ffae5b1424d65d1be924bb.jpg", + "table_caption": [ + "Table 31: Set covering" + ], + "table_footnote": [], + "table_body": "
MethodScore
Classical Solver0.9996401983661346
BestOfN0.8991338255841825
Refine0.7999991398515384
FunSearch0.83333333333333334
AIDE0.9
ReEvo0.8991338255841825
MCTS0.8647769492523454
EoH0.9324671589175159
", + "bbox": [ + 519, + 66, + 807, + 205 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "TSP", + "text_level": 1, + "bbox": [ + 519, + 256, + 553, + 270 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "The Traveling Salesman Problem (TSP) is a classic combinatorial optimization problem where, given a set of cities with known pairwise distances, the objective is to find the shortest possible tour that visits each city exactly once and returns to the starting city. More formally, given a complete graph $\\mathrm{G} = (\\mathrm{V},\\mathrm{E})$ with vertices $\\mathrm{V}$ representing cities and edges $\\mathrm{E}$ with weights representing distances, we seek to find a Hamiltonian cycle (a closed path visiting each vertex exactly once) of minimum total weight.", + "bbox": [ + 519, + 273, + 913, + 400 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/d4d0b3bfe3fa404ab607b2ba34a1b69a65b540411a4cc6457131ef71821b803f.jpg", + "table_caption": [ + "Table 32: Set partitioning" + ], + "table_footnote": [], + "table_body": "
MethodScore
Classical Solver0.986
BestOfN0.8590303340408165
Refine0.9399577646813952
FunSearch0.9016741050908584
AIDE0.7710495444635409
ReEvo0.8488918718349553
MCTS0.5961113158302597
EoH0.7935463156320405
", + "bbox": [ + 519, + 412, + 807, + 551 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Table 33: TSP", + "bbox": [ + 668, + 564, + 761, + 575 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Uncapacitated warehouse location", + "text_level": 1, + "bbox": [ + 519, + 604, + 781, + 619 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "The Uncapacitated Warehouse Location Problem aims to determine which warehouses to open and how to assign each customer entirely to an open warehouse in order to minimize the total cost. Given a set of potential warehouse locations, each with a fixed opening cost, and a set of customers, each with an associated assignment cost for being served by each warehouse, the objective is to select a subset of warehouses to open and assign every customer completely to one of these open warehouses. The optimization minimizes the sum of fixed warehouse opening costs and the customer assignment costs. Each customer must be assigned to exactly one warehouse; if any customer is left unassigned or assigned to more than one warehouse, the solution is considered infeasible.", + "bbox": [ + 519, + 623, + 913, + 801 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Unconstrained guillotine cutting", + "text_level": 1, + "bbox": [ + 519, + 814, + 766, + 830 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "The unconstrained guillotine cutting problem involves selecting and placing a subset of available pieces within a fixed stock rectangle to maximize the total value of the placed pieces. Each piece, defined by its length, width, and value,", + "bbox": [ + 519, + 833, + 913, + 888 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/1c6900dbfa02950750e85d772fc63df86f44ea725e9af1c1346480142cafa5dd.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodScore
Classical Solver0.9968157833494645
BestOfN0.98931916166557
Refine1.00000000000002045
FunSearch0.9978398298062331
AIDE0.9994999857664043
ReEvo0.998083746641369
MCTS0.9951604598088827
EoH0.87499999999978142
", + "bbox": [ + 84, + 65, + 374, + 205 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "may be optionally rotated $90^{\\circ}$ if allowed and used at most once. The challenge is to determine both the selection and the positioning of these pieces such that they do not overlap and lie entirely within the stock's boundaries. This optimization problem formalizes the decision variables as the x and y coordinates for the bottom-left placement of each piece and, if rotation is allowed, a binary variable indicating its orientation, while the objective function is to maximize the sum of the values of the pieces successfully placed within the stock.", + "bbox": [ + 86, + 258, + 478, + 397 + ], + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/a006c1e5f9cdf229c2d31b9f979e80f46279a28e35df50ba90375221cbe29900.jpg", + "table_caption": [ + "Table 34: Uncapacitated warehouse location" + ], + "table_footnote": [], + "table_body": "
MethodScore
Classical Solver0.9725381370960237
BestOfN0.8701275303357732
Refine0.9618177725501762
FunSearch0.9646369625362231
AIDE0.8512970128354943
ReEvo0.9828452190272524
MCTS0.8628525304460628
EoH0.9649480933563296
", + "bbox": [ + 84, + 407, + 375, + 550 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Vehicle routing: period routing", + "text_level": 1, + "bbox": [ + 86, + 607, + 323, + 622 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "The Period Vehicle Routing Problem requires planning delivery routes over a multi-day planning period.", + "bbox": [ + 86, + 625, + 478, + 652 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Each customer (other than the depot, whose id is 0) is provided with a list of candidate service schedules. A schedule is represented by a binary vector of length equal to the period (e.g., [1, 0, 1] for a 3-day period), where a 1 in a given position indicates that the customer must be visited on that day. The decision maker must select exactly one candidate schedule for each customer.", + "bbox": [ + 86, + 652, + 478, + 750 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "For every day in the planning period, if a customer's chosen schedule indicates a delivery (i.e., a 1), then exactly one vehicle must visit that customer on that day. Otherwise, the customer should not be visited. The decision maker must also design, for each day, the tours for the vehicles. Each tour is a continuous route that starts at the depot (id 0) and, after visiting a subset of customers, returns to the depot. Each vehicle is only allowed to visit the depot once per day—namely, as its starting and ending point—and it is not allowed to return to the depot in the middle of a tour.", + "bbox": [ + 86, + 750, + 478, + 888 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Moreover, each vehicle route must obey a capacity constraint: the total demand of the customers visited on that tour must not exceed the vehicle capacity each day. 
Although multiple vehicles are available per day (as specified by the input), not all available vehicles have to be used, but the number of tours in a given day cannot exceed the provided number of vehicles. In addition, the tours on each day must cover exactly those customers who require service per the selected schedules, and no customer may be visited more than once in a given day.", + "bbox": [ + 519, + 69, + 911, + 207 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "The objective is to choose a schedule for every customer and plan the daily tours so as to minimize the overall distance traveled by all vehicles during the entire planning period. Distances are measured using Euclidean distance.", + "bbox": [ + 519, + 208, + 911, + 262 + ], + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/8f2560e60912e886f7dbdab354daaf81fa6b15cb2ada6a9420535898615ee0e0.jpg", + "table_caption": [ + "Table 35: Unconstrained guillotine cutting" + ], + "table_footnote": [], + "table_body": "
MethodScore
Classical Solver0.12437943290991642
BestOfN0.42032326191804853
Refine0.48371172427664344
FunSearch0.32385035648314586
AIDE0.5362363612554435
ReEvo0.0
MCTS0.0
EoH0.0
", + "bbox": [ + 519, + 273, + 816, + 416 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Table 36: Vehicle routing: period routing", + "bbox": [ + 581, + 426, + 848, + 440 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "p-median - capacitated", + "text_level": 1, + "bbox": [ + 519, + 468, + 694, + 483 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "The Capacitated P-Median Problem is a facility location optimization problem where the objective is to select exactly $p$ customers as medians (facility locations) and assign each customer to one of these medians to minimize the total cost, defined as the sum of the Euclidean distances (rounded down to the nearest integer) between customers and their assigned medians. Each median has a capacity constraint $Q$ , meaning the total demand of the customers assigned to it cannot exceed $Q$ . A feasible solution must respect this capacity constraint for all medians; otherwise, it receives a score of zero. The solution is evaluated by the ratio extscore = rac extbestknown extcomputed_total_cost, where computed_total_cost is the total assignment cost if all constraints are satisfied; otherwise, the score is zero. The output consists of the total cost (if feasible), the selected medians, and the customer assignments.", + "bbox": [ + 519, + 484, + 911, + 693 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "p-median - uncapacitated", + "text_level": 1, + "bbox": [ + 519, + 719, + 714, + 734 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "The uncapacitated p-median problem is a combinatorial optimization problem defined on a given graph $\\mathrm{G} = (\\mathrm{V},\\mathrm{E})$ with n vertices and m edges. The objective is to select p medians (facility locations) from the set of vertices such that the total assignment cost is minimized. 
The assignment cost is computed as the sum of the shortest distances from each vertex to its nearest selected median, where distances are given by a precomputed complete cost matrix (obtained via Floyd's algorithm). Formally, given the cost matrix $D\\in \\mathbb{R}^{n\\times n}$ , the optimization problem seeks to find a subset $S\\subseteq Vwith|S| = p$ that minimizes the function:", + "bbox": [ + 519, + 736, + 911, + 887 + ], + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/e211d9279db57498735684778c25b26ba91157f8392851900afa6babc3a600a8.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodScore
Classical Solver0.8996179560649475
BestOfN0.9892886172082498
Refine0.9737771618997864
FunSearch0.9748437166838722
AIDE0.7442228395960961
ReEvo0.9786585768154689
MCTS0.9829650705934849
EoH0.9853458094532425
", + "bbox": [ + 84, + 66, + 374, + 205 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "$\\sum_{v\\in V}\\min_{s\\in S}D(v,s)$", + "bbox": [ + 102, + 258, + 253, + 273 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "where $\\mathrm{D}(\\mathrm{v},\\mathrm{s})$ is the shortest-path distance between vertex v and median s. The solution consists of a list of exactly p distinct vertices representing the chosen medians.", + "bbox": [ + 84, + 273, + 477, + 314 + ], + "page_idx": 19 + }, + { + "type": "table", + "img_path": "images/c84d31b4f0c52d71f3595153fe6a7cd0caf6c2bafccd0acab9a1f99cfdf1decd.jpg", + "table_caption": [ + "Table 37: p-median - capacitated" + ], + "table_footnote": [], + "table_body": "
MethodScore
Classical Solver0.9952341868141825
BestOfN0.9453613019698086
Refine0.9982141349797949
FunSearch0.9996783954983718
AIDE0.9847816841274486
ReEvo0.9983315585722753
MCTS0.9605290267584901
EoH0.9921177098573016
", + "bbox": [ + 84, + 327, + 374, + 468 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Table 38: p-median - uncapacitated", + "bbox": [ + 166, + 479, + 397, + 493 + ], + "page_idx": 19 + } +] \ No newline at end of file diff --git a/data/2025/2504_04xxx/2504.04310/968affa1-c14c-4643-a77b-b08b870e8c9e_model.json b/data/2025/2504_04xxx/2504.04310/968affa1-c14c-4643-a77b-b08b870e8c9e_model.json new file mode 100644 index 0000000000000000000000000000000000000000..76b006dba1e3c7bd8f437be44dc88bb11ff667f7 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/968affa1-c14c-4643-a77b-b08b870e8c9e_model.json @@ -0,0 +1,4244 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.277, + 0.061, + 0.725 + ], + "angle": 270, + "content": "arXiv:2504.04310v3 [cs.CL] 22 Aug 2025" + }, + { + "type": "title", + "bbox": [ + 0.226, + 0.121, + 0.776, + 0.165 + ], + "angle": 0, + "content": "CO-Bench: Benchmarking Language Model Agents in Algorithm Search for Combinatorial Optimization" + }, + { + "type": "text", + "bbox": [ + 0.255, + 0.178, + 0.751, + 0.197 + ], + "angle": 0, + "content": "Weiwei Sun* Shengyu Feng* Shanda Li Yiming Yang" + }, + { + "type": "text", + "bbox": [ + 0.406, + 0.201, + 0.593, + 0.216 + ], + "angle": 0, + "content": "Carnegie Mellon University" + }, + { + "type": "text", + "bbox": [ + 0.282, + 0.216, + 0.714, + 0.23 + ], + "angle": 0, + "content": "{weiweis, shengyuf, shandal, yiming}@cs.cmu.edu" + }, + { + "type": "title", + "bbox": [ + 0.249, + 0.274, + 0.315, + 0.287 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.099, + 0.295, + 0.465, + 0.511 + ], + "angle": 0, + "content": "Although LLM-based agents have attracted significant attention in domains such as software engineering and machine learning research, their role in advancing combinatorial optimization (CO) remains relatively underexplored. 
This gap underscores the need for a deeper understanding of their potential in tackling structured, constraint-intensive problems—a pursuit currently limited by the absence of comprehensive benchmarks for systematic investigation. To address this, we introduce CO-Bench, a benchmark suite featuring 36 real-world CO problems drawn from a broad range of domains and complexity levels. CO-Bench includes structured problem formulations and curated data to support rigorous investigation of LLM agents. We evaluate multiple agentic frameworks against established human-designed algorithms, revealing the strengths and limitations of existing LLM agents and identifying promising directions for future research. CO-Bench is publicly available at https://github.com/sunnweiwei/CO-Bench." + }, + { + "type": "title", + "bbox": [ + 0.226, + 0.529, + 0.337, + 0.544 + ], + "angle": 0, + "content": "Introduction" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.548, + 0.483, + 0.715 + ], + "angle": 0, + "content": "Combinatorial Optimization (CO) is a foundational problem class in computer science and operation research, focused on finding optimal solutions in discrete, structured, and constraint-rich domains. It underpins a wide range of real-world applications, including logistics (Vogiatzis and Pardalos 2013), production planning (Crama 1997), bioinformatics (Gusfield 1997), etc. Many CO problems are computationally intractable and classified as NP-hard, making exact solutions impractical at scale. As a result, developing effective algorithms often demands significant domain expertise and manual effort—posing a long-standing challenge in both academic research and industrial applications." 
+ }, + { + "type": "text", + "bbox": [ + 0.082, + 0.714, + 0.483, + 0.868 + ], + "angle": 0, + "content": "Recent advances in Large Language Models (LLMs) (OpenAI 2024b; DeepSeek-AI 2025a) have positioned LLM-based agents as increasingly promising tools for a variety of prediction and decision-making tasks (Jimenez et al. 2023; Chan et al. 2024; Gottweis et al. 2025). In particular, there is growing interest in applying LLMs to CO problems. Initial investigations have largely focused on solution correctness, evaluated on small-scale test instances (Ramamonjison et al. 2023; Yang et al. 2025a; Xiao et al. 2024a), and are often geared towards solving problems posed by general users. More recent works have begun to explore autonomous LLMs as a new approach." + }, + { + "type": "image", + "bbox": [ + 0.521, + 0.271, + 0.911, + 0.462 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.516, + 0.472, + 0.916, + 0.531 + ], + "angle": 0, + "content": "Figure 1: Overview of CO-Bench. CO-Bench includes 36 problems from 8 categories, and aims to evaluate LLM agents' ability to develop effective and efficient algorithms for solving real-world combinatorial optimization problems." + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.556, + 0.916, + 0.682 + ], + "angle": 0, + "content": "agents capable of conducting research and designing more efficient algorithms for complex scientific and industrial challenges. For example, FunSearch (Romera-Paredes et al. 2023) combines LLM prompting with evolutionary search to discover heuristics that outperform human-designed counterparts in the Cap Set and Bin Packing problems. Subsequent methods (Liu et al. 2024; Ye et al. 2024; Novikov et al. 2025) further improve computational efficiency and broaden applicability to domains such as routing and scheduling." 
+ }, + { + "type": "text", + "bbox": [ + 0.516, + 0.682, + 0.916, + 0.792 + ], + "angle": 0, + "content": "Despite these advancements, most existing efforts focus on narrow components (e.g., priority functions) within established algorithms, across a limited set of tasks (typically 4-7 problems), and often rely on heavily handcrafted, problem-specific prompts and templates (Romera-Paredes et al. 2023; Ye et al. 2024). Furthermore, there remains a lack of systematic evaluation of how these agents perform across a broader and more diverse collection of real-world CO problems." + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.792, + 0.916, + 0.89 + ], + "angle": 0, + "content": "To address this gap, we introduce CO-Bench, a comprehensive benchmark designed to evaluate LLM agents in the context of efficient CO algorithm development. CO-Bench comprises real-world CO problems spanning a wide range of domains and complexities. Figure 1 illustrates the problem categories and examples, while Table 1 compares CO-Bench with existing CO benchmarks. Compared to prior bench" + }, + { + "type": "page_footnote", + "bbox": [ + 0.106, + 0.875, + 0.318, + 0.89 + ], + "angle": 0, + "content": "*These authors contributed equally." + } + ], + [ + { + "type": "text", + "bbox": [ + 0.082, + 0.069, + 0.482, + 0.207 + ], + "angle": 0, + "content": "marks, CO-Bench offers broader problem coverage, and emphasizes end-to-end evaluation of LLM-based research agents, focusing on their ability to design efficient, potentially novel algorithms from abstract problem descriptions. This design enables reproducible and scalable evaluation of agent performance, including comparisons with human-designed classical CO solver under equivalent time constraints. By doing so, CO-Bench introduces new challenges for LLM agent development, such as the discovery of algorithms that extend beyond current human knowledge of CO." 
+ }, + { + "type": "text", + "bbox": [ + 0.082, + 0.208, + 0.481, + 0.429 + ], + "angle": 0, + "content": "Using CO-Bench, we benchmark 15 LLMs and 9 agentic frameworks, comparing their performances against both human-designed classical algorithms and the best-known solutions reported in the literature. Our results show that reasoning models (e.g., o3-mini and Claude-3.7-sonnet) consistently outperform standard no-reasoning LLMs. When integrated into agentic frameworks like FunSearch, LLMs further improve through trial-and-error exploration. Notably, on 25 problems, LLM-generated algorithms outperformed classical solvers, and on 3 problems, they surpassed the best-known solutions. However, our analysis also reveals current limitations, such as limited algorithmic novelty and insufficient handling of feasibility constraints. These findings highlight both the promise and challenges of LLM-driven research in CO and suggest key directions for advancing autonomous algorithm design." + }, + { + "type": "text", + "bbox": [ + 0.099, + 0.429, + 0.481, + 0.444 + ], + "angle": 0, + "content": "In summary, this paper makes the following contributions:" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.447, + 0.481, + 0.5 + ], + "angle": 0, + "content": "(i) We introduce CO-Bench, the first comprehensive benchmark to evaluate the capability of LLMs to develop algorithms for diverse and challenging real-world CO problems" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.504, + 0.481, + 0.587 + ], + "angle": 0, + "content": "(ii) We benchmark 15 LLMs and 9 agentic frameworks, analyzing their performance relative to expert-designed pipelines. Our results highlight the strengths of agent-generated algorithms, while also revealing limitations in planning, feasibility checking, and the generation of efficient solution." 
+ }, + { + "type": "list", + "bbox": [ + 0.078, + 0.447, + 0.481, + 0.587 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.23, + 0.601, + 0.335, + 0.617 + ], + "angle": 0, + "content": "Preliminary" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.62, + 0.307, + 0.635 + ], + "angle": 0, + "content": "Combinatorial Optimization" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.637, + 0.481, + 0.707 + ], + "angle": 0, + "content": "For each CO problem \\( c \\) (for example, Traveling salesman problem), we follow Papadimitriou and Steiglitz (1982) to formulate it as a constrained optimization problem in the discrete space. Consider an instance \\( p \\), the optimization problem could be expressed as" + }, + { + "type": "equation", + "bbox": [ + 0.19, + 0.712, + 0.48, + 0.735 + ], + "angle": 0, + "content": "\\[\n\\min _ {x \\in S _ {c} (p)} f _ {c} (x; p) + g _ {c} (x; p), \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.74, + 0.48, + 0.837 + ], + "angle": 0, + "content": "where \\( S_{c}(p) \\) represents the solution space, e.g., \\( \\mathbf{Z}^{m} \\times \\mathbb{R}^{n} \\) for \\( d \\) discrete variables and \\( n \\) continuous variables, \\( f_{c}(x;p) \\) corresponds to the objective function, and \\( g_{c}(x;p) \\) stands for the constraint violation, which is 0 for feasible solutions and \\( +\\infty \\) otherwise. To avoid the clutter, we simply denote \\( h_c(x;p) = f_c(x;p) + g_c(x;p) \\) in the following text and omit \\( c \\) if the context is clear." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.838, + 0.481, + 0.866 + ], + "angle": 0, + "content": "Given an algorithm set \\(\\mathcal{A}\\) and a problem instance distribution \\(D\\), the algorithm search problem is defined as" + }, + { + "type": "equation", + "bbox": [ + 0.188, + 0.871, + 0.48, + 0.893 + ], + "angle": 0, + "content": "\\[\n\\min _ {A \\in \\mathcal {A}} \\mathbb {E} _ {p \\sim D, x \\sim A (p)} [ h (x; p) ]. 
\\tag {2}\n\\]" + }, + { + "type": "table", + "bbox": [ + 0.518, + 0.066, + 0.915, + 0.201 + ], + "angle": 0, + "content": "
DatasetAlgorithm DevProblem NumInstance NumLargest Variables
NPHardEvalX990024
NL4OPTX52893
OptiBenchX460518
ComplexORX201009
ReEvo75971,000
CO-Bench366,48211,000
" + }, + { + "type": "table_caption", + "bbox": [ + 0.516, + 0.209, + 0.915, + 0.28 + ], + "angle": 0, + "content": "Table 1: Data statistics for CO-Bench and related CO benchmarks, including the indicator for algorithm development support, the number of problem types, the number of test-set problem instances, and the largest number of test-set variables (e.g., the number of nodes in the largest graph)." + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.307, + 0.915, + 0.447 + ], + "angle": 0, + "content": "In contrast to previous neural CO solvers (Bengio, Lodi, and Prouvost 2020) that directly parameterize \\( A \\) with a neural network, we focus on symbolic searching space where \\( A \\) consists of all algorithms that could be represented by a Python Program, with a maximum number of \\( d \\) tokens, where \\( d \\) is typically decided by the output length limit of an LLM. Considering the popularity of randomized algorithms (Motwani and Raghavan 2013) for CO, we treat the output of an algorithm \\( A(p) \\) as a distribution of solutions, while deterministic algorithms would correspond to the point distributions." + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.447, + 0.913, + 0.489 + ], + "angle": 0, + "content": "The main endeavor of this work is focused on the shaping of the algorithm set \\(\\mathcal{A}\\), the curation of the data distribution \\(D\\) and the definition of \\(h\\) on our collected CO problems." 
+ }, + { + "type": "title", + "bbox": [ + 0.518, + 0.501, + 0.62, + 0.517 + ], + "angle": 0, + "content": "LLM Agents" + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.52, + 0.915, + 0.549 + ], + "angle": 0, + "content": "Given a CO problem \\( c \\), a candidate algorithm could be generated by an LLM as" + }, + { + "type": "equation", + "bbox": [ + 0.64, + 0.557, + 0.913, + 0.573 + ], + "angle": 0, + "content": "\\[\nA \\sim M (\\text {t e x t i f y} (c); \\theta), \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.58, + 0.915, + 0.678 + ], + "angle": 0, + "content": "where \\(M\\) denotes an LLM with parameters \\(\\theta\\). However, one-time generation usually leads to infeasible code or suboptimal algorithms (Madaan et al. 2023), and agentic frameworks address this by enabling iterative refinement through structured interactions with external tools (e.g., a coding environment). Formally, an agent performs reasoning-action iterations (Yao et al. 2022):" + }, + { + "type": "equation", + "bbox": [ + 0.601, + 0.686, + 0.914, + 0.702 + ], + "angle": 0, + "content": "\\[\nr _ {t + 1} \\sim M \\left(\\operatorname {t e x t i f y} _ {r} \\left(c, A _ {t}, H _ {t}\\right); \\theta\\right), \\tag {4}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.602, + 0.704, + 0.914, + 0.72 + ], + "angle": 0, + "content": "\\[\na _ {t + 1} \\sim M \\left(\\text {t e x t i f y} _ {a} \\left(r _ {t + 1}, H _ {t}\\right); \\theta\\right), \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.728, + 0.915, + 0.814 + ], + "angle": 0, + "content": "where \\( r_t \\) is the reasoning step, \\( a_t \\) is the action step (e.g., executing code, evaluating results), and \\( H_t = (r_i, a_i, \\text{result}(a_i))_{i=1}^{t-1} \\) maintains the interaction history. Thus, an LLM agent is formally defined as an LLM \\( M(\\cdot; \\theta) \\) guided by a structured workflow specifying iterative external interactions to enhance its outputs." 
+ }, + { + "type": "title", + "bbox": [ + 0.669, + 0.827, + 0.762, + 0.842 + ], + "angle": 0, + "content": "CO-Bench" + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.847, + 0.915, + 0.89 + ], + "angle": 0, + "content": "We introduce CO-Bench, a comprehensive benchmark designed to evaluate the algorithm development ability of LLM agents on combinatorial optimization (CO) problems. The" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.082, + 0.069, + 0.482, + 0.155 + ], + "angle": 0, + "content": "benchmark consists of 36 problems mainly sourced from OR-Library (Beasley 1990), an established archive containing datasets accumulated by researchers across over 30 years of operations research. These problems span a wide range of realistic CO challenges in academia and industrial applications." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.166, + 0.201, + 0.18 + ], + "angle": 0, + "content": "Data Curation" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.186, + 0.482, + 0.229 + ], + "angle": 0, + "content": "Problem Selection We first perform rigorous filtering and cleaning, and select 36 CO problems that cover diverse domains and complexities, including:" + }, + { + "type": "text", + "bbox": [ + 0.103, + 0.233, + 0.48, + 0.335 + ], + "angle": 0, + "content": "- Packing problems: Bin packing (Falkenauer 1996), Multi-Demand Multidimensional Knapsack problem (Cappanera and Trubian 2001), Multidimensional knapsack problem (Petersen 1967), Container loading (Bischoff and Ratcliff 1995; Ivancic 1988), Container loading with weight restrictions (Ratcliff and Bischoff 1998; Bischoff 2006), Packing unequal circles (López and Beasley 2016), Packing unequal rectangles and squares number / area (López and Beasley 2018)." 
+ }, + { + "type": "text", + "bbox": [ + 0.103, + 0.338, + 0.48, + 0.389 + ], + "angle": 0, + "content": "- Cutting problems: Assortment problem (Beasley 1985a), Constrained / unconstrained guillotine cutting (Christofides and Whitlock 1977; Beasley 1985b), Constrained non-guillotine cutting (Beasley 1985c, 2004)." + }, + { + "type": "text", + "bbox": [ + 0.103, + 0.393, + 0.48, + 0.443 + ], + "angle": 0, + "content": "- Facility location problems: Capacitated / Uncapacitated warehouse location (Beasley 1988, 1993), Capacitated / Uncapacitated p-median problem (Beasley 1985d; Osman and Christofides 1994)." + }, + { + "type": "text", + "bbox": [ + 0.103, + 0.447, + 0.48, + 0.524 + ], + "angle": 0, + "content": "- Scheduling problems: Aircraft landing (Beasley et al. 2000, 2004), Crew scheduling (Beasley and Cao 1996), Common due date scheduling (Biskup and Feldmann 2001), Flow shop scheduling (Taillard 1993), Hybrid Reentrant Shop Scheduling (Chakhlevitch and Glass 2009), Job shop scheduling (Taillard 1993), Open shop scheduling (Taillard 1993)." + }, + { + "type": "text", + "bbox": [ + 0.103, + 0.527, + 0.48, + 0.578 + ], + "angle": 0, + "content": "- Routing problems: Traveling salesman problem (Laporte 1992), Period vehicle routing problem (Christofides and Beasley 1984), Resource constrained shortest path (Beasley and Christofides 1989)." + }, + { + "type": "text", + "bbox": [ + 0.103, + 0.582, + 0.48, + 0.607 + ], + "angle": 0, + "content": "- Assignment problems: Constrained / unconstrained assignment (Osman 1995; and 1990)." 
+ }, + { + "type": "text", + "bbox": [ + 0.103, + 0.611, + 0.48, + 0.638 + ], + "angle": 0, + "content": "- Tree problems: Euclidean Steiner (Beasley 1992), Corporate structuring (Anken and Beasley 2012)" + }, + { + "type": "text", + "bbox": [ + 0.103, + 0.641, + 0.48, + 0.704 + ], + "angle": 0, + "content": "- Graph and set problems: Maximal Independent Set (Erdos and Renyi 1984), Graph colouring (Fleurent and Ferland 1996), Equitable partitioning (Mingers and O'Brien 1995), Set partitioning (Chu and Beasley 1998), Set covering (Beasley and Jornsten 1992)." + }, + { + "type": "list", + "bbox": [ + 0.103, + 0.233, + 0.48, + 0.704 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.713, + 0.482, + 0.853 + ], + "angle": 0, + "content": "Data Annotation For each problem, we manually annotate the following components: (1) Problem description: a formal definition of the optimization problem in natural language, accompanied by a clearly specified solve function as the starter code; (2) Data loading function: a load_data function to load and preprocess raw data from the test files; (3) Evaluation function: an eval_func function that rigorously and robustly evaluates the quality of a solution. Additionally, each problem comprises a development set and a test set, each containing several problem instances." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.861, + 0.482, + 0.89 + ], + "angle": 0, + "content": "Evaluation Framework We develop a rigorous and efficient evaluation framework to assess the performance of" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.069, + 0.916, + 0.25 + ], + "angle": 0, + "content": "LLM agents in simulated, time-constrained competition scenarios (Chan et al. 2024). Specifically, LLM agents operate within a sandbox environment with access to a Linux machine. For each problem, agents are provided with a problem description, development datasets, and an API endpoint for submitting their solutions (i.e. 
codebases) to receive evaluation feedback. An independent evaluation system, which is protected by built-in safeguards, scores the submitted solutions on the development set in parallel. After a limited number of research steps, the agent submits its final solution for evaluation on the test set. During the agent development process, both eval_func and test data are invisible. Figure 2 shows the evaluation pipeline in CO-Bench." + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.256, + 0.916, + 0.41 + ], + "angle": 0, + "content": "Designing Classical Solver Baselines To investigate how existing LLM agents perform compared to classical solvers, we establish a classical solver baseline. Specifically, the authors of this paper—who have extensive experience in related areas and are familiar with the problems in CO-Bench—spent approximately 30 minutes per problem testing and selecting the most effective classical solvers (e.g., LKH for TSP, CPLEX for scheduling, Gurobi for MIS) and tuning their hyperparameters on the development set. This process ensures that the classical solver baseline is well-tuned and competitive for each problem in CO-Bench." 
+ }, + { + "type": "title", + "bbox": [ + 0.517, + 0.421, + 0.671, + 0.435 + ], + "angle": 0, + "content": "Evaluation Metrics" + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.439, + 0.913, + 0.497 + ], + "angle": 0, + "content": "Avg Score The main evaluation metric is similar to the Primal Gap (Berthold 2006), defined as the normalized score of the primal bound \\( h(x; p) \\) against a pre-computed optimal (or best-known) objective value \\( h_p^* \\):" + }, + { + "type": "equation", + "bbox": [ + 0.606, + 0.504, + 0.913, + 0.541 + ], + "angle": 0, + "content": "\\[\ns (x, p) = \\frac {\\operatorname* {m i n} \\left\\{\\left| h (x , p) \\right| , \\left| h _ {p} ^ {*} \\right| \\right\\}}{\\operatorname* {m a x} \\left\\{\\left| h (x , p) \\right| , \\left| h _ {p} ^ {*} \\right| \\right\\}}, \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.547, + 0.915, + 0.646 + ], + "angle": 0, + "content": "A higher value indicates better performance and a score of 1 signifies the performance identical to the optimal or best-known solution. Program errors or infeasible solutions lead to a score of 0.0. The score of a solver on a given problem is computed by averaging its scores across all test instances. The overall benchmark score is then obtained by averaging these problem-level scores across all 36 problems." + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.652, + 0.913, + 0.748 + ], + "angle": 0, + "content": "Valid Solution We compute the percentage of problems for which the generated code is correct on all test instances. Any raised error—such as constraint violation or timeout—is treated as an invalid signal. If any test instance for a given problem results in an invalid signal, the entire solution for that problem is considered invalid, even if it produces valid results on other test instances." 
+ }, + { + "type": "text", + "bbox": [ + 0.516, + 0.756, + 0.915, + 0.798 + ], + "angle": 0, + "content": "Above Classical Given the performance of classical solver, we calculate the portion of problems where the model outperforms the classical solver baseline." + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.805, + 0.915, + 0.89 + ], + "angle": 0, + "content": "Survival Rate The survival rate measures that, for each problem, the percentage of test instances where the model's solution is above \\(99\\%\\) of the reference score (reported optimal or best-known solution from literature). This serve as a challenge metric as the model can only get credit when it is very close or better than previous-best algorithm." + } + ], + [ + { + "type": "image", + "bbox": [ + 0.129, + 0.066, + 0.873, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.248, + 0.916, + 0.293 + ], + "angle": 0, + "content": "Figure 2: CO-Bench is an evaluation environment for AI agents. Each problem has an associated description and a development dataset. Following the setup in Chan et al. (2024), the agent-generated code implements an algorithm design, which is further graded and compared against the best-known solution and human expert solution." + }, + { + "type": "title", + "bbox": [ + 0.196, + 0.316, + 0.367, + 0.334 + ], + "angle": 0, + "content": "Experimental Setup" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.335, + 0.269, + 0.349 + ], + "angle": 0, + "content": "Benchmarked Methods" + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.352, + 0.481, + 0.396 + ], + "angle": 0, + "content": "On CO-Bench, we evaluate various LLMs combined with different agentic frameworks, and compare them with existing human-designed CO solvers." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.4, + 0.483, + 0.54 + ], + "angle": 0, + "content": "LLMs We conduct experiments on 5 open-source models and 10 proprietary models. 
These include instruction-tuned models such as Llama-3.3-70B-Instruct (Meta 2024), Qwen-2.5-Code-32B-Instruct (Hui et al. 2024), DeepSeek-V3 (DeepSeek-AI 2024), and GPT-4o (OpenAI 2024a), as well as frontier reasoning models, including o3-mini (OpenAI 2025), Claude-3.7-Sonnet-Thinking (Anthropic 2025), DeepSeek-R1 (DeepSeek-AI 2025b), Grok-3-Thinking (xAI 2025), QwQ-32B (Qwen 2025), and Gemini 2.5 Pro (DeepMind 2025)." + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.545, + 0.483, + 0.63 + ], + "angle": 0, + "content": "Agentic frameworks For the aforementioned LLMs, we apply various agentic frameworks to evaluate their performance across different strategies. These range from simple approaches, such as direct generation, to more sophisticated frameworks that augment LLM with additional tools, workflows, and test-time compute:" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.631, + 0.483, + 0.673 + ], + "angle": 0, + "content": "- Direct Answer: The simplest approach, where the LLM directly generates a solution to the combinatorial optimization problem without further refinement." + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.675, + 0.483, + 0.717 + ], + "angle": 0, + "content": "- BestOfN Sampling (Chen et al. 2021): Generate \\( N \\) candidate solutions, evaluate each on a development set, and select the solution with the best performance." + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.718, + 0.483, + 0.761 + ], + "angle": 0, + "content": "- Chain of Experts (Xiao et al. 2024a): A multi-agent prompting framework where agents of different roles cooperate to debug and deliver one solution." + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.762, + 0.483, + 0.833 + ], + "angle": 0, + "content": "- Greedy Refinement (Shinn et al. 2023; Madaan et al. 2023): Iteratively prompt the LLM to refine the current best solution based on the evaluation results of the development set, repeating this refinement process for \\(N\\) steps." 
+ }, + { + "type": "text", + "bbox": [ + 0.092, + 0.833, + 0.482, + 0.89 + ], + "angle": 0, + "content": "- FunSearch (Romera-Paredes et al. 2023): Prompt the LLM to either draft a new solution or refine an existing one, followed by employing an evolutionary algorithm to iteratively select and improve candidate solutions." + }, + { + "type": "list", + "bbox": [ + 0.091, + 0.631, + 0.483, + 0.89 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.525, + 0.317, + 0.916, + 0.36 + ], + "angle": 0, + "content": "- EoH (Liu et al. 2024): Evolve both thoughts and codes in an evolutionary search framework for generating high-performance heuristics." + }, + { + "type": "text", + "bbox": [ + 0.525, + 0.362, + 0.915, + 0.432 + ], + "angle": 0, + "content": "- AIDE (Jiang et al. 2025): A representative method for machine learning engineering tasks, which stores existing solutions in a tree structure and selectively prompts the LLM to draft new solutions, debug or improve previously stored solutions." + }, + { + "type": "text", + "bbox": [ + 0.525, + 0.434, + 0.915, + 0.478 + ], + "angle": 0, + "content": "- ReEvo (Ye et al. 2024): A recent evolutionary algorithm that incorporates short-term and long-term reflection modules, as well as a multi-agentic framework." + }, + { + "type": "text", + "bbox": [ + 0.525, + 0.479, + 0.913, + 0.549 + ], + "angle": 0, + "content": "- MSTC-AHD (Zheng et al. 2025): A Monte Carlo Tree Search (MCTS)-based agentic pipeline that organizes all LLM-generated heuristics in a tree structure and uses the MCTS algorithm with progressive widening technique to guide the evolution of heuristics." 
+ }, + { + "type": "list", + "bbox": [ + 0.525, + 0.317, + 0.916, + 0.549 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.517, + 0.564, + 0.702, + 0.58 + ], + "angle": 0, + "content": "Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.582, + 0.914, + 0.652 + ], + "angle": 0, + "content": "For benchmark evaluation, we limit the solving time of each test instance to 10 seconds on a single CPU, such that the exact solving of the problem (achieving the optimal solution) is impossible on most test instances. Test instances that result in a timeout or error receive a score of 0." + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.652, + 0.915, + 0.833 + ], + "angle": 0, + "content": "For agent implementation, we use o3-mini-medium as the default base model. Since the original implementations of these agents may use different evaluation setups, we adapt their approaches to our benchmark setting (i.e., end-to-end algorithm search) by adjusting the prompts and tools. For all agents, we set the number of iteration steps to 64. In each step, the agent generates a code block as a candidate algorithm and obtains its evaluation score on the development set. After 64 iterations, the agent produces 64 candidate algorithms, from which the best-performing solution on the development set is selected for final benchmark evaluation. All evaluations are conducted on a single CPU core of a dual AMD EPYC 7313 16-Core processor." + }, + { + "type": "title", + "bbox": [ + 0.517, + 0.843, + 0.625, + 0.857 + ], + "angle": 0, + "content": "Main Results" + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.861, + 0.915, + 0.891 + ], + "angle": 0, + "content": "Figure 3 presents the results of LLMs and agents on the test set. We highlight the following key findings." 
+ } + ], + [ + { + "type": "image", + "bbox": [ + 0.087, + 0.066, + 0.913, + 0.52 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.53, + 0.918, + 0.59 + ], + "angle": 0, + "content": "Figure 3: Overall Performance. LLM Agents are all based on o3-mini-medium. Avg Score refers to the average normalized objective scores across all problems. Valid Solution indicates the percentage of test-set problems for which the solutions are feasible. Above Classical represents the percentage of test instances where the model outperforms the classical solver baseline. Survival Rate measures the percentage of test instances where the model's score exceeds \\(99\\%\\) of the reference score." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.614, + 0.483, + 0.742 + ], + "angle": 0, + "content": "Direct generation performance is limited. LLMs show significantly lower average scores compared to the classical solver. They often fail to generate valid solutions (i.e., bug-free code that satisfies all constraints within the time limit), rarely outperform the classical solver on individual instances, and often fail to produce optimal solutions. Reasoning-capable models tend to perform better than nonreasoning ones. The best-performing LLM for one-shot generation is Claude-3.7 Sonnet, with an average score of 0.65." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.745, + 0.483, + 0.872 + ], + "angle": 0, + "content": "Agentic systems substantially improve LLM performance. Compared to direct generation, the agentic pipeline achieves considerably higher scores across all metrics. Among the evaluated frameworks, FunSearch attains the highest average score of 0.842, outperforming the classical solver (0.797). It also surpasses the solver on over half the test instances (see \"Above Classical\" score) and achieves a higher survival rate. These results highlight the effectiveness of LLM-based agents in solving CO problems." 
+ }, + { + "type": "text", + "bbox": [ + 0.084, + 0.875, + 0.482, + 0.892 + ], + "angle": 0, + "content": "Agent performance varies widely. Some advanced agentic" + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.614, + 0.916, + 0.699 + ], + "angle": 0, + "content": "frameworks, such as AIDE, underperform compared to simpler strategies like BestOfN on most metrics, though they show higher valid solution rates—possibly due to their debugging capabilities. This indicates that current planning mechanisms in agents are still underdeveloped and may not reliably outperform random sampling." + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.699, + 0.916, + 0.77 + ], + "angle": 0, + "content": "Valid solution rates still lag behind classical solvers. According to the Valid Solution metric, the best-performing agents achieve a success rate of 0.555—lower than that of the classical solver (0.611). This suggests that current agents often struggle with solution feasibility and reliability." + }, + { + "type": "title", + "bbox": [ + 0.517, + 0.784, + 0.694, + 0.8 + ], + "angle": 0, + "content": "Agents Error Analysis" + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.805, + 0.916, + 0.891 + ], + "angle": 0, + "content": "To investigate why the agents' valid solution scores are low, Figure 4 shows the types of errors among invalid solutions for five agents. We observe that code errors (i.e., bugs that prevent compilation) are the least frequent issue. The dominant error type varies across agents: Greedy Refine and ReEvo exhibit more constraint violations, while FunSearch, AIDE, and" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.129, + 0.064, + 0.873, + 0.201 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.136, + 0.209, + 0.86, + 0.226 + ], + "angle": 0, + "content": "Figure 4: Agents Error Analysis. Distribution of three types of errors among invalid solutions for five agents." 
+ }, + { + "type": "image", + "bbox": [ + 0.089, + 0.251, + 0.476, + 0.449 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.083, + 0.462, + 0.48, + 0.491 + ], + "angle": 0, + "content": "Figure 5: Avg Score vs. the number of iteration steps (in total 64 steps) during the algorithm development." + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.519, + 0.481, + 0.562 + ], + "angle": 0, + "content": "BoN encounter more timeout errors. This highlights agents' limitations in satisfying constraints and generating efficient algorithms within time limits." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.575, + 0.342, + 0.592 + ], + "angle": 0, + "content": "Performance over Iteration Steps" + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.596, + 0.48, + 0.667 + ], + "angle": 0, + "content": "Figure 5 illustrates the performance of several representative LLM agents across different iteration steps. At each step, the agent generates a new algorithm and receives evaluation results on the development set. We also include the performance of the classical solver baseline for comparison." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.667, + 0.481, + 0.792 + ], + "angle": 0, + "content": "All agents exhibit the ability to improve their performance with more iteration steps. FunSearch consistently achieves the best results, reaching a score of 0.8423 and converging after around 50 steps. Notably, both FunSearch and Refine discover algorithms that outperform the classical solver within approximately 10 steps. However, performance tends to saturate after 30 steps, with further search yielding diminishing returns. Enabling more consistent improvements under longer search budgets presents an interesting future direction." 
+ }, + { + "type": "text", + "bbox": [ + 0.083, + 0.792, + 0.482, + 0.89 + ], + "angle": 0, + "content": "Figure 6 shows an example trajectory of algorithm development by Greedy Refinement (o3-mini) on TSP over multiple search steps. In the early stages, the agent enhances code efficiency by adopting vectorized data structures and utilizing a K-D tree. It then increases the number of search iterations and introduces perturbations to escape local optima. Finally, the agent integrates simulated annealing to balance exploration" + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.252, + 0.912, + 0.413 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.516, + 0.421, + 0.916, + 0.493 + ], + "angle": 0, + "content": "Figure 6: Trajectory of algorithm development for Greedy Refinement on TSP over 64 steps. The curve and highlighted dots indicate the best-ever score and the steps where improvements occurred. The algorithmic ideas behind each improvement step are summarized in corresponding boxes." + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.519, + 0.914, + 0.577 + ], + "angle": 0, + "content": "and exploitation and applies adaptive heuristics for different instance sizes. This example demonstrates that LLMs excel in applying established techniques to improve efficiency and implementation quality, but failing at algorithmic novelty." + }, + { + "type": "title", + "bbox": [ + 0.516, + 0.588, + 0.755, + 0.604 + ], + "angle": 0, + "content": "Comparison to Neural Solvers" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.607, + 0.916, + 0.816 + ], + "angle": 0, + "content": "Table 2 compares the performance of agents with representative neural solvers on TSP and MIS, two well-studied CO problems. We include DIMES (Qiu, Sun, and Yang 2022), DIFUSCO (Sun and Yang 2023), and T2T (Li et al. 2023) as neural baselines. For the method with multiple variants, we only include their best results on each dataset. 
We also consider a hybrid method, LEHD + ReEvo (Ye et al. 2024), which combines the neural solver with LLM-designed heuristics. We report both the objective values (the tour length for TSP and set size for MIS) and the solving time. The results show that the agents such as Greedy Refine and FunSearch achieve competitive performance on both problems, often outperforming existing neural solvers under similar time budget and approaching the best results achieved by previous solvers given extended search time." + }, + { + "type": "title", + "bbox": [ + 0.516, + 0.828, + 0.657, + 0.843 + ], + "angle": 0, + "content": "Solution Analysis" + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.847, + 0.915, + 0.89 + ], + "angle": 0, + "content": "In Figure 7, we plot the percentage of algorithms developed by the Greedy Refinement agent for the 36 CO problems that utilize existing solvers (e.g., code importing ortools," + } + ], + [ + { + "type": "table", + "bbox": [ + 0.148, + 0.066, + 0.85, + 0.213 + ], + "angle": 0, + "content": "
TSP-500TSP-1000TSP-10000ER-SmallER-Large
Len ↓Time ↓Len ↓Time ↓Len ↓Time ↓Size ↑Time ↓Size ↑Time ↓
Gurobi16.5545.6h----41.3850.0m--
DIMES18.841.1m26.362.4m85.754.8m42.0612.0m332.8012.5m
DIFUSCO16.6511.5m23.4548.1m73.896.72h41.1226.6m--
T2T16.6116.0m23.3054.6m--41.3729.7m--
LEHD + ReEvo16.78-23.82-------
Greedy Refine (o3-mini)17.3719.1m24.4019.1m77.652.5m42.3520.1m354.002.5m
FunSearch (o3-mini)17.2019.1m25.3119.1m80.182.5m41.651.9m356.502.1m
" + }, + { + "type": "table_caption", + "bbox": [ + 0.155, + 0.223, + 0.84, + 0.239 + ], + "angle": 0, + "content": "Table 2: Objective values and solving time of different solvers on TSP and MIS, with varying data sizes." + }, + { + "type": "image", + "bbox": [ + 0.116, + 0.271, + 0.447, + 0.436 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.447, + 0.483, + 0.504 + ], + "angle": 0, + "content": "Figure 7: Percentage of algorithms developed by the Greedy Refinement agent that rely on existing solvers (e.g., code importing ortools, pulp) over 64 iteration steps. We observe an increasing use of existing solvers." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.531, + 0.48, + 0.614 + ], + "angle": 0, + "content": "pulp). The percentages are shown across 64 iteration steps. We observe an increasing trend in the use of existing solvers in the agent's solutions. After 64 iterations, the final usage rate reaches \\(25\\%\\) (i.e., solutions for 9 problems use existing solvers). The solvers used throughout all steps and problems are limited to three: ortools, pulp, and scipy." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.614, + 0.481, + 0.727 + ], + "angle": 0, + "content": "This suggests that while existing LLM agents are capable of developing algorithms without relying on existing solvers for most problems, there is a growing tendency to do so over time. Moreover, the solvers used are basic general-purpose tools rather than state-of-the-art solvers specifically designed for each problem (e.g., LKH for TSP), indicating that the agent lacks the necessary knowledge to select the best-performing solver." 
+ }, + { + "type": "title", + "bbox": [ + 0.221, + 0.739, + 0.343, + 0.754 + ], + "angle": 0, + "content": "Related Work" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.759, + 0.368, + 0.775 + ], + "angle": 0, + "content": "Automatic Algorithm Search for CO" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.778, + 0.483, + 0.89 + ], + "angle": 0, + "content": "Automating algorithm search for combinatorial optimization (CO) has emerged as a significant research direction in the machine learning community. Traditional machine learning solvers primarily parameterize CO algorithms as trainable neural networks (Bengio, Lodi, and Prouvost 2020; Cappart et al. 2023). Although effective in capturing data distributions, these neural approaches often struggle to generate feasible solutions, necessitating integration with human-" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.265, + 0.916, + 0.473 + ], + "angle": 0, + "content": "designed heuristics such as branch-and-bound (Gasse et al. 2019) and tree search (Böther et al. 2022). To address this limitation, Kuang et al. (2024a,b) propose to decompose CO algorithms into symbolic operators and conduct searches in the symbolic space. However, designing these unit symbolic operators demands substantial human expertise, limiting generalizability and comprehensive coverage of all algorithm types. Recent advances in Large Language Models (LLMs) and LLM-based agents have significantly mitigated this challenge by enabling symbolic searching in programming language formats (Romera-Paredes et al. 2023; Ye et al. 2024; Liu et al. 2024). Building on these developments, CO-Bench aims to extend the success of these methods to more real-world CO problems and facilitate further research in this domain." 
+ }, + { + "type": "title", + "bbox": [ + 0.517, + 0.487, + 0.73, + 0.501 + ], + "angle": 0, + "content": "CO Benchmarks for LLMs" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.506, + 0.916, + 0.744 + ], + "angle": 0, + "content": "Existing CO benchmarks can be roughly classified into two categories. The first type formulates CO problems as question-answering tasks (Fan et al. 2024; Tang et al. 2025). Although LLMs have the potential to solve CO problems via natural language reasoning, their excessive parameter size makes them inefficient CO solvers in general. Therefore, the second type of benchmarks evaluates the tool-using ability of LLMs, e.g., calling an existing CO solver, to address CO problems (Xiao et al. 2024b; Ahmaditeshnizi, Gao, and Udell 2024; Yang et al. 2025b). However, these benchmarks only evaluate the correctness of the generated algorithm on small-scale CO problems, whose problem parameters could be fully expressed in natural language. In contrast, CO-Bench targets scientific and industrial challenges, emphasizing the evaluation of algorithm efficiency on diverse, large-scale CO instances. This results in a more demanding benchmark, well-suited for assessing powerful reasoning models and agents." + }, + { + "type": "title", + "bbox": [ + 0.666, + 0.758, + 0.765, + 0.772 + ], + "angle": 0, + "content": "Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.778, + 0.916, + 0.89 + ], + "angle": 0, + "content": "This work introduces CO-Bench, the first benchmark designed to evaluate the ability of LLMs in the search of combinatorial optimization (CO) algorithms. Our systematic evaluation reveals that reasoning-focused LLMs, especially when paired with agentic frameworks, can automatically discover effective algorithms that rival or surpass the classical solvers designed by human experts, with competitive searching time. 
However, we also identify key limitations of current LLM" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.084, + 0.069, + 0.482, + 0.14 + ], + "angle": 0, + "content": "agents such as they struggle to understand the problem constraints. These shortcomings highlight the need for future research to enhance agents' problem comprehension and creative reasoning abilities in CO tasks, enabling more robust and autonomous scientific discovery." + }, + { + "type": "title", + "bbox": [ + 0.234, + 0.153, + 0.331, + 0.168 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.172, + 0.482, + 0.27 + ], + "angle": 0, + "content": "Ahmaditeshnizi, A.; Gao, W.; and Udell, M. 2024. OptiMUS: Scalable Optimization Modeling with (MI)LP Solvers and Large Language Models. In Salakhutdinov, R.; Kolter, Z.; Heller, K.; Weller, A.; Oliver, N.; Scarlett, J.; and Berkenkamp, F., eds., Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, 577-596. PMLR." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.273, + 0.482, + 0.315 + ], + "angle": 0, + "content": "and, J. E. B. 1990. Linear Programming on Cray Supercomputers. Journal of the Operational Research Society, 41(2): 133-139." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.318, + 0.482, + 0.36 + ], + "angle": 0, + "content": "Anken, F.; and Beasley, J. E. 2012. Corporate structure optimisation for multinational companies. Omega-international Journal of Management Science, 40: 230-243." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.363, + 0.482, + 0.392 + ], + "angle": 0, + "content": "Anthropic. 2025. Claude Sonnet. https://www.anthropic.com/claude/sonnet. Accessed: 2025-03-24." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.395, + 0.482, + 0.436 + ], + "angle": 0, + "content": "Beasley, J. E. 1985a. An algorithm for the two-dimensional assortment problem. 
European Journal of Operational Research, 19: 253-261." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.44, + 0.482, + 0.482 + ], + "angle": 0, + "content": "Beasley, J. E. 1985b. Algorithms for Unconstrained Two-Dimensional Guillotine Cutting. Journal of the Operational Research Society, 36: 297-306." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.485, + 0.482, + 0.526 + ], + "angle": 0, + "content": "Beasley, J. E. 1985c. An Exact Two-Dimensional Non-Guillotine Cutting Tree Search Procedure. Oper. Res., 33: 49-64." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.53, + 0.482, + 0.572 + ], + "angle": 0, + "content": "Beasley, J. E. 1985d. A note on solving large p-median problems. European Journal of Operational Research, 21: 270-273." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.576, + 0.482, + 0.617 + ], + "angle": 0, + "content": "Beasley, J. E. 1988. An algorithm for solving large capacitated warehouse location problems. European Journal of Operational Research, 33: 314-325." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.621, + 0.482, + 0.663 + ], + "angle": 0, + "content": "Beasley, J. E. 1990. OR-Library: Distributing Test Problems by Electronic Mail. Journal of the Operational Research Society, 41: 1069-1072." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.666, + 0.482, + 0.707 + ], + "angle": 0, + "content": "Beasley, J. E. 1992. A heuristic for Euclidean and rectilinear Steiner problems. European Journal of Operational Research, 58: 284-292." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.711, + 0.482, + 0.753 + ], + "angle": 0, + "content": "Beasley, J. E. 1993. Lagrangean heuristics for location problems. European Journal of Operational Research, 65: 383-399." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.757, + 0.482, + 0.798 + ], + "angle": 0, + "content": "Beasley, J. E. 2004. A population heuristic for constrained two-dimensional non-guillotine cutting. *Eur. J. Oper. 
Res.*, 156: 601-627." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.802, + 0.482, + 0.843 + ], + "angle": 0, + "content": "Beasley, J. E.; and Cao, B. 1996. A tree search algorithm for the crew scheduling problem. European Journal of Operational Research, 94: 517-526." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.847, + 0.482, + 0.888 + ], + "angle": 0, + "content": "Beasley, J. E.; and Christofides, N. 1989. An algorithm for the resource constrained shortest path problem. Networks, 19: 379-394." + }, + { + "type": "list", + "bbox": [ + 0.084, + 0.172, + 0.482, + 0.888 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.069, + 0.913, + 0.111 + ], + "angle": 0, + "content": "Beasley, J. E.; and Jornsten, K. 1992. Enhancing an algorithm for set covering problems. European Journal of Operational Research, 58: 293-300." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.114, + 0.913, + 0.156 + ], + "angle": 0, + "content": "Beasley, J. E.; Krishnamoorthy, M.; Sharaiha, Y. M.; and Abramson, D. 2000. Scheduling Aircraft Landings - The Static Case. Transp. Sci., 34: 180-197." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.159, + 0.913, + 0.215 + ], + "angle": 0, + "content": "Beasley, J. E.; Krishnamoorthy, M.; Sharaiha, Y. M.; and Abramson, D. 2004. Displacement problem and dynamically scheduling aircraft landings. Journal of the Operational Research Society, 55: 54-64." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.218, + 0.913, + 0.259 + ], + "angle": 0, + "content": "Bengio, Y.; Lodi, A.; and Prouvost, A. 2020. Machine Learning for Combinatorial Optimization: a Methodological Tour d'Horizon. arXiv:1811.06128." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.263, + 0.913, + 0.291 + ], + "angle": 0, + "content": "Berthold, T. 2006. Primal heuristics for mixed integer programs. Ph.D. thesis, Zuse Institute Berlin (ZIB)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.294, + 0.913, + 0.335 + ], + "angle": 0, + "content": "Bischoff, E. E. 2006. Three-dimensional packing of items with limited load bearing strength. Eur. J. Oper. Res., 168: 952-966." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.339, + 0.913, + 0.381 + ], + "angle": 0, + "content": "Bischoff, E. E.; and Ratcliff, M. S. W. 1995. Issues in the development of approaches to container loading. Omega-international Journal of Management Science, 23: 377-390." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.384, + 0.913, + 0.426 + ], + "angle": 0, + "content": "Biskup, D.; and Feldmann, M. 2001. Benchmarks for scheduling on a single machine against restrictive and unrestricted common due dates. Comput. Oper. Res., 28: 787-801." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.429, + 0.913, + 0.485 + ], + "angle": 0, + "content": "Böther, M.; Kißig, O.; Taraz, M.; Cohen, S.; Seidel, K.; and Friedrich, T. 2022. What's Wrong with Deep Learning in Tree Search for Combinatorial Optimization. In International Conference on Learning Representations." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.488, + 0.913, + 0.53 + ], + "angle": 0, + "content": "Cappanera, P.; and Trubian, M. 2001. A Local-Search-Based Heuristic for the Demand-Constrained Multidimensional Knapsack Problem. INFORMS J. Comput., 17: 82-98." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.532, + 0.913, + 0.588 + ], + "angle": 0, + "content": "Cappart, Q.; Chételat, D.; Khalil, E. B.; Lodi, A.; Morris, C.; and Veličković, P. 2023. Combinatorial Optimization and Reasoning with Graph Neural Networks. Journal of Machine Learning Research, 24(130): 1-61." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.591, + 0.913, + 0.633 + ], + "angle": 0, + "content": "Chakhlevitch, K.; and Glass, C. A. 2009. Scheduling reentrant jobs on parallel machines with a remote server. Comput. Oper. Res., 36: 2580-2589." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.636, + 0.913, + 0.705 + ], + "angle": 0, + "content": "Chan, J. S.; Chowdhury, N.; Jaffe, O.; Aung, J.; Sherburn, D.; Mays, E.; Starace, G.; Liu, K.; Maksin, L.; Patwardhan, T. A.; Weng, L.; and Mkadry, A. 2024. MLE-bench: Evaluating Machine Learning Agents on Machine Learning Engineering. ArXiv, abs/2410.07095." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.709, + 0.913, + 0.888 + ], + "angle": 0, + "content": "Chen, M.; Tworek, J.; Jun, H.; Yuan, Q.; Ponde, H.; Kaplan, J.; Edwards, H.; Burda, Y.; Joseph, N.; Brockman, G.; Ray, A.; Puri, R.; Krueger, G.; Petrov, M.; Khlaaf, H.; Sastry, G.; Mishkin, P.; Chan, B.; Gray, S.; Ryder, N.; Pavlov, M.; Power, A.; Kaiser, L.; Bavarian, M.; Winter, C.; Tillet, P.; Such, F. P.; Cummings, D. W.; Plappert, M.; Chantzis, F.; Barnes, E.; Herbert-Voss, A.; Guss, W. H.; Nichol, A.; Babuschkin, I.; Balaji, S.; Jain, S.; Carr, A.; Leike, J.; Achiam, J.; Misra, V.; Morikawa, E.; Radford, A.; Knight, M. M.; Brundage, M.; Murati, M.; Mayer, K.; Welinder, P.; McGrew, B.; Amodei, D.; McCandlish, S.; Sutskever, I.; and Zaremba, W. 2021. Evaluating Large Language Models Trained on Code. ArXiv, abs/2107.03374." + }, + { + "type": "list", + "bbox": [ + 0.518, + 0.069, + 0.913, + 0.888 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.069, + 0.482, + 0.098 + ], + "angle": 0, + "content": "Christofides, N.; and Beasley, J. E. 1984. The period routing problem. Networks, 14: 237-256." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.1, + 0.482, + 0.129 + ], + "angle": 0, + "content": "Christofides, N.; and Whitlock, C. 1977. An Algorithm for Two-Dimensional Cutting Problems. Oper. Res., 25: 30-44." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.131, + 0.482, + 0.173 + ], + "angle": 0, + "content": "Chu, P. C.; and Beasley, J. E. 1998. Constraint Handling in Genetic Algorithms: The Set Partitioning Problem. 
Journal of Heuristics, 4: 323-357." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.175, + 0.482, + 0.218 + ], + "angle": 0, + "content": "Crama, Y. 1997. Combinatorial optimization models for production scheduling in automated manufacturing systems. European Journal of Operational Research, 99(1): 136-153." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.219, + 0.482, + 0.262 + ], + "angle": 0, + "content": "DeepMind, G. 2025. Flash Thinking: Behind the Scenes of Gemini. https://deepmind.google/technologies/gemini/flash-thinking/. Accessed: 2025-03-24." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.263, + 0.482, + 0.292 + ], + "angle": 0, + "content": "DeepSeek-AI. 2024. DeepSeek-V3 Technical Report. ArXiv, abs/2412.19437." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.295, + 0.482, + 0.336 + ], + "angle": 0, + "content": "DeepSeek-AI. 2025a. DeepSeek-R1: Incentivizing Reasoning Capability in LLMs via Reinforcement Learning. arXiv:2501.12948." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.339, + 0.482, + 0.38 + ], + "angle": 0, + "content": "DeepSeek-AI. 2025b. DeepSeek-R1: Incentivizing Reasoning Capability in LLMs via Reinforcement Learning. ArXiv, abs/2501.12948." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.383, + 0.482, + 0.424 + ], + "angle": 0, + "content": "Erdos, P. L.; and Rényi, A. 1984. On the evolution of random graphs. Transactions of the American Mathematical Society, 286: 257-257." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.428, + 0.482, + 0.457 + ], + "angle": 0, + "content": "Falkenauer, E. 1996. A hybrid grouping genetic algorithm for bin packing. Journal of Heuristics, 2: 5-30." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.459, + 0.482, + 0.557 + ], + "angle": 0, + "content": "Fan, L.; Hua, W.; Li, L.; Ling, H.; and Zhang, Y. 2024. NPHardEval: Dynamic Benchmark on Reasoning Ability of Large Language Models via Complexity Classes. 
In Ku, L.-W.; Martins, A.; and Srikumar, V., eds., Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 4092-4114. Bangkok, Thailand: Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.558, + 0.482, + 0.6 + ], + "angle": 0, + "content": "Fleurent, C.; and Ferland, J. A. 1996. Genetic and hybrid algorithms for graph coloring. Annals of Operations Research, 63: 437-461." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.603, + 0.482, + 0.659 + ], + "angle": 0, + "content": "Gasse, M.; Chételat, D.; Ferroni, N.; Charlin, L.; and Lodi, A. 2019. Exact Combinatorial Optimization with Graph Convolutional Neural Networks. In Advances in Neural Information Processing Systems 32." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.661, + 0.482, + 0.786 + ], + "angle": 0, + "content": "Gottweis, J.; Weng, W.-H.; Daryin, A.; Tu, T.; Palepu, A.; Sirkovic, P.; Myaskovsky, A.; Weissenberger, F.; Rong, K.; Tanno, R.; Saab, K.; Popovici, D.; Blum, J.; Zhang, F.; Chou, K.; Hassidim, A.; Gokturk, B.; Vahdat, A.; Kohli, P.; Matias, Y.; Carroll, A.; Kulkarni, K.; Tomaev, N.; Guan, Y.; Dhillon, V.; Vaishnav, E. D.; Lee, B.; Costa, T. R. D.; Penad'es, J. R.; Peltz, G.; Xu, Y.; Pawlosky, A.; Karthikesalingam, A.; and Natarajan, V. 2025. Towards an AI co-scientist. *ArXiv*, abs/2502.18864." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.789, + 0.482, + 0.831 + ], + "angle": 0, + "content": "Gusfield, D. 1997. Algorithms on stings, trees, and sequences: Computer science and computational biology. *Acm Sigact News*, 28(4): 41-60." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.833, + 0.482, + 0.89 + ], + "angle": 0, + "content": "Hui, B.; Yang, J.; Cui, Z.; Yang, J.; Liu, D.; Zhang, L.; Liu, T.; Zhang, J.; Yu, B.; Dang, K.; Yang, A.; Men, R.; Huang, F.; Quan, S.; Ren, X.; Ren, X.; Zhou, J.; and Lin, J. 2024. Qwen2.5-Coder Technical Report. 
ArXiv, abs/2409.12186." + }, + { + "type": "list", + "bbox": [ + 0.084, + 0.069, + 0.482, + 0.89 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.069, + 0.914, + 0.098 + ], + "angle": 0, + "content": "Ivancic, N. J. 1988. An integer programming based heuristic approach to the three-dimensional packing problem." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.1, + 0.915, + 0.142 + ], + "angle": 0, + "content": "Jiang, Z.; Schmidt, D.; Srikanth, D.; Xu, D.; Kaplan, I.; Jacenko, D.; and Wu, Y. 2025. AIDE: AI-Driven Exploration in the Space of Code. ArXiv, abs/2502.13138." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.144, + 0.915, + 0.2 + ], + "angle": 0, + "content": "Jimenez, C. E.; Yang, J.; Wettig, A.; Yao, S.; Pei, K.; Press, O.; and Narasimhan, K. 2023. SWE-bench: Can Language Models Resolve Real-World GitHub Issues? ArXiv, abs/2310.06770." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.203, + 0.915, + 0.273 + ], + "angle": 0, + "content": "Kuang, Y.; Wang, J.; Liu, H.; Zhu, F.; Li, X.; Zeng, J.; HAO, J.; Li, B.; and Wu, F. 2024a. Rethinking Branching on Exact Combinatorial Optimization Solver: The First Deep Symbolic Discovery Framework. In *The Twelfth International Conference on Learning Representations*." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.275, + 0.915, + 0.387 + ], + "angle": 0, + "content": "Kuang, Y.; Wang, J.; Zhou, Y.; Li, X.; Zhu, F.; Hao, J.; and Wu, F. 2024b. Towards General Algorithm Discovery for Combinatorial Optimization: Learning Symbolic Branching Policy from Bipartite Graph. In Salakhutdinov, R.; Kolter, Z.; Heller, K.; Weller, A.; Oliver, N.; Scarlett, J.; and Berkenkamp, F., eds., Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, 25623-25641. PMLR." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.389, + 0.913, + 0.432 + ], + "angle": 0, + "content": "Laporte, G. 1992. 
The traveling salesman problem: An overview of exact and approximate algorithms. European Journal of Operational Research, 59(2): 231-247." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.434, + 0.913, + 0.49 + ], + "angle": 0, + "content": "Li, Y.; Guo, J.; Wang, R.; and Yan, J. 2023. From Distribution Learning in Training to Gradient Search in Testing for Combinatorial Optimization. In Neural Information Processing Systems." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.493, + 0.913, + 0.548 + ], + "angle": 0, + "content": "Liu, F.; Tong, X.; Yuan, M.; Lin, X.; Luo, F.; Wang, Z.; Lu, Z.; and Zhang, Q. 2024. Evolution of Heuristics: Towards Efficient Automatic Algorithm Design Using Large Language Model. In ICML." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.551, + 0.913, + 0.594 + ], + "angle": 0, + "content": "López, C. O.; and Beasley, J. E. 2016. A formulation space search heuristic for packing unequal circles in a fixed size circular container. Eur. J. Oper. Res., 251: 64-73." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.596, + 0.913, + 0.639 + ], + "angle": 0, + "content": "López, C. O.; and Beasley, J. E. 2018. Packing unequal rectangles and squares in a fixed size circular container using formulation space search. Comput. Oper. Res., 94: 106-117." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.641, + 0.913, + 0.71 + ], + "angle": 0, + "content": "Madaan, A.; Tandon, N.; Gupta, P.; Hallinan, S.; Gao, L.; Wegreffe, S.; Alon, U.; Dziri, N.; Prabhumoye, S.; Yang, Y.; Welleck, S.; Majumder, B. P.; Gupta, S.; Yazdanbakhsh, A.; and Clark, P. 2023. Self-Refine: Iterative Refinement with Self-Feedback. ArXiv, abs/2303.17651." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.712, + 0.913, + 0.741 + ], + "angle": 0, + "content": "Meta. 2024. The Llama 3 Herd of Models. ArXiv, abs/2407.21783." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.744, + 0.913, + 0.799 + ], + "angle": 0, + "content": "Mingers, J. C.; and O'Brien, F. A. 1995. Creating student groups with similar characteristics: A heuristic approach. Omega-international Journal of Management Science, 23: 313-321." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.802, + 0.913, + 0.844 + ], + "angle": 0, + "content": "Motwani, R.; and Raghavan, P. 2013. Randomized Algorithms. USA: Cambridge University Press. ISBN 0511814070." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.847, + 0.913, + 0.89 + ], + "angle": 0, + "content": "Novikov, A.; V~u, N.; Eisenberger, M.; Dupont, E.; Huang, P.-S.; Wagner, A. Z.; Shirobokov, S.; Kozlovskii, B. M.; Ruiz, F. J. R.; Mehrabian, A.; Kumar, M. P.; See, A.; Chaudhuri, S.;" + }, + { + "type": "list", + "bbox": [ + 0.518, + 0.069, + 0.915, + 0.89 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.069, + 0.482, + 0.113 + ], + "angle": 0, + "content": "Holland, G.; Davies, A.; Nowozin, S.; Kohli, P.; Balog, M.; and Deepmind, G. 2025. AlphaEvolve: A coding agent for scientific and algorithmic discovery. *ArXiv*, abs/2506.13131." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.114, + 0.482, + 0.142 + ], + "angle": 0, + "content": "OpenAI. 2024a. GPT-4o System Card. ArXiv, abs/2410.21276." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.145, + 0.482, + 0.161 + ], + "angle": 0, + "content": "OpenAI. 2024b. OpenAI o1 System Card. arXiv:2412.16720." + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.163, + 0.392, + 0.178 + ], + "angle": 0, + "content": "OpenAI. 2025. OpenAI o3-mini System Card." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.181, + 0.482, + 0.223 + ], + "angle": 0, + "content": "Osman, I. H. 1995. Heuristics for the generalised assignment problem: simulated annealing and tabu search approaches. Operations-Research-Spektrum, 17: 211-225." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.225, + 0.482, + 0.281 + ], + "angle": 0, + "content": "Osman, I. H.; and Christofides, N. 1994. Capacitated clustering problems by hybrid simulated annealing and tabu search. International Transactions in Operational Research, 1: 317-336." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.284, + 0.482, + 0.327 + ], + "angle": 0, + "content": "Papadimitriou, C.; and Steiglitz, K. 1982. Combinatorial Optimization: Algorithms and Complexity, volume 32. Courier Corporation. ISBN 0-13-152462-3." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.329, + 0.482, + 0.373 + ], + "angle": 0, + "content": "Petersen, C. C. 1967. Computational Experience with Variants of the Balas Algorithm Applied to the Selection of R&D Projects. Management Science, 13: 736-750." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.375, + 0.482, + 0.432 + ], + "angle": 0, + "content": "Qiu, R.; Sun, Z.; and Yang, Y. 2022. DIMES: A Differentiable Meta Solver for Combinatorial Optimization Problems. In Oh, A. H.; Agarwal, A.; Belgrave, D.; and Cho, K., eds., Advances in Neural Information Processing Systems." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.434, + 0.482, + 0.476 + ], + "angle": 0, + "content": "Qwen. 2025. QwQ-32B: Embracing the Power of Reinforcement Learning. https://qwenlm.github.io/blog/qwq-32b/. Accessed: 2025-03-24." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.479, + 0.482, + 0.564 + ], + "angle": 0, + "content": "Ramamonjison, R.; Yu, T. T.; Li, R.; Li, H.; Carenini, G.; Ghaddar, B.; He, S.; Mostajabdaveh, M.; Banitalebi-Dehkordi, A.; Zhou, Z.; and Zhang, Y. 2023. NL4Opt Competition: Formulating Optimization Problems Based on Their Natural Language Descriptions. In Neural Information Processing Systems." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.566, + 0.482, + 0.608 + ], + "angle": 0, + "content": "Ratcliff, M. S. W.; and Bischoff, E. E. 1998. 
Allowing for weight considerations in container loading. Operations-Research-Spektrum, 20: 65-71." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.611, + 0.482, + 0.694 + ], + "angle": 0, + "content": "Romera-Paredes, B.; Barekatain, M.; Novikov, A.; Balog, M.; Kumar, M. P.; Dupont, E.; Ruiz, F. J. R.; Ellenberg, J. S.; Wang, P.; Fawzi, O.; Kohli, P.; Fawzi, A.; Grochow, J.; Lodi, A.; Mouret, J.-B.; Ringer, T.; and Yu, T. 2023. Mathematical discoveries from program search with large language models. Nature, 625: 468 - 475." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.698, + 0.482, + 0.755 + ], + "angle": 0, + "content": "Shinn, N.; Cassano, F.; Labash, B.; Gopinath, A.; Narasimhan, K.; and Yao, S. 2023. Reflexion: language agents with verbal reinforcement learning. In Neural Information Processing Systems." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.757, + 0.482, + 0.799 + ], + "angle": 0, + "content": "Sun, Z.; and Yang, Y. 2023. DIFUSCO: Graph-based Diffusion Solvers for Combinatorial Optimization. ArXiv, abs/2302.08224." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.802, + 0.482, + 0.831 + ], + "angle": 0, + "content": "Taillard, E. 1993. Benchmarks for basic scheduling problems. European Journal of Operational Research, 64(2): 278-285." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.833, + 0.482, + 0.89 + ], + "angle": 0, + "content": "Tang, J.; Zhang, Q.; Li, Y.; Chen, N.; and Li, J. 2025. GraphArena: Evaluating and Improving Large Language Models on Graph Computation. In International Conference on Learning Representations." + }, + { + "type": "list", + "bbox": [ + 0.084, + 0.069, + 0.482, + 0.89 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.069, + 0.916, + 0.139 + ], + "angle": 0, + "content": "Vogiatzis, C.; and Pardalos, P. 2013. Combinatorial optimization in transportation and logistics networks, volume 2-5, 673-722. Germany: Springer. ISBN 9781441979964. 
Publisher Copyright: \\(\\text{©}\\) Springer Science+Business Media New York 2013. All rights are reserved." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.141, + 0.915, + 0.169 + ], + "angle": 0, + "content": "xAI. 2025. Grok-3 and the Next Phase of xAI. https://x.ai/news/grok-3. Accessed: 2025-03-24." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.172, + 0.915, + 0.242 + ], + "angle": 0, + "content": "Xiao, Z.; Zhang, D.; Wu, Y.; Xu, L.; Wang, Y. J.; Han, X.; Fu, X.; Zhong, T.; Zeng, J.; Song, M.; and Chen, G. 2024a. Chain-of-Experts: When LLMs Meet Complex Operations Research Problems. In International Conference on Learning Representations." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.244, + 0.915, + 0.315 + ], + "angle": 0, + "content": "Xiao, Z.; Zhang, D.; Wu, Y.; Xu, L.; Wang, Y. J.; Han, X.; Fu, X.; Zhong, T.; Zeng, J.; Song, M.; and Chen, G. 2024b. Chain-of-Experts: When LLMs Meet Complex Operations Research Problems. In The Twelfth International Conference on Learning Representations." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.317, + 0.915, + 0.387 + ], + "angle": 0, + "content": "Yang, Z.; Wang, Y.; Huang, Y.; Guo, Z.; Shi, W.; Han, X.; Feng, L.; Song, L.; Liang, X.; and Tang, J. 2025a. OptiBench Meets ReSocratic: Measure and Improve LLMs for Optimization Modeling. In The Thirteenth International Conference on Learning Representations." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.389, + 0.915, + 0.46 + ], + "angle": 0, + "content": "Yang, Z.; Wang, Y.; Huang, Y.; Guo, Z.; Shi, W.; Han, X.; Feng, L.; Song, L.; Liang, X.; and Tang, J. 2025b. OptiBench Meets ReSocratic: Measure and Improve LLMs for Optimization Modeling. In The Thirteenth International Conference on Learning Representations." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.461, + 0.913, + 0.504 + ], + "angle": 0, + "content": "Yao, S.; Zhao, J.; Yu, D.; Du, N.; Shafran, I.; Narasimhan, K.; and Cao, Y. 2022. 
ReAct: Synergizing Reasoning and Acting in Language Models. ArXiv, abs/2210.03629." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.506, + 0.915, + 0.577 + ], + "angle": 0, + "content": "Ye, H.; Wang, J.; Cao, Z.; Berto, F.; Hua, C.; Kim, H.; Park, J.; and Song, G. 2024. ReEvo: Large Language Models as Hyper-Heuristics with Reflective Evolution. In The Thirty-eighth Annual Conference on Neural Information Processing Systems." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.578, + 0.913, + 0.621 + ], + "angle": 0, + "content": "Zheng, Z.; Xie, Z.; Wang, Z.; and Hooi, B. 2025. Monte Carlo Tree Search for Comprehensive Exploration in LLM-Based Automatic Heuristic Design. ArXiv, abs/2501.08603." + }, + { + "type": "list", + "bbox": [ + 0.518, + 0.069, + 0.916, + 0.621 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "title", + "bbox": [ + 0.145, + 0.068, + 0.419, + 0.086 + ], + "angle": 0, + "content": "Problem Description and Scores" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.089, + 0.214, + 0.107 + ], + "angle": 0, + "content": "Aircraft landing" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.11, + 0.481, + 0.318 + ], + "angle": 0, + "content": "The problem is to schedule landing times for a set of planes across one or more runways such that each landing occurs within its prescribed time window and all pairwise separation requirements are satisfied; specifically, if plane i lands at or before plane j on the same runway, then the gap between their landing times must be at least the specified separation time provided in the input. In a multiple-runway setting, each plane must also be assigned to one runway, and if planes land on different runways, the separation requirement (which may differ) is applied accordingly. Each plane has an earliest, target, and latest landing time, with penalties incurred proportionally for landing before (earliness) or after (lateness) its target time. 
The objective is to minimize the total penalty cost while ensuring that no constraints are violated—if any constraint is breached, the solution receives no score." + }, + { + "type": "table", + "bbox": [ + 0.086, + 0.332, + 0.378, + 0.474 + ], + "angle": 0, + "content": "
MethodScore
Classical Solver0.5985295365478638
BestOfN0.8057479826999232
Refine0.7503157815146175
FunSearch0.9688863336568327
AIDE0.800637046201484
ReEvo0.9134454710810906
MCTS0.801655240273729
EoH0.8019818529389835
" + }, + { + "type": "table_caption", + "bbox": [ + 0.198, + 0.483, + 0.365, + 0.499 + ], + "angle": 0, + "content": "Table 3: Aircraft landing" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.53, + 0.248, + 0.546 + ], + "angle": 0, + "content": "Assignment problem" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.55, + 0.481, + 0.691 + ], + "angle": 0, + "content": "The Assignment Problem involves optimally assigning \\( n \\) items to \\( n \\) agents based on a provided \\( n \\) imes \\( n \\) cost matrix, where each entry \\( extcost\\_matrix[i][j] \\) denotes the cost of assigning item \\( i + 1 \\) to agent \\( j + 1 \\). The goal is to identify a permutation—each item assigned exactly one agent—that minimizes the total assignment cost. Formally, this is an optimization problem to find a permutation \\( \\pi \\) of agents such that the total cost \\( \\sum i = 1^n extcost\\_matrix[i - 1][\\pi(i) - 1] \\) is minimized. The solution returned includes both the minimal total cost and the corresponding optimal assignments." + }, + { + "type": "table", + "bbox": [ + 0.086, + 0.704, + 0.273, + 0.846 + ], + "angle": 0, + "content": "
MethodScore
Classical Solver1
BestOfN1
Refine1
FunSearch1
AIDE1
ReEvo1
MCTS1
EoH1
" + }, + { + "type": "table_caption", + "bbox": [ + 0.183, + 0.855, + 0.381, + 0.87 + ], + "angle": 0, + "content": "Table 4: Assignment problem" + }, + { + "type": "title", + "bbox": [ + 0.518, + 0.069, + 0.68, + 0.085 + ], + "angle": 0, + "content": "Assortment problem" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.09, + 0.915, + 0.284 + ], + "angle": 0, + "content": "This optimization problem involves arranging a set of rectangular pieces within available stock rectangles to minimize the overall waste area percentage. Each stock rectangle has a defined area, and each piece—which may be rotated by \\(90^{\\circ}\\)—must be fully contained within a stock without overlapping with other pieces. Additionally, each piece type has specific total minimum and maximum placement limits. You have access to an unlimited number of stocks for each type, but you may use at most two stock types. The objective is to achieve the lowest possible waste area percentage, defined as the ratio of unused area to the total stock area. Solutions must ensure efficient resource utilization while satisfying all geometric and quantity constraints. Any violation of these constraints results in no score." + }, + { + "type": "table", + "bbox": [ + 0.519, + 0.296, + 0.821, + 0.439 + ], + "angle": 0, + "content": "
MethodScore
Classical Solver0.3222852468406736
BestOfN0.36161788534475603
Refine0.10475936163370339
FunSearch0.3622886282031154
AIDE0.1698107561339298
ReEvo0.24290833308629933
MCTS0.1757439194813797
EoH0.2519474328966603
" + }, + { + "type": "table_caption", + "bbox": [ + 0.618, + 0.448, + 0.813, + 0.463 + ], + "angle": 0, + "content": "Table 5: Assortment problem" + }, + { + "type": "title", + "bbox": [ + 0.517, + 0.498, + 0.755, + 0.515 + ], + "angle": 0, + "content": "Bin packing - one-dimensional" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.52, + 0.915, + 0.688 + ], + "angle": 0, + "content": "The **one-dimensional bin packing problem** seeks to minimize the number of bins required to pack a given set of items while ensuring that the sum of item sizes within each bin does not exceed the specified bin capacity. Given a test case with an identifier ('id'), a fixed 'bin_capacity', and a list of 'num_items' with their respective sizes ('items'), the objective is to find a packing arrangement that uses the least number of bins. The solution is evaluated based on the total 'num_bins' used, with invalid solutions (e.g., missing or duplicated items, or bins exceeding capacity) incurring a inf heavy penalty. The output must include the number of bins used and a valid assignment of item indices to bins." + }, + { + "type": "table", + "bbox": [ + 0.519, + 0.702, + 0.812, + 0.845 + ], + "angle": 0, + "content": "
MethodScore
Classical Solver0.9628049317089281
BestOfN0.8933315064694979
Refine0.9870315022407082
FunSearch0.9557154223933677
AIDE0.8366913237780297
ReEvo0.9492158360156572
MCTS0.9396436307329097
EoH0.9693475618912389
" + }, + { + "type": "table_caption", + "bbox": [ + 0.584, + 0.853, + 0.848, + 0.869 + ], + "angle": 0, + "content": "Table 6: Bin packing - one-dimensional" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.088, + 0.069, + 0.331, + 0.084 + ], + "angle": 0, + "content": "Capacitated warehouse location" + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.087, + 0.48, + 0.337 + ], + "angle": 0, + "content": "The Capacitated Warehouse Location Problem with Splittable Demand aims to determine which warehouses to open and how to allocate portions of customer demands among these warehouses in order to minimize total costs. Given a set of potential warehouse locations, each with a fixed opening cost and capacity limit, and a set of customers with individual demands and associated per-unit assignment costs to each warehouse, the objective is to decide which warehouses to open and how to distribute each customer's demand among these open warehouses. The allocation must satisfy the constraint that the sum of portions assigned to each customer equals their total demand, and that the total demand allocated to any warehouse does not exceed its capacity. The optimization seeks to minimize the sum of fixed warehouse opening costs and the total per-unit assignment costs. However, if any solution violates these constraints (i.e., a customer's demand is not fully satisfied or a warehouse's capacity is exceeded), then no score is provided." + }, + { + "type": "table", + "bbox": [ + 0.086, + 0.351, + 0.375, + 0.491 + ], + "angle": 0, + "content": "
MethodScore
Classical Solver0.6976400141361688
BestOfN0.0
Refine0.7518838886310322
FunSearch0.7196713948459038
AIDE0.6647355906610447
ReEvo0.6715266955394039
MCTS0.6891495773105485
EoH0.7502493181324346
" + }, + { + "type": "table_caption", + "bbox": [ + 0.15, + 0.503, + 0.414, + 0.516 + ], + "angle": 0, + "content": "Table 7: Capacitated warehouse location" + }, + { + "type": "title", + "bbox": [ + 0.088, + 0.546, + 0.314, + 0.562 + ], + "angle": 0, + "content": "Common due date scheduling" + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.565, + 0.48, + 0.69 + ], + "angle": 0, + "content": "Given floor, where \\( h \\) is a predefined fraction (defaulting to 0.6). The goal is to determine an optimal job sequence that minimizes the penalty, calculated as follows: for each job, if its completion time \\( C \\) is earlier than \\( d \\), an earliness penalty of \\( aimes(d - C) \\) is incurred; if \\( C \\) exceeds \\( d \\), a tardiness penalty of \\( bimes(C - d) \\) is applied; otherwise, no penalty is incurred. The problem requires finding a permutation of job indices (1-based) that minimizes the total penalty. The evaluation metric sums these penalties for a given schedule." + }, + { + "type": "table", + "bbox": [ + 0.086, + 0.704, + 0.375, + 0.845 + ], + "angle": 0, + "content": "
MethodScore
Classical Solver0.9187662046144239
BestOfN0.97731110557282
Refine0.9776844987221935
FunSearch0.976604327923604
AIDE0.6291657473867996
ReEvo0.9743199070415761
MCTS0.8838457578182489
EoH0.9773286503168127
" + }, + { + "type": "table_caption", + "bbox": [ + 0.157, + 0.856, + 0.408, + 0.87 + ], + "angle": 0, + "content": "Table 8: Common due date scheduling" + }, + { + "type": "title", + "bbox": [ + 0.521, + 0.069, + 0.75, + 0.084 + ], + "angle": 0, + "content": "Constrained guillotine cutting" + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.087, + 0.913, + 0.393 + ], + "angle": 0, + "content": "The problem involves optimizing the guillotine feasible placement of a set of rectangular pieces on a given stock sheet to maximize total value. Each piece type is characterized by its length, width, an upper bound on the number of times it may appear in the final cutting pattern, and an assigned value. Orientation of the pieces is fixed (the edges of the pieces are parallel to the edges of the sheet). The task is to select and place pieces such that each lies completely within the boundaries of the stock sheet, no two pieces overlap, and the number of pieces of any type does not exceed its specified maximum. A set of placements is considered guillotine feasible if there exists at least one straight cut (vertical or horizontal) that does not slice through any rectangle, and the property holds recursively on the resulting subregions. Empty regions or regions exactly matching a placed piece are considered valid. The objective is to maximize the sum of the values of the placed pieces; however, if any spatial or count constraint is violated, the solution is deemed invalid. The output is defined as a dictionary reporting the total value and a list of placements, with each placement specified by the piece type index, x and y coordinates, placed dimensions, and orientation flag." + }, + { + "type": "table", + "bbox": [ + 0.52, + 0.406, + 0.809, + 0.547 + ], + "angle": 0, + "content": "
MethodScore
Classical Solver0.7844900098230463
BestOfN0.0
Refine0.981513704843915
FunSearch0.956424099109148
AIDE0.9102922923098641
ReEvo0.0
MCTS0.0
EoH0.0
" + }, + { + "type": "table_caption", + "bbox": [ + 0.59, + 0.558, + 0.842, + 0.572 + ], + "angle": 0, + "content": "Table 9: Constrained guillotine cutting" + }, + { + "type": "title", + "bbox": [ + 0.521, + 0.602, + 0.785, + 0.618 + ], + "angle": 0, + "content": "Constrained non-guillotine cutting" + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.621, + 0.913, + 0.8 + ], + "angle": 0, + "content": "The constrained non-guillotine cutting problem involves optimally arranging rectangular pieces onto a single rectangular stock with fixed dimensions (stock_length and stock_width). Each piece type has defined length, width, value, and minimum and maximum usage constraints. The optimization goal is to maximize the total value of all placed pieces, subject to constraints that each piece is entirely within stock boundaries, pieces do not overlap, each piece type's usage falls within its specified [min, max] range, and pieces may optionally be rotated by \\(90^{\\circ}\\). The solution returns a set of placements indicating piece type, bottom-left coordinates \\((\\mathrm{x},\\mathrm{y})\\), and rotation status. If any constraint is violated, the solution receives no score." + }, + { + "type": "title", + "bbox": [ + 0.521, + 0.814, + 0.66, + 0.83 + ], + "angle": 0, + "content": "Container loading" + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.833, + 0.913, + 0.889 + ], + "angle": 0, + "content": "Solves a container loading problem: Given a 3D container of specified dimensions and multiple box types—each defined by dimensions, orientation constraints, and available quantity—the goal is to optimally place these boxes within" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.086, + 0.066, + 0.375, + 0.207 + ], + "angle": 0, + "content": "
MethodScore
Classical Solver0.5585076432266227
BestOfN0.8760613343780126
Refine0.99138085452391
FunSearch0.9623447685846964
AIDE0.8555320134962818
ReEvo0.9264764236682984
MCTS0.7944732650186651
EoH0.9106930512513293
" + }, + { + "type": "table_caption", + "bbox": [ + 0.134, + 0.217, + 0.428, + 0.233 + ], + "angle": 0, + "content": "Table 10: Constrained non-guillotine cutting" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.266, + 0.48, + 0.378 + ], + "angle": 0, + "content": "the container to maximize the volume utilization ratio. Each box placement must respect orientation constraints (vertical alignment flags), fit entirely within container boundaries, and avoid overlaps. The solution returns precise coordinates and orientations for each box placement, quantified by a volume utilization score calculated as the total volume of placed boxes divided by the container volume. Invalid placements result in a score of 0.0." + }, + { + "type": "table", + "bbox": [ + 0.086, + 0.395, + 0.385, + 0.537 + ], + "angle": 0, + "content": "
MethodScore
Classical Solver0.09700224776623062
BestOfN0.8163545342051534
Refine0.18895711345505883
FunSearch0.23070987019597894
AIDE0.7592850816892841
ReEvo0.716081346719743
MCTS0.5451472798828618
EoH0.7795824394970114
" + }, + { + "type": "table_caption", + "bbox": [ + 0.188, + 0.546, + 0.375, + 0.561 + ], + "angle": 0, + "content": "Table 11: Container loading" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.61, + 0.411, + 0.627 + ], + "angle": 0, + "content": "Container loading with weight restrictions" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.639, + 0.481, + 0.89 + ], + "angle": 0, + "content": "The Container Loading with Weight Restrictions problem aims to maximize the utilization of a container's volume by selecting and strategically placing boxes inside it. Given a container with specified dimensions (length, width, height) and multiple types of boxes, each characterized by their dimensions, quantities, weights, and load-bearing constraints, the optimization goal is to determine the placement and orientation of these boxes (with each box allowed three possible orientations) that maximizes the ratio of total occupied box volume to container volume. The solution must strictly adhere to spatial constraints (boxes must fit entirely within the container without overlapping), load-bearing constraints (boxes must support the weight of boxes stacked above them according to given limits), and orientation restrictions. The optimization quality is evaluated by the achieved utilization metric, defined as the total volume of successfully placed boxes divided by the container volume; if any constraint is violated, the utilization score is zero." + }, + { + "type": "table", + "bbox": [ + 0.52, + 0.066, + 0.828, + 0.208 + ], + "angle": 0, + "content": "
MethodScore
Classical Solver0.009225308452359507
BestOfN0.13669723873453465
Refine0.07941319051933145
FunSearch0.2919729304847129
AIDE0.12860429344072807
ReEvo0.1420943670465572
MCTS0.04806324649022297
EoH0.051972410039456414
" + }, + { + "type": "table_caption", + "bbox": [ + 0.542, + 0.217, + 0.887, + 0.233 + ], + "angle": 0, + "content": "Table 12: Container loading with weight restrictions" + }, + { + "type": "title", + "bbox": [ + 0.517, + 0.265, + 0.693, + 0.282 + ], + "angle": 0, + "content": "Corporate structuring" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.293, + 0.915, + 0.42 + ], + "angle": 0, + "content": "Given N countries, each defined by: \\(\\bullet\\) a tax code (1: Exemption, 2: Deduction, 3: Source-by-source Pooling, 4: Worldwide Pooling), \\(\\bullet\\) a foreign income tax rate, \\(\\bullet\\) a domestic income tax rate, and \\(\\bullet\\) a profit, and a withholding tax matrix W (where W[i][j] is the rate on dividends from country i to j), construct a valid tree-structured corporate hierarchy (directed, acyclic, connected) rooted at a designated target (whose parent is 0) such that every country with profit \\(>0\\) appears exactly once." + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.422, + 0.915, + 0.679 + ], + "angle": 0, + "content": "For each country i, define S as the set of nodes in its subtree (note the subtree includes itself) with a positive profit. Also consider the set of child nodes C_i. If i is not a root country but in the tree, it will send all its income (after tax) to its parent j. Denote this amount as F[i][j]. Assume the total income after domestic tax and withholding tax for country i is: domestic_iincome_i * (1 - domestic_rate_i) + (\\sum_{k \\in C_i} F[k][i] * (1 - W[k][i])) The extra foreign tax under different tax code is defined as follows: 1. No extra tax. 2. Foreign income tax from the child nodes: foreign_iincome_rate_i * (\\sum_{k \\in C_i} F[k][i] * (1 - W[k][i])) 3. Foreign income tax computed from the source nodes in each child's subtree: \\(\\sum_{k \\in C_i} \\max(0, F[k][i] * (1 - W[k][i]) - (1 - foreign_iincome_rate_i) * (\\sum_{s \\in S_k} domestic_iincome_s))\\) 4. 
Foreign income tax from all source nodes in the subtree, excluding itself: \\(\\max(0, \\sum_{k \\in C_i} F[k][i] * (1 - W[k][i]) - (1 - foreign_iincome_rate_i) * (\\sum_{s \\in S_i} domestic_iincome_s) - domestic_iincome_i)\\)" + }, + { + "type": "table", + "bbox": [ + 0.52, + 0.697, + 0.81, + 0.838 + ], + "angle": 0, + "content": "
MethodScore
Classical Solver0.9450572839481785
BestOfN0.9450572839481785
Refine0.9726337326585759
FunSearch0.777775452943618
AIDE0.9450572839481785
ReEvo0.5014939649568603
MCTS0.9844897288603699
EoH0.9431107030735252
" + }, + { + "type": "table_caption", + "bbox": [ + 0.61, + 0.849, + 0.82, + 0.864 + ], + "angle": 0, + "content": "Table 13: Corporate structuring" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.087, + 0.069, + 0.216, + 0.084 + ], + "angle": 0, + "content": "Crew scheduling" + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.088, + 0.48, + 0.296 + ], + "angle": 0, + "content": "The Crew Scheduling Problem involves assigning each task—with defined start and finish times—to exactly one crew, aiming to minimize the total transition costs between consecutive tasks. Each crew's schedule must satisfy three constraints: tasks within a crew must not overlap; valid transitions (with associated costs) must exist between every consecutive pair of tasks; and the crew's total duty time (from the start of the first task to the finish of the last) cannot exceed a specified time limit. Additionally, no more than \\( \\mathrm{K} \\) crews can be used to cover all tasks. Solutions violating any of these constraints are considered infeasible and receive no score. The optimization objective is therefore to determine assignments of tasks to no more than \\( \\mathrm{K} \\) crews that minimize the sum of transition costs while strictly adhering to all constraints, yielding a feasible and cost-effective scheduling solution." + }, + { + "type": "table", + "bbox": [ + 0.086, + 0.309, + 0.384, + 0.45 + ], + "angle": 0, + "content": "
MethodScore
Classical Solver0.45498811952880935
BestOfN0.4483461488661745
Refine0.6690343590115082
FunSearch0.5536756258756895
AIDE0.44095505708697136
ReEvo0.45225267224663634
MCTS0.4446817469828879
EoH0.5864457661923881
" + }, + { + "type": "table_caption", + "bbox": [ + 0.195, + 0.462, + 0.37, + 0.475 + ], + "angle": 0, + "content": "Table 14: Crew scheduling" + }, + { + "type": "title", + "bbox": [ + 0.087, + 0.504, + 0.327, + 0.521 + ], + "angle": 0, + "content": "Equitable partitioning problem" + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.524, + 0.48, + 0.69 + ], + "angle": 0, + "content": "The task is to partition a set of individuals—each characterized by multiple binary attributes—into exactly 8 groups such that the distribution of attribute values is as balanced as possible across these groups. For each attribute, count the number of individuals with a '1' in each group. The optimization objective is to minimize the total imbalance, which is defined as follows: for each attribute, calculate the absolute differences between the count in each group and the mean count across all groups, take the average of these differences, and then sum these averages over all attributes. The goal is to determine a group assignment for each individual that achieves the lowest possible total imbalance score." + }, + { + "type": "table", + "bbox": [ + 0.086, + 0.704, + 0.375, + 0.844 + ], + "angle": 0, + "content": "
MethodScore
Classical Solver1.0
BestOfN1.0
Refine1.0
FunSearch1.0
AIDE0.7777777777777778
ReEvo0.7777777777777778
MCTS1.0
EoH1.0
" + }, + { + "type": "table_caption", + "bbox": [ + 0.148, + 0.855, + 0.416, + 0.87 + ], + "angle": 0, + "content": "Table 15: Equitable partitioning problem" + }, + { + "type": "title", + "bbox": [ + 0.521, + 0.069, + 0.726, + 0.084 + ], + "angle": 0, + "content": "Euclidean Steiner problem" + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.087, + 0.914, + 0.28 + ], + "angle": 0, + "content": "Given a set of 2D points (terminals), the goal of the Euclidean Steiner Problem is to compute a tree connecting all terminals with minimum total length. The total length is measured as the sum of Euclidean distances (where the Euclidean distance between two points \\((x1, y1)\\) and \\((x2, y2)\\) is \\(sqrt((x1 - x2)^2 + (y1 - y2)^2)\\)). Unlike a Minimum Spanning Tree (MST) computed solely on the given terminals, a Steiner tree may introduce extra points, called Steiner points, to reduce the overall length. In this formulation, it is assumed that the final candidate tree's total length is given by the MST computed on the union of the original terminals and the reported Steiner points. A lower ratio (candidate_tree_length/MST ORIGINAL_length) indicates a better solution." + }, + { + "type": "table", + "bbox": [ + 0.52, + 0.293, + 0.818, + 0.433 + ], + "angle": 0, + "content": "
MethodScore
Classical Solver0.9779703480188361
BestOfN0.6291391910535526
Refine0.688025642110573
FunSearch0.6968176110449371
AIDE0.04483890014026932
ReEvo0.5469067768233761
MCTS0.43093954323065975
EoH0.5917817000598826
" + }, + { + "type": "table_caption", + "bbox": [ + 0.596, + 0.444, + 0.836, + 0.458 + ], + "angle": 0, + "content": "Table 16: Euclidean Steiner problem" + }, + { + "type": "title", + "bbox": [ + 0.521, + 0.49, + 0.686, + 0.506 + ], + "angle": 0, + "content": "Flow shop scheduling" + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.509, + 0.912, + 0.648 + ], + "angle": 0, + "content": "Given \\( n \\) jobs and \\( m \\) machines, the goal of the flow shop scheduling problem is to determine the optimal job sequence that minimizes the makespan, i.e., the total time required to complete all jobs on all machines. Each job follows the same machine order, and the processing times are specified in an \\( n \\)imes \\( m \\) matrix. The output is a permutation of job indices representing the processing order. If the constraints are not satisfied (e.g., invalid job sequencing), the solution receives no score. The objective is to optimize the makespan using the classical flow shop recurrence." + }, + { + "type": "table", + "bbox": [ + 0.52, + 0.661, + 0.809, + 0.802 + ], + "angle": 0, + "content": "
MethodScore
Classical Solver0.9222700445897257
BestOfN0.874217493803887
Refine0.8463439348165006
FunSearch0.8537338049420798
AIDE0.9144895115672386
ReEvo0.8424667927400846
MCTS0.9242143967817102
EoH0.940154419652199
" + }, + { + "type": "table_caption", + "bbox": [ + 0.612, + 0.813, + 0.82, + 0.827 + ], + "angle": 0, + "content": "Table 17: Flow shop scheduling" + }, + { + "type": "title", + "bbox": [ + 0.521, + 0.856, + 0.77, + 0.872 + ], + "angle": 0, + "content": "Generalised assignment problem" + }, + { + "type": "text", + "bbox": [ + 0.521, + 0.875, + 0.789, + 0.889 + ], + "angle": 0, + "content": "Generalized Assignment Problem (GAP)" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.082, + 0.069, + 0.482, + 0.264 + ], + "angle": 0, + "content": "The Generalized Assignment Problem (GAP) involves assigning \\( n \\) jobs to \\( m \\) agents such that each job is assigned to exactly one agent, and the resource consumption for each agent does not exceed its capacity. The objective is to optimize the total cost based on the problem type. When formulated as a maximization problem, the goal is to maximize the total cost; when formulated as a minimization problem, the goal is to minimize the total cost. Given a cost matrix (representing the cost of assigning jobs to agents), a consumption matrix (indicating the resource usage per assignment), and capacities (the resource limits for each agent), the task is to find a valid assignment that meets the capacity constraints while optimizing the total cost as specified by the problem indicator." + }, + { + "type": "table", + "bbox": [ + 0.086, + 0.273, + 0.376, + 0.415 + ], + "angle": 0, + "content": "
MethodScore
Classical Solver1.000509368510793
BestOfN1.000152715871272
Refine0.9997973477884884
FunSearch0.9360910283983461
AIDE1.000152715871272
ReEvo1.0002083856508814
MCTS1.0001026538510593
EoH0.9793902133221158
" + }, + { + "type": "table_caption", + "bbox": [ + 0.14, + 0.424, + 0.424, + 0.44 + ], + "angle": 0, + "content": "Table 18: Generalised assignment problem" + }, + { + "type": "title", + "bbox": [ + 0.082, + 0.467, + 0.218, + 0.484 + ], + "angle": 0, + "content": "Graph colouring" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.485, + 0.481, + 0.598 + ], + "angle": 0, + "content": "Given a graph in DIMACS format with vertices, edges, and an adjacency list, the goal is to assign a positive integer color (1..n) to each vertex while ensuring that no two adjacent vertices share the same color. The objective is to minimize the number of distinct colors used. If any two adjacent vertices have the same color, the solution is invalid and receives no score. Otherwise, the score is equal to the number of distinct colors used, with a lower score being better." + }, + { + "type": "table", + "bbox": [ + 0.086, + 0.609, + 0.376, + 0.751 + ], + "angle": 0, + "content": "
MethodScore
Classical Solver0.8679121232535366
BestOfN0.7992347794550977
Refine0.9237393162393163
FunSearch0.8993461774953884
AIDE0.7992347794550977
ReEvo0.8119485901255648
MCTS0.8529682767415909
EoH0.804175457505431
" + }, + { + "type": "table_caption", + "bbox": [ + 0.192, + 0.76, + 0.371, + 0.777 + ], + "angle": 0, + "content": "Table 19: Graph colouring" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.801, + 0.36, + 0.818 + ], + "angle": 0, + "content": "Hybrid Reentrant Shop Scheduling" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.819, + 0.481, + 0.892 + ], + "angle": 0, + "content": "The problem is a Hybrid Reentrant Shop Scheduling problem where each of \\( n \\) jobs must sequentially undergo three operations: an initialization phase on one of \\( m \\) identical primary machines, a setup phase on a single remote server, and a final main processing phase on the same primary machine used" + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.069, + 0.916, + 0.224 + ], + "angle": 0, + "content": "for initialization. Jobs are initialized in a fixed natural order using list scheduling, while the setup phase is processed on the remote server in an order specified by a permutation decision variable. Additionally, each job is assigned to a primary machine for main processing via a batch_assignment, and on each machine, jobs are processed in natural (initialization) order. The objective is to minimize the makespan, defined as the time when the last job completes its main processing, while ensuring that no machine (primary or server) processes more than one job simultaneously and that all operational precedence constraints are satisfied." + }, + { + "type": "table", + "bbox": [ + 0.52, + 0.235, + 0.81, + 0.378 + ], + "angle": 0, + "content": "
MethodScore
Classical Solver0.9057971372430776
BestOfN0.9872450518587456
Refine0.9966666343001128
FunSearch1.0001780484032463
AIDE0.7457203947696327
ReEvo0.9820554515396009
MCTS0.9961239866411462
EoH0.9841146688046011
" + }, + { + "type": "table_caption", + "bbox": [ + 0.565, + 0.387, + 0.866, + 0.403 + ], + "angle": 0, + "content": "Table 20: Hybrid Reentrant Shop Scheduling" + }, + { + "type": "title", + "bbox": [ + 0.517, + 0.432, + 0.677, + 0.448 + ], + "angle": 0, + "content": "Job shop scheduling" + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.451, + 0.916, + 0.606 + ], + "angle": 0, + "content": "The job shop scheduling problem requires assigning nonnegative integer start times to a set of operations, structured into multiple jobs, each composed of sequential operations. Each operation is processed on a specific machine for a given processing time. The optimization goal is to minimize the makespan, defined as the maximum completion time across all jobs. Constraints include (i) sequential processing of operations within each job, meaning each operation cannot start before its preceding operation finishes, and (ii) nonoverlapping scheduling of operations on the same machine. If these constraints are violated, the solution receives no score." + }, + { + "type": "table", + "bbox": [ + 0.52, + 0.629, + 0.81, + 0.77 + ], + "angle": 0, + "content": "
MethodScore
Classical Solver0.8202016779421567
BestOfN0.7060712883377539
Refine0.7696287350855926
FunSearch0.8192815531664928
AIDE0.6498336005961379
ReEvo0.7982807066317813
MCTS0.7293663754433233
EoH0.7770594374788831
" + }, + { + "type": "table_caption", + "bbox": [ + 0.614, + 0.78, + 0.818, + 0.796 + ], + "angle": 0, + "content": "Table 21: Job shop scheduling" + }, + { + "type": "title", + "bbox": [ + 0.517, + 0.827, + 0.557, + 0.841 + ], + "angle": 0, + "content": "MIS" + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.846, + 0.915, + 0.891 + ], + "angle": 0, + "content": "The Maximum Independent Set (MIS) problem is a fundamental NP-hard optimization problem in graph theory. Given an undirected graph \\( \\mathrm{G} = (\\mathrm{V},\\mathrm{E}) \\), where \\( \\mathrm{V} \\) is a set of vertices" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.083, + 0.069, + 0.483, + 0.113 + ], + "angle": 0, + "content": "and \\( \\mathbf{E} \\) is a set of edges, the goal is to find the largest subset \\( S \\) in \\( V \\) such that no two vertices in \\( S \\) are adjacent (i.e., connected by an edge)." + }, + { + "type": "table", + "bbox": [ + 0.086, + 0.126, + 0.378, + 0.268 + ], + "angle": 0, + "content": "
MethodScore
Classical Solver0.986
BestOfN0.8461150261004076
Refine0.9078324503859446
FunSearch0.9002038932676987
AIDE0.8425484500134511
ReEvo0.8342509729450779
MCTS0.8433127163177989
EoH0.8763795109859694
" + }, + { + "type": "table_caption", + "bbox": [ + 0.232, + 0.277, + 0.333, + 0.292 + ], + "angle": 0, + "content": "Table 22: MIS" + }, + { + "type": "title", + "bbox": [ + 0.083, + 0.323, + 0.423, + 0.355 + ], + "angle": 0, + "content": "Multi-Demand Multidimensional Knapsack problem" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.359, + 0.483, + 0.56 + ], + "angle": 0, + "content": "The Multi-Demand Multidimensional Knapsack Problem (MDMKP) is a binary optimization problem that extends the classical MKP by incorporating both upper-bound \\((<=)\\) and lower-bound \\((>=)\\) constraints. Formally, given n decision variables \\(x_{j} \\in \\{0,1\\}\\), the goal is to maximize \\(\\sum_{j=1}^{n} c_{j} x_{j}\\) subject to \\(\\sum_{j=1}^{n} a_{ij} x_{j} \\leq b_{i} f o r i = 1, \\ldots, m\\) and \\(\\sum_{j=1}^{n} a_{ij} x_{j} \\geq b_{i} f o r i = m+1, \\ldots, m+q\\). Instances are generated from standard MKP problems by varying the number of \\(>=\\) constraints (with q taking values 1, m/2, or m) and by using two types of cost coefficients (positive and mixed), thereby producing six distinct variants per base instance. This formulation enables rigorous evaluation of algorithms in contexts where both resource limits and demand fulfillment must be simultaneously addressed." + }, + { + "type": "table", + "bbox": [ + 0.086, + 0.573, + 0.377, + 0.715 + ], + "angle": 0, + "content": "
MethodScore
Classical Solver0.8957822313136857
BestOfN0.7144432351611377
Refine0.8913402342031996
FunSearch0.8354799525874899
AIDE0.8805432369541204
ReEvo0.8920786376031828
MCTS0.8994648109682947
EoH0.9082814870567889
" + }, + { + "type": "table_caption", + "bbox": [ + 0.083, + 0.724, + 0.483, + 0.752 + ], + "angle": 0, + "content": "Table 23: Multi-Demand Multidimensional Knapsack problem" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.785, + 0.37, + 0.802 + ], + "angle": 0, + "content": "Multidimensional knapsack problem" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.805, + 0.482, + 0.892 + ], + "angle": 0, + "content": "This problem is a multidimensional knapsack optimization where the objective is to maximize the total profit by selecting decision variables, each associated with a profit and resource consumption across multiple constraints. The decision variables must be chosen such that the sum of resource usage for each constraint does not exceed its corresponding capacity." + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.069, + 0.916, + 0.153 + ], + "angle": 0, + "content": "Importantly, if any constraint is violated—that is, if the resource consumption for any constraint exceeds its allowed capacity—the solution is deemed infeasible and earns no score. The challenge lies in identifying the optimal combination of items that yields the highest total profit while strictly satisfying all resource constraints." + }, + { + "type": "table", + "bbox": [ + 0.519, + 0.166, + 0.812, + 0.309 + ], + "angle": 0, + "content": "
MethodScore
Classical Solver0.9903523477639424
BestOfN0.9401685100749627
Refine0.9947726903727786
FunSearch0.9773347714972982
AIDE0.925117898068383
ReEvo1.0018885951740353
MCTS1.0057751617808324
EoH1.0010112897238341
" + }, + { + "type": "table_caption", + "bbox": [ + 0.56, + 0.318, + 0.871, + 0.334 + ], + "angle": 0, + "content": "Table 24: Multidimensional knapsack problem" + }, + { + "type": "title", + "bbox": [ + 0.516, + 0.362, + 0.692, + 0.379 + ], + "angle": 0, + "content": "Open shop scheduling" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.382, + 0.916, + 0.577 + ], + "angle": 0, + "content": "The Open Shop Scheduling Problem involves scheduling a set of jobs across a set of machines with the goal of minimizing the total completion time (makespan). Each job consists of several operations, where each operation must be processed on a specific machine for a given duration. Unlike other scheduling problems, the Open Shop variant has no predetermined order for processing the operations of a job—operations can be scheduled in any order, but a job can only be processed on one machine at a time, and a machine can only process one job at a time. This creates a complex combinatorial optimization challenge where the scheduler must determine both the sequence of operations for each job and the timing of each operation to minimize the overall completion time while ensuring no resource conflicts." + }, + { + "type": "table", + "bbox": [ + 0.519, + 0.59, + 0.812, + 0.732 + ], + "angle": 0, + "content": "
MethodScore
Classical Solver0.7851209868863173
BestOfN0.9017764948703829
Refine0.9930284498507208
FunSearch0.9930284498507208
AIDE0.9156437907474381
ReEvo0.9825099803205837
MCTS0.8960699709846601
EoH0.9930284498507208
" + }, + { + "type": "table_caption", + "bbox": [ + 0.608, + 0.741, + 0.822, + 0.757 + ], + "angle": 0, + "content": "Table 25: Open shop scheduling" + }, + { + "type": "title", + "bbox": [ + 0.516, + 0.786, + 0.704, + 0.802 + ], + "angle": 0, + "content": "Packing unequal circles" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.805, + 0.916, + 0.892 + ], + "angle": 0, + "content": "The problem involves packing a subset of unequal circles into a fixed circular container with radius R_0 and center at the origin, where each circle i has a given radius R_i (sorted in non-decreasing order) and is associated with a binary decision variable alpha_i indicating whether it is packed. The goal is to maximize the number of circles packed—that is," + } + ], + [ + { + "type": "text", + "bbox": [ + 0.082, + 0.068, + 0.482, + 0.168 + ], + "angle": 0, + "content": "maximize \\(\\sum_{i=1}^{n} \\alpha_{i}\\)—subject to two sets of nonlinear constraints: (1) each packed circle must lie entirely within the container, which is enforced by ensuring that the distance from its center to the container's center plus its radius does not exceed R_0; and (2) any two packed circles must not overlap, meaning the distance between their centers must be at least the sum of their radii." + }, + { + "type": "table", + "bbox": [ + 0.086, + 0.176, + 0.378, + 0.319 + ], + "angle": 0, + "content": "
MethodScore
Classical Solver0.9075757575757577
BestOfN0.8939393939393939
Refine0.9803030303030303
FunSearch0.9719696969696969
AIDE0.8825757575757576
ReEvo0.8825757575757576
MCTS0.9522727272727273
EoH0.8825757575757576
" + }, + { + "type": "table_caption", + "bbox": [ + 0.169, + 0.327, + 0.395, + 0.344 + ], + "angle": 0, + "content": "Table 26: Packing unequal circles" + }, + { + "type": "title", + "bbox": [ + 0.082, + 0.371, + 0.308, + 0.387 + ], + "angle": 0, + "content": "Packing unequal circles area" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.389, + 0.483, + 0.571 + ], + "angle": 0, + "content": "The problem involves packing a subset of unequal circles into a fixed circular container with radius \\( \\mathrm{R\\_0} \\) and center at the origin, where each circle \\( \\mathrm{i} \\) has a given radius \\( \\mathrm{R\\_i} \\) (sorted in non-decreasing order) and is associated with a binary decision variable alpha_i indicating whether it is packed. The goal is to maximize the total area of all circles packed—that is, maximize \\( \\sum_{i=1}^{n} \\alpha_i * p_i * R_i^2 \\)—subject to two sets of nonlinear constraints: (1) each packed circle must lie entirely within the container, which is enforced by ensuring that the distance from its center to the container's center plus its radius does not exceed \\( \\mathrm{R\\_0} \\); and (2) any two packed circles must not overlap, meaning the distance between their centers must be at least the sum of their radii." + }, + { + "type": "table", + "bbox": [ + 0.086, + 0.579, + 0.378, + 0.721 + ], + "angle": 0, + "content": "
MethodScore
Classical Solver0.8767896840297265
BestOfN0.9923476599194556
Refine1.0226692239919217
FunSearch1.0404725950195108
AIDE0.5972138868724692
ReEvo0.9101821460280035
MCTS0.9617483396206504
EoH1.0056059827170811
" + }, + { + "type": "table_caption", + "bbox": [ + 0.153, + 0.73, + 0.411, + 0.746 + ], + "angle": 0, + "content": "Table 27: Packing unequal circles area" + }, + { + "type": "title", + "bbox": [ + 0.083, + 0.774, + 0.395, + 0.79 + ], + "angle": 0, + "content": "Packing unequal rectangles and squares" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.792, + 0.482, + 0.892 + ], + "angle": 0, + "content": "We are given a set of n unequal rectangles (or squares), each with specified dimensions, and a fixed circular container of radius R centered at the origin. The problem is to decide which rectangles to pack and where to position them—by choosing binary selection variables and continuous center coordinates—so that every packed rectangle is entirely contained within the circle and no two packed rectangles overlap." + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.069, + 0.916, + 0.168 + ], + "angle": 0, + "content": "For each rectangle, the four corners must lie inside the circle, and if an item is not packed it is forced to a dummy position. The objective is to maximize the number of packed items, i.e., maximize \\(\\sum_{i=1}^{n} \\text{alpha}_i\\) (or a related sum when each alpha_i is binary). Note that the rotation of the rectangular (by 90 degrees) is sometimes allowed and your algorithm should take that into account." + }, + { + "type": "table", + "bbox": [ + 0.518, + 0.178, + 0.812, + 0.321 + ], + "angle": 0, + "content": "
MethodScore
Classical Solver0.9134625513058007
BestOfN0.8337025039542202
Refine0.932172162950195
FunSearch0.9228828411608733
AIDE0.7950708457573447
ReEvo0.77954425754769
MCTS0.8028450160315149
EoH0.9228828411608733
" + }, + { + "type": "table_caption", + "bbox": [ + 0.55, + 0.329, + 0.88, + 0.345 + ], + "angle": 0, + "content": "Table 28: Packing unequal rectangles and squares" + }, + { + "type": "title", + "bbox": [ + 0.516, + 0.376, + 0.866, + 0.393 + ], + "angle": 0, + "content": "Packing unequal rectangles and squares area" + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.395, + 0.916, + 0.565 + ], + "angle": 0, + "content": "We consider the problem of selecting and placing a subset of \\( n \\) unequal rectangles (or squares) into a fixed-size circular container of radius \\( R \\) so as to maximize the total area of the packed items. Each item \\( i \\) has given dimensions \\( L_{i} \\) and \\( W_{i} \\) (with \\( L_{i} = W_{i} \\) for squares) and an associated area \\( L_{i}W_{i} \\). The decision variables include a binary indicator \\( \\alpha_{i} \\) for whether item \\( i \\) is packed and continuous variables \\( (x_{i},y_{i}) \\) for the placement of its center, along with a rotation angle \\( heta_{i} \\) when \\( 90^{\\circ} \\) rotations are allowed. The formulation enforces that for every packed item, all four of its rotated corners must lie within the circle, and that no two packed items overlap; if an item is not packed, it is fixed at a dummy position." + }, + { + "type": "table", + "bbox": [ + 0.518, + 0.576, + 0.812, + 0.719 + ], + "angle": 0, + "content": "
MethodScore
Classical Solver0.8893527400499813
BestOfN0.9536806816195774
Refine1.0513451711752306
FunSearch1.0839011538182066
AIDE0.8100272732450019
ReEvo0.9435059488868657
MCTS0.995946490673633
EoH0.9566331174271511
" + }, + { + "type": "table_caption", + "bbox": [ + 0.534, + 0.727, + 0.895, + 0.743 + ], + "angle": 0, + "content": "Table 29: Packing unequal rectangles and squares area" + }, + { + "type": "title", + "bbox": [ + 0.516, + 0.772, + 0.791, + 0.789 + ], + "angle": 0, + "content": "Resource constrained shortest path" + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.791, + 0.915, + 0.892 + ], + "angle": 0, + "content": "This problem involves finding the shortest path from vertex 1 to vertex \\( n \\) in a directed graph while satisfying resource constraints. Specifically, each vertex and arc has associated resource consumptions, and the cumulative consumption for each resource must fall within the provided lowerBounds and upperBounds. The input includes the number of vertices (n), arcs (m), resource types (K), resource consumption at" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.087, + 0.069, + 0.48, + 0.18 + ], + "angle": 0, + "content": "each vertex, and a graph represented as a mapping from vertices to lists of arcs (each arc being a tuple of end vertex, cost, and arc resource consumptions). The optimization objective is to minimize the total arc cost of the path, with the condition that the path is valid—meaning it starts at vertex 1, ends at vertex \\( n \\), follows defined transitions in the graph, and respects all resource bounds; if any of these constraints are not met, the solution receives no score." + }, + { + "type": "table", + "bbox": [ + 0.086, + 0.194, + 0.375, + 0.335 + ], + "angle": 0, + "content": "
MethodScore
Classical Solver0.7508899529136809
BestOfN0.7508899529136808
Refine0.7284494767232047
FunSearch0.7508899529136808
AIDE0.7508899529136808
ReEvo0.7508899529136808
MCTS0.7284494767232047
EoH0.7508899529136808
" + }, + { + "type": "table_caption", + "bbox": [ + 0.137, + 0.346, + 0.428, + 0.36 + ], + "angle": 0, + "content": "Table 30: Resource constrained shortest path" + }, + { + "type": "title", + "bbox": [ + 0.087, + 0.4, + 0.182, + 0.416 + ], + "angle": 0, + "content": "Set covering" + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.423, + 0.48, + 0.52 + ], + "angle": 0, + "content": "Set Covering Problem. The goal is to select a subset of columns, each with an associated cost, such that every row is covered by at least one chosen column. For each row, the available covering columns are provided (as 1-indexed numbers). The objective is to minimize the total cost of the selected columns, and if even one row is left uncovered, then no score is awarded." + }, + { + "type": "table", + "bbox": [ + 0.086, + 0.535, + 0.375, + 0.676 + ], + "angle": 0, + "content": "
MethodScore
Classical Solver0.8883906244045974
BestOfN0.8213286754887226
Refine0.9056204467263304
FunSearch0.8887733963981322
AIDE0.8639998129016312
ReEvo0.9360686599803572
MCTS0.8672991644233662
EoH0.8843920544743958
" + }, + { + "type": "table_caption", + "bbox": [ + 0.209, + 0.687, + 0.356, + 0.701 + ], + "angle": 0, + "content": "Table 31: Set covering" + }, + { + "type": "title", + "bbox": [ + 0.087, + 0.74, + 0.208, + 0.757 + ], + "angle": 0, + "content": "Set partitioning" + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.763, + 0.48, + 0.889 + ], + "angle": 0, + "content": "This problem involves solving a set partitioning instance where the goal is to choose a subset of columns such that each row is covered exactly once while minimizing the total cost. Each column is associated with a cost and covers a specific set of rows. The optimization problem is defined by selecting columns from a given set so that every row is covered precisely once, and the sum of the selected columns' costs is minimized. If the solution fails to cover every row exactly once, then no score is awarded." + }, + { + "type": "table", + "bbox": [ + 0.52, + 0.067, + 0.808, + 0.206 + ], + "angle": 0, + "content": "
MethodScore
Classical Solver0.9996401983661346
BestOfN0.8991338255841825
Refine0.7999991398515384
FunSearch0.83333333333333334
AIDE0.9
ReEvo0.8991338255841825
MCTS0.8647769492523454
EoH0.9324671589175159
" + }, + { + "type": "table_caption", + "bbox": [ + 0.633, + 0.218, + 0.799, + 0.232 + ], + "angle": 0, + "content": "Table 32: Set partitioning" + }, + { + "type": "title", + "bbox": [ + 0.52, + 0.257, + 0.555, + 0.271 + ], + "angle": 0, + "content": "TSP" + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.275, + 0.914, + 0.401 + ], + "angle": 0, + "content": "The Traveling Salesman Problem (TSP) is a classic combinatorial optimization problem where, given a set of cities with known pairwise distances, the objective is to find the shortest possible tour that visits each city exactly once and returns to the starting city. More formally, given a complete graph \\( \\mathrm{G} = (\\mathrm{V},\\mathrm{E}) \\) with vertices \\( \\mathrm{V} \\) representing cities and edges \\( \\mathrm{E} \\) with weights representing distances, we seek to find a Hamiltonian cycle (a closed path visiting each vertex exactly once) of minimum total weight." + }, + { + "type": "table", + "bbox": [ + 0.52, + 0.413, + 0.808, + 0.553 + ], + "angle": 0, + "content": "
MethodScore
Classical Solver0.986
BestOfN0.8590303340408165
Refine0.9399577646813952
FunSearch0.9016741050908584
AIDE0.7710495444635409
ReEvo0.8488918718349553
MCTS0.5961113158302597
EoH0.7935463156320405
" + }, + { + "type": "table_caption", + "bbox": [ + 0.669, + 0.565, + 0.763, + 0.577 + ], + "angle": 0, + "content": "Table 33: TSP" + }, + { + "type": "title", + "bbox": [ + 0.52, + 0.606, + 0.782, + 0.621 + ], + "angle": 0, + "content": "Uncapacitated warehouse location" + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.624, + 0.914, + 0.803 + ], + "angle": 0, + "content": "The Uncapacitated Warehouse Location Problem aims to determine which warehouses to open and how to assign each customer entirely to an open warehouse in order to minimize the total cost. Given a set of potential warehouse locations, each with a fixed opening cost, and a set of customers, each with an associated assignment cost for being served by each warehouse, the objective is to select a subset of warehouses to open and assign every customer completely to one of these open warehouses. The optimization minimizes the sum of fixed warehouse opening costs and the customer assignment costs. Each customer must be assigned to exactly one warehouse; if any customer is left unassigned or assigned to more than one warehouse, the solution is considered infeasible." + }, + { + "type": "title", + "bbox": [ + 0.52, + 0.815, + 0.767, + 0.831 + ], + "angle": 0, + "content": "Unconstrained guillotine cutting" + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.834, + 0.914, + 0.889 + ], + "angle": 0, + "content": "The unconstrained guillotine cutting problem involves selecting and placing a subset of available pieces within a fixed stock rectangle to maximize the total value of the placed pieces. Each piece, defined by its length, width, and value," + } + ], + [ + { + "type": "table", + "bbox": [ + 0.086, + 0.066, + 0.375, + 0.207 + ], + "angle": 0, + "content": "
MethodScore
Classical Solver0.9968157833494645
BestOfN0.98931916166557
Refine1.00000000000002045
FunSearch0.9978398298062331
AIDE0.9994999857664043
ReEvo0.998083746641369
MCTS0.9951604598088827
EoH0.87499999999978142
" + }, + { + "type": "table_caption", + "bbox": [ + 0.138, + 0.219, + 0.426, + 0.231 + ], + "angle": 0, + "content": "Table 34: Uncapacitated warehouse location" + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.26, + 0.479, + 0.398 + ], + "angle": 0, + "content": "may be optionally rotated \\(90^{\\circ}\\) if allowed and used at most once. The challenge is to determine both the selection and the positioning of these pieces such that they do not overlap and lie entirely within the stock's boundaries. This optimization problem formalizes the decision variables as the x and y coordinates for the bottom-left placement of each piece and, if rotation is allowed, a binary variable indicating its orientation, while the objective function is to maximize the sum of the values of the pieces successfully placed within the stock." + }, + { + "type": "table", + "bbox": [ + 0.086, + 0.409, + 0.376, + 0.551 + ], + "angle": 0, + "content": "
MethodScore
Classical Solver0.9725381370960237
BestOfN0.8701275303357732
Refine0.9618177725501762
FunSearch0.9646369625362231
AIDE0.8512970128354943
ReEvo0.9828452190272524
MCTS0.8628525304460628
EoH0.9649480933563296
" + }, + { + "type": "table_caption", + "bbox": [ + 0.145, + 0.562, + 0.42, + 0.575 + ], + "angle": 0, + "content": "Table 35: Unconstrained guillotine cutting" + }, + { + "type": "title", + "bbox": [ + 0.088, + 0.608, + 0.324, + 0.623 + ], + "angle": 0, + "content": "Vehicle routing: period routing" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.626, + 0.479, + 0.653 + ], + "angle": 0, + "content": "The Period Vehicle Routing Problem requires planning delivery routes over a multi-day planning period." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.654, + 0.479, + 0.75 + ], + "angle": 0, + "content": "Each customer (other than the depot, whose id is 0) is provided with a list of candidate service schedules. A schedule is represented by a binary vector of length equal to the period (e.g., [1, 0, 1] for a 3-day period), where a 1 in a given position indicates that the customer must be visited on that day. The decision maker must select exactly one candidate schedule for each customer." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.751, + 0.479, + 0.889 + ], + "angle": 0, + "content": "For every day in the planning period, if a customer's chosen schedule indicates a delivery (i.e., a 1), then exactly one vehicle must visit that customer on that day. Otherwise, the customer should not be visited. The decision maker must also design, for each day, the tours for the vehicles. Each tour is a continuous route that starts at the depot (id 0) and, after visiting a subset of customers, returns to the depot. Each vehicle is only allowed to visit the depot once per day—namely, as its starting and ending point—and it is not allowed to return to the depot in the middle of a tour." + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.07, + 0.913, + 0.208 + ], + "angle": 0, + "content": "Moreover, each vehicle route must obey a capacity constraint: the total demand of the customers visited on that tour must not exceed the vehicle capacity each day. 
Although multiple vehicles are available per day (as specified by the input), not all available vehicles have to be used, but the number of tours in a given day cannot exceed the provided number of vehicles. In addition, the tours on each day must cover exactly those customers who require service per the selected schedules, and no customer may be visited more than once in a given day." + }, + { + "type": "text", + "bbox": [ + 0.521, + 0.209, + 0.913, + 0.263 + ], + "angle": 0, + "content": "The objective is to choose a schedule for every customer and plan the daily tours so as to minimize the overall distance traveled by all vehicles during the entire planning period. Distances are measured using Euclidean distance." + }, + { + "type": "table", + "bbox": [ + 0.52, + 0.275, + 0.818, + 0.417 + ], + "angle": 0, + "content": "
MethodScore
Classical Solver0.12437943290991642
BestOfN0.42032326191804853
Refine0.48371172427664344
FunSearch0.32385035648314586
AIDE0.5362363612554435
ReEvo0.0
MCTS0.0
EoH0.0
" + }, + { + "type": "table_caption", + "bbox": [ + 0.583, + 0.428, + 0.849, + 0.441 + ], + "angle": 0, + "content": "Table 36: Vehicle routing: period routing" + }, + { + "type": "title", + "bbox": [ + 0.521, + 0.469, + 0.696, + 0.484 + ], + "angle": 0, + "content": "p-median - capacitated" + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.486, + 0.913, + 0.694 + ], + "angle": 0, + "content": "The Capacitated P-Median Problem is a facility location optimization problem where the objective is to select exactly \\( p \\) customers as medians (facility locations) and assign each customer to one of these medians to minimize the total cost, defined as the sum of the Euclidean distances (rounded down to the nearest integer) between customers and their assigned medians. Each median has a capacity constraint \\( Q \\), meaning the total demand of the customers assigned to it cannot exceed \\( Q \\). A feasible solution must respect this capacity constraint for all medians; otherwise, it receives a score of zero. The solution is evaluated by the ratio extscore = rac extbestknown extcomputed_total_cost, where computed_total_cost is the total assignment cost if all constraints are satisfied; otherwise, the score is zero. The output consists of the total cost (if feasible), the selected medians, and the customer assignments." + }, + { + "type": "title", + "bbox": [ + 0.521, + 0.72, + 0.715, + 0.735 + ], + "angle": 0, + "content": "p-median - uncapacitated" + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.737, + 0.913, + 0.888 + ], + "angle": 0, + "content": "The uncapacitated p-median problem is a combinatorial optimization problem defined on a given graph \\( \\mathrm{G} = (\\mathrm{V},\\mathrm{E}) \\) with n vertices and m edges. The objective is to select p medians (facility locations) from the set of vertices such that the total assignment cost is minimized. 
The assignment cost is computed as the sum of the shortest distances from each vertex to its nearest selected median, where distances are given by a precomputed complete cost matrix (obtained via Floyd's algorithm). Formally, given the cost matrix \\( D\\in \\mathbb{R}^{n\\times n} \\), the optimization problem seeks to find a subset \\( S\\subseteq Vwith|S| = p \\) that minimizes the function:" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.086, + 0.067, + 0.375, + 0.207 + ], + "angle": 0, + "content": "
MethodScore
Classical Solver0.8996179560649475
BestOfN0.9892886172082498
Refine0.9737771618997864
FunSearch0.9748437166838722
AIDE0.7442228395960961
ReEvo0.9786585768154689
MCTS0.9829650705934849
EoH0.9853458094532425
" + }, + { + "type": "table_caption", + "bbox": [ + 0.176, + 0.219, + 0.39, + 0.232 + ], + "angle": 0, + "content": "Table 37: p-median - capacitated" + }, + { + "type": "text", + "bbox": [ + 0.103, + 0.259, + 0.254, + 0.274 + ], + "angle": 0, + "content": "\\(\\sum_{v\\in V}\\min_{s\\in S}D(v,s)\\)" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.275, + 0.478, + 0.315 + ], + "angle": 0, + "content": "where \\( \\mathrm{D}(\\mathrm{v},\\mathrm{s}) \\) is the shortest-path distance between vertex v and median s. The solution consists of a list of exactly p distinct vertices representing the chosen medians." + }, + { + "type": "table", + "bbox": [ + 0.086, + 0.328, + 0.375, + 0.469 + ], + "angle": 0, + "content": "
MethodScore
Classical Solver0.9952341868141825
BestOfN0.9453613019698086
Refine0.9982141349797949
FunSearch0.9996783954983718
AIDE0.9847816841274486
ReEvo0.9983315585722753
MCTS0.9605290267584901
EoH0.9921177098573016
" + }, + { + "type": "table_caption", + "bbox": [ + 0.167, + 0.48, + 0.398, + 0.494 + ], + "angle": 0, + "content": "Table 38: p-median - uncapacitated" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_04xxx/2504.04310/968affa1-c14c-4643-a77b-b08b870e8c9e_origin.pdf b/data/2025/2504_04xxx/2504.04310/968affa1-c14c-4643-a77b-b08b870e8c9e_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..60bf6e6b270fcdd5ed480f62adb992ddb7a240f5 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/968affa1-c14c-4643-a77b-b08b870e8c9e_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:740880abd67561016c689c91531c7a377f170c41dd8b2c913f67095a05bba4f2 +size 2569331 diff --git a/data/2025/2504_04xxx/2504.04310/full.md b/data/2025/2504_04xxx/2504.04310/full.md new file mode 100644 index 0000000000000000000000000000000000000000..ec030835eeecad7d8ccf34d839e6f26774aea6c0 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/full.md @@ -0,0 +1,649 @@ +# CO-Bench: Benchmarking Language Model Agents in Algorithm Search for Combinatorial Optimization + +Weiwei Sun* Shengyu Feng* Shanda Li Yiming Yang + +Carnegie Mellon University + +{weiweis, shengyuf, shandal, yiming}@cs.cmu.edu + +# Abstract + +Although LLM-based agents have attracted significant attention in domains such as software engineering and machine learning research, their role in advancing combinatorial optimization (CO) remains relatively underexplored. This gap underscores the need for a deeper understanding of their potential in tackling structured, constraint-intensive problems—a pursuit currently limited by the absence of comprehensive benchmarks for systematic investigation. To address this, we introduce CO-Bench, a benchmark suite featuring 36 real-world CO problems drawn from a broad range of domains and complexity levels. CO-Bench includes structured problem formulations and curated data to support rigorous investigation of LLM agents. 
We evaluate multiple agentic frameworks against established human-designed algorithms, revealing the strengths and limitations of existing LLM agents and identifying promising directions for future research. CO-Bench is publicly available at https://github.com/sunnweiwei/CO-Bench. + +# Introduction + +Combinatorial Optimization (CO) is a foundational problem class in computer science and operation research, focused on finding optimal solutions in discrete, structured, and constraint-rich domains. It underpins a wide range of real-world applications, including logistics (Vogiatzis and Pardalos 2013), production planning (Crama 1997), bioinformatics (Gusfield 1997), etc. Many CO problems are computationally intractable and classified as NP-hard, making exact solutions impractical at scale. As a result, developing effective algorithms often demands significant domain expertise and manual effort—posing a long-standing challenge in both academic research and industrial applications. + +Recent advances in Large Language Models (LLMs) (OpenAI 2024b; DeepSeek-AI 2025a) have positioned LLM-based agents as increasingly promising tools for a variety of prediction and decision-making tasks (Jimenez et al. 2023; Chan et al. 2024; Gottweis et al. 2025). In particular, there is growing interest in applying LLMs to CO problems. Initial investigations have largely focused on solution correctness, evaluated on small-scale test instances (Ramamonjison et al. 2023; Yang et al. 2025a; Xiao et al. 2024a), and are often geared towards solving problems posed by general users. More recent works have begun to explore autonomous LLMs as a new approach. + +![](images/86195873cb96386557e499b2d62386ffe100f9c92ca0b6fd4133962c06d1ff1b.jpg) +Figure 1: Overview of CO-Bench. CO-Bench includes 36 problems from 8 categories, and aims to evaluate LLM agents' ability to develop effective and efficient algorithms for solving real-world combinatorial optimization problems. 
+ +agents capable of conducting research and designing more efficient algorithms for complex scientific and industrial challenges. For example, FunSearch (Romera-Paredes et al. 2023) combines LLM prompting with evolutionary search to discover heuristics that outperform human-designed counterparts in the Cap Set and Bin Packing problems. Subsequent methods (Liu et al. 2024; Ye et al. 2024; Novikov et al. 2025) further improve computational efficiency and broaden applicability to domains such as routing and scheduling. + +Despite these advancements, most existing efforts focus on narrow components (e.g., priority functions) within established algorithms, across a limited set of tasks (typically 4-7 problems), and often rely on heavily handcrafted, problem-specific prompts and templates (Romera-Paredes et al. 2023; Ye et al. 2024). Furthermore, there remains a lack of systematic evaluation of how these agents perform across a broader and more diverse collection of real-world CO problems. + +To address this gap, we introduce CO-Bench, a comprehensive benchmark designed to evaluate LLM agents in the context of efficient CO algorithm development. CO-Bench comprises real-world CO problems spanning a wide range of domains and complexities. Figure 1 illustrates the problem categories and examples, while Table 1 compares CO-Bench with existing CO benchmarks. Compared to prior bench + +marks, CO-Bench offers broader problem coverage, and emphasizes end-to-end evaluation of LLM-based research agents, focusing on their ability to design efficient, potentially novel algorithms from abstract problem descriptions. This design enables reproducible and scalable evaluation of agent performance, including comparisons with human-designed classical CO solver under equivalent time constraints. By doing so, CO-Bench introduces new challenges for LLM agent development, such as the discovery of algorithms that extend beyond current human knowledge of CO. 
+ +Using CO-Bench, we benchmark 15 LLMs and 9 agentic frameworks, comparing their performances against both human-designed classical algorithms and the best-known solutions reported in the literature. Our results show that reasoning models (e.g., o3-mini and Claude-3.7-sonnet) consistently outperform standard no-reasoning LLMs. When integrated into agentic frameworks like FunSearch, LLMs further improve through trial-and-error exploration. Notably, on 25 problems, LLM-generated algorithms outperformed classical solvers, and on 3 problems, they surpassed the best-known solutions. However, our analysis also reveals current limitations, such as limited algorithmic novelty and insufficient handling of feasibility constraints. These findings highlight both the promise and challenges of LLM-driven research in CO and suggest key directions for advancing autonomous algorithm design. + +In summary, this paper makes the following contributions: + +(i) We introduce CO-Bench, the first comprehensive benchmark to evaluate the capability of LLMs to develop algorithms for diverse and challenging real-world CO problems +(ii) We benchmark 15 LLMs and 9 agentic frameworks, analyzing their performance relative to expert-designed pipelines. Our results highlight the strengths of agent-generated algorithms, while also revealing limitations in planning, feasibility checking, and the generation of efficient solution. + +# Preliminary + +# Combinatorial Optimization + +For each CO problem $c$ (for example, Traveling salesman problem), we follow Papadimitriou and Steiglitz (1982) to formulate it as a constrained optimization problem in the discrete space. 
Consider an instance $p$ , the optimization problem could be expressed as + +$$ +\min _ {x \in S _ {c} (p)} f _ {c} (x; p) + g _ {c} (x; p), \tag {1} +$$ + +where $S_{c}(p)$ represents the solution space, e.g., $\mathbf{Z}^{m} \times \mathbb{R}^{n}$ for $d$ discrete variables and $n$ continuous variables, $f_{c}(x;p)$ corresponds to the objective function, and $g_{c}(x;p)$ stands for the constraint violation, which is 0 for feasible solutions and $+\infty$ otherwise. To avoid the clutter, we simply denote $h_c(x;p) = f_c(x;p) + g_c(x;p)$ in the following text and omit $c$ if the context is clear. + +Given an algorithm set $\mathcal{A}$ and a problem instance distribution $D$ , the algorithm search problem is defined as + +$$ +\min _ {A \in \mathcal {A}} \mathbb {E} _ {p \sim D, x \sim A (p)} [ h (x; p) ]. \tag {2} +$$ + +
DatasetAlgorithm DevProblem NumInstance NumLargest Variables
NPHardEvalX990024
NL4OPTX52893
OptiBenchX460518
ComplexORX201009
ReEvo75971,000
CO-Bench366,48211,000
+ +Table 1: Data statistics for CO-Bench and related CO benchmarks, including the indicator for algorithm development support, the number of problem types, the number of test-set problem instances, and the largest number of test-set variables (e.g., the number of nodes in the largest graph). + +In contrast to previous neural CO solvers (Bengio, Lodi, and Prouvost 2020) that directly parameterize $A$ with a neural network, we focus on symbolic searching space where $A$ consists of all algorithms that could be represented by a Python Program, with a maximum number of $d$ tokens, where $d$ is typically decided by the output length limit of an LLM. Considering the popularity of randomized algorithms (Motwani and Raghavan 2013) for CO, we treat the output of an algorithm $A(p)$ as a distribution of solutions, while deterministic algorithms would correspond to the point distributions. + +The main endeavor of this work is focused on the shaping of the algorithm set $\mathcal{A}$ , the curation of the data distribution $D$ and the definition of $h$ on our collected CO problems. + +# LLM Agents + +Given a CO problem $c$ , a candidate algorithm could be generated by an LLM as + +$$ +A \sim M (\text {t e x t i f y} (c); \theta), \tag {3} +$$ + +where $M$ denotes an LLM with parameters $\theta$ . However, one-time generation usually leads to infeasible code or suboptimal algorithms (Madaan et al. 2023), and agentic frameworks address this by enabling iterative refinement through structured interactions with external tools (e.g., a coding environment). Formally, an agent performs reasoning-action iterations (Yao et al. 
2022): + +$$ +r _ {t + 1} \sim M \left(\operatorname {t e x t i f y} _ {r} \left(c, A _ {t}, H _ {t}\right); \theta\right), \tag {4} +$$ + +$$ +a _ {t + 1} \sim M \left(\text {t e x t i f y} _ {a} \left(r _ {t + 1}, H _ {t}\right); \theta\right), \tag {5} +$$ + +where $r_t$ is the reasoning step, $a_t$ is the action step (e.g., executing code, evaluating results), and $H_t = (r_i, a_i, \text{result}(a_i))_{i=1}^{t-1}$ maintains the interaction history. Thus, an LLM agent is formally defined as an LLM $M(\cdot; \theta)$ guided by a structured workflow specifying iterative external interactions to enhance its outputs. + +# CO-Bench + +We introduce CO-Bench, a comprehensive benchmark designed to evaluate the algorithm development ability of LLM agents on combinatorial optimization (CO) problems. The + +benchmark consists of 36 problems mainly sourced from OR-Library (Beasley 1990), an established archive containing datasets accumulated by researchers across over 30 years of operations research. These problems span a wide range of realistic CO challenges in academia and industrial applications. + +# Data Curation + +Problem Selection We first perform rigorous filtering and cleaning, and select 36 CO problems that cover diverse domains and complexities, including: + +- Packing problems: Bin packing (Falkenauer 1996), Multi-Demand Multidimensional Knapsack problem (Cappanera and Trubian 2001), Multidimensional knapsack problem (Petersen 1967), Container loading (Bischoff and Ratcliff 1995; Ivancic 1988), Container loading with weight restrictions (Ratcliff and Bischoff 1998; Bischoff 2006), Packing unequal circles (López and Beasley 2016), Packing unequal rectangles and squares number / area (López and Beasley 2018). +- Cutting problems: Assortment problem (Beasley 1985a), Constrained / unconstrained guillotine cutting (Christofides and Whitlock 1977; Beasley 1985b), Constrained non-guillotine cutting (Beasley 1985c, 2004). 
+- Facility location problems: Capacitated / Uncapacitated warehouse location (Beasley 1988, 1993), Capacitated / Uncapacitated p-median problem (Beasley 1985d; Osman and Christofides 1994). +- Scheduling problems: Aircraft landing (Beasley et al. 2000, 2004), Crew scheduling (Beasley and Cao 1996), Common due date scheduling (Biskup and Feldmann 2001), Flow shop scheduling (Taillard 1993), Hybrid Reentrant Shop Scheduling (Chakhlevitch and Glass 2009), Job shop scheduling (Taillard 1993), Open shop scheduling (Taillard 1993). +- Routing problems: Traveling salesman problem (Laporte 1992), Period vehicle routing problem (Christofides and Beasley 1984), Resource constrained shortest path (Beasley and Christofides 1989). +- Assignment problems: Constrained / unconstrained assignment (Osman 1995; and 1990). +- Tree problems: Euclidean Steiner (Beasley 1992), Corporate structuring (Anken and Beasley 2012) +- Graph and set problems: Maximal Independent Set (Erdos and Renyi 1984), Graph colouring (Fleurent and Ferland 1996), Equitable partitioning (Mingers and O'Brien 1995), Set partitioning (Chu and Beasley 1998), Set covering (Beasley and Jornsten 1992). + +Data Annotation For each problem, we manually annotate the following components: (1) Problem description: a formal definition of the optimization problem in natural language, accompanied by a clearly specified solve function as the starter code; (2) Data loading function: a load_data function to load and preprocess raw data from the test files; (3) Evaluation function: an eval_func function that rigorously and robustly evaluates the quality of a solution. Additionally, each problem comprises a development set and a test set, each containing several problem instances. + +Evaluation Framework We develop a rigorous and efficient evaluation framework to assess the performance of + +LLM agents in simulated, time-constrained competition scenarios (Chan et al. 2024). 
Specifically, LLM agents operate within a sandbox environment with access to a Linux machine. For each problem, agents are provided with a problem description, development datasets, and an API endpoint for submitting their solutions (i.e. codebases) to receive evaluation feedback. An independent evaluation system, which is protected by built-in safeguards, scores the submitted solutions on the development set in parallel. After a limited number of research steps, the agent submits its final solution for evaluation on the test set. During the agent development process, both eval_func and test data are invisible. Figure 2 shows the evaluation pipeline in CO-Bench. + +Designing Classical Solver Baselines To investigate how existing LLM agents perform compared to classical solvers, we establish a classical solver baseline. Specifically, the authors of this paper—who have extensive experience in related areas and are familiar with the problems in CO-Bench—spent approximately 30 minutes per problem testing and selecting the most effective classical solvers (e.g., LKH for TSP, CPLEX for scheduling, Gurobi for MIS) and tuning their hyperparameters on the development set. This process ensures that the classical solver baseline is well-tuned and competitive for each problem in CO-Bench. + +# Evaluation Metrics + +Avg Score The main evaluation metric is similar to the Primal Gap (Berthold 2006), defined as the normalized score of the primal bound $h(x; p)$ against a pre-computed optimal (or best-known) objective value $h_p^*$ : + +$$ +s (x, p) = \frac {\operatorname* {m i n} \left\{\left| h (x , p) \right| , \left| h _ {p} ^ {*} \right| \right\}}{\operatorname* {m a x} \left\{\left| h (x , p) \right| , \left| h _ {p} ^ {*} \right| \right\}}, \tag {6} +$$ + +A higher value indicates better performance and a score of 1 signifies the performance identical to the optimal or best-known solution. Program errors or infeasible solutions lead to a score of 0.0. 
The score of a solver on a given problem is computed by averaging its scores across all test instances. The overall benchmark score is then obtained by averaging these problem-level scores across all 36 problems. + +Valid Solution We compute the percentage of problems for which the generated code is correct on all test instances. Any raised error—such as constraint violation or timeout—is treated as an invalid signal. If any test instance for a given problem results in an invalid signal, the entire solution for that problem is considered invalid, even if it produces valid results on other test instances. + +Above Classical Given the performance of the classical solver, we calculate the proportion of problems where the model outperforms the classical solver baseline. + +Survival Rate The survival rate measures, for each problem, the percentage of test instances where the model's solution is above $99\%$ of the reference score (reported optimal or best-known solution from literature). This serves as a challenging metric, as the model only gets credit when it is very close to or better than the previous-best algorithm. + +![](images/a9a3f71437619cfb208811e67ebfaeb37a592dd218250bc67a376effc29926bd.jpg) +Figure 2: CO-Bench is an evaluation environment for AI agents. Each problem has an associated description and a development dataset. Following the setup in Chan et al. (2024), the agent-generated code implements an algorithm design, which is further graded and compared against the best-known solution and human expert solution. + +# Experimental Setup + +# Benchmarked Methods + +On CO-Bench, we evaluate various LLMs combined with different agentic frameworks, and compare them with existing human-designed CO solvers. + +LLMs We conduct experiments on 5 open-source models and 10 proprietary models. These include instruction-tuned models such as Llama-3.3-70B-Instruct (Meta 2024), Qwen-2.5-Coder-32B-Instruct (Hui et al. 
2024), DeepSeek-V3 (DeepSeek-AI 2024), and GPT-4o (OpenAI 2024a), as well as frontier reasoning models, including o3-mini (OpenAI 2025), Claude-3.7-Sonnet-Thinking (Anthropic 2025), DeepSeek-R1 (DeepSeek-AI 2025b), Grok-3-Thinking (xAI 2025), QwQ-32B (Qwen 2025), and Gemini 2.5 Pro (DeepMind 2025). + +Agentic frameworks For the aforementioned LLMs, we apply various agentic frameworks to evaluate their performance across different strategies. These range from simple approaches, such as direct generation, to more sophisticated frameworks that augment LLM with additional tools, workflows, and test-time compute: + +- Direct Answer: The simplest approach, where the LLM directly generates a solution to the combinatorial optimization problem without further refinement. +- BestOfN Sampling (Chen et al. 2021): Generate $N$ candidate solutions, evaluate each on a development set, and select the solution with the best performance. +- Chain of Experts (Xiao et al. 2024a): A multi-agent prompting framework where agents of different roles cooperate to debug and deliver one solution. +- Greedy Refinement (Shinn et al. 2023; Madaan et al. 2023): Iteratively prompt the LLM to refine the current best solution based on the evaluation results of the development set, repeating this refinement process for $N$ steps. +- FunSearch (Romera-Paredes et al. 2023): Prompt the LLM to either draft a new solution or refine an existing one, followed by employing an evolutionary algorithm to iteratively select and improve candidate solutions. + +- EoH (Liu et al. 2024): Evolve both thoughts and codes in an evolutionary search framework for generating high-performance heuristics. +- AIDE (Jiang et al. 2025): A representative method for machine learning engineering tasks, which stores existing solutions in a tree structure and selectively prompts the LLM to draft new solutions, debug or improve previously stored solutions. +- ReEvo (Ye et al. 
2024): A recent evolutionary algorithm that incorporates short-term and long-term reflection modules, as well as a multi-agentic framework. +- MCTS-AHD (Zheng et al. 2025): A Monte Carlo Tree Search (MCTS)-based agentic pipeline that organizes all LLM-generated heuristics in a tree structure and uses the MCTS algorithm with a progressive widening technique to guide the evolution of heuristics. + +# Implementation Details + +For benchmark evaluation, we limit the solving time of each test instance to 10 seconds on a single CPU, such that the exact solving of the problem (achieving the optimal solution) is impossible on most test instances. Test instances that result in a timeout or error receive a score of 0. + +For agent implementation, we use o3-mini-medium as the default base model. Since the original implementations of these agents may use different evaluation setups, we adapt their approaches to our benchmark setting (i.e., end-to-end algorithm search) by adjusting the prompts and tools. For all agents, we set the number of iteration steps to 64. In each step, the agent generates a code block as a candidate algorithm and obtains its evaluation score on the development set. After 64 iterations, the agent produces 64 candidate algorithms, from which the best-performing solution on the development set is selected for final benchmark evaluation. All evaluations are conducted on a single CPU core of a dual AMD EPYC 7313 16-Core processor. + +# Main Results + +Figure 3 presents the results of LLMs and agents on the test set. We highlight the following key findings. + +![](images/e28e2f58b1de465d6a56fd4bf5fc752436137e04e2ed0ea6c5ad22e5eae41853.jpg) +Figure 3: Overall Performance. LLM Agents are all based on o3-mini-medium. Avg Score refers to the average normalized objective scores across all problems. Valid Solution indicates the percentage of test-set problems for which the solutions are feasible. 
Above Classical represents the percentage of test instances where the model outperforms the classical solver baseline. Survival Rate measures the percentage of test instances where the model's score exceeds $99\%$ of the reference score. + +Direct generation performance is limited. LLMs show significantly lower average scores compared to the classical solver. They often fail to generate valid solutions (i.e., bug-free code that satisfies all constraints within the time limit), rarely outperform the classical solver on individual instances, and often fail to produce optimal solutions. Reasoning-capable models tend to perform better than nonreasoning ones. The best-performing LLM for one-shot generation is Claude-3.7 Sonnet, with an average score of 0.65. + +Agentic systems substantially improve LLM performance. Compared to direct generation, the agentic pipeline achieves considerably higher scores across all metrics. Among the evaluated frameworks, FunSearch attains the highest average score of 0.842, outperforming the classical solver (0.797). It also surpasses the solver on over half the test instances (see "Above Classical" score) and achieves a higher survival rate. These results highlight the effectiveness of LLM-based agents in solving CO problems. + +Agent performance varies widely. Some advanced agentic + +frameworks, such as AIDE, underperform compared to simpler strategies like BestOfN on most metrics, though they show higher valid solution rates—possibly due to their debugging capabilities. This indicates that current planning mechanisms in agents are still underdeveloped and may not reliably outperform random sampling. + +Valid solution rates still lag behind classical solvers. According to the Valid Solution metric, the best-performing agents achieve a success rate of 0.555—lower than that of the classical solver (0.611). This suggests that current agents often struggle with solution feasibility and reliability. 
+ +# Agents Error Analysis + +To investigate why the agents' valid solution scores are low, Figure 4 shows the types of errors among invalid solutions for five agents. We observe that code errors (i.e., bugs that prevent compilation) are the least frequent issue. The dominant error type varies across agents: Greedy Refine and ReEvo exhibit more constraint violations, while FunSearch, AIDE, and + +![](images/761b895f5325873705d7f6de307e584bd678aed22c73aa6d947d3748d99f59e0.jpg) +Figure 4: Agents Error Analysis. Distribution of three types of errors among invalid solutions for five agents. + +![](images/85dd609a586c94b65a492ef0a650c2e384cd798f6f13c90be03fb5f20d969b8d.jpg) +Figure 5: Avg Score vs. the number of iteration steps (in total 64 steps) during the algorithm development. + +BoN encounter more timeout errors. This highlights agents' limitations in satisfying constraints and generating efficient algorithms within time limits. + +# Performance over Iteration Steps + +Figure 5 illustrates the performance of several representative LLM agents across different iteration steps. At each step, the agent generates a new algorithm and receives evaluation results on the development set. We also include the performance of the classical solver baseline for comparison. + +All agents exhibit the ability to improve their performance with more iteration steps. FunSearch consistently achieves the best results, reaching a score of 0.8423 and converging after around 50 steps. Notably, both FunSearch and Refine discover algorithms that outperform the classical solver within approximately 10 steps. However, performance tends to saturate after 30 steps, with further search yielding diminishing returns. Enabling more consistent improvements under longer search budgets presents an interesting future direction. + +Figure 6 shows an example trajectory of algorithm development by Greedy Refinement (o3-mini) on TSP over multiple search steps. 
In the early stages, the agent enhances code efficiency by adopting vectorized data structures and utilizing a K-D tree. It then increases the number of search iterations and introduces perturbations to escape local optima. Finally, the agent integrates simulated annealing to balance exploration + +![](images/954c0315897a14185b49ad98c139c33d7c283f5cff1989886d08731c96bbf2cf.jpg) +Figure 6: Trajectory of algorithm development for Greedy Refinement on TSP over 64 steps. The curve and highlighted dots indicate the best-ever score and the steps where improvements occurred. The algorithmic ideas behind each improvement step are summarized in corresponding boxes. + +and exploitation and applies adaptive heuristics for different instance sizes. This example demonstrates that LLMs excel in applying established techniques to improve efficiency and implementation quality, but failing at algorithmic novelty. + +# Comparison to Neural Solvers + +Table 2 compares the performance of agents with representative neural solvers on TSP and MIS, two well-studied CO problems. We include DIMES (Qiu, Sun, and Yang 2022), DIFUSCO (Sun and Yang 2023), and T2T (Li et al. 2023) as neural baselines. For the method with multiple variants, we only include their best results on each dataset. We also consider a hybrid method, LEHD + ReEvo (Ye et al. 2024), which combines the neural solver with LLM-designed heuristics. We report both the objective values (the tour length for TSP and set size for MIS) and the solving time. The results show that the agents such as Greedy Refine and FunSearch achieve competitive performance on both problems, often outperforming existing neural solvers under similar time budget and approaching the best results achieved by previous solvers given extended search time. + +# Solution Analysis + +In Figure 7, we plot the percentage of algorithms developed by the Greedy Refinement agent for the 36 CO problems that utilize existing solvers (e.g., code importing ortools, + +
| Method | TSP-500 Len ↓ | TSP-500 Time ↓ | TSP-1000 Len ↓ | TSP-1000 Time ↓ | TSP-10000 Len ↓ | TSP-10000 Time ↓ | ER-Small Size ↑ | ER-Small Time ↓ | ER-Large Size ↑ | ER-Large Time ↓ |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Gurobi | 16.55 | 45.6h | - | - | - | - | 41.38 | 50.0m | - | - |
| DIMES | 18.84 | 1.1m | 26.36 | 2.4m | 85.75 | 4.8m | 42.06 | 12.0m | 332.80 | 12.5m |
| DIFUSCO | 16.65 | 11.5m | 23.45 | 48.1m | 73.89 | 6.72h | 41.12 | 26.6m | - | - |
| T2T | 16.61 | 16.0m | 23.30 | 54.6m | - | - | 41.37 | 29.7m | - | - |
| LEHD + ReEvo | 16.78 | - | 23.82 | - | - | - | - | - | - | - |
| Greedy Refine (o3-mini) | 17.37 | 19.1m | 24.40 | 19.1m | 77.65 | 2.5m | 42.35 | 20.1m | 354.00 | 2.5m |
| FunSearch (o3-mini) | 17.20 | 19.1m | 25.31 | 19.1m | 80.18 | 2.5m | 41.65 | 1.9m | 356.50 | 2.1m |
+ +Table 2: Objective values and solving time of different solvers on TSP and MIS, with varying data sizes. + +![](images/1d43eb5705a42b8bfe01034dfd0a3d46833c0ca788e8039ac91c760f5fe8f163.jpg) +Figure 7: Percentage of algorithms developed by the Greedy Refinement agent that rely on existing solvers (e.g., code importing ortools, pulp) over 64 iteration steps. We observe an increasing use of existing solvers. + +pulp). The percentages are shown across 64 iteration steps. We observe an increasing trend in the use of existing solvers in the agent's solutions. After 64 iterations, the final usage rate reaches $25\%$ (i.e., solutions for 9 problems use existing solvers). The solvers used throughout all steps and problems are limited to three: ortools, pulp, and scipy. + +This suggests that while existing LLM agents are capable of developing algorithms without relying on existing solvers for most problems, there is a growing tendency to do so over time. Moreover, the solvers used are basic general-purpose tools rather than state-of-the-art solvers specifically designed for each problem (e.g., LKH for TSP), indicating that the agent lacks the necessary knowledge to select the best-performing solver. + +# Related Work + +# Automatic Algorithm Search for CO + +Automating algorithm search for combinatorial optimization (CO) has emerged as a significant research direction in the machine learning community. Traditional machine learning solvers primarily parameterize CO algorithms as trainable neural networks (Bengio, Lodi, and Prouvost 2020; Cappart et al. 2023). Although effective in capturing data distributions, these neural approaches often struggle to generate feasible solutions, necessitating integration with human- + +designed heuristics such as branch-and-bound (Gasse et al. 2019) and tree search (Böther et al. 2022). To address this limitation, Kuang et al. (2024a,b) propose to decompose CO algorithms into symbolic operators and conduct searches in the symbolic space. 
However, designing these unit symbolic operators demands substantial human expertise, limiting generalizability and comprehensive coverage of all algorithm types. Recent advances in Large Language Models (LLMs) and LLM-based agents have significantly mitigated this challenge by enabling symbolic searching in programming language formats (Romera-Paredes et al. 2023; Ye et al. 2024; Liu et al. 2024). Building on these developments, CO-Bench aims to extend the success of these methods to more real-world CO problems and facilitate further research in this domain. + +# CO Benchmarks for LLMs + +Existing CO benchmarks can be roughly classified into two categories. The first type formulates CO problems as question-answering tasks (Fan et al. 2024; Tang et al. 2025). Although LLMs have the potential to solve CO problems via natural language reasoning, their excessive parameter size makes them inefficient CO solvers in general. Therefore, the second type of benchmarks evaluates the tool-using ability of LLMs, e.g., calling an existing CO solver, to address CO problems (Xiao et al. 2024b; Ahmaditeshnizi, Gao, and Udell 2024; Yang et al. 2025b). However, these benchmarks only evaluate the correctness of the generated algorithm on small-scale CO problems, whose problem parameters could be fully expressed in natural language. In contrast, CO-Bench targets scientific and industrial challenges, emphasizing the evaluation of algorithm efficiency on diverse, large-scale CO instances. This results in a more demanding benchmark, well-suited for assessing powerful reasoning models and agents. + +# Conclusion + +This work introduces CO-Bench, the first benchmark designed to evaluate the ability of LLMs in the search of combinatorial optimization (CO) algorithms. 
Our systematic evaluation reveals that reasoning-focused LLMs, especially when paired with agentic frameworks, can automatically discover effective algorithms that rival or surpass the classical solvers designed by human experts, with competitive search time. However, we also identify key limitations of current LLM + +agents, such as their difficulty in understanding problem constraints. These shortcomings highlight the need for future research to enhance agents' problem comprehension and creative reasoning abilities in CO tasks, enabling more robust and autonomous scientific discovery. + +# References + +Ahmaditeshnizi, A.; Gao, W.; and Udell, M. 2024. OptiMUS: Scalable Optimization Modeling with (MI)LP Solvers and Large Language Models. In Salakhutdinov, R.; Kolter, Z.; Heller, K.; Weller, A.; Oliver, N.; Scarlett, J.; and Berkenkamp, F., eds., Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, 577-596. PMLR. +and, J. E. B. 1990. Linear Programming on Cray Supercomputers. Journal of the Operational Research Society, 41(2): 133-139. +Anken, F.; and Beasley, J. E. 2012. Corporate structure optimisation for multinational companies. Omega-international Journal of Management Science, 40: 230-243. +Anthropic. 2025. Claude Sonnet. https://www.anthropic.com/claude/sonnet. Accessed: 2025-03-24. +Beasley, J. E. 1985a. An algorithm for the two-dimensional assortment problem. European Journal of Operational Research, 19: 253-261. +Beasley, J. E. 1985b. Algorithms for Unconstrained Two-Dimensional Guillotine Cutting. Journal of the Operational Research Society, 36: 297-306. +Beasley, J. E. 1985c. An Exact Two-Dimensional Non-Guillotine Cutting Tree Search Procedure. Oper. Res., 33: 49-64. +Beasley, J. E. 1985d. A note on solving large p-median problems. European Journal of Operational Research, 21: 270-273. +Beasley, J. E. 1988. An algorithm for solving large capacitated warehouse location problems. 
European Journal of Operational Research, 33: 314-325. +Beasley, J. E. 1990. OR-Library: Distributing Test Problems by Electronic Mail. Journal of the Operational Research Society, 41: 1069-1072. +Beasley, J. E. 1992. A heuristic for Euclidean and rectilinear Steiner problems. European Journal of Operational Research, 58: 284-292. +Beasley, J. E. 1993. Lagrangean heuristics for location problems. European Journal of Operational Research, 65: 383-399. +Beasley, J. E. 2004. A population heuristic for constrained two-dimensional non-guillotine cutting. *Eur. J. Oper. Res.*, 156: 601-627. +Beasley, J. E.; and Cao, B. 1996. A tree search algorithm for the crew scheduling problem. European Journal of Operational Research, 94: 517-526. +Beasley, J. E.; and Christofides, N. 1989. An algorithm for the resource constrained shortest path problem. Networks, 19: 379-394. + +Beasley, J. E.; and Jornsten, K. 1992. Enhancing an algorithm for set covering problems. European Journal of Operational Research, 58: 293-300. +Beasley, J. E.; Krishnamoorthy, M.; Sharaiha, Y. M.; and Abramson, D. 2000. Scheduling Aircraft Landings - The Static Case. Transp. Sci., 34: 180-197. +Beasley, J. E.; Krishnamoorthy, M.; Sharaiha, Y. M.; and Abramson, D. 2004. Displacement problem and dynamically scheduling aircraft landings. Journal of the Operational Research Society, 55: 54-64. +Bengio, Y.; Lodi, A.; and Prouvost, A. 2020. Machine Learning for Combinatorial Optimization: a Methodological Tour d'Horizon. arXiv:1811.06128. +Berthold, T. 2006. Primal heuristics for mixed integer programs. Ph.D. thesis, Zuse Institute Berlin (ZIB). +Bischoff, E. E. 2006. Three-dimensional packing of items with limited load bearing strength. Eur. J. Oper. Res., 168: 952-966. +Bischoff, E. E.; and Ratcliff, M. S. W. 1995. Issues in the development of approaches to container loading. Omega-international Journal of Management Science, 23: 377-390. +Biskup, D.; and Feldmann, M. 2001. 
Benchmarks for scheduling on a single machine against restrictive and unrestricted common due dates. Comput. Oper. Res., 28: 787-801. +Böther, M.; Kißig, O.; Taraz, M.; Cohen, S.; Seidel, K.; and Friedrich, T. 2022. What's Wrong with Deep Learning in Tree Search for Combinatorial Optimization. In International Conference on Learning Representations. +Cappanera, P.; and Trubian, M. 2001. A Local-Search-Based Heuristic for the Demand-Constrained Multidimensional Knapsack Problem. INFORMS J. Comput., 17: 82-98. +Cappart, Q.; Chételat, D.; Khalil, E. B.; Lodi, A.; Morris, C.; and Veličković, P. 2023. Combinatorial Optimization and Reasoning with Graph Neural Networks. Journal of Machine Learning Research, 24(130): 1-61. +Chakhlevitch, K.; and Glass, C. A. 2009. Scheduling reentrant jobs on parallel machines with a remote server. Comput. Oper. Res., 36: 2580-2589. +Chan, J. S.; Chowdhury, N.; Jaffe, O.; Aung, J.; Sherburn, D.; Mays, E.; Starace, G.; Liu, K.; Maksin, L.; Patwardhan, T. A.; Weng, L.; and Mądry, A. 2024. MLE-bench: Evaluating Machine Learning Agents on Machine Learning Engineering. ArXiv, abs/2410.07095. +Chen, M.; Tworek, J.; Jun, H.; Yuan, Q.; Ponde, H.; Kaplan, J.; Edwards, H.; Burda, Y.; Joseph, N.; Brockman, G.; Ray, A.; Puri, R.; Krueger, G.; Petrov, M.; Khlaaf, H.; Sastry, G.; Mishkin, P.; Chan, B.; Gray, S.; Ryder, N.; Pavlov, M.; Power, A.; Kaiser, L.; Bavarian, M.; Winter, C.; Tillet, P.; Such, F. P.; Cummings, D. W.; Plappert, M.; Chantzis, F.; Barnes, E.; Herbert-Voss, A.; Guss, W. H.; Nichol, A.; Babuschkin, I.; Balaji, S.; Jain, S.; Carr, A.; Leike, J.; Achiam, J.; Misra, V.; Morikawa, E.; Radford, A.; Knight, M. M.; Brundage, M.; Murati, M.; Mayer, K.; Welinder, P.; McGrew, B.; Amodei, D.; McCandlish, S.; Sutskever, I.; and Zaremba, W. 2021. Evaluating Large Language Models Trained on Code. ArXiv, abs/2107.03374. + +Christofides, N.; and Beasley, J. E. 1984. The period routing problem. Networks, 14: 237-256. 
+Christofides, N.; and Whitlock, C. 1977. An Algorithm for Two-Dimensional Cutting Problems. Oper. Res., 25: 30-44. +Chu, P. C.; and Beasley, J. E. 1998. Constraint Handling in Genetic Algorithms: The Set Partitioning Problem. Journal of Heuristics, 4: 323-357. +Crama, Y. 1997. Combinatorial optimization models for production scheduling in automated manufacturing systems. European Journal of Operational Research, 99(1): 136-153. +DeepMind, G. 2025. Flash Thinking: Behind the Scenes of Gemini. https://deepmind.google/technologies/gemini/flash-thinking/. Accessed: 2025-03-24. +DeepSeek-AI. 2024. DeepSeek-V3 Technical Report. ArXiv, abs/2412.19437. +DeepSeek-AI. 2025a. DeepSeek-R1: Incentivizing Reasoning Capability in LLMs via Reinforcement Learning. arXiv:2501.12948. +DeepSeek-AI. 2025b. DeepSeek-R1: Incentivizing Reasoning Capability in LLMs via Reinforcement Learning. ArXiv, abs/2501.12948. +Erdos, P. L.; and Rényi, A. 1984. On the evolution of random graphs. Transactions of the American Mathematical Society, 286: 257-257. +Falkenauer, E. 1996. A hybrid grouping genetic algorithm for bin packing. Journal of Heuristics, 2: 5-30. +Fan, L.; Hua, W.; Li, L.; Ling, H.; and Zhang, Y. 2024. NPHardEval: Dynamic Benchmark on Reasoning Ability of Large Language Models via Complexity Classes. In Ku, L.-W.; Martins, A.; and Srikumar, V., eds., Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 4092-4114. Bangkok, Thailand: Association for Computational Linguistics. +Fleurent, C.; and Ferland, J. A. 1996. Genetic and hybrid algorithms for graph coloring. Annals of Operations Research, 63: 437-461. +Gasse, M.; Chételat, D.; Ferroni, N.; Charlin, L.; and Lodi, A. 2019. Exact Combinatorial Optimization with Graph Convolutional Neural Networks. In Advances in Neural Information Processing Systems 32. 
+Gottweis, J.; Weng, W.-H.; Daryin, A.; Tu, T.; Palepu, A.; Sirkovic, P.; Myaskovsky, A.; Weissenberger, F.; Rong, K.; Tanno, R.; Saab, K.; Popovici, D.; Blum, J.; Zhang, F.; Chou, K.; Hassidim, A.; Gokturk, B.; Vahdat, A.; Kohli, P.; Matias, Y.; Carroll, A.; Kulkarni, K.; Tomaev, N.; Guan, Y.; Dhillon, V.; Vaishnav, E. D.; Lee, B.; Costa, T. R. D.; Penad'es, J. R.; Peltz, G.; Xu, Y.; Pawlosky, A.; Karthikesalingam, A.; and Natarajan, V. 2025. Towards an AI co-scientist. *ArXiv*, abs/2502.18864. +Gusfield, D. 1997. Algorithms on stings, trees, and sequences: Computer science and computational biology. *Acm Sigact News*, 28(4): 41-60. +Hui, B.; Yang, J.; Cui, Z.; Yang, J.; Liu, D.; Zhang, L.; Liu, T.; Zhang, J.; Yu, B.; Dang, K.; Yang, A.; Men, R.; Huang, F.; Quan, S.; Ren, X.; Ren, X.; Zhou, J.; and Lin, J. 2024. Qwen2.5-Coder Technical Report. ArXiv, abs/2409.12186. + +Ivancic, N. J. 1988. An integer programming based heuristic approach to the three-dimensional packing problem. +Jiang, Z.; Schmidt, D.; Srikanth, D.; Xu, D.; Kaplan, I.; Jacenko, D.; and Wu, Y. 2025. AIDE: AI-Driven Exploration in the Space of Code. ArXiv, abs/2502.13138. +Jimenez, C. E.; Yang, J.; Wettig, A.; Yao, S.; Pei, K.; Press, O.; and Narasimhan, K. 2023. SWE-bench: Can Language Models Resolve Real-World GitHub Issues? ArXiv, abs/2310.06770. +Kuang, Y.; Wang, J.; Liu, H.; Zhu, F.; Li, X.; Zeng, J.; HAO, J.; Li, B.; and Wu, F. 2024a. Rethinking Branching on Exact Combinatorial Optimization Solver: The First Deep Symbolic Discovery Framework. In *The Twelfth International Conference on Learning Representations*. +Kuang, Y.; Wang, J.; Zhou, Y.; Li, X.; Zhu, F.; Hao, J.; and Wu, F. 2024b. Towards General Algorithm Discovery for Combinatorial Optimization: Learning Symbolic Branching Policy from Bipartite Graph. 
In Salakhutdinov, R.; Kolter, Z.; Heller, K.; Weller, A.; Oliver, N.; Scarlett, J.; and Berkenkamp, F., eds., Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, 25623-25641. PMLR. +Laporte, G. 1992. The traveling salesman problem: An overview of exact and approximate algorithms. European Journal of Operational Research, 59(2): 231-247. +Li, Y.; Guo, J.; Wang, R.; and Yan, J. 2023. From Distribution Learning in Training to Gradient Search in Testing for Combinatorial Optimization. In Neural Information Processing Systems. +Liu, F.; Tong, X.; Yuan, M.; Lin, X.; Luo, F.; Wang, Z.; Lu, Z.; and Zhang, Q. 2024. Evolution of Heuristics: Towards Efficient Automatic Algorithm Design Using Large Language Model. In ICML. +López, C. O.; and Beasley, J. E. 2016. A formulation space search heuristic for packing unequal circles in a fixed size circular container. Eur. J. Oper. Res., 251: 64-73. +López, C. O.; and Beasley, J. E. 2018. Packing unequal rectangles and squares in a fixed size circular container using formulation space search. Comput. Oper. Res., 94: 106-117. +Madaan, A.; Tandon, N.; Gupta, P.; Hallinan, S.; Gao, L.; Wegreffe, S.; Alon, U.; Dziri, N.; Prabhumoye, S.; Yang, Y.; Welleck, S.; Majumder, B. P.; Gupta, S.; Yazdanbakhsh, A.; and Clark, P. 2023. Self-Refine: Iterative Refinement with Self-Feedback. ArXiv, abs/2303.17651. +Meta. 2024. The Llama 3 Herd of Models. ArXiv, abs/2407.21783. +Mingers, J. C.; and O'Brien, F. A. 1995. Creating student groups with similar characteristics: A heuristic approach. Omega-international Journal of Management Science, 23: 313-321. +Motwani, R.; and Raghavan, P. 2013. Randomized Algorithms. USA: Cambridge University Press. ISBN 0511814070. +Novikov, A.; V~u, N.; Eisenberger, M.; Dupont, E.; Huang, P.-S.; Wagner, A. Z.; Shirobokov, S.; Kozlovskii, B. M.; Ruiz, F. J. R.; Mehrabian, A.; Kumar, M. 
P.; See, A.; Chaudhuri, S.; + +Holland, G.; Davies, A.; Nowozin, S.; Kohli, P.; Balog, M.; and Deepmind, G. 2025. AlphaEvolve: A coding agent for scientific and algorithmic discovery. *ArXiv*, abs/2506.13131. +OpenAI. 2024a. GPT-4o System Card. ArXiv, abs/2410.21276. +OpenAI. 2024b. OpenAI o1 System Card. arXiv:2412.16720. +OpenAI. 2025. OpenAI o3-mini System Card. +Osman, I. H. 1995. Heuristics for the generalised assignment problem: simulated annealing and tabu search approaches. Operations-Research-Spektrum, 17: 211-225. +Osman, I. H.; and Christofides, N. 1994. Capacitated clustering problems by hybrid simulated annealing and tabu search. International Transactions in Operational Research, 1: 317-336. +Papadimitriou, C.; and Steiglitz, K. 1982. Combinatorial Optimization: Algorithms and Complexity, volume 32. Courier Corporation. ISBN 0-13-152462-3. +Petersen, C. C. 1967. Computational Experience with Variants of the Balas Algorithm Applied to the Selection of R&D Projects. Management Science, 13: 736-750. +Qiu, R.; Sun, Z.; and Yang, Y. 2022. DIMES: A Differentiable Meta Solver for Combinatorial Optimization Problems. In Oh, A. H.; Agarwal, A.; Belgrave, D.; and Cho, K., eds., Advances in Neural Information Processing Systems. +Qwen. 2025. QwQ-32B: Embracing the Power of Reinforcement Learning. https://qwenlm.github.io/blog/qwq-32b/. Accessed: 2025-03-24. +Ramamonjison, R.; Yu, T. T.; Li, R.; Li, H.; Carenini, G.; Ghaddar, B.; He, S.; Mostajabdaveh, M.; Banitalebi-Dehkordi, A.; Zhou, Z.; and Zhang, Y. 2023. NL4Opt Competition: Formulating Optimization Problems Based on Their Natural Language Descriptions. In Neural Information Processing Systems. +Ratcliff, M. S. W.; and Bischoff, E. E. 1998. Allowing for weight considerations in container loading. Operations-Research-Spektrum, 20: 65-71. +Romera-Paredes, B.; Barekatain, M.; Novikov, A.; Balog, M.; Kumar, M. P.; Dupont, E.; Ruiz, F. J. R.; Ellenberg, J. 
S.; Wang, P.; Fawzi, O.; Kohli, P.; Fawzi, A.; Grochow, J.; Lodi, A.; Mouret, J.-B.; Ringer, T.; and Yu, T. 2023. Mathematical discoveries from program search with large language models. Nature, 625: 468 - 475. +Shinn, N.; Cassano, F.; Labash, B.; Gopinath, A.; Narasimhan, K.; and Yao, S. 2023. Reflexion: language agents with verbal reinforcement learning. In Neural Information Processing Systems. +Sun, Z.; and Yang, Y. 2023. DIFUSCO: Graph-based Diffusion Solvers for Combinatorial Optimization. ArXiv, abs/2302.08224. +Taillard, E. 1993. Benchmarks for basic scheduling problems. European Journal of Operational Research, 64(2): 278-285. +Tang, J.; Zhang, Q.; Li, Y.; Chen, N.; and Li, J. 2025. GraphArena: Evaluating and Improving Large Language Models on Graph Computation. In International Conference on Learning Representations. + +Vogiatzis, C.; and Pardalos, P. 2013. Combinatorial optimization in transportation and logistics networks, volume 2-5, 673-722. Germany: Springer. ISBN 9781441979964. Publisher Copyright: $\text{©}$ Springer Science+Business Media New York 2013. All rights are reserved. +xAI. 2025. Grok-3 and the Next Phase of xAI. https://x.ai/news/grok-3. Accessed: 2025-03-24. +Xiao, Z.; Zhang, D.; Wu, Y.; Xu, L.; Wang, Y. J.; Han, X.; Fu, X.; Zhong, T.; Zeng, J.; Song, M.; and Chen, G. 2024a. Chain-of-Experts: When LLMs Meet Complex Operations Research Problems. In International Conference on Learning Representations. +Xiao, Z.; Zhang, D.; Wu, Y.; Xu, L.; Wang, Y. J.; Han, X.; Fu, X.; Zhong, T.; Zeng, J.; Song, M.; and Chen, G. 2024b. Chain-of-Experts: When LLMs Meet Complex Operations Research Problems. In The Twelfth International Conference on Learning Representations. +Yang, Z.; Wang, Y.; Huang, Y.; Guo, Z.; Shi, W.; Han, X.; Feng, L.; Song, L.; Liang, X.; and Tang, J. 2025a. OptiBench Meets ReSocratic: Measure and Improve LLMs for Optimization Modeling. In The Thirteenth International Conference on Learning Representations. 
+Yang, Z.; Wang, Y.; Huang, Y.; Guo, Z.; Shi, W.; Han, X.; Feng, L.; Song, L.; Liang, X.; and Tang, J. 2025b. OptiBench Meets ReSocratic: Measure and Improve LLMs for Optimization Modeling. In The Thirteenth International Conference on Learning Representations. +Yao, S.; Zhao, J.; Yu, D.; Du, N.; Shafran, I.; Narasimhan, K.; and Cao, Y. 2022. ReAct: Synergizing Reasoning and Acting in Language Models. ArXiv, abs/2210.03629. +Ye, H.; Wang, J.; Cao, Z.; Berto, F.; Hua, C.; Kim, H.; Park, J.; and Song, G. 2024. ReEvo: Large Language Models as Hyper-Heuristics with Reflective Evolution. In The Thirty-eighth Annual Conference on Neural Information Processing Systems. +Zheng, Z.; Xie, Z.; Wang, Z.; and Hooi, B. 2025. Monte Carlo Tree Search for Comprehensive Exploration in LLM-Based Automatic Heuristic Design. ArXiv, abs/2501.08603. + +# Problem Description and Scores + +# Aircraft landing + +The problem is to schedule landing times for a set of planes across one or more runways such that each landing occurs within its prescribed time window and all pairwise separation requirements are satisfied; specifically, if plane i lands at or before plane j on the same runway, then the gap between their landing times must be at least the specified separation time provided in the input. In a multiple-runway setting, each plane must also be assigned to one runway, and if planes land on different runways, the separation requirement (which may differ) is applied accordingly. Each plane has an earliest, target, and latest landing time, with penalties incurred proportionally for landing before (earliness) or after (lateness) its target time. The objective is to minimize the total penalty cost while ensuring that no constraints are violated—if any constraint is breached, the solution receives no score. + +
MethodScore
Classical Solver0.5985295365478638
BestOfN0.8057479826999232
Refine0.7503157815146175
FunSearch0.9688863336568327
AIDE0.800637046201484
ReEvo0.9134454710810906
MCTS0.801655240273729
EoH0.8019818529389835
+ +# Assignment problem + +The Assignment Problem involves optimally assigning $n$ items to $n$ agents based on a provided $n \times n$ cost matrix, where each entry $\text{cost\_matrix}[i][j]$ denotes the cost of assigning item $i + 1$ to agent $j + 1$ . The goal is to identify a permutation—each item assigned exactly one agent—that minimizes the total assignment cost. Formally, this is an optimization problem to find a permutation $\pi$ of agents such that the total cost $\sum_{i=1}^{n} \text{cost\_matrix}[i - 1][\pi(i) - 1]$ is minimized. The solution returned includes both the minimal total cost and the corresponding optimal assignments. + +Table 3: Aircraft landing + +
MethodScore
Classical Solver1
BestOfN1
Refine1
FunSearch1
AIDE1
ReEvo1
MCTS1
EoH1
+ +# Assortment problem + +This optimization problem involves arranging a set of rectangular pieces within available stock rectangles to minimize the overall waste area percentage. Each stock rectangle has a defined area, and each piece—which may be rotated by $90^{\circ}$ —must be fully contained within a stock without overlapping with other pieces. Additionally, each piece type has specific total minimum and maximum placement limits. You have access to an unlimited number of stocks for each type, but you may use at most two stock types. The objective is to achieve the lowest possible waste area percentage, defined as the ratio of unused area to the total stock area. Solutions must ensure efficient resource utilization while satisfying all geometric and quantity constraints. Any violation of these constraints results in no score. + +Table 4: Assignment problem + +
MethodScore
Classical Solver0.3222852468406736
BestOfN0.36161788534475603
Refine0.10475936163370339
FunSearch0.3622886282031154
AIDE0.1698107561339298
ReEvo0.24290833308629933
MCTS0.1757439194813797
EoH0.2519474328966603
+ +# Bin packing - one-dimensional + +The **one-dimensional bin packing problem** seeks to minimize the number of bins required to pack a given set of items while ensuring that the sum of item sizes within each bin does not exceed the specified bin capacity. Given a test case with an identifier ('id'), a fixed 'bin_capacity', and a list of 'num_items' with their respective sizes ('items'), the objective is to find a packing arrangement that uses the least number of bins. The solution is evaluated based on the total 'num_bins' used, with invalid solutions (e.g., missing or duplicated items, or bins exceeding capacity) incurring an infinitely heavy penalty. The output must include the number of bins used and a valid assignment of item indices to bins. + +Table 5: Assortment problem + +
MethodScore
Classical Solver0.9628049317089281
BestOfN0.8933315064694979
Refine0.9870315022407082
FunSearch0.9557154223933677
AIDE0.8366913237780297
ReEvo0.9492158360156572
MCTS0.9396436307329097
EoH0.9693475618912389
+ +Table 6: Bin packing - one-dimensional + +# Capacitated warehouse location + +The Capacitated Warehouse Location Problem with Splittable Demand aims to determine which warehouses to open and how to allocate portions of customer demands among these warehouses in order to minimize total costs. Given a set of potential warehouse locations, each with a fixed opening cost and capacity limit, and a set of customers with individual demands and associated per-unit assignment costs to each warehouse, the objective is to decide which warehouses to open and how to distribute each customer's demand among these open warehouses. The allocation must satisfy the constraint that the sum of portions assigned to each customer equals their total demand, and that the total demand allocated to any warehouse does not exceed its capacity. The optimization seeks to minimize the sum of fixed warehouse opening costs and the total per-unit assignment costs. However, if any solution violates these constraints (i.e., a customer's demand is not fully satisfied or a warehouse's capacity is exceeded), then no score is provided. + +
MethodScore
Classical Solver0.6976400141361688
BestOfN0.0
Refine0.7518838886310322
FunSearch0.7196713948459038
AIDE0.6647355906610447
ReEvo0.6715266955394039
MCTS0.6891495773105485
EoH0.7502493181324346
+ +# Common due date scheduling + +Given a set of jobs with processing times and a common due date $d$ computed as the floor of $h$ times the total processing time, where $h$ is a predefined fraction (defaulting to 0.6). The goal is to determine an optimal job sequence that minimizes the penalty, calculated as follows: for each job, if its completion time $C$ is earlier than $d$ , an earliness penalty of $a \times (d - C)$ is incurred; if $C$ exceeds $d$ , a tardiness penalty of $b \times (C - d)$ is applied; otherwise, no penalty is incurred. The problem requires finding a permutation of job indices (1-based) that minimizes the total penalty. The evaluation metric sums these penalties for a given schedule. + +Table 7: Capacitated warehouse location + +
MethodScore
Classical Solver0.9187662046144239
BestOfN0.97731110557282
Refine0.9776844987221935
FunSearch0.976604327923604
AIDE0.6291657473867996
ReEvo0.9743199070415761
MCTS0.8838457578182489
EoH0.9773286503168127
+ +# Constrained guillotine cutting + +The problem involves optimizing the guillotine feasible placement of a set of rectangular pieces on a given stock sheet to maximize total value. Each piece type is characterized by its length, width, an upper bound on the number of times it may appear in the final cutting pattern, and an assigned value. Orientation of the pieces is fixed (the edges of the pieces are parallel to the edges of the sheet). The task is to select and place pieces such that each lies completely within the boundaries of the stock sheet, no two pieces overlap, and the number of pieces of any type does not exceed its specified maximum. A set of placements is considered guillotine feasible if there exists at least one straight cut (vertical or horizontal) that does not slice through any rectangle, and the property holds recursively on the resulting subregions. Empty regions or regions exactly matching a placed piece are considered valid. The objective is to maximize the sum of the values of the placed pieces; however, if any spatial or count constraint is violated, the solution is deemed invalid. The output is defined as a dictionary reporting the total value and a list of placements, with each placement specified by the piece type index, x and y coordinates, placed dimensions, and orientation flag. + +Table 8: Common due date scheduling + +
MethodScore
Classical Solver0.7844900098230463
BestOfN0.0
Refine0.981513704843915
FunSearch0.956424099109148
AIDE0.9102922923098641
ReEvo0.0
MCTS0.0
EoH0.0
+ +Table 9: Constrained guillotine cutting + +# Constrained non-guillotine cutting + +The constrained non-guillotine cutting problem involves optimally arranging rectangular pieces onto a single rectangular stock with fixed dimensions (stock_length and stock_width). Each piece type has defined length, width, value, and minimum and maximum usage constraints. The optimization goal is to maximize the total value of all placed pieces, subject to constraints that each piece is entirely within stock boundaries, pieces do not overlap, each piece type's usage falls within its specified [min, max] range, and pieces may optionally be rotated by $90^{\circ}$ . The solution returns a set of placements indicating piece type, bottom-left coordinates $(\mathrm{x},\mathrm{y})$ , and rotation status. If any constraint is violated, the solution receives no score. + +# Container loading + +Solves a container loading problem: Given a 3D container of specified dimensions and multiple box types—each defined by dimensions, orientation constraints, and available quantity—the goal is to optimally place these boxes within + +
MethodScore
Classical Solver0.5585076432266227
BestOfN0.8760613343780126
Refine0.99138085452391
FunSearch0.9623447685846964
AIDE0.8555320134962818
ReEvo0.9264764236682984
MCTS0.7944732650186651
EoH0.9106930512513293
+ +the container to maximize the volume utilization ratio. Each box placement must respect orientation constraints (vertical alignment flags), fit entirely within container boundaries, and avoid overlaps. The solution returns precise coordinates and orientations for each box placement, quantified by a volume utilization score calculated as the total volume of placed boxes divided by the container volume. Invalid placements result in a score of 0.0. + +Table 10: Constrained non-guillotine cutting + +
MethodScore
Classical Solver0.09700224776623062
BestOfN0.8163545342051534
Refine0.18895711345505883
FunSearch0.23070987019597894
AIDE0.7592850816892841
ReEvo0.716081346719743
MCTS0.5451472798828618
EoH0.7795824394970114
+ +# Container loading with weight restrictions + +The Container Loading with Weight Restrictions problem aims to maximize the utilization of a container's volume by selecting and strategically placing boxes inside it. Given a container with specified dimensions (length, width, height) and multiple types of boxes, each characterized by their dimensions, quantities, weights, and load-bearing constraints, the optimization goal is to determine the placement and orientation of these boxes (with each box allowed three possible orientations) that maximizes the ratio of total occupied box volume to container volume. The solution must strictly adhere to spatial constraints (boxes must fit entirely within the container without overlapping), load-bearing constraints (boxes must support the weight of boxes stacked above them according to given limits), and orientation restrictions. The optimization quality is evaluated by the achieved utilization metric, defined as the total volume of successfully placed boxes divided by the container volume; if any constraint is violated, the utilization score is zero. + +Table 11: Container loading + +
MethodScore
Classical Solver0.009225308452359507
BestOfN0.13669723873453465
Refine0.07941319051933145
FunSearch0.2919729304847129
AIDE0.12860429344072807
ReEvo0.1420943670465572
MCTS0.04806324649022297
EoH0.051972410039456414
+ +# Corporate structuring + +Given N countries, each defined by: $\bullet$ a tax code (1: Exemption, 2: Deduction, 3: Source-by-source Pooling, 4: Worldwide Pooling), $\bullet$ a foreign income tax rate, $\bullet$ a domestic income tax rate, and $\bullet$ a profit, and a withholding tax matrix W (where W[i][j] is the rate on dividends from country i to j), construct a valid tree-structured corporate hierarchy (directed, acyclic, connected) rooted at a designated target (whose parent is 0) such that every country with profit $>0$ appears exactly once. + +For each country i, define S as the set of nodes in its subtree (note the subtree includes itself) with a positive profit. Also consider the set of child nodes C_i. If i is not a root country but in the tree, it will send all its income (after tax) to its parent j. Denote this amount as F[i][j]. Assume the total income after domestic tax and withholding tax for country i is: domestic_income_i * (1 - domestic_rate_i) + (\sum_{k \in C_i} F[k][i] * (1 - W[k][i])) The extra foreign tax under different tax codes is defined as follows: 1. No extra tax. 2. Foreign income tax from the child nodes: foreign_income_rate_i * (\sum_{k \in C_i} F[k][i] * (1 - W[k][i])) 3. Foreign income tax computed from the source nodes in each child's subtree: $\sum_{k \in C_i} \max(0, F[k][i] * (1 - W[k][i]) - (1 - foreign_income_rate_i) * (\sum_{s \in S_k} domestic_income_s))$ 4. Foreign income tax from all source nodes in the subtree, excluding itself: $\max(0, \sum_{k \in C_i} F[k][i] * (1 - W[k][i]) - (1 - foreign_income_rate_i) * (\sum_{s \in S_i} domestic_income_s) - domestic_income_i)$ + +Table 12: Container loading with weight restrictions + +
MethodScore
Classical Solver0.9450572839481785
BestOfN0.9450572839481785
Refine0.9726337326585759
FunSearch0.777775452943618
AIDE0.9450572839481785
ReEvo0.5014939649568603
MCTS0.9844897288603699
EoH0.9431107030735252
+ +Table 13: Corporate structuring + +# Crew scheduling + +The Crew Scheduling Problem involves assigning each task—with defined start and finish times—to exactly one crew, aiming to minimize the total transition costs between consecutive tasks. Each crew's schedule must satisfy three constraints: tasks within a crew must not overlap; valid transitions (with associated costs) must exist between every consecutive pair of tasks; and the crew's total duty time (from the start of the first task to the finish of the last) cannot exceed a specified time limit. Additionally, no more than $\mathrm{K}$ crews can be used to cover all tasks. Solutions violating any of these constraints are considered infeasible and receive no score. The optimization objective is therefore to determine assignments of tasks to no more than $\mathrm{K}$ crews that minimize the sum of transition costs while strictly adhering to all constraints, yielding a feasible and cost-effective scheduling solution. + +
MethodScore
Classical Solver0.45498811952880935
BestOfN0.4483461488661745
Refine0.6690343590115082
FunSearch0.5536756258756895
AIDE0.44095505708697136
ReEvo0.45225267224663634
MCTS0.4446817469828879
EoH0.5864457661923881
+ +# Equitable partitioning problem + +The task is to partition a set of individuals—each characterized by multiple binary attributes—into exactly 8 groups such that the distribution of attribute values is as balanced as possible across these groups. For each attribute, count the number of individuals with a '1' in each group. The optimization objective is to minimize the total imbalance, which is defined as follows: for each attribute, calculate the absolute differences between the count in each group and the mean count across all groups, take the average of these differences, and then sum these averages over all attributes. The goal is to determine a group assignment for each individual that achieves the lowest possible total imbalance score. + +Table 14: Crew scheduling + +
MethodScore
Classical Solver1.0
BestOfN1.0
Refine1.0
FunSearch1.0
AIDE0.7777777777777778
ReEvo0.7777777777777778
MCTS1.0
EoH1.0
+ +# Euclidean Steiner problem + +Given a set of 2D points (terminals), the goal of the Euclidean Steiner Problem is to compute a tree connecting all terminals with minimum total length. The total length is measured as the sum of Euclidean distances (where the Euclidean distance between two points $(x1, y1)$ and $(x2, y2)$ is $\sqrt{(x1 - x2)^2 + (y1 - y2)^2}$ ). Unlike a Minimum Spanning Tree (MST) computed solely on the given terminals, a Steiner tree may introduce extra points, called Steiner points, to reduce the overall length. In this formulation, it is assumed that the final candidate tree's total length is given by the MST computed on the union of the original terminals and the reported Steiner points. A lower ratio (candidate_tree_length/MST ORIGINAL_length) indicates a better solution. + +Table 15: Equitable partitioning problem + +
MethodScore
Classical Solver0.9779703480188361
BestOfN0.6291391910535526
Refine0.688025642110573
FunSearch0.6968176110449371
AIDE0.04483890014026932
ReEvo0.5469067768233761
MCTS0.43093954323065975
EoH0.5917817000598826
+ +# Flow shop scheduling + +Given $n$ jobs and $m$ machines, the goal of the flow shop scheduling problem is to determine the optimal job sequence that minimizes the makespan, i.e., the total time required to complete all jobs on all machines. Each job follows the same machine order, and the processing times are specified in an $n \times m$ matrix. The output is a permutation of job indices representing the processing order. If the constraints are not satisfied (e.g., invalid job sequencing), the solution receives no score. The objective is to optimize the makespan using the classical flow shop recurrence. + +Table 16: Euclidean Steiner problem + +
MethodScore
Classical Solver0.9222700445897257
BestOfN0.874217493803887
Refine0.8463439348165006
FunSearch0.8537338049420798
AIDE0.9144895115672386
ReEvo0.8424667927400846
MCTS0.9242143967817102
EoH0.940154419652199
+ +Table 17: Flow shop scheduling + +# Generalised assignment problem + +Generalized Assignment Problem (GAP) + +The Generalized Assignment Problem (GAP) involves assigning $n$ jobs to $m$ agents such that each job is assigned to exactly one agent, and the resource consumption for each agent does not exceed its capacity. The objective is to optimize the total cost based on the problem type. When formulated as a maximization problem, the goal is to maximize the total cost; when formulated as a minimization problem, the goal is to minimize the total cost. Given a cost matrix (representing the cost of assigning jobs to agents), a consumption matrix (indicating the resource usage per assignment), and capacities (the resource limits for each agent), the task is to find a valid assignment that meets the capacity constraints while optimizing the total cost as specified by the problem indicator. + +
MethodScore
Classical Solver1.000509368510793
BestOfN1.000152715871272
Refine0.9997973477884884
FunSearch0.9360910283983461
AIDE1.000152715871272
ReEvo1.0002083856508814
MCTS1.0001026538510593
EoH0.9793902133221158
+ +# Graph colouring + +Given a graph in DIMACS format with vertices, edges, and an adjacency list, the goal is to assign a positive integer color (1..n) to each vertex while ensuring that no two adjacent vertices share the same color. The objective is to minimize the number of distinct colors used. If any two adjacent vertices have the same color, the solution is invalid and receives no score. Otherwise, the score is equal to the number of distinct colors used, with a lower score being better. + +Table 18: Generalised assignment problem + +
MethodScore
Classical Solver0.8679121232535366
BestOfN0.7992347794550977
Refine0.9237393162393163
FunSearch0.8993461774953884
AIDE0.7992347794550977
ReEvo0.8119485901255648
MCTS0.8529682767415909
EoH0.804175457505431
+ +# Hybrid Reentrant Shop Scheduling + +The problem is a Hybrid Reentrant Shop Scheduling problem where each of $n$ jobs must sequentially undergo three operations: an initialization phase on one of $m$ identical primary machines, a setup phase on a single remote server, and a final main processing phase on the same primary machine used + +for initialization. Jobs are initialized in a fixed natural order using list scheduling, while the setup phase is processed on the remote server in an order specified by a permutation decision variable. Additionally, each job is assigned to a primary machine for main processing via a batch_assignment, and on each machine, jobs are processed in natural (initialization) order. The objective is to minimize the makespan, defined as the time when the last job completes its main processing, while ensuring that no machine (primary or server) processes more than one job simultaneously and that all operational precedence constraints are satisfied. + +Table 19: Graph colouring + +
MethodScore
Classical Solver0.9057971372430776
BestOfN0.9872450518587456
Refine0.9966666343001128
FunSearch1.0001780484032463
AIDE0.7457203947696327
ReEvo0.9820554515396009
MCTS0.9961239866411462
EoH0.9841146688046011
+ +# Job shop scheduling + +The job shop scheduling problem requires assigning nonnegative integer start times to a set of operations, structured into multiple jobs, each composed of sequential operations. Each operation is processed on a specific machine for a given processing time. The optimization goal is to minimize the makespan, defined as the maximum completion time across all jobs. Constraints include (i) sequential processing of operations within each job, meaning each operation cannot start before its preceding operation finishes, and (ii) nonoverlapping scheduling of operations on the same machine. If these constraints are violated, the solution receives no score. + +Table 20: Hybrid Reentrant Shop Scheduling + +
MethodScore
Classical Solver0.8202016779421567
BestOfN0.7060712883377539
Refine0.7696287350855926
FunSearch0.8192815531664928
AIDE0.6498336005961379
ReEvo0.7982807066317813
MCTS0.7293663754433233
EoH0.7770594374788831
+ +Table 21: Job shop scheduling + +# MIS + +The Maximum Independent Set (MIS) problem is a fundamental NP-hard optimization problem in graph theory. Given an undirected graph $\mathrm{G} = (\mathrm{V},\mathrm{E})$ , where $\mathrm{V}$ is a set of vertices + +and $\mathbf{E}$ is a set of edges, the goal is to find the largest subset $S$ in $V$ such that no two vertices in $S$ are adjacent (i.e., connected by an edge). + +
MethodScore
Classical Solver0.986
BestOfN0.8461150261004076
Refine0.9078324503859446
FunSearch0.9002038932676987
AIDE0.8425484500134511
ReEvo0.8342509729450779
MCTS0.8433127163177989
EoH0.8763795109859694
+ +# Multi-Demand Multidimensional Knapsack problem + +The Multi-Demand Multidimensional Knapsack Problem (MDMKP) is a binary optimization problem that extends the classical MKP by incorporating both upper-bound $(<=)$ and lower-bound $(>=)$ constraints. Formally, given n decision variables $x_{j} \in \{0,1\}$ , the goal is to maximize $\sum_{j=1}^{n} c_{j} x_{j}$ subject to $\sum_{j=1}^{n} a_{ij} x_{j} \leq b_{i}$ for $i = 1, \ldots, m$ and $\sum_{j=1}^{n} a_{ij} x_{j} \geq b_{i}$ for $i = m+1, \ldots, m+q$ . Instances are generated from standard MKP problems by varying the number of $>=$ constraints (with q taking values 1, m/2, or m) and by using two types of cost coefficients (positive and mixed), thereby producing six distinct variants per base instance. This formulation enables rigorous evaluation of algorithms in contexts where both resource limits and demand fulfillment must be simultaneously addressed. + +Table 22: MIS + +
MethodScore
Classical Solver0.8957822313136857
BestOfN0.7144432351611377
Refine0.8913402342031996
FunSearch0.8354799525874899
AIDE0.8805432369541204
ReEvo0.8920786376031828
MCTS0.8994648109682947
EoH0.9082814870567889
+ +# Multidimensional knapsack problem + +This problem is a multidimensional knapsack optimization where the objective is to maximize the total profit by selecting decision variables, each associated with a profit and resource consumption across multiple constraints. The decision variables must be chosen such that the sum of resource usage for each constraint does not exceed its corresponding capacity. + +Importantly, if any constraint is violated—that is, if the resource consumption for any constraint exceeds its allowed capacity—the solution is deemed infeasible and earns no score. The challenge lies in identifying the optimal combination of items that yields the highest total profit while strictly satisfying all resource constraints. + +Table 23: Multi-Demand Multidimensional Knapsack problem + +
MethodScore
Classical Solver0.9903523477639424
BestOfN0.9401685100749627
Refine0.9947726903727786
FunSearch0.9773347714972982
AIDE0.925117898068383
ReEvo1.0018885951740353
MCTS1.0057751617808324
EoH1.0010112897238341
+ +# Open shop scheduling + +The Open Shop Scheduling Problem involves scheduling a set of jobs across a set of machines with the goal of minimizing the total completion time (makespan). Each job consists of several operations, where each operation must be processed on a specific machine for a given duration. Unlike other scheduling problems, the Open Shop variant has no predetermined order for processing the operations of a job—operations can be scheduled in any order, but a job can only be processed on one machine at a time, and a machine can only process one job at a time. This creates a complex combinatorial optimization challenge where the scheduler must determine both the sequence of operations for each job and the timing of each operation to minimize the overall completion time while ensuring no resource conflicts. + +Table 24: Multidimensional knapsack problem + +
MethodScore
Classical Solver0.7851209868863173
BestOfN0.9017764948703829
Refine0.9930284498507208
FunSearch0.9930284498507208
AIDE0.9156437907474381
ReEvo0.9825099803205837
MCTS0.8960699709846601
EoH0.9930284498507208
+ +Table 25: Open shop scheduling + +# Packing unequal circles + +The problem involves packing a subset of unequal circles into a fixed circular container with radius R_0 and center at the origin, where each circle i has a given radius R_i (sorted in non-decreasing order) and is associated with a binary decision variable alpha_i indicating whether it is packed. The goal is to maximize the number of circles packed—that is, + +maximize $\sum_{i=1}^{n} \alpha_{i}$ —subject to two sets of nonlinear constraints: (1) each packed circle must lie entirely within the container, which is enforced by ensuring that the distance from its center to the container's center plus its radius does not exceed R_0; and (2) any two packed circles must not overlap, meaning the distance between their centers must be at least the sum of their radii. + +
MethodScore
Classical Solver0.9075757575757577
BestOfN0.8939393939393939
Refine0.9803030303030303
FunSearch0.9719696969696969
AIDE0.8825757575757576
ReEvo0.8825757575757576
MCTS0.9522727272727273
EoH0.8825757575757576
+ +# Packing unequal circles area + +The problem involves packing a subset of unequal circles into a fixed circular container with radius $\mathrm{R\_0}$ and center at the origin, where each circle $\mathrm{i}$ has a given radius $\mathrm{R\_i}$ (sorted in non-decreasing order) and is associated with a binary decision variable alpha_i indicating whether it is packed. The goal is to maximize the total area of all circles packed—that is, maximize $\sum_{i=1}^{n} \alpha_i * \pi * R_i^2$ —subject to two sets of nonlinear constraints: (1) each packed circle must lie entirely within the container, which is enforced by ensuring that the distance from its center to the container's center plus its radius does not exceed $\mathrm{R\_0}$ ; and (2) any two packed circles must not overlap, meaning the distance between their centers must be at least the sum of their radii. + +Table 26: Packing unequal circles + +
MethodScore
Classical Solver0.8767896840297265
BestOfN0.9923476599194556
Refine1.0226692239919217
FunSearch1.0404725950195108
AIDE0.5972138868724692
ReEvo0.9101821460280035
MCTS0.9617483396206504
EoH1.0056059827170811
+ +# Packing unequal rectangles and squares + +We are given a set of n unequal rectangles (or squares), each with specified dimensions, and a fixed circular container of radius R centered at the origin. The problem is to decide which rectangles to pack and where to position them—by choosing binary selection variables and continuous center coordinates—so that every packed rectangle is entirely contained within the circle and no two packed rectangles overlap. + +For each rectangle, the four corners must lie inside the circle, and if an item is not packed it is forced to a dummy position. The objective is to maximize the number of packed items, i.e., maximize $\sum_{i=1}^{n} \text{alpha}_i$ (or a related sum when each alpha_i is binary). Note that the rotation of the rectangles (by 90 degrees) is sometimes allowed and your algorithm should take that into account. + +Table 27: Packing unequal circles area + +
MethodScore
Classical Solver0.9134625513058007
BestOfN0.8337025039542202
Refine0.932172162950195
FunSearch0.9228828411608733
AIDE0.7950708457573447
ReEvo0.77954425754769
MCTS0.8028450160315149
EoH0.9228828411608733
+ +# Packing unequal rectangles and squares area + +We consider the problem of selecting and placing a subset of $n$ unequal rectangles (or squares) into a fixed-size circular container of radius $R$ so as to maximize the total area of the packed items. Each item $i$ has given dimensions $L_{i}$ and $W_{i}$ (with $L_{i} = W_{i}$ for squares) and an associated area $L_{i}W_{i}$ . The decision variables include a binary indicator $\alpha_{i}$ for whether item $i$ is packed and continuous variables $(x_{i},y_{i})$ for the placement of its center, along with a rotation angle $\theta_{i}$ when $90^{\circ}$ rotations are allowed. The formulation enforces that for every packed item, all four of its rotated corners must lie within the circle, and that no two packed items overlap; if an item is not packed, it is fixed at a dummy position. + +Table 28: Packing unequal rectangles and squares + +
MethodScore
Classical Solver0.8893527400499813
BestOfN0.9536806816195774
Refine1.0513451711752306
FunSearch1.0839011538182066
AIDE0.8100272732450019
ReEvo0.9435059488868657
MCTS0.995946490673633
EoH0.9566331174271511
+ +Table 29: Packing unequal rectangles and squares area + +# Resource constrained shortest path + +This problem involves finding the shortest path from vertex 1 to vertex $n$ in a directed graph while satisfying resource constraints. Specifically, each vertex and arc has associated resource consumptions, and the cumulative consumption for each resource must fall within the provided lowerBounds and upperBounds. The input includes the number of vertices (n), arcs (m), resource types (K), resource consumption at + +each vertex, and a graph represented as a mapping from vertices to lists of arcs (each arc being a tuple of end vertex, cost, and arc resource consumptions). The optimization objective is to minimize the total arc cost of the path, with the condition that the path is valid—meaning it starts at vertex 1, ends at vertex $n$ , follows defined transitions in the graph, and respects all resource bounds; if any of these constraints are not met, the solution receives no score. + +
MethodScore
Classical Solver0.7508899529136809
BestOfN0.7508899529136808
Refine0.7284494767232047
FunSearch0.7508899529136808
AIDE0.7508899529136808
ReEvo0.7508899529136808
MCTS0.7284494767232047
EoH0.7508899529136808
+ +# Set covering + +Set Covering Problem. The goal is to select a subset of columns, each with an associated cost, such that every row is covered by at least one chosen column. For each row, the available covering columns are provided (as 1-indexed numbers). The objective is to minimize the total cost of the selected columns, and if even one row is left uncovered, then no score is awarded. + +Table 30: Resource constrained shortest path + +
MethodScore
Classical Solver0.8883906244045974
BestOfN0.8213286754887226
Refine0.9056204467263304
FunSearch0.8887733963981322
AIDE0.8639998129016312
ReEvo0.9360686599803572
MCTS0.8672991644233662
EoH0.8843920544743958
+ +# Set partitioning + +This problem involves solving a set partitioning instance where the goal is to choose a subset of columns such that each row is covered exactly once while minimizing the total cost. Each column is associated with a cost and covers a specific set of rows. The optimization problem is defined by selecting columns from a given set so that every row is covered precisely once, and the sum of the selected columns' costs is minimized. If the solution fails to cover every row exactly once, then no score is awarded. + +Table 31: Set covering + +
MethodScore
Classical Solver0.9996401983661346
BestOfN0.8991338255841825
Refine0.7999991398515384
FunSearch0.83333333333333334
AIDE0.9
ReEvo0.8991338255841825
MCTS0.8647769492523454
EoH0.9324671589175159
+ +# TSP + +The Traveling Salesman Problem (TSP) is a classic combinatorial optimization problem where, given a set of cities with known pairwise distances, the objective is to find the shortest possible tour that visits each city exactly once and returns to the starting city. More formally, given a complete graph $\mathrm{G} = (\mathrm{V},\mathrm{E})$ with vertices $\mathrm{V}$ representing cities and edges $\mathrm{E}$ with weights representing distances, we seek to find a Hamiltonian cycle (a closed path visiting each vertex exactly once) of minimum total weight. + +Table 32: Set partitioning + +
MethodScore
Classical Solver0.986
BestOfN0.8590303340408165
Refine0.9399577646813952
FunSearch0.9016741050908584
AIDE0.7710495444635409
ReEvo0.8488918718349553
MCTS0.5961113158302597
EoH0.7935463156320405
+ +Table 33: TSP + +# Uncapacitated warehouse location + +The Uncapacitated Warehouse Location Problem aims to determine which warehouses to open and how to assign each customer entirely to an open warehouse in order to minimize the total cost. Given a set of potential warehouse locations, each with a fixed opening cost, and a set of customers, each with an associated assignment cost for being served by each warehouse, the objective is to select a subset of warehouses to open and assign every customer completely to one of these open warehouses. The optimization minimizes the sum of fixed warehouse opening costs and the customer assignment costs. Each customer must be assigned to exactly one warehouse; if any customer is left unassigned or assigned to more than one warehouse, the solution is considered infeasible. + +# Unconstrained guillotine cutting + +The unconstrained guillotine cutting problem involves selecting and placing a subset of available pieces within a fixed stock rectangle to maximize the total value of the placed pieces. Each piece, defined by its length, width, and value, + +
MethodScore
Classical Solver0.9968157833494645
BestOfN0.98931916166557
Refine1.00000000000002045
FunSearch0.9978398298062331
AIDE0.9994999857664043
ReEvo0.998083746641369
MCTS0.9951604598088827
EoH0.87499999999978142
+ +may be optionally rotated $90^{\circ}$ if allowed and used at most once. The challenge is to determine both the selection and the positioning of these pieces such that they do not overlap and lie entirely within the stock's boundaries. This optimization problem formalizes the decision variables as the x and y coordinates for the bottom-left placement of each piece and, if rotation is allowed, a binary variable indicating its orientation, while the objective function is to maximize the sum of the values of the pieces successfully placed within the stock. + +Table 34: Uncapacitated warehouse location + +
MethodScore
Classical Solver0.9725381370960237
BestOfN0.8701275303357732
Refine0.9618177725501762
FunSearch0.9646369625362231
AIDE0.8512970128354943
ReEvo0.9828452190272524
MCTS0.8628525304460628
EoH0.9649480933563296
+ +# Vehicle routing: period routing + +The Period Vehicle Routing Problem requires planning delivery routes over a multi-day planning period. + +Each customer (other than the depot, whose id is 0) is provided with a list of candidate service schedules. A schedule is represented by a binary vector of length equal to the period (e.g., [1, 0, 1] for a 3-day period), where a 1 in a given position indicates that the customer must be visited on that day. The decision maker must select exactly one candidate schedule for each customer. + +For every day in the planning period, if a customer's chosen schedule indicates a delivery (i.e., a 1), then exactly one vehicle must visit that customer on that day. Otherwise, the customer should not be visited. The decision maker must also design, for each day, the tours for the vehicles. Each tour is a continuous route that starts at the depot (id 0) and, after visiting a subset of customers, returns to the depot. Each vehicle is only allowed to visit the depot once per day—namely, as its starting and ending point—and it is not allowed to return to the depot in the middle of a tour. + +Moreover, each vehicle route must obey a capacity constraint: the total demand of the customers visited on that tour must not exceed the vehicle capacity each day. Although multiple vehicles are available per day (as specified by the input), not all available vehicles have to be used, but the number of tours in a given day cannot exceed the provided number of vehicles. In addition, the tours on each day must cover exactly those customers who require service per the selected schedules, and no customer may be visited more than once in a given day. + +The objective is to choose a schedule for every customer and plan the daily tours so as to minimize the overall distance traveled by all vehicles during the entire planning period. Distances are measured using Euclidean distance. + +Table 35: Unconstrained guillotine cutting + +
MethodScore
Classical Solver0.12437943290991642
BestOfN0.42032326191804853
Refine0.48371172427664344
FunSearch0.32385035648314586
AIDE0.5362363612554435
ReEvo0.0
MCTS0.0
EoH0.0
+ +Table 36: Vehicle routing: period routing + +# p-median - capacitated + +The Capacitated P-Median Problem is a facility location optimization problem where the objective is to select exactly $p$ customers as medians (facility locations) and assign each customer to one of these medians to minimize the total cost, defined as the sum of the Euclidean distances (rounded down to the nearest integer) between customers and their assigned medians. Each median has a capacity constraint $Q$ , meaning the total demand of the customers assigned to it cannot exceed $Q$ . A feasible solution must respect this capacity constraint for all medians; otherwise, it receives a score of zero. The solution is evaluated by the ratio $\text{score} = \frac{\text{bestknown}}{\text{computed\_total\_cost}}$, where computed_total_cost is the total assignment cost if all constraints are satisfied; otherwise, the score is zero. The output consists of the total cost (if feasible), the selected medians, and the customer assignments. + +# p-median - uncapacitated + +The uncapacitated p-median problem is a combinatorial optimization problem defined on a given graph $\mathrm{G} = (\mathrm{V},\mathrm{E})$ with n vertices and m edges. The objective is to select p medians (facility locations) from the set of vertices such that the total assignment cost is minimized. The assignment cost is computed as the sum of the shortest distances from each vertex to its nearest selected median, where distances are given by a precomputed complete cost matrix (obtained via Floyd's algorithm). Formally, given the cost matrix $D\in \mathbb{R}^{n\times n}$ , the optimization problem seeks to find a subset $S\subseteq V$ with $|S| = p$ that minimizes the function: + +
MethodScore
Classical Solver0.8996179560649475
BestOfN0.9892886172082498
Refine0.9737771618997864
FunSearch0.9748437166838722
AIDE0.7442228395960961
ReEvo0.9786585768154689
MCTS0.9829650705934849
EoH0.9853458094532425
+ +$\sum_{v\in V}\min_{s\in S}D(v,s)$ + +where $\mathrm{D}(\mathrm{v},\mathrm{s})$ is the shortest-path distance between vertex v and median s. The solution consists of a list of exactly p distinct vertices representing the chosen medians. + +Table 37: p-median - capacitated + +
MethodScore
Classical Solver0.9952341868141825
BestOfN0.9453613019698086
Refine0.9982141349797949
FunSearch0.9996783954983718
AIDE0.9847816841274486
ReEvo0.9983315585722753
MCTS0.9605290267584901
EoH0.9921177098573016
+ +Table 38: p-median - uncapacitated \ No newline at end of file diff --git a/data/2025/2504_04xxx/2504.04310/images/0978960c07d8958bf634c685b86a78f0a537e3e3f819cabf1c8daefcc47b3df5.jpg b/data/2025/2504_04xxx/2504.04310/images/0978960c07d8958bf634c685b86a78f0a537e3e3f819cabf1c8daefcc47b3df5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9de09d0f47cb9586633592282e8ec4451050a2c9 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/0978960c07d8958bf634c685b86a78f0a537e3e3f819cabf1c8daefcc47b3df5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1fd56e0d4786eb742f595f07607129896278dc6ffaf6d6685a7ee47c219af32e +size 38689 diff --git a/data/2025/2504_04xxx/2504.04310/images/0aa1e9e9a4cf46fb810595679d6adbf788c8a69f1e96077a330f035fb42abeff.jpg b/data/2025/2504_04xxx/2504.04310/images/0aa1e9e9a4cf46fb810595679d6adbf788c8a69f1e96077a330f035fb42abeff.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5d18af75e8e6d4501d76beb6992d553e582c0d6d --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/0aa1e9e9a4cf46fb810595679d6adbf788c8a69f1e96077a330f035fb42abeff.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:66d23b3ddef6285a587f6675bb3d4597cdde111ccefc0240a963794a36719adc +size 38027 diff --git a/data/2025/2504_04xxx/2504.04310/images/1088a9dabd21f9bc4a7c6245c1d52571e7ad4220d2bd766e4e5b0fc595d60be6.jpg b/data/2025/2504_04xxx/2504.04310/images/1088a9dabd21f9bc4a7c6245c1d52571e7ad4220d2bd766e4e5b0fc595d60be6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fbf185740c622cb0c9d42b1c533fa46ef4778554 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/1088a9dabd21f9bc4a7c6245c1d52571e7ad4220d2bd766e4e5b0fc595d60be6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:12b81eb85947ac425fd95292f36cf930f3ad8c58cc22781d53da50b5984cdc52 +size 38124 diff --git 
a/data/2025/2504_04xxx/2504.04310/images/16ae26d8657a16b70285bc50a759134f3021cbfb2790d4037a24eac8b541466d.jpg b/data/2025/2504_04xxx/2504.04310/images/16ae26d8657a16b70285bc50a759134f3021cbfb2790d4037a24eac8b541466d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e4ba35e50fa20c8d695a207da297f2e0ecae39c8 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/16ae26d8657a16b70285bc50a759134f3021cbfb2790d4037a24eac8b541466d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a8950cd3cdb72e46f4f5d5dbbb182b2ba1f005699db6592b8ac90d5d67778c8b +size 38130 diff --git a/data/2025/2504_04xxx/2504.04310/images/1c6900dbfa02950750e85d772fc63df86f44ea725e9af1c1346480142cafa5dd.jpg b/data/2025/2504_04xxx/2504.04310/images/1c6900dbfa02950750e85d772fc63df86f44ea725e9af1c1346480142cafa5dd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c5d2e7908ea2ec0d2d28ea6c1662ed1429695359 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/1c6900dbfa02950750e85d772fc63df86f44ea725e9af1c1346480142cafa5dd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e62d5289756319cda02d3f1147065726ab2dd9f27498c8610b78c893747af76b +size 36725 diff --git a/data/2025/2504_04xxx/2504.04310/images/1d43eb5705a42b8bfe01034dfd0a3d46833c0ca788e8039ac91c760f5fe8f163.jpg b/data/2025/2504_04xxx/2504.04310/images/1d43eb5705a42b8bfe01034dfd0a3d46833c0ca788e8039ac91c760f5fe8f163.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e46bc74c51a4a89e7c24a3b95805ce8606148992 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/1d43eb5705a42b8bfe01034dfd0a3d46833c0ca788e8039ac91c760f5fe8f163.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0b98cbe434e99f5b62e6979bea36b95fbed893add32e1bf1e655fd717ab9f38a +size 18285 diff --git a/data/2025/2504_04xxx/2504.04310/images/1d7a5b5a0d918fa26138e139ee5f84229a4e985dacfb1f0643e8314d50d8e7f7.jpg 
b/data/2025/2504_04xxx/2504.04310/images/1d7a5b5a0d918fa26138e139ee5f84229a4e985dacfb1f0643e8314d50d8e7f7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..425270b9d2b193ca14390125ac76f1f434b04d94 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/1d7a5b5a0d918fa26138e139ee5f84229a4e985dacfb1f0643e8314d50d8e7f7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3b3d0d4dcc3768c8b7a932777835a2e7425b3029a14ba3e41e9cb69c1645f9a3 +size 39074 diff --git a/data/2025/2504_04xxx/2504.04310/images/287382a9ba0debd85980e8bc0ba5ea3829b4b560c6945e4ecea7dc2d6c560da7.jpg b/data/2025/2504_04xxx/2504.04310/images/287382a9ba0debd85980e8bc0ba5ea3829b4b560c6945e4ecea7dc2d6c560da7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6ec085fd82b5d75dc020c1bcb7d6b019457d6747 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/287382a9ba0debd85980e8bc0ba5ea3829b4b560c6945e4ecea7dc2d6c560da7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f778125e2fc19fa7579296e3c02e2776773eb142ff171ab4356bd17e78d1d666 +size 36591 diff --git a/data/2025/2504_04xxx/2504.04310/images/34dc9e46e81271756af4bf590f60077ed6c6035553f204712a8b8d812acc3266.jpg b/data/2025/2504_04xxx/2504.04310/images/34dc9e46e81271756af4bf590f60077ed6c6035553f204712a8b8d812acc3266.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ce1bbb8d40ed9d1dda98b5b041259ece9b7c7829 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/34dc9e46e81271756af4bf590f60077ed6c6035553f204712a8b8d812acc3266.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:998bcfd1a39f0955707df5203c7b083c962c0d75c20eb85abe3135c63b416613 +size 34898 diff --git a/data/2025/2504_04xxx/2504.04310/images/3c12705cc741d7f0c767659c343b383b159ea76d63677ef4b52e1b95e57153c7.jpg b/data/2025/2504_04xxx/2504.04310/images/3c12705cc741d7f0c767659c343b383b159ea76d63677ef4b52e1b95e57153c7.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..797018f2ef9b2e7b4f14d9ec193de1e34f215154 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/3c12705cc741d7f0c767659c343b383b159ea76d63677ef4b52e1b95e57153c7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0b3f63b632ca6ec93d2ba6f784f6cf74fd376bfd491db3274b54ecdc8d17392f +size 21660 diff --git a/data/2025/2504_04xxx/2504.04310/images/428741c0122b2692348448a5b913a1bde056066776cbc270958894b812f42727.jpg b/data/2025/2504_04xxx/2504.04310/images/428741c0122b2692348448a5b913a1bde056066776cbc270958894b812f42727.jpg new file mode 100644 index 0000000000000000000000000000000000000000..71635bfacbc5f8e1905c9c27746977635f04c3ad --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/428741c0122b2692348448a5b913a1bde056066776cbc270958894b812f42727.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b80c1192779745fa965a05a89e68323f331e9c49c5c1c4ca9d3d091b9d42d912 +size 37574 diff --git a/data/2025/2504_04xxx/2504.04310/images/428824232b4a92188e459544fa6dda1bd13bf81b70ba1a5b7ec4cc83d9e076f1.jpg b/data/2025/2504_04xxx/2504.04310/images/428824232b4a92188e459544fa6dda1bd13bf81b70ba1a5b7ec4cc83d9e076f1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..63ad0ede63c3180d566dc02ea2bdb794a6954ea1 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/428824232b4a92188e459544fa6dda1bd13bf81b70ba1a5b7ec4cc83d9e076f1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c58292f58868a9549cd2ceb369e87153ddbeb382889841ff281558d00a9a927 +size 39289 diff --git a/data/2025/2504_04xxx/2504.04310/images/4341a2ba49992c4270bf3c7f14378904a53caf1070112d94ff94c83834258545.jpg b/data/2025/2504_04xxx/2504.04310/images/4341a2ba49992c4270bf3c7f14378904a53caf1070112d94ff94c83834258545.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4d1f837c657e4fbcd09e4c19c2e3db98af26e036 --- /dev/null +++ 
b/data/2025/2504_04xxx/2504.04310/images/4341a2ba49992c4270bf3c7f14378904a53caf1070112d94ff94c83834258545.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:afec1bca331f0d1f8ee3c340bd96ca8fadd78ce9ef98f2985aaf970f14a4b214 +size 4680 diff --git a/data/2025/2504_04xxx/2504.04310/images/4648f1b0a565d32fb21802117ec4365be6437a9799aec287ad8b5c78c80d0ed6.jpg b/data/2025/2504_04xxx/2504.04310/images/4648f1b0a565d32fb21802117ec4365be6437a9799aec287ad8b5c78c80d0ed6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ce5f8eee2707d9fb948c63a8209494b8f517453d --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/4648f1b0a565d32fb21802117ec4365be6437a9799aec287ad8b5c78c80d0ed6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:892a56e36739b7d3ea7212a797782ea9e800055e2e1ba21e794fd7ba66029b07 +size 35496 diff --git a/data/2025/2504_04xxx/2504.04310/images/48780e281cba669ba8e060d3cad28d70ccc1e98e257b2bbacbb65c00b55ff8d6.jpg b/data/2025/2504_04xxx/2504.04310/images/48780e281cba669ba8e060d3cad28d70ccc1e98e257b2bbacbb65c00b55ff8d6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..41a458be231cef15e4713f3f91a5104c2e926720 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/48780e281cba669ba8e060d3cad28d70ccc1e98e257b2bbacbb65c00b55ff8d6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c883c7909c7c711398947cf5caecc0343d1e68704e93e0613e8ac467716e90dc +size 39039 diff --git a/data/2025/2504_04xxx/2504.04310/images/4ae1a2d4fa564cca47a694e279dc0d48198b1277546173bf053c33584be0aa3d.jpg b/data/2025/2504_04xxx/2504.04310/images/4ae1a2d4fa564cca47a694e279dc0d48198b1277546173bf053c33584be0aa3d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dcffb39ea3d877b4b5b7fcce188e63f3ba91f92e --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/4ae1a2d4fa564cca47a694e279dc0d48198b1277546173bf053c33584be0aa3d.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:10873f78b2e07215e45a296bbfce8c7b6a781b81c9add11760f5a5be16f1f1b6 +size 37237 diff --git a/data/2025/2504_04xxx/2504.04310/images/4cd0765a6348b5e88b1231416b40b1a5548d74c49891fb0f535b3a488639f904.jpg b/data/2025/2504_04xxx/2504.04310/images/4cd0765a6348b5e88b1231416b40b1a5548d74c49891fb0f535b3a488639f904.jpg new file mode 100644 index 0000000000000000000000000000000000000000..89711bb81405ad29c4235de2cb0a6f114a8fa160 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/4cd0765a6348b5e88b1231416b40b1a5548d74c49891fb0f535b3a488639f904.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ee21c006b2970eb10a17566911267609fc88be0fc40ad958fa4f7fe158d4ebee +size 37031 diff --git a/data/2025/2504_04xxx/2504.04310/images/4f321ae8f77c6ec6e4c0c37679e475311aa04ba6f9ffae5b1424d65d1be924bb.jpg b/data/2025/2504_04xxx/2504.04310/images/4f321ae8f77c6ec6e4c0c37679e475311aa04ba6f9ffae5b1424d65d1be924bb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0a6234e29ec8ea212aef41b35d059fe952259cbc --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/4f321ae8f77c6ec6e4c0c37679e475311aa04ba6f9ffae5b1424d65d1be924bb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ad83bfcd17140393d99771c646ffcaf9b04dcc3bd47a968edf83043c5a05bb3 +size 34120 diff --git a/data/2025/2504_04xxx/2504.04310/images/5819c6cc93559036bfc18a4f34659127bd440c1958315f284088a1ce262e7485.jpg b/data/2025/2504_04xxx/2504.04310/images/5819c6cc93559036bfc18a4f34659127bd440c1958315f284088a1ce262e7485.jpg new file mode 100644 index 0000000000000000000000000000000000000000..36bcf4ce2ed1a33f93677c9ca8713bdef56a6765 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/5819c6cc93559036bfc18a4f34659127bd440c1958315f284088a1ce262e7485.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b6983d055d41cbf150ba7e08135706f620b5361bfeb26ed3fc162103fdd3f050 +size 38352 diff --git 
a/data/2025/2504_04xxx/2504.04310/images/62a6f341da3b481b237dc66465655ab43268510b005cdc9b2fac8b1a88709108.jpg b/data/2025/2504_04xxx/2504.04310/images/62a6f341da3b481b237dc66465655ab43268510b005cdc9b2fac8b1a88709108.jpg new file mode 100644 index 0000000000000000000000000000000000000000..aa5433a7c0b3ce5823d1f9c82acd869993b91217 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/62a6f341da3b481b237dc66465655ab43268510b005cdc9b2fac8b1a88709108.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:def3ffbf6a1a6dfc28cbe70dbc6a8ec1a68ec6c6984f6cbcfcb134970a952cdf +size 37344 diff --git a/data/2025/2504_04xxx/2504.04310/images/6a363556df56e4fca88eec4a78e5664c1795ce728eecf49f793d6131a7ad5c6b.jpg b/data/2025/2504_04xxx/2504.04310/images/6a363556df56e4fca88eec4a78e5664c1795ce728eecf49f793d6131a7ad5c6b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a6e1d60c09159af03332e1b957d7dfe9b2bae68e --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/6a363556df56e4fca88eec4a78e5664c1795ce728eecf49f793d6131a7ad5c6b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a8828ec8d210ce54e887dae7bd1eff0fb5b44718c21255277baed33b3a60c557 +size 4708 diff --git a/data/2025/2504_04xxx/2504.04310/images/6f0f26aba14980dd6a4df69a7a8dfef8a18f1ea1fd60bff6626b4965f903c7cb.jpg b/data/2025/2504_04xxx/2504.04310/images/6f0f26aba14980dd6a4df69a7a8dfef8a18f1ea1fd60bff6626b4965f903c7cb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2fb498d50106dc29ba3e68ce560adee6dd5fa672 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/6f0f26aba14980dd6a4df69a7a8dfef8a18f1ea1fd60bff6626b4965f903c7cb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ccd7f39839de9362250c1d2053c357b4e84f5ee9618eeca90e1bf6136a797c0 +size 37926 diff --git a/data/2025/2504_04xxx/2504.04310/images/72f88d648949a3f5f81a832559bd20ad7ec54bf2d97f4b2400f475d88ef4a36d.jpg 
b/data/2025/2504_04xxx/2504.04310/images/72f88d648949a3f5f81a832559bd20ad7ec54bf2d97f4b2400f475d88ef4a36d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4ee6de7bd61d5d01f842008e4bdeeba32a3c91ac --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/72f88d648949a3f5f81a832559bd20ad7ec54bf2d97f4b2400f475d88ef4a36d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e1ba3fe3de01c120411c1c156f61ddef26359b445b0a41bee685a1a6b54be1f3 +size 37114 diff --git a/data/2025/2504_04xxx/2504.04310/images/7544e093b87cf336b81774acd3b9ab621799e0e15a8d0ba485f081d12ce12f7e.jpg b/data/2025/2504_04xxx/2504.04310/images/7544e093b87cf336b81774acd3b9ab621799e0e15a8d0ba485f081d12ce12f7e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..045beaf6a16b143d43fe2065dac53ccb31c71ce4 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/7544e093b87cf336b81774acd3b9ab621799e0e15a8d0ba485f081d12ce12f7e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b94d444ba36233aaa01758e1a37b085acc6117f0056abcaef5df025e0401b7a +size 38888 diff --git a/data/2025/2504_04xxx/2504.04310/images/761b895f5325873705d7f6de307e584bd678aed22c73aa6d947d3748d99f59e0.jpg b/data/2025/2504_04xxx/2504.04310/images/761b895f5325873705d7f6de307e584bd678aed22c73aa6d947d3748d99f59e0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..eb65dcef4f7a14fd4df653e5a424b32b31294edb --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/761b895f5325873705d7f6de307e584bd678aed22c73aa6d947d3748d99f59e0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:efde4049ffdcdb6e5378ae79c0a1c8a0e1d80dfcc462c04df13a7b6d12f25bfd +size 35802 diff --git a/data/2025/2504_04xxx/2504.04310/images/80e0672adbc3bc370f8f401854e7171a4d6db329894679d039400745e48eb3cb.jpg b/data/2025/2504_04xxx/2504.04310/images/80e0672adbc3bc370f8f401854e7171a4d6db329894679d039400745e48eb3cb.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..87b365b07666614aeaf469ab28333d1d4fcbe3cc --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/80e0672adbc3bc370f8f401854e7171a4d6db329894679d039400745e48eb3cb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9a314ca84f76893852202c731c87f57f08654303ec5a5554155ec23689ba316c +size 14598 diff --git a/data/2025/2504_04xxx/2504.04310/images/85dd609a586c94b65a492ef0a650c2e384cd798f6f13c90be03fb5f20d969b8d.jpg b/data/2025/2504_04xxx/2504.04310/images/85dd609a586c94b65a492ef0a650c2e384cd798f6f13c90be03fb5f20d969b8d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..22066bb3fee879162eadab6657c7ee928cca9f7a --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/85dd609a586c94b65a492ef0a650c2e384cd798f6f13c90be03fb5f20d969b8d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b20d6f008a63ab1b6cf904b95ecfee24cb3c4607935cb836e4d97333c30b8a06 +size 30619 diff --git a/data/2025/2504_04xxx/2504.04310/images/86195873cb96386557e499b2d62386ffe100f9c92ca0b6fd4133962c06d1ff1b.jpg b/data/2025/2504_04xxx/2504.04310/images/86195873cb96386557e499b2d62386ffe100f9c92ca0b6fd4133962c06d1ff1b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..57218582c5697dded92f839b4107ef45e602bbcc --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/86195873cb96386557e499b2d62386ffe100f9c92ca0b6fd4133962c06d1ff1b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ffa4c0e865f4ea7344c49c61c138ad04ae7c6d64e127cbacea3b2888d2919b7f +size 48451 diff --git a/data/2025/2504_04xxx/2504.04310/images/8f2560e60912e886f7dbdab354daaf81fa6b15cb2ada6a9420535898615ee0e0.jpg b/data/2025/2504_04xxx/2504.04310/images/8f2560e60912e886f7dbdab354daaf81fa6b15cb2ada6a9420535898615ee0e0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ee86803f345e7fae6b9c836af038a8237d7e85bc --- /dev/null +++ 
b/data/2025/2504_04xxx/2504.04310/images/8f2560e60912e886f7dbdab354daaf81fa6b15cb2ada6a9420535898615ee0e0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:590b7e858bf2a4264fd57a06f02c9d11c1f37f31ffca03cf1dcc320a67f16536 +size 32061 diff --git a/data/2025/2504_04xxx/2504.04310/images/954c0315897a14185b49ad98c139c33d7c283f5cff1989886d08731c96bbf2cf.jpg b/data/2025/2504_04xxx/2504.04310/images/954c0315897a14185b49ad98c139c33d7c283f5cff1989886d08731c96bbf2cf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..52f5a0d7858103947b0ed6567e9654dfc0d1a8fa --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/954c0315897a14185b49ad98c139c33d7c283f5cff1989886d08731c96bbf2cf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:69202f853bf8e5ca1d184a746977902f282a8d7a79be81430d5ffa48e956a0a8 +size 56648 diff --git a/data/2025/2504_04xxx/2504.04310/images/95920fac789b7820ecb730f44d27964ef594dfc7be07bcbcd6188d00f543279b.jpg b/data/2025/2504_04xxx/2504.04310/images/95920fac789b7820ecb730f44d27964ef594dfc7be07bcbcd6188d00f543279b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d3839ca65c9c71e2d74013da8122d58e6b35e5c2 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/95920fac789b7820ecb730f44d27964ef594dfc7be07bcbcd6188d00f543279b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5962aa1d06c8cc8980fde79ca010075183b23ddc937d78586dcc1a0669c5730e +size 38712 diff --git a/data/2025/2504_04xxx/2504.04310/images/97261b48dee41841c44d566a07844b6ea9c1508689f3c76c552cbb394dd6373b.jpg b/data/2025/2504_04xxx/2504.04310/images/97261b48dee41841c44d566a07844b6ea9c1508689f3c76c552cbb394dd6373b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0e67a0faa55d70bc4b397baacbebb4b1a432b52f --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/97261b48dee41841c44d566a07844b6ea9c1508689f3c76c552cbb394dd6373b.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:9283427f54e551ecc8435c5e80957f31b5fbea219005957a4cf8fadd073c801e +size 28452 diff --git a/data/2025/2504_04xxx/2504.04310/images/9ae20afe1f24c38f159c8b9861480391efd656ea484355a43d911d4dd75427c8.jpg b/data/2025/2504_04xxx/2504.04310/images/9ae20afe1f24c38f159c8b9861480391efd656ea484355a43d911d4dd75427c8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9325a95c65f9ae7524ef6700fd1ab7f8d826ee77 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/9ae20afe1f24c38f159c8b9861480391efd656ea484355a43d911d4dd75427c8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ab1d7d9eaec91816d48e5c8583a6eef75cac752e882c542a27592e7b06af86ca +size 3852 diff --git a/data/2025/2504_04xxx/2504.04310/images/9bad5f71e7bc3d655bcfadd2bb30a745c49b42f1ff44dc5f582d7ab3bc36ef68.jpg b/data/2025/2504_04xxx/2504.04310/images/9bad5f71e7bc3d655bcfadd2bb30a745c49b42f1ff44dc5f582d7ab3bc36ef68.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6ab51107f5504b8cc66007a3916c9e74d8e4e4ef --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/9bad5f71e7bc3d655bcfadd2bb30a745c49b42f1ff44dc5f582d7ab3bc36ef68.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a4c1e30d4118f2b8012ce2a1c017fca9289d59e4139e3dd8c705ac6297bcb09a +size 38031 diff --git a/data/2025/2504_04xxx/2504.04310/images/a006c1e5f9cdf229c2d31b9f979e80f46279a28e35df50ba90375221cbe29900.jpg b/data/2025/2504_04xxx/2504.04310/images/a006c1e5f9cdf229c2d31b9f979e80f46279a28e35df50ba90375221cbe29900.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1e2b7f281b402266ce248dea3d2428d62f9158ea --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/a006c1e5f9cdf229c2d31b9f979e80f46279a28e35df50ba90375221cbe29900.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e4a621157941ecbc2e81558f21f39ad01e6c3f7b597a7b2920a01ee61457d3d0 +size 39276 diff --git 
a/data/2025/2504_04xxx/2504.04310/images/a931bdbcd976d191d7b7b7449bb0726e814a513e79cb858b40c552dba00b0366.jpg b/data/2025/2504_04xxx/2504.04310/images/a931bdbcd976d191d7b7b7449bb0726e814a513e79cb858b40c552dba00b0366.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2cf22e53cbc68ee1ee5814d365d0e699fba20f52 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/a931bdbcd976d191d7b7b7449bb0726e814a513e79cb858b40c552dba00b0366.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c766724727f3f25f2554191a7e2e14a05404aea35484f4ea4b61ad654f203482 +size 4539 diff --git a/data/2025/2504_04xxx/2504.04310/images/a9a3f71437619cfb208811e67ebfaeb37a592dd218250bc67a376effc29926bd.jpg b/data/2025/2504_04xxx/2504.04310/images/a9a3f71437619cfb208811e67ebfaeb37a592dd218250bc67a376effc29926bd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..08c09628919f32ec0f02d3670baec70d8da00fd7 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/a9a3f71437619cfb208811e67ebfaeb37a592dd218250bc67a376effc29926bd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:58957f2cccbef5781939797bdefcd02a731c57b11bdedf4c4c076fc07d0e59b7 +size 60356 diff --git a/data/2025/2504_04xxx/2504.04310/images/afd684331587462588791f64a1fc570718250bb0ea8219f611f7236ac0250ac0.jpg b/data/2025/2504_04xxx/2504.04310/images/afd684331587462588791f64a1fc570718250bb0ea8219f611f7236ac0250ac0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..46caee3a8f4c01fff979de165cea5059d6b46da1 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/afd684331587462588791f64a1fc570718250bb0ea8219f611f7236ac0250ac0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb36d1a705311cf1ea4bdb1a5c04d3cb27a23d361e2f7cfcc22069c256a92b30 +size 37993 diff --git a/data/2025/2504_04xxx/2504.04310/images/b2feb66ee1886e670c2071e49415cdb8656f698e6cd445b6255dfb5c2a08a973.jpg 
b/data/2025/2504_04xxx/2504.04310/images/b2feb66ee1886e670c2071e49415cdb8656f698e6cd445b6255dfb5c2a08a973.jpg new file mode 100644 index 0000000000000000000000000000000000000000..948b5b06fc098bd078588817a1224780e07d78e4 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/b2feb66ee1886e670c2071e49415cdb8656f698e6cd445b6255dfb5c2a08a973.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c65e0f8b494f4e2b41d5a396ff2193d9b9fb7af61723b14911108c47b9be9a9 +size 7609 diff --git a/data/2025/2504_04xxx/2504.04310/images/bbbdc5ff70a880fb3b8dabef315652e54d5e5b9e7c636cf64ebd852fd9bdf4fb.jpg b/data/2025/2504_04xxx/2504.04310/images/bbbdc5ff70a880fb3b8dabef315652e54d5e5b9e7c636cf64ebd852fd9bdf4fb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..83690cfeab91d6273e8b8e9c1c5d6cd03b5059ed --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/bbbdc5ff70a880fb3b8dabef315652e54d5e5b9e7c636cf64ebd852fd9bdf4fb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:03b301367a876dce88d7513a48e84066bfec86336833aab3bd64825a61006121 +size 36660 diff --git a/data/2025/2504_04xxx/2504.04310/images/c60a3cdc429fa90622c07bf6619a002a061ab95eb3b5be46a9ea9e3efb2bd15f.jpg b/data/2025/2504_04xxx/2504.04310/images/c60a3cdc429fa90622c07bf6619a002a061ab95eb3b5be46a9ea9e3efb2bd15f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bec794b252626f860edb32e52ebd1cd243f7e05f --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/c60a3cdc429fa90622c07bf6619a002a061ab95eb3b5be46a9ea9e3efb2bd15f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1407a1e6ddd2708c393045c89130e0d66f6646eec726c6a34c31d3daac4a742f +size 75152 diff --git a/data/2025/2504_04xxx/2504.04310/images/c7678567bee486687e7a3bfc0696818b6449fe61f6ca4dfa433cb8b9cc2d7d41.jpg b/data/2025/2504_04xxx/2504.04310/images/c7678567bee486687e7a3bfc0696818b6449fe61f6ca4dfa433cb8b9cc2d7d41.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..5a401d5508a2dfd060a85c5eac3585f4db7b7900 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/c7678567bee486687e7a3bfc0696818b6449fe61f6ca4dfa433cb8b9cc2d7d41.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7c79e6e27d8c87478436d012873c9b850033be2e7724f358bb6acc74d3999e82 +size 39117 diff --git a/data/2025/2504_04xxx/2504.04310/images/c84d31b4f0c52d71f3595153fe6a7cd0caf6c2bafccd0acab9a1f99cfdf1decd.jpg b/data/2025/2504_04xxx/2504.04310/images/c84d31b4f0c52d71f3595153fe6a7cd0caf6c2bafccd0acab9a1f99cfdf1decd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..60fb8304abb113d25536d9c55fb5956254f31ba5 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/c84d31b4f0c52d71f3595153fe6a7cd0caf6c2bafccd0acab9a1f99cfdf1decd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6cacef24499d230b731b17aa62846540da0081b0644378eb3a7ef8bb4bab7592 +size 38479 diff --git a/data/2025/2504_04xxx/2504.04310/images/c930fd160520eca9fcd102ed9b95b22f35ef4831d26d034c735305a1a74f8924.jpg b/data/2025/2504_04xxx/2504.04310/images/c930fd160520eca9fcd102ed9b95b22f35ef4831d26d034c735305a1a74f8924.jpg new file mode 100644 index 0000000000000000000000000000000000000000..05425c431cd8eaa78e41793f6691a8db505b67d3 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/c930fd160520eca9fcd102ed9b95b22f35ef4831d26d034c735305a1a74f8924.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:04b4a5342ce30e2dd5ecb4271f595e269a6970baece86a60689c3e9df12b854e +size 38006 diff --git a/data/2025/2504_04xxx/2504.04310/images/d4d0b3bfe3fa404ab607b2ba34a1b69a65b540411a4cc6457131ef71821b803f.jpg b/data/2025/2504_04xxx/2504.04310/images/d4d0b3bfe3fa404ab607b2ba34a1b69a65b540411a4cc6457131ef71821b803f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..763517e429ccc7c4e43e77a40ec1a79685bae197 --- /dev/null +++ 
b/data/2025/2504_04xxx/2504.04310/images/d4d0b3bfe3fa404ab607b2ba34a1b69a65b540411a4cc6457131ef71821b803f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3d352dd16b500944f504eb5c37e1c4082b94b1c773761c8072231c3123ec8604 +size 34764 diff --git a/data/2025/2504_04xxx/2504.04310/images/df632d4e73c3be9bc14ac0b13f0075642385e842b8c38ae2fbcc4eed3552d4e3.jpg b/data/2025/2504_04xxx/2504.04310/images/df632d4e73c3be9bc14ac0b13f0075642385e842b8c38ae2fbcc4eed3552d4e3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b546948758d421aded14f3c4ce4c34062c058a12 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/df632d4e73c3be9bc14ac0b13f0075642385e842b8c38ae2fbcc4eed3552d4e3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b469bd8e27d135ac0ec50e2f98a21d27fb0063ea8f14bae622cf2ab695db79b9 +size 38280 diff --git a/data/2025/2504_04xxx/2504.04310/images/e211d9279db57498735684778c25b26ba91157f8392851900afa6babc3a600a8.jpg b/data/2025/2504_04xxx/2504.04310/images/e211d9279db57498735684778c25b26ba91157f8392851900afa6babc3a600a8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..aa09e06ca4ad1e3bc43a506e923cbf4b19f73c34 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/e211d9279db57498735684778c25b26ba91157f8392851900afa6babc3a600a8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:287c6b9b04e92c5519922cceebdf8d3e39c2823ff3237e833317270755d31399 +size 36277 diff --git a/data/2025/2504_04xxx/2504.04310/images/e28e2f58b1de465d6a56fd4bf5fc752436137e04e2ed0ea6c5ad22e5eae41853.jpg b/data/2025/2504_04xxx/2504.04310/images/e28e2f58b1de465d6a56fd4bf5fc752436137e04e2ed0ea6c5ad22e5eae41853.jpg new file mode 100644 index 0000000000000000000000000000000000000000..acde0f9e401330d05933b3a76206f5dd66531d87 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/e28e2f58b1de465d6a56fd4bf5fc752436137e04e2ed0ea6c5ad22e5eae41853.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:8c1a81de83cac8627c755cb5321bbdc40a2b07c272778c2a2f76753d03d6c823 +size 185350 diff --git a/data/2025/2504_04xxx/2504.04310/images/ee8ea23f151b8868e90fbe35bf9062dedafc6ecd15e07eb3ea88335107dfbc49.jpg b/data/2025/2504_04xxx/2504.04310/images/ee8ea23f151b8868e90fbe35bf9062dedafc6ecd15e07eb3ea88335107dfbc49.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bc53ed37b1844786b08feb2f0fccdac0ae195a04 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/ee8ea23f151b8868e90fbe35bf9062dedafc6ecd15e07eb3ea88335107dfbc49.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d31cf1a2bd24852ad26945482ee55b889c793316c1e790dca719a667a4b3eb00 +size 39192 diff --git a/data/2025/2504_04xxx/2504.04310/images/fd302b72b8f21038db0e5d0441bbdf8d9a3865c61975063a5326b4986fc88f41.jpg b/data/2025/2504_04xxx/2504.04310/images/fd302b72b8f21038db0e5d0441bbdf8d9a3865c61975063a5326b4986fc88f41.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6a1b5f5307aeab7165ae6d8b30a31f3c4b2b59f1 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/fd302b72b8f21038db0e5d0441bbdf8d9a3865c61975063a5326b4986fc88f41.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b46533fb6080ca59612e8b264cd43ad0f331d27d8956a15f5b4450acb49dce62 +size 38585 diff --git a/data/2025/2504_04xxx/2504.04310/images/fe4667fbe3580a51f08d32c73efb6eb488c3c0a67f92e2d3bfb34c39414e02f0.jpg b/data/2025/2504_04xxx/2504.04310/images/fe4667fbe3580a51f08d32c73efb6eb488c3c0a67f92e2d3bfb34c39414e02f0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..daaaa283b6ddcc4698cc8a028a58195909aee4e9 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/images/fe4667fbe3580a51f08d32c73efb6eb488c3c0a67f92e2d3bfb34c39414e02f0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c57329bc0464e386684653e25d648e321ef212fa0bacfb5f86be54d4451c8db3 +size 4759 diff --git 
a/data/2025/2504_04xxx/2504.04310/layout.json b/data/2025/2504_04xxx/2504.04310/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..89462d12ab85149e89b8f4c5acf17e7a4ac7a195 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04310/layout.json @@ -0,0 +1,15597 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 138, + 95, + 474, + 130 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 95, + 474, + 130 + ], + "spans": [ + { + "bbox": [ + 138, + 95, + 474, + 130 + ], + "type": "text", + "content": "CO-Bench: Benchmarking Language Model Agents in Algorithm Search for Combinatorial Optimization" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 156, + 140, + 459, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 140, + 459, + 156 + ], + "spans": [ + { + "bbox": [ + 156, + 140, + 459, + 156 + ], + "type": "text", + "content": "Weiwei Sun* Shengyu Feng* Shanda Li Yiming Yang" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 248, + 159, + 362, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 248, + 159, + 362, + 171 + ], + "spans": [ + { + "bbox": [ + 248, + 159, + 362, + 171 + ], + "type": "text", + "content": "Carnegie Mellon University" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 172, + 171, + 436, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 172, + 171, + 436, + 182 + ], + "spans": [ + { + "bbox": [ + 172, + 171, + 436, + 182 + ], + "type": "text", + "content": "{weiweis, shengyuf, shandal, yiming}@cs.cmu.edu" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 152, + 217, + 192, + 227 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 152, + 217, + 192, + 227 + ], + "spans": [ + { + "bbox": [ + 152, + 217, + 192, + 227 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 60, + 233, + 284, + 404 + ], + "type": "text", + "angle": 0, + "lines": [ + 
{ + "bbox": [ + 60, + 233, + 284, + 404 + ], + "spans": [ + { + "bbox": [ + 60, + 233, + 284, + 404 + ], + "type": "text", + "content": "Although LLM-based agents have attracted significant attention in domains such as software engineering and machine learning research, their role in advancing combinatorial optimization (CO) remains relatively underexplored. This gap underscores the need for a deeper understanding of their potential in tackling structured, constraint-intensive problems—a pursuit currently limited by the absence of comprehensive benchmarks for systematic investigation. To address this, we introduce CO-Bench, a benchmark suite featuring 36 real-world CO problems drawn from a broad range of domains and complexity levels. CO-Bench includes structured problem formulations and curated data to support rigorous investigation of LLM agents. We evaluate multiple agentic frameworks against established human-designed algorithms, revealing the strengths and limitations of existing LLM agents and identifying promising directions for future research. CO-Bench is publicly available at https://github.com/sunnweiwei/CO-Bench." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 138, + 418, + 206, + 430 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 418, + 206, + 430 + ], + "spans": [ + { + "bbox": [ + 138, + 418, + 206, + 430 + ], + "type": "text", + "content": "Introduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 434, + 295, + 566 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 434, + 295, + 566 + ], + "spans": [ + { + "bbox": [ + 50, + 434, + 295, + 566 + ], + "type": "text", + "content": "Combinatorial Optimization (CO) is a foundational problem class in computer science and operation research, focused on finding optimal solutions in discrete, structured, and constraint-rich domains. 
It underpins a wide range of real-world applications, including logistics (Vogiatzis and Pardalos 2013), production planning (Crama 1997), bioinformatics (Gusfield 1997), etc. Many CO problems are computationally intractable and classified as NP-hard, making exact solutions impractical at scale. As a result, developing effective algorithms often demands significant domain expertise and manual effort—posing a long-standing challenge in both academic research and industrial applications." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 50, + 565, + 295, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 565, + 295, + 687 + ], + "spans": [ + { + "bbox": [ + 50, + 565, + 295, + 687 + ], + "type": "text", + "content": "Recent advances in Large Language Models (LLMs) (OpenAI 2024b; DeepSeek-AI 2025a) have positioned LLM-based agents as increasingly promising tools for a variety of prediction and decision-making tasks (Jimenez et al. 2023; Chan et al. 2024; Gottweis et al. 2025). In particular, there is growing interest in applying LLMs to CO problems. Initial investigations have largely focused on solution correctness, evaluated on small-scale test instances (Ramamonjison et al. 2023; Yang et al. 2025a; Xiao et al. 2024a), and are often geared towards solving problems posed by general users. More recent works have begun to explore autonomous LLMs as a new approach." 
+ } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 318, + 214, + 557, + 365 + ], + "blocks": [ + { + "bbox": [ + 318, + 214, + 557, + 365 + ], + "lines": [ + { + "bbox": [ + 318, + 214, + 557, + 365 + ], + "spans": [ + { + "bbox": [ + 318, + 214, + 557, + 365 + ], + "type": "image", + "image_path": "86195873cb96386557e499b2d62386ffe100f9c92ca0b6fd4133962c06d1ff1b.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 315, + 373, + 560, + 420 + ], + "lines": [ + { + "bbox": [ + 315, + 373, + 560, + 420 + ], + "spans": [ + { + "bbox": [ + 315, + 373, + 560, + 420 + ], + "type": "text", + "content": "Figure 1: Overview of CO-Bench. CO-Bench includes 36 problems from 8 categories, and aims to evaluate LLM agents' ability to develop effective and efficient algorithms for solving real-world combinatorial optimization problems." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 315, + 440, + 560, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 440, + 560, + 540 + ], + "spans": [ + { + "bbox": [ + 315, + 440, + 560, + 540 + ], + "type": "text", + "content": "agents capable of conducting research and designing more efficient algorithms for complex scientific and industrial challenges. For example, FunSearch (Romera-Paredes et al. 2023) combines LLM prompting with evolutionary search to discover heuristics that outperform human-designed counterparts in the Cap Set and Bin Packing problems. Subsequent methods (Liu et al. 2024; Ye et al. 2024; Novikov et al. 2025) further improve computational efficiency and broaden applicability to domains such as routing and scheduling." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 315, + 540, + 560, + 627 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 540, + 560, + 627 + ], + "spans": [ + { + "bbox": [ + 315, + 540, + 560, + 627 + ], + "type": "text", + "content": "Despite these advancements, most existing efforts focus on narrow components (e.g., priority functions) within established algorithms, across a limited set of tasks (typically 4-7 problems), and often rely on heavily handcrafted, problem-specific prompts and templates (Romera-Paredes et al. 2023; Ye et al. 2024). Furthermore, there remains a lack of systematic evaluation of how these agents perform across a broader and more diverse collection of real-world CO problems." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 315, + 627, + 560, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 627, + 560, + 704 + ], + "spans": [ + { + "bbox": [ + 315, + 627, + 560, + 704 + ], + "type": "text", + "content": "To address this gap, we introduce CO-Bench, a comprehensive benchmark designed to evaluate LLM agents in the context of efficient CO algorithm development. CO-Bench comprises real-world CO problems spanning a wide range of domains and complexities. Figure 1 illustrates the problem categories and examples, while Table 1 compares CO-Bench with existing CO benchmarks. 
Compared to prior bench" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 219, + 37, + 574 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 219, + 37, + 574 + ], + "spans": [ + { + "bbox": [ + 14, + 219, + 37, + 574 + ], + "type": "text", + "content": "arXiv:2504.04310v3 [cs.CL] 22 Aug 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 64, + 693, + 194, + 704 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 693, + 194, + 704 + ], + "spans": [ + { + "bbox": [ + 64, + 693, + 194, + 704 + ], + "type": "text", + "content": "*These authors contributed equally." + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 54, + 294, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 54, + 294, + 163 + ], + "spans": [ + { + "bbox": [ + 50, + 54, + 294, + 163 + ], + "type": "text", + "content": "marks, CO-Bench offers broader problem coverage, and emphasizes end-to-end evaluation of LLM-based research agents, focusing on their ability to design efficient, potentially novel algorithms from abstract problem descriptions. This design enables reproducible and scalable evaluation of agent performance, including comparisons with human-designed classical CO solver under equivalent time constraints. By doing so, CO-Bench introduces new challenges for LLM agent development, such as the discovery of algorithms that extend beyond current human knowledge of CO." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 50, + 164, + 294, + 339 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 164, + 294, + 339 + ], + "spans": [ + { + "bbox": [ + 50, + 164, + 294, + 339 + ], + "type": "text", + "content": "Using CO-Bench, we benchmark 15 LLMs and 9 agentic frameworks, comparing their performances against both human-designed classical algorithms and the best-known solutions reported in the literature. Our results show that reasoning models (e.g., o3-mini and Claude-3.7-sonnet) consistently outperform standard no-reasoning LLMs. When integrated into agentic frameworks like FunSearch, LLMs further improve through trial-and-error exploration. Notably, on 25 problems, LLM-generated algorithms outperformed classical solvers, and on 3 problems, they surpassed the best-known solutions. However, our analysis also reveals current limitations, such as limited algorithmic novelty and insufficient handling of feasibility constraints. These findings highlight both the promise and challenges of LLM-driven research in CO and suggest key directions for advancing autonomous algorithm design." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 60, + 339, + 294, + 351 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 339, + 294, + 351 + ], + "spans": [ + { + "bbox": [ + 60, + 339, + 294, + 351 + ], + "type": "text", + "content": "In summary, this paper makes the following contributions:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 354, + 294, + 464 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 50, + 354, + 294, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 354, + 294, + 396 + ], + "spans": [ + { + "bbox": [ + 50, + 354, + 294, + 396 + ], + "type": "text", + "content": "(i) We introduce CO-Bench, the first comprehensive benchmark to evaluate the capability of LLMs to develop algorithms for diverse and challenging real-world CO problems" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 399, + 294, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 399, + 294, + 464 + ], + "spans": [ + { + "bbox": [ + 47, + 399, + 294, + 464 + ], + "type": "text", + "content": "(ii) We benchmark 15 LLMs and 9 agentic frameworks, analyzing their performance relative to expert-designed pipelines. Our results highlight the strengths of agent-generated algorithms, while also revealing limitations in planning, feasibility checking, and the generation of efficient solution." 
+ } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 140, + 475, + 205, + 488 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 475, + 205, + 488 + ], + "spans": [ + { + "bbox": [ + 140, + 475, + 205, + 488 + ], + "type": "text", + "content": "Preliminary" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 51, + 491, + 187, + 502 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 491, + 187, + 502 + ], + "spans": [ + { + "bbox": [ + 51, + 491, + 187, + 502 + ], + "type": "text", + "content": "Combinatorial Optimization" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 504, + 294, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 504, + 294, + 559 + ], + "spans": [ + { + "bbox": [ + 50, + 504, + 294, + 559 + ], + "type": "text", + "content": "For each CO problem " + }, + { + "bbox": [ + 50, + 504, + 294, + 559 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 50, + 504, + 294, + 559 + ], + "type": "text", + "content": " (for example, Traveling salesman problem), we follow Papadimitriou and Steiglitz (1982) to formulate it as a constrained optimization problem in the discrete space. 
Consider an instance " + }, + { + "bbox": [ + 50, + 504, + 294, + 559 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 50, + 504, + 294, + 559 + ], + "type": "text", + "content": ", the optimization problem could be expressed as" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 116, + 563, + 293, + 582 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 563, + 293, + 582 + ], + "spans": [ + { + "bbox": [ + 116, + 563, + 293, + 582 + ], + "type": "interline_equation", + "content": "\\min _ {x \\in S _ {c} (p)} f _ {c} (x; p) + g _ {c} (x; p), \\tag {1}", + "image_path": "6a363556df56e4fca88eec4a78e5664c1795ce728eecf49f793d6131a7ad5c6b.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 50, + 586, + 293, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 586, + 293, + 662 + ], + "spans": [ + { + "bbox": [ + 50, + 586, + 293, + 662 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 50, + 586, + 293, + 662 + ], + "type": "inline_equation", + "content": "S_{c}(p)" + }, + { + "bbox": [ + 50, + 586, + 293, + 662 + ], + "type": "text", + "content": " represents the solution space, e.g., " + }, + { + "bbox": [ + 50, + 586, + 293, + 662 + ], + "type": "inline_equation", + "content": "\\mathbf{Z}^{m} \\times \\mathbb{R}^{n}" + }, + { + "bbox": [ + 50, + 586, + 293, + 662 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 50, + 586, + 293, + 662 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 50, + 586, + 293, + 662 + ], + "type": "text", + "content": " discrete variables and " + }, + { + "bbox": [ + 50, + 586, + 293, + 662 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 50, + 586, + 293, + 662 + ], + "type": "text", + "content": " continuous variables, " + }, + { + "bbox": [ + 50, + 586, + 293, + 662 + ], + "type": "inline_equation", + "content": "f_{c}(x;p)" + }, + { + "bbox": [ + 50, + 586, 
+ 293, + 662 + ], + "type": "text", + "content": " corresponds to the objective function, and " + }, + { + "bbox": [ + 50, + 586, + 293, + 662 + ], + "type": "inline_equation", + "content": "g_{c}(x;p)" + }, + { + "bbox": [ + 50, + 586, + 293, + 662 + ], + "type": "text", + "content": " stands for the constraint violation, which is 0 for feasible solutions and " + }, + { + "bbox": [ + 50, + 586, + 293, + 662 + ], + "type": "inline_equation", + "content": "+\\infty" + }, + { + "bbox": [ + 50, + 586, + 293, + 662 + ], + "type": "text", + "content": " otherwise. To avoid the clutter, we simply denote " + }, + { + "bbox": [ + 50, + 586, + 293, + 662 + ], + "type": "inline_equation", + "content": "h_c(x;p) = f_c(x;p) + g_c(x;p)" + }, + { + "bbox": [ + 50, + 586, + 293, + 662 + ], + "type": "text", + "content": " in the following text and omit " + }, + { + "bbox": [ + 50, + 586, + 293, + 662 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 50, + 586, + 293, + 662 + ], + "type": "text", + "content": " if the context is clear." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 50, + 663, + 294, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 663, + 294, + 685 + ], + "spans": [ + { + "bbox": [ + 50, + 663, + 294, + 685 + ], + "type": "text", + "content": "Given an algorithm set " + }, + { + "bbox": [ + 50, + 663, + 294, + 685 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 50, + 663, + 294, + 685 + ], + "type": "text", + "content": " and a problem instance distribution " + }, + { + "bbox": [ + 50, + 663, + 294, + 685 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 50, + 663, + 294, + 685 + ], + "type": "text", + "content": ", the algorithm search problem is defined as" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 115, + 689, + 293, + 707 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 689, + 293, + 707 + ], + "spans": [ + { + "bbox": [ + 115, + 689, + 293, + 707 + ], + "type": "interline_equation", + "content": "\\min _ {A \\in \\mathcal {A}} \\mathbb {E} _ {p \\sim D, x \\sim A (p)} [ h (x; p) ]. \\tag {2}", + "image_path": "a931bdbcd976d191d7b7b7449bb0726e814a513e79cb858b40c552dba00b0366.jpg" + } + ] + } + ], + "index": 12 + }, + { + "type": "table", + "bbox": [ + 317, + 52, + 559, + 159 + ], + "blocks": [ + { + "bbox": [ + 317, + 52, + 559, + 159 + ], + "lines": [ + { + "bbox": [ + 317, + 52, + 559, + 159 + ], + "spans": [ + { + "bbox": [ + 317, + 52, + 559, + 159 + ], + "type": "table", + "html": "
DatasetAlgorithm DevProblem NumInstance NumLargest Variables
NPHardEvalX990024
NL4OPTX52893
OptiBenchX460518
ComplexORX201009
ReEvo75971,000
CO-Bench366,48211,000
", + "image_path": "4648f1b0a565d32fb21802117ec4365be6437a9799aec287ad8b5c78c80d0ed6.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 315, + 165, + 559, + 221 + ], + "lines": [ + { + "bbox": [ + 315, + 165, + 559, + 221 + ], + "spans": [ + { + "bbox": [ + 315, + 165, + 559, + 221 + ], + "type": "text", + "content": "Table 1: Data statistics for CO-Bench and related CO benchmarks, including the indicator for algorithm development support, the number of problem types, the number of test-set problem instances, and the largest number of test-set variables (e.g., the number of nodes in the largest graph)." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 315, + 243, + 559, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 243, + 559, + 354 + ], + "spans": [ + { + "bbox": [ + 315, + 243, + 559, + 354 + ], + "type": "text", + "content": "In contrast to previous neural CO solvers (Bengio, Lodi, and Prouvost 2020) that directly parameterize " + }, + { + "bbox": [ + 315, + 243, + 559, + 354 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 315, + 243, + 559, + 354 + ], + "type": "text", + "content": " with a neural network, we focus on symbolic searching space where " + }, + { + "bbox": [ + 315, + 243, + 559, + 354 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 315, + 243, + 559, + 354 + ], + "type": "text", + "content": " consists of all algorithms that could be represented by a Python Program, with a maximum number of " + }, + { + "bbox": [ + 315, + 243, + 559, + 354 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 315, + 243, + 559, + 354 + ], + "type": "text", + "content": " tokens, where " + }, + { + "bbox": [ + 315, + 243, + 559, + 354 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 315, + 243, + 559, + 354 + ], + "type": "text", + 
"content": " is typically decided by the output length limit of an LLM. Considering the popularity of randomized algorithms (Motwani and Raghavan 2013) for CO, we treat the output of an algorithm " + }, + { + "bbox": [ + 315, + 243, + 559, + 354 + ], + "type": "inline_equation", + "content": "A(p)" + }, + { + "bbox": [ + 315, + 243, + 559, + 354 + ], + "type": "text", + "content": " as a distribution of solutions, while deterministic algorithms would correspond to the point distributions." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 354, + 558, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 354, + 558, + 387 + ], + "spans": [ + { + "bbox": [ + 316, + 354, + 558, + 387 + ], + "type": "text", + "content": "The main endeavor of this work is focused on the shaping of the algorithm set " + }, + { + "bbox": [ + 316, + 354, + 558, + 387 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 316, + 354, + 558, + 387 + ], + "type": "text", + "content": ", the curation of the data distribution " + }, + { + "bbox": [ + 316, + 354, + 558, + 387 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 316, + 354, + 558, + 387 + ], + "type": "text", + "content": " and the definition of " + }, + { + "bbox": [ + 316, + 354, + 558, + 387 + ], + "type": "inline_equation", + "content": "h" + }, + { + "bbox": [ + 316, + 354, + 558, + 387 + ], + "type": "text", + "content": " on our collected CO problems." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 317, + 396, + 379, + 409 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 396, + 379, + 409 + ], + "spans": [ + { + "bbox": [ + 317, + 396, + 379, + 409 + ], + "type": "text", + "content": "LLM Agents" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 411, + 559, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 411, + 559, + 434 + ], + "spans": [ + { + "bbox": [ + 316, + 411, + 559, + 434 + ], + "type": "text", + "content": "Given a CO problem " + }, + { + "bbox": [ + 316, + 411, + 559, + 434 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 316, + 411, + 559, + 434 + ], + "type": "text", + "content": ", a candidate algorithm could be generated by an LLM as" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 391, + 441, + 558, + 453 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 391, + 441, + 558, + 453 + ], + "spans": [ + { + "bbox": [ + 391, + 441, + 558, + 453 + ], + "type": "interline_equation", + "content": "A \\sim M (\\text {t e x t i f y} (c); \\theta), \\tag {3}", + "image_path": "9ae20afe1f24c38f159c8b9861480391efd656ea484355a43d911d4dd75427c8.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 315, + 459, + 559, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 459, + 559, + 536 + ], + "spans": [ + { + "bbox": [ + 315, + 459, + 559, + 536 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 315, + 459, + 559, + 536 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 315, + 459, + 559, + 536 + ], + "type": "text", + "content": " denotes an LLM with parameters " + }, + { + "bbox": [ + 315, + 459, + 559, + 536 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 315, + 459, + 559, + 536 + ], + "type": "text", + "content": ". 
However, one-time generation usually leads to infeasible code or suboptimal algorithms (Madaan et al. 2023), and agentic frameworks address this by enabling iterative refinement through structured interactions with external tools (e.g., a coding environment). Formally, an agent performs reasoning-action iterations (Yao et al. 2022):" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 367, + 543, + 559, + 555 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 367, + 543, + 559, + 555 + ], + "spans": [ + { + "bbox": [ + 367, + 543, + 559, + 555 + ], + "type": "interline_equation", + "content": "r _ {t + 1} \\sim M \\left(\\operatorname {t e x t i f y} _ {r} \\left(c, A _ {t}, H _ {t}\\right); \\theta\\right), \\tag {4}", + "image_path": "fe4667fbe3580a51f08d32c73efb6eb488c3c0a67f92e2d3bfb34c39414e02f0.jpg" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 368, + 557, + 559, + 570 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 368, + 557, + 559, + 570 + ], + "spans": [ + { + "bbox": [ + 368, + 557, + 559, + 570 + ], + "type": "interline_equation", + "content": "a _ {t + 1} \\sim M \\left(\\text {t e x t i f y} _ {a} \\left(r _ {t + 1}, H _ {t}\\right); \\theta\\right), \\tag {5}", + "image_path": "4341a2ba49992c4270bf3c7f14378904a53caf1070112d94ff94c83834258545.jpg" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 315, + 576, + 559, + 644 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 576, + 559, + 644 + ], + "spans": [ + { + "bbox": [ + 315, + 576, + 559, + 644 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 315, + 576, + 559, + 644 + ], + "type": "inline_equation", + "content": "r_t" + }, + { + "bbox": [ + 315, + 576, + 559, + 644 + ], + "type": "text", + "content": " is the reasoning step, " + }, + { + "bbox": [ + 315, + 576, + 559, + 644 + ], + "type": "inline_equation", + "content": "a_t" + }, + { + "bbox": [ + 315, + 576, + 559, + 644 + 
], + "type": "text", + "content": " is the action step (e.g., executing code, evaluating results), and " + }, + { + "bbox": [ + 315, + 576, + 559, + 644 + ], + "type": "inline_equation", + "content": "H_t = (r_i, a_i, \\text{result}(a_i))_{i=1}^{t-1}" + }, + { + "bbox": [ + 315, + 576, + 559, + 644 + ], + "type": "text", + "content": " maintains the interaction history. Thus, an LLM agent is formally defined as an LLM " + }, + { + "bbox": [ + 315, + 576, + 559, + 644 + ], + "type": "inline_equation", + "content": "M(\\cdot; \\theta)" + }, + { + "bbox": [ + 315, + 576, + 559, + 644 + ], + "type": "text", + "content": " guided by a structured workflow specifying iterative external interactions to enhance its outputs." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 409, + 654, + 466, + 666 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 409, + 654, + 466, + 666 + ], + "spans": [ + { + "bbox": [ + 409, + 654, + 466, + 666 + ], + "type": "text", + "content": "CO-Bench" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 315, + 670, + 559, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 670, + 559, + 704 + ], + "spans": [ + { + "bbox": [ + 315, + 670, + 559, + 704 + ], + "type": "text", + "content": "We introduce CO-Bench, a comprehensive benchmark designed to evaluate the algorithm development ability of LLM agents on combinatorial optimization (CO) problems. 
The" + } + ] + } + ], + "index": 25 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 54, + 294, + 122 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 54, + 294, + 122 + ], + "spans": [ + { + "bbox": [ + 50, + 54, + 294, + 122 + ], + "type": "text", + "content": "benchmark consists of 36 problems mainly sourced from OR-Library (Beasley 1990), an established archive containing datasets accumulated by researchers across over 30 years of operations research. These problems span a wide range of realistic CO challenges in academia and industrial applications." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 51, + 131, + 123, + 142 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 131, + 123, + 142 + ], + "spans": [ + { + "bbox": [ + 51, + 131, + 123, + 142 + ], + "type": "text", + "content": "Data Curation" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 50, + 147, + 294, + 181 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 147, + 294, + 181 + ], + "spans": [ + { + "bbox": [ + 50, + 147, + 294, + 181 + ], + "type": "text", + "content": "Problem Selection We first perform rigorous filtering and cleaning, and select 36 CO problems that cover diverse domains and complexities, including:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 63, + 184, + 293, + 557 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 63, + 184, + 293, + 265 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 184, + 293, + 265 + ], + "spans": [ + { + "bbox": [ + 63, + 184, + 293, + 265 + ], + "type": "text", + "content": "- Packing problems: Bin packing (Falkenauer 1996), Multi-Demand Multidimensional Knapsack problem (Cappanera and Trubian 2001), Multidimensional knapsack problem (Petersen 1967), Container loading (Bischoff and Ratcliff 1995; Ivancic 1988), Container 
loading with weight restrictions (Ratcliff and Bischoff 1998; Bischoff 2006), Packing unequal circles (López and Beasley 2016), Packing unequal rectangles and squares number / area (López and Beasley 2018)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 63, + 267, + 293, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 267, + 293, + 308 + ], + "spans": [ + { + "bbox": [ + 63, + 267, + 293, + 308 + ], + "type": "text", + "content": "- Cutting problems: Assortment problem (Beasley 1985a), Constrained / unconstrained guillotine cutting (Christofides and Whitlock 1977; Beasley 1985b), Constrained non-guillotine cutting (Beasley 1985c, 2004)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 63, + 311, + 293, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 311, + 293, + 350 + ], + "spans": [ + { + "bbox": [ + 63, + 311, + 293, + 350 + ], + "type": "text", + "content": "- Facility location problems: Capacitated / Uncapacitated warehouse location (Beasley 1988, 1993), Capacitated / Uncapacitated p-median problem (Beasley 1985d; Osman and Christofides 1994)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 63, + 354, + 293, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 354, + 293, + 415 + ], + "spans": [ + { + "bbox": [ + 63, + 354, + 293, + 415 + ], + "type": "text", + "content": "- Scheduling problems: Aircraft landing (Beasley et al. 2000, 2004), Crew scheduling (Beasley and Cao 1996), Common due date scheduling (Biskup and Feldmann 2001), Flow shop scheduling (Taillard 1993), Hybrid Reentrant Shop Scheduling (Chakhlevitch and Glass 2009), Job shop scheduling (Taillard 1993), Open shop scheduling (Taillard 1993)." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 63, + 417, + 293, + 457 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 417, + 293, + 457 + ], + "spans": [ + { + "bbox": [ + 63, + 417, + 293, + 457 + ], + "type": "text", + "content": "- Routing problems: Traveling salesman problem (Laporte 1992), Period vehicle routing problem (Christofides and Beasley 1984), Resource constrained shortest path (Beasley and Christofides 1989)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 63, + 460, + 293, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 460, + 293, + 480 + ], + "spans": [ + { + "bbox": [ + 63, + 460, + 293, + 480 + ], + "type": "text", + "content": "- Assignment problems: Constrained / unconstrained assignment (Osman 1995; and 1990)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 63, + 483, + 293, + 505 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 483, + 293, + 505 + ], + "spans": [ + { + "bbox": [ + 63, + 483, + 293, + 505 + ], + "type": "text", + "content": "- Tree problems: Euclidean Steiner (Beasley 1992), Corporate structuring (Anken and Beasley 2012)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 63, + 507, + 293, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 507, + 293, + 557 + ], + "spans": [ + { + "bbox": [ + 63, + 507, + 293, + 557 + ], + "type": "text", + "content": "- Graph and set problems: Maximal Independent Set (Erdos and Renyi 1984), Graph colouring (Fleurent and Ferland 1996), Equitable partitioning (Mingers and O'Brien 1995), Set partitioning (Chu and Beasley 1998), Set covering (Beasley and Jornsten 1992)." 
+ } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 50, + 564, + 294, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 564, + 294, + 675 + ], + "spans": [ + { + "bbox": [ + 50, + 564, + 294, + 675 + ], + "type": "text", + "content": "Data Annotation For each problem, we manually annotate the following components: (1) Problem description: a formal definition of the optimization problem in natural language, accompanied by a clearly specified solve function as the starter code; (2) Data loading function: a load_data function to load and preprocess raw data from the test files; (3) Evaluation function: an eval_func function that rigorously and robustly evaluates the quality of a solution. Additionally, each problem comprises a development set and a test set, each containing several problem instances." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 51, + 681, + 294, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 681, + 294, + 704 + ], + "spans": [ + { + "bbox": [ + 51, + 681, + 294, + 704 + ], + "type": "text", + "content": "Evaluation Framework We develop a rigorous and efficient evaluation framework to assess the performance of" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 315, + 54, + 560, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 54, + 560, + 198 + ], + "spans": [ + { + "bbox": [ + 315, + 54, + 560, + 198 + ], + "type": "text", + "content": "LLM agents in simulated, time-constrained competition scenarios (Chan et al. 2024). Specifically, LLM agents operate within a sandbox environment with access to a Linux machine. For each problem, agents are provided with a problem description, development datasets, and an API endpoint for submitting their solutions (i.e. codebases) to receive evaluation feedback. 
An independent evaluation system, which is protected by built-in safeguards, scores the submitted solutions on the development set in parallel. After a limited number of research steps, the agent submits its final solution for evaluation on the test set. During the agent development process, both eval_func and test data are invisible. Figure 2 shows the evaluation pipeline in CO-Bench." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 315, + 202, + 560, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 202, + 560, + 324 + ], + "spans": [ + { + "bbox": [ + 315, + 202, + 560, + 324 + ], + "type": "text", + "content": "Designing Classical Solver Baselines To investigate how existing LLM agents perform compared to classical solvers, we establish a classical solver baseline. Specifically, the authors of this paper—who have extensive experience in related areas and are familiar with the problems in CO-Bench—spent approximately 30 minutes per problem testing and selecting the most effective classical solvers (e.g., LKH for TSP, CPLEX for scheduling, Gurobi for MIS) and tuning their hyperparameters on the development set. This process ensures that the classical solver baseline is well-tuned and competitive for each problem in CO-Bench." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 333, + 410, + 344 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 333, + 410, + 344 + ], + "spans": [ + { + "bbox": [ + 316, + 333, + 410, + 344 + ], + "type": "text", + "content": "Evaluation Metrics" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 315, + 347, + 558, + 393 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 347, + 558, + 393 + ], + "spans": [ + { + "bbox": [ + 315, + 347, + 558, + 393 + ], + "type": "text", + "content": "Avg Score The main evaluation metric is similar to the Primal Gap (Berthold 2006), defined as the normalized score of the primal bound " + }, + { + "bbox": [ + 315, + 347, + 558, + 393 + ], + "type": "inline_equation", + "content": "h(x; p)" + }, + { + "bbox": [ + 315, + 347, + 558, + 393 + ], + "type": "text", + "content": " against a pre-computed optimal (or best-known) objective value " + }, + { + "bbox": [ + 315, + 347, + 558, + 393 + ], + "type": "inline_equation", + "content": "h_p^*" + }, + { + "bbox": [ + 315, + 347, + 558, + 393 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 370, + 399, + 558, + 428 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 370, + 399, + 558, + 428 + ], + "spans": [ + { + "bbox": [ + 370, + 399, + 558, + 428 + ], + "type": "interline_equation", + "content": "s (x, p) = \\frac {\\operatorname* {m i n} \\left\\{\\left| h (x , p) \\right| , \\left| h _ {p} ^ {*} \\right| \\right\\}}{\\operatorname* {m a x} \\left\\{\\left| h (x , p) \\right| , \\left| h _ {p} ^ {*} \\right| \\right\\}}, \\tag {6}", + "image_path": "b2feb66ee1886e670c2071e49415cdb8656f698e6cd445b6255dfb5c2a08a973.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 315, + 433, + 559, + 511 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 433, + 559, + 511 + ], + "spans": [ + { + "bbox": [ + 315, + 433, + 559, 
+ 511 + ], + "type": "text", + "content": "A higher value indicates better performance and a score of 1 signifies the performance identical to the optimal or best-known solution. Program errors or infeasible solutions lead to a score of 0.0. The score of a solver on a given problem is computed by averaging its scores across all test instances. The overall benchmark score is then obtained by averaging these problem-level scores across all 36 problems." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 315, + 516, + 558, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 516, + 558, + 592 + ], + "spans": [ + { + "bbox": [ + 315, + 516, + 558, + 592 + ], + "type": "text", + "content": "Valid Solution We compute the percentage of problems for which the generated code is correct on all test instances. Any raised error—such as constraint violation or timeout—is treated as an invalid signal. If any test instance for a given problem results in an invalid signal, the entire solution for that problem is considered invalid, even if it produces valid results on other test instances." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 315, + 598, + 559, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 598, + 559, + 632 + ], + "spans": [ + { + "bbox": [ + 315, + 598, + 559, + 632 + ], + "type": "text", + "content": "Above Classical Given the performance of classical solver, we calculate the portion of problems where the model outperforms the classical solver baseline." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 315, + 637, + 559, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 637, + 559, + 704 + ], + "spans": [ + { + "bbox": [ + 315, + 637, + 559, + 704 + ], + "type": "text", + "content": "Survival Rate The survival rate measures that, for each problem, the percentage of test instances where the model's solution is above " + }, + { + "bbox": [ + 315, + 637, + 559, + 704 + ], + "type": "inline_equation", + "content": "99\\%" + }, + { + "bbox": [ + 315, + 637, + 559, + 704 + ], + "type": "text", + "content": " of the reference score (reported optimal or best-known solution from literature). This serve as a challenge metric as the model can only get credit when it is very close or better than previous-best algorithm." + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 78, + 52, + 534, + 189 + ], + "blocks": [ + { + "bbox": [ + 78, + 52, + 534, + 189 + ], + "lines": [ + { + "bbox": [ + 78, + 52, + 534, + 189 + ], + "spans": [ + { + "bbox": [ + 78, + 52, + 534, + 189 + ], + "type": "image", + "image_path": "a9a3f71437619cfb208811e67ebfaeb37a592dd218250bc67a376effc29926bd.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 196, + 560, + 232 + ], + "lines": [ + { + "bbox": [ + 50, + 196, + 560, + 232 + ], + "spans": [ + { + "bbox": [ + 50, + 196, + 560, + 232 + ], + "type": "text", + "content": "Figure 2: CO-Bench is an evaluation environment for AI agents. Each problem has an associated description and a development dataset. Following the setup in Chan et al. (2024), the agent-generated code implements an algorithm design, which is further graded and compared against the best-known solution and human expert solution." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 119, + 250, + 224, + 264 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 250, + 224, + 264 + ], + "spans": [ + { + "bbox": [ + 119, + 250, + 224, + 264 + ], + "type": "text", + "content": "Experimental Setup" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 51, + 265, + 164, + 276 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 265, + 164, + 276 + ], + "spans": [ + { + "bbox": [ + 51, + 265, + 164, + 276 + ], + "type": "text", + "content": "Benchmarked Methods" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 278, + 294, + 313 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 278, + 294, + 313 + ], + "spans": [ + { + "bbox": [ + 50, + 278, + 294, + 313 + ], + "type": "text", + "content": "On CO-Bench, we evaluate various LLMs combined with different agentic frameworks, and compare them with existing human-designed CO solvers." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 316, + 295, + 427 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 316, + 295, + 427 + ], + "spans": [ + { + "bbox": [ + 50, + 316, + 295, + 427 + ], + "type": "text", + "content": "LLMs We conduct experiments on 5 open-source models and 10 proprietary models. These include instruction-tuned models such as Llama-3.3-70B-Instruct (Meta 2024), Qwen-2.5-Code-32B-Instruct (Hui et al. 2024), DeepSeek-V3 (DeepSeek-AI 2024), and GPT-4o (OpenAI 2024a), as well as frontier reasoning models, including o3-mini (OpenAI 2025), Claude-3.7-Sonnet-Thinking (Anthropic 2025), DeepSeek-R1 (DeepSeek-AI 2025b), Grok-3-Thinking (xAI 2025), QwQ-32B (Qwen 2025), and Gemini 2.5 Pro (DeepMind 2025)." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 431, + 295, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 431, + 295, + 498 + ], + "spans": [ + { + "bbox": [ + 50, + 431, + 295, + 498 + ], + "type": "text", + "content": "Agentic frameworks For the aforementioned LLMs, we apply various agentic frameworks to evaluate their performance across different strategies. These range from simple approaches, such as direct generation, to more sophisticated frameworks that augment LLM with additional tools, workflows, and test-time compute:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 499, + 295, + 704 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 55, + 499, + 295, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 499, + 295, + 533 + ], + "spans": [ + { + "bbox": [ + 55, + 499, + 295, + 533 + ], + "type": "text", + "content": "- Direct Answer: The simplest approach, where the LLM directly generates a solution to the combinatorial optimization problem without further refinement." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 534, + 295, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 534, + 295, + 567 + ], + "spans": [ + { + "bbox": [ + 56, + 534, + 295, + 567 + ], + "type": "text", + "content": "- BestOfN Sampling (Chen et al. 2021): Generate " + }, + { + "bbox": [ + 56, + 534, + 295, + 567 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 56, + 534, + 295, + 567 + ], + "type": "text", + "content": " candidate solutions, evaluate each on a development set, and select the solution with the best performance." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 568, + 295, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 568, + 295, + 602 + ], + "spans": [ + { + "bbox": [ + 56, + 568, + 295, + 602 + ], + "type": "text", + "content": "- Chain of Experts (Xiao et al. 2024a): A multi-agent prompting framework where agents of different roles cooperate to debug and deliver one solution." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 603, + 295, + 659 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 603, + 295, + 659 + ], + "spans": [ + { + "bbox": [ + 56, + 603, + 295, + 659 + ], + "type": "text", + "content": "- Greedy Refinement (Shinn et al. 2023; Madaan et al. 2023): Iteratively prompt the LLM to refine the current best solution based on the evaluation results of the development set, repeating this refinement process for " + }, + { + "bbox": [ + 56, + 603, + 295, + 659 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 56, + 603, + 295, + 659 + ], + "type": "text", + "content": " steps." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 659, + 294, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 659, + 294, + 704 + ], + "spans": [ + { + "bbox": [ + 56, + 659, + 294, + 704 + ], + "type": "text", + "content": "- FunSearch (Romera-Paredes et al. 2023): Prompt the LLM to either draft a new solution or refine an existing one, followed by employing an evolutionary algorithm to iteratively select and improve candidate solutions." 
+ } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 321, + 251, + 560, + 434 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 321, + 251, + 560, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 251, + 560, + 285 + ], + "spans": [ + { + "bbox": [ + 321, + 251, + 560, + 285 + ], + "type": "text", + "content": "- EoH (Liu et al. 2024): Evolve both thoughts and codes in an evolutionary search framework for generating high-performance heuristics." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 321, + 286, + 559, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 286, + 559, + 342 + ], + "spans": [ + { + "bbox": [ + 321, + 286, + 559, + 342 + ], + "type": "text", + "content": "- AIDE (Jiang et al. 2025): A representative method for machine learning engineering tasks, which stores existing solutions in a tree structure and selectively prompts the LLM to draft new solutions, debug or improve previously stored solutions." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 321, + 343, + 559, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 343, + 559, + 378 + ], + "spans": [ + { + "bbox": [ + 321, + 343, + 559, + 378 + ], + "type": "text", + "content": "- ReEvo (Ye et al. 2024): A recent evolutionary algorithm that incorporates short-term and long-term reflection modules, as well as a multi-agentic framework." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 321, + 379, + 558, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 379, + 558, + 434 + ], + "spans": [ + { + "bbox": [ + 321, + 379, + 558, + 434 + ], + "type": "text", + "content": "- MSTC-AHD (Zheng et al. 
2025): A Monte Carlo Tree Search (MCTS)-based agentic pipeline that organizes all LLM-generated heuristics in a tree structure and uses the MCTS algorithm with progressive widening technique to guide the evolution of heuristics." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 316, + 446, + 429, + 459 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 446, + 429, + 459 + ], + "spans": [ + { + "bbox": [ + 316, + 446, + 429, + 459 + ], + "type": "text", + "content": "Implementation Details" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 315, + 460, + 559, + 516 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 460, + 559, + 516 + ], + "spans": [ + { + "bbox": [ + 315, + 460, + 559, + 516 + ], + "type": "text", + "content": "For benchmark evaluation, we limit the solving time of each test instance to 10 seconds on a single CPU, such that the exact solving of the problem (achieving the optimal solution) is impossible on most test instances. Test instances that result in a timeout or error receive a score of 0." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 315, + 516, + 559, + 659 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 516, + 559, + 659 + ], + "spans": [ + { + "bbox": [ + 315, + 516, + 559, + 659 + ], + "type": "text", + "content": "For agent implementation, we use o3-mini-medium as the default base model. Since the original implementations of these agents may use different evaluation setups, we adapt their approaches to our benchmark setting (i.e., end-to-end algorithm search) by adjusting the prompts and tools. For all agents, we set the number of iteration steps to 64. In each step, the agent generates a code block as a candidate algorithm and obtains its evaluation score on the development set. 
After 64 iterations, the agent produces 64 candidate algorithms, from which the best-performing solution on the development set is selected for final benchmark evaluation. All evaluations are conducted on a single CPU core of a dual AMD EPYC 7313 16-Core processor." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 667, + 382, + 678 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 667, + 382, + 678 + ], + "spans": [ + { + "bbox": [ + 316, + 667, + 382, + 678 + ], + "type": "text", + "content": "Main Results" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 315, + 681, + 559, + 705 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 681, + 559, + 705 + ], + "spans": [ + { + "bbox": [ + 315, + 681, + 559, + 705 + ], + "type": "text", + "content": "Figure 3 presents the results of LLMs and agents on the test set. We highlight the following key findings." + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 53, + 52, + 558, + 411 + ], + "blocks": [ + { + "bbox": [ + 53, + 52, + 558, + 411 + ], + "lines": [ + { + "bbox": [ + 53, + 52, + 558, + 411 + ], + "spans": [ + { + "bbox": [ + 53, + 52, + 558, + 411 + ], + "type": "image", + "image_path": "e28e2f58b1de465d6a56fd4bf5fc752436137e04e2ed0ea6c5ad22e5eae41853.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 419, + 561, + 467 + ], + "lines": [ + { + "bbox": [ + 50, + 419, + 561, + 467 + ], + "spans": [ + { + "bbox": [ + 50, + 419, + 561, + 467 + ], + "type": "text", + "content": "Figure 3: Overall Performance. LLM Agents are all based on o3-mini-medium. Avg Score refers to the average normalized objective scores across all problems. Valid Solution indicates the percentage of test-set problems for which the solutions are feasible. 
Above Classical represents the percentage of test instances where the model outperforms the classical solver baseline. Survival Rate measures the percentage of test instances where the model's score exceeds " + }, + { + "bbox": [ + 50, + 419, + 561, + 467 + ], + "type": "inline_equation", + "content": "99\\%" + }, + { + "bbox": [ + 50, + 419, + 561, + 467 + ], + "type": "text", + "content": " of the reference score." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 50, + 486, + 295, + 587 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 486, + 295, + 587 + ], + "spans": [ + { + "bbox": [ + 50, + 486, + 295, + 587 + ], + "type": "text", + "content": "Direct generation performance is limited. LLMs show significantly lower average scores compared to the classical solver. They often fail to generate valid solutions (i.e., bug-free code that satisfies all constraints within the time limit), rarely outperform the classical solver on individual instances, and often fail to produce optimal solutions. Reasoning-capable models tend to perform better than nonreasoning ones. The best-performing LLM for one-shot generation is Claude-3.7 Sonnet, with an average score of 0.65." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 590, + 295, + 690 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 590, + 295, + 690 + ], + "spans": [ + { + "bbox": [ + 50, + 590, + 295, + 690 + ], + "type": "text", + "content": "Agentic systems substantially improve LLM performance. Compared to direct generation, the agentic pipeline achieves considerably higher scores across all metrics. Among the evaluated frameworks, FunSearch attains the highest average score of 0.842, outperforming the classical solver (0.797). It also surpasses the solver on over half the test instances (see \"Above Classical\" score) and achieves a higher survival rate. 
These results highlight the effectiveness of LLM-based agents in solving CO problems." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 51, + 693, + 294, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 693, + 294, + 706 + ], + "spans": [ + { + "bbox": [ + 51, + 693, + 294, + 706 + ], + "type": "text", + "content": "Agent performance varies widely. Some advanced agentic" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 315, + 486, + 560, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 486, + 560, + 553 + ], + "spans": [ + { + "bbox": [ + 315, + 486, + 560, + 553 + ], + "type": "text", + "content": "frameworks, such as AIDE, underperform compared to simpler strategies like BestOfN on most metrics, though they show higher valid solution rates—possibly due to their debugging capabilities. This indicates that current planning mechanisms in agents are still underdeveloped and may not reliably outperform random sampling." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 315, + 553, + 560, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 553, + 560, + 609 + ], + "spans": [ + { + "bbox": [ + 315, + 553, + 560, + 609 + ], + "type": "text", + "content": "Valid solution rates still lag behind classical solvers. According to the Valid Solution metric, the best-performing agents achieve a success rate of 0.555—lower than that of the classical solver (0.611). This suggests that current agents often struggle with solution feasibility and reliability." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 316, + 620, + 424, + 633 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 620, + 424, + 633 + ], + "spans": [ + { + "bbox": [ + 316, + 620, + 424, + 633 + ], + "type": "text", + "content": "Agents Error Analysis" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 315, + 637, + 560, + 705 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 637, + 560, + 705 + ], + "spans": [ + { + "bbox": [ + 315, + 637, + 560, + 705 + ], + "type": "text", + "content": "To investigate why the agents' valid solution scores are low, Figure 4 shows the types of errors among invalid solutions for five agents. We observe that code errors (i.e., bugs that prevent compilation) are the least frequent issue. The dominant error type varies across agents: Greedy Refine and ReEvo exhibit more constraint violations, while FunSearch, AIDE, and" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 78, + 50, + 534, + 159 + ], + "blocks": [ + { + "bbox": [ + 78, + 50, + 534, + 159 + ], + "lines": [ + { + "bbox": [ + 78, + 50, + 534, + 159 + ], + "spans": [ + { + "bbox": [ + 78, + 50, + 534, + 159 + ], + "type": "image", + "image_path": "761b895f5325873705d7f6de307e584bd678aed22c73aa6d947d3748d99f59e0.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 83, + 165, + 526, + 178 + ], + "lines": [ + { + "bbox": [ + 83, + 165, + 526, + 178 + ], + "spans": [ + { + "bbox": [ + 83, + 165, + 526, + 178 + ], + "type": "text", + "content": "Figure 4: Agents Error Analysis. Distribution of three types of errors among invalid solutions for five agents." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 54, + 198, + 291, + 355 + ], + "blocks": [ + { + "bbox": [ + 54, + 198, + 291, + 355 + ], + "lines": [ + { + "bbox": [ + 54, + 198, + 291, + 355 + ], + "spans": [ + { + "bbox": [ + 54, + 198, + 291, + 355 + ], + "type": "image", + "image_path": "85dd609a586c94b65a492ef0a650c2e384cd798f6f13c90be03fb5f20d969b8d.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 365, + 293, + 388 + ], + "lines": [ + { + "bbox": [ + 50, + 365, + 293, + 388 + ], + "spans": [ + { + "bbox": [ + 50, + 365, + 293, + 388 + ], + "type": "text", + "content": "Figure 5: Avg Score vs. the number of iteration steps (in total 64 steps) during the algorithm development." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 411, + 294, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 411, + 294, + 445 + ], + "spans": [ + { + "bbox": [ + 50, + 411, + 294, + 445 + ], + "type": "text", + "content": "BoN encounter more timeout errors. This highlights agents' limitations in satisfying constraints and generating efficient algorithms within time limits." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 51, + 455, + 209, + 468 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 455, + 209, + 468 + ], + "spans": [ + { + "bbox": [ + 51, + 455, + 209, + 468 + ], + "type": "text", + "content": "Performance over Iteration Steps" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 472, + 293, + 528 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 472, + 293, + 528 + ], + "spans": [ + { + "bbox": [ + 50, + 472, + 293, + 528 + ], + "type": "text", + "content": "Figure 5 illustrates the performance of several representative LLM agents across different iteration steps. 
At each step, the agent generates a new algorithm and receives evaluation results on the development set. We also include the performance of the classical solver baseline for comparison." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 528, + 294, + 627 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 528, + 294, + 627 + ], + "spans": [ + { + "bbox": [ + 50, + 528, + 294, + 627 + ], + "type": "text", + "content": "All agents exhibit the ability to improve their performance with more iteration steps. FunSearch consistently achieves the best results, reaching a score of 0.8423 and converging after around 50 steps. Notably, both FunSearch and Refine discover algorithms that outperform the classical solver within approximately 10 steps. However, performance tends to saturate after 30 steps, with further search yielding diminishing returns. Enabling more consistent improvements under longer search budgets presents an interesting future direction." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 627, + 294, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 627, + 294, + 704 + ], + "spans": [ + { + "bbox": [ + 50, + 627, + 294, + 704 + ], + "type": "text", + "content": "Figure 6 shows an example trajectory of algorithm development by Greedy Refinement (o3-mini) on TSP over multiple search steps. In the early stages, the agent enhances code efficiency by adopting vectorized data structures and utilizing a K-D tree. It then increases the number of search iterations and introduces perturbations to escape local optima. 
Finally, the agent integrates simulated annealing to balance exploration" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 317, + 199, + 558, + 327 + ], + "blocks": [ + { + "bbox": [ + 317, + 199, + 558, + 327 + ], + "lines": [ + { + "bbox": [ + 317, + 199, + 558, + 327 + ], + "spans": [ + { + "bbox": [ + 317, + 199, + 558, + 327 + ], + "type": "image", + "image_path": "954c0315897a14185b49ad98c139c33d7c283f5cff1989886d08731c96bbf2cf.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 315, + 333, + 560, + 390 + ], + "lines": [ + { + "bbox": [ + 315, + 333, + 560, + 390 + ], + "spans": [ + { + "bbox": [ + 315, + 333, + 560, + 390 + ], + "type": "text", + "content": "Figure 6: Trajectory of algorithm development for Greedy Refinement on TSP over 64 steps. The curve and highlighted dots indicate the best-ever score and the steps where improvements occurred. The algorithmic ideas behind each improvement step are summarized in corresponding boxes." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 315, + 411, + 559, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 411, + 559, + 456 + ], + "spans": [ + { + "bbox": [ + 315, + 411, + 559, + 456 + ], + "type": "text", + "content": "and exploitation and applies adaptive heuristics for different instance sizes. This example demonstrates that LLMs excel in applying established techniques to improve efficiency and implementation quality, but failing at algorithmic novelty." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 315, + 465, + 462, + 478 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 465, + 462, + 478 + ], + "spans": [ + { + "bbox": [ + 315, + 465, + 462, + 478 + ], + "type": "text", + "content": "Comparison to Neural Solvers" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 315, + 480, + 560, + 646 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 480, + 560, + 646 + ], + "spans": [ + { + "bbox": [ + 315, + 480, + 560, + 646 + ], + "type": "text", + "content": "Table 2 compares the performance of agents with representative neural solvers on TSP and MIS, two well-studied CO problems. We include DIMES (Qiu, Sun, and Yang 2022), DIFUSCO (Sun and Yang 2023), and T2T (Li et al. 2023) as neural baselines. For the method with multiple variants, we only include their best results on each dataset. We also consider a hybrid method, LEHD + ReEvo (Ye et al. 2024), which combines the neural solver with LLM-designed heuristics. We report both the objective values (the tour length for TSP and set size for MIS) and the solving time. The results show that the agents such as Greedy Refine and FunSearch achieve competitive performance on both problems, often outperforming existing neural solvers under similar time budget and approaching the best results achieved by previous solvers given extended search time." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 315, + 655, + 402, + 667 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 655, + 402, + 667 + ], + "spans": [ + { + "bbox": [ + 315, + 655, + 402, + 667 + ], + "type": "text", + "content": "Solution Analysis" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 315, + 670, + 559, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 670, + 559, + 704 + ], + "spans": [ + { + "bbox": [ + 315, + 670, + 559, + 704 + ], + "type": "text", + "content": "In Figure 7, we plot the percentage of algorithms developed by the Greedy Refinement agent for the 36 CO problems that utilize existing solvers (e.g., code importing ortools," + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 90, + 52, + 520, + 168 + ], + "blocks": [ + { + "bbox": [ + 90, + 52, + 520, + 168 + ], + "lines": [ + { + "bbox": [ + 90, + 52, + 520, + 168 + ], + "spans": [ + { + "bbox": [ + 90, + 52, + 520, + 168 + ], + "type": "table", + "html": "
TSP-500TSP-1000TSP-10000ER-SmallER-Large
Len ↓Time ↓Len ↓Time ↓Len ↓Time ↓Size ↑Time ↓Size ↑Time ↓
Gurobi16.5545.6h----41.3850.0m--
DIMES18.841.1m26.362.4m85.754.8m42.0612.0m332.8012.5m
DIFUSCO16.6511.5m23.4548.1m73.896.72h41.1226.6m--
T2T16.6116.0m23.3054.6m--41.3729.7m--
LEHD + ReEvo16.78-23.82-------
Greedy Refine (o3-mini)17.3719.1m24.4019.1m77.652.5m42.3520.1m354.002.5m
FunSearch (o3-mini)17.2019.1m25.3119.1m80.182.5m41.651.9m356.502.1m
", + "image_path": "c60a3cdc429fa90622c07bf6619a002a061ab95eb3b5be46a9ea9e3efb2bd15f.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 94, + 176, + 514, + 189 + ], + "lines": [ + { + "bbox": [ + 94, + 176, + 514, + 189 + ], + "spans": [ + { + "bbox": [ + 94, + 176, + 514, + 189 + ], + "type": "text", + "content": "Table 2: Objective values and solving time of different solvers on TSP and MIS, with varying data sizes." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 70, + 214, + 273, + 345 + ], + "blocks": [ + { + "bbox": [ + 70, + 214, + 273, + 345 + ], + "lines": [ + { + "bbox": [ + 70, + 214, + 273, + 345 + ], + "spans": [ + { + "bbox": [ + 70, + 214, + 273, + 345 + ], + "type": "image", + "image_path": "1d43eb5705a42b8bfe01034dfd0a3d46833c0ca788e8039ac91c760f5fe8f163.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 354, + 295, + 399 + ], + "lines": [ + { + "bbox": [ + 50, + 354, + 295, + 399 + ], + "spans": [ + { + "bbox": [ + 50, + 354, + 295, + 399 + ], + "type": "text", + "content": "Figure 7: Percentage of algorithms developed by the Greedy Refinement agent that rely on existing solvers (e.g., code importing ortools, pulp) over 64 iteration steps. We observe an increasing use of existing solvers." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 420, + 293, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 420, + 293, + 486 + ], + "spans": [ + { + "bbox": [ + 50, + 420, + 293, + 486 + ], + "type": "text", + "content": "pulp). The percentages are shown across 64 iteration steps. We observe an increasing trend in the use of existing solvers in the agent's solutions. 
After 64 iterations, the final usage rate reaches " + }, + { + "bbox": [ + 50, + 420, + 293, + 486 + ], + "type": "inline_equation", + "content": "25\\%" + }, + { + "bbox": [ + 50, + 420, + 293, + 486 + ], + "type": "text", + "content": " (i.e., solutions for 9 problems use existing solvers). The solvers used throughout all steps and problems are limited to three: ortools, pulp, and scipy." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 486, + 294, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 486, + 294, + 575 + ], + "spans": [ + { + "bbox": [ + 50, + 486, + 294, + 575 + ], + "type": "text", + "content": "This suggests that while existing LLM agents are capable of developing algorithms without relying on existing solvers for most problems, there is a growing tendency to do so over time. Moreover, the solvers used are basic general-purpose tools rather than state-of-the-art solvers specifically designed for each problem (e.g., LKH for TSP), indicating that the agent lacks the necessary knowledge to select the best-performing solver." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 135, + 585, + 209, + 597 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 585, + 209, + 597 + ], + "spans": [ + { + "bbox": [ + 135, + 585, + 209, + 597 + ], + "type": "text", + "content": "Related Work" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 51, + 601, + 225, + 613 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 601, + 225, + 613 + ], + "spans": [ + { + "bbox": [ + 51, + 601, + 225, + 613 + ], + "type": "text", + "content": "Automatic Algorithm Search for CO" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 616, + 295, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 616, + 295, + 704 + ], + "spans": [ + { + "bbox": [ + 50, + 616, + 295, + 704 + ], + "type": "text", + "content": "Automating algorithm search for combinatorial optimization (CO) has emerged as a significant research direction in the machine learning community. Traditional machine learning solvers primarily parameterize CO algorithms as trainable neural networks (Bengio, Lodi, and Prouvost 2020; Cappart et al. 2023). Although effective in capturing data distributions, these neural approaches often struggle to generate feasible solutions, necessitating integration with human-" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 315, + 209, + 560, + 374 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 209, + 560, + 374 + ], + "spans": [ + { + "bbox": [ + 315, + 209, + 560, + 374 + ], + "type": "text", + "content": "designed heuristics such as branch-and-bound (Gasse et al. 2019) and tree search (Böther et al. 2022). To address this limitation, Kuang et al. (2024a,b) propose to decompose CO algorithms into symbolic operators and conduct searches in the symbolic space. 
However, designing these unit symbolic operators demands substantial human expertise, limiting generalizability and comprehensive coverage of all algorithm types. Recent advances in Large Language Models (LLMs) and LLM-based agents have significantly mitigated this challenge by enabling symbolic searching in programming language formats (Romera-Paredes et al. 2023; Ye et al. 2024; Liu et al. 2024). Building on these developments, CO-Bench aims to extend the success of these methods to more real-world CO problems and facilitate further research in this domain." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 316, + 385, + 446, + 396 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 385, + 446, + 396 + ], + "spans": [ + { + "bbox": [ + 316, + 385, + 446, + 396 + ], + "type": "text", + "content": "CO Benchmarks for LLMs" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 315, + 400, + 560, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 400, + 560, + 589 + ], + "spans": [ + { + "bbox": [ + 315, + 400, + 560, + 589 + ], + "type": "text", + "content": "Existing CO benchmarks can be roughly classified into two categories. The first type formulates CO problems as question-answering tasks (Fan et al. 2024; Tang et al. 2025). Although LLMs have the potential to solve CO problems via natural language reasoning, their excessive parameter size makes them inefficient CO solvers in general. Therefore, the second type of benchmarks evaluates the tool-using ability of LLMs, e.g., calling an existing CO solver, to address CO problems (Xiao et al. 2024b; Ahmaditeshnizi, Gao, and Udell 2024; Yang et al. 2025b). However, these benchmarks only evaluate the correctness of the generated algorithm on small-scale CO problems, whose problem parameters could be fully expressed in natural language. 
In contrast, CO-Bench targets scientific and industrial challenges, emphasizing the evaluation of algorithm efficiency on diverse, large-scale CO instances. This results in a more demanding benchmark, well-suited for assessing powerful reasoning models and agents." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 407, + 600, + 468, + 611 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 407, + 600, + 468, + 611 + ], + "spans": [ + { + "bbox": [ + 407, + 600, + 468, + 611 + ], + "type": "text", + "content": "Conclusion" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 315, + 616, + 560, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 616, + 560, + 704 + ], + "spans": [ + { + "bbox": [ + 315, + 616, + 560, + 704 + ], + "type": "text", + "content": "This work introduces CO-Bench, the first benchmark designed to evaluate the ability of LLMs in the search of combinatorial optimization (CO) algorithms. Our systematic evaluation reveals that reasoning-focused LLMs, especially when paired with agentic frameworks, can automatically discover effective algorithms that rival or surpass the classical solvers designed by human experts, with competitive searching time. However, we also identify key limitations of current LLM" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 51, + 54, + 294, + 110 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 54, + 294, + 110 + ], + "spans": [ + { + "bbox": [ + 51, + 54, + 294, + 110 + ], + "type": "text", + "content": "agents such as they struggle to understand the problem constraints. These shortcomings highlight the need for future research to enhance agents' problem comprehension and creative reasoning abilities in CO tasks, enabling more robust and autonomous scientific discovery." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 143, + 121, + 202, + 133 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 121, + 202, + 133 + ], + "spans": [ + { + "bbox": [ + 143, + 121, + 202, + 133 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 51, + 136, + 294, + 703 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 51, + 136, + 294, + 213 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 136, + 294, + 213 + ], + "spans": [ + { + "bbox": [ + 51, + 136, + 294, + 213 + ], + "type": "text", + "content": "Ahmaditeshnizi, A.; Gao, W.; and Udell, M. 2024. OptiMUS: Scalable Optimization Modeling with (MI)LP Solvers and Large Language Models. In Salakhutdinov, R.; Kolter, Z.; Heller, K.; Weller, A.; Oliver, N.; Scarlett, J.; and Berkenkamp, F., eds., Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, 577-596. PMLR." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 216, + 294, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 216, + 294, + 249 + ], + "spans": [ + { + "bbox": [ + 52, + 216, + 294, + 249 + ], + "type": "text", + "content": "and, J. E. B. 1990. Linear Programming on Cray Supercomputers. Journal of the Operational Research Society, 41(2): 133-139." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 51, + 251, + 294, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 251, + 294, + 285 + ], + "spans": [ + { + "bbox": [ + 51, + 251, + 294, + 285 + ], + "type": "text", + "content": "Anken, F.; and Beasley, J. E. 2012. Corporate structure optimisation for multinational companies. Omega-international Journal of Management Science, 40: 230-243." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 51, + 287, + 294, + 310 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 287, + 294, + 310 + ], + "spans": [ + { + "bbox": [ + 51, + 287, + 294, + 310 + ], + "type": "text", + "content": "Anthropic. 2025. Claude Sonnet. https://www.anthropic.com/claude/sonnet. Accessed: 2025-03-24." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 51, + 312, + 294, + 345 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 312, + 294, + 345 + ], + "spans": [ + { + "bbox": [ + 51, + 312, + 294, + 345 + ], + "type": "text", + "content": "Beasley, J. E. 1985a. An algorithm for the two-dimensional assortment problem. European Journal of Operational Research, 19: 253-261." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 51, + 348, + 294, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 348, + 294, + 381 + ], + "spans": [ + { + "bbox": [ + 51, + 348, + 294, + 381 + ], + "type": "text", + "content": "Beasley, J. E. 1985b. Algorithms for Unconstrained Two-Dimensional Guillotine Cutting. Journal of the Operational Research Society, 36: 297-306." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 51, + 384, + 294, + 416 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 384, + 294, + 416 + ], + "spans": [ + { + "bbox": [ + 51, + 384, + 294, + 416 + ], + "type": "text", + "content": "Beasley, J. E. 1985c. An Exact Two-Dimensional Non-Guillotine Cutting Tree Search Procedure. Oper. Res., 33: 49-64." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 51, + 419, + 294, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 419, + 294, + 453 + ], + "spans": [ + { + "bbox": [ + 51, + 419, + 294, + 453 + ], + "type": "text", + "content": "Beasley, J. E. 1985d. A note on solving large p-median problems. European Journal of Operational Research, 21: 270-273." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 51, + 456, + 294, + 488 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 456, + 294, + 488 + ], + "spans": [ + { + "bbox": [ + 51, + 456, + 294, + 488 + ], + "type": "text", + "content": "Beasley, J. E. 1988. An algorithm for solving large capacitated warehouse location problems. European Journal of Operational Research, 33: 314-325." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 51, + 491, + 294, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 491, + 294, + 525 + ], + "spans": [ + { + "bbox": [ + 51, + 491, + 294, + 525 + ], + "type": "text", + "content": "Beasley, J. E. 1990. OR-Library: Distributing Test Problems by Electronic Mail. Journal of the Operational Research Society, 41: 1069-1072." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 51, + 527, + 294, + 559 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 527, + 294, + 559 + ], + "spans": [ + { + "bbox": [ + 51, + 527, + 294, + 559 + ], + "type": "text", + "content": "Beasley, J. E. 1992. A heuristic for Euclidean and rectilinear Steiner problems. European Journal of Operational Research, 58: 284-292." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 51, + 563, + 294, + 596 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 563, + 294, + 596 + ], + "spans": [ + { + "bbox": [ + 51, + 563, + 294, + 596 + ], + "type": "text", + "content": "Beasley, J. E. 1993. Lagrangean heuristics for location problems. European Journal of Operational Research, 65: 383-399." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 51, + 599, + 294, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 599, + 294, + 632 + ], + "spans": [ + { + "bbox": [ + 51, + 599, + 294, + 632 + ], + "type": "text", + "content": "Beasley, J. E. 2004. 
A population heuristic for constrained two-dimensional non-guillotine cutting. *Eur. J. Oper. Res.*, 156: 601-627." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 51, + 635, + 294, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 635, + 294, + 667 + ], + "spans": [ + { + "bbox": [ + 51, + 635, + 294, + 667 + ], + "type": "text", + "content": "Beasley, J. E.; and Cao, B. 1996. A tree search algorithm for the crew scheduling problem. European Journal of Operational Research, 94: 517-526." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 51, + 670, + 294, + 703 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 670, + 294, + 703 + ], + "spans": [ + { + "bbox": [ + 51, + 670, + 294, + 703 + ], + "type": "text", + "content": "Beasley, J. E.; and Christofides, N. 1989. An algorithm for the resource constrained shortest path problem. Networks, 19: 379-394." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 317, + 54, + 558, + 703 + ], + "type": "list", + "angle": 0, + "index": 32, + "blocks": [ + { + "bbox": [ + 317, + 54, + 558, + 87 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 54, + 558, + 87 + ], + "spans": [ + { + "bbox": [ + 317, + 54, + 558, + 87 + ], + "type": "text", + "content": "Beasley, J. E.; and Jornsten, K. 1992. Enhancing an algorithm for set covering problems. European Journal of Operational Research, 58: 293-300." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 317, + 90, + 558, + 123 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 90, + 558, + 123 + ], + "spans": [ + { + "bbox": [ + 317, + 90, + 558, + 123 + ], + "type": "text", + "content": "Beasley, J. E.; Krishnamoorthy, M.; Sharaiha, Y. M.; and Abramson, D. 2000. Scheduling Aircraft Landings - The Static Case. Transp. Sci., 34: 180-197." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 317, + 125, + 558, + 170 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 125, + 558, + 170 + ], + "spans": [ + { + "bbox": [ + 317, + 125, + 558, + 170 + ], + "type": "text", + "content": "Beasley, J. E.; Krishnamoorthy, M.; Sharaiha, Y. M.; and Abramson, D. 2004. Displacement problem and dynamically scheduling aircraft landings. Journal of the Operational Research Society, 55: 54-64." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 317, + 172, + 558, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 172, + 558, + 205 + ], + "spans": [ + { + "bbox": [ + 317, + 172, + 558, + 205 + ], + "type": "text", + "content": "Bengio, Y.; Lodi, A.; and Prouvost, A. 2020. Machine Learning for Combinatorial Optimization: a Methodological Tour d'Horizon. arXiv:1811.06128." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 317, + 208, + 558, + 230 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 208, + 558, + 230 + ], + "spans": [ + { + "bbox": [ + 317, + 208, + 558, + 230 + ], + "type": "text", + "content": "Berthold, T. 2006. Primal heuristics for mixed integer programs. Ph.D. thesis, Zuse Institute Berlin (ZIB)." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 317, + 232, + 558, + 265 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 232, + 558, + 265 + ], + "spans": [ + { + "bbox": [ + 317, + 232, + 558, + 265 + ], + "type": "text", + "content": "Bischoff, E. E. 2006. Three-dimensional packing of items with limited load bearing strength. Eur. J. Oper. Res., 168: 952-966." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 317, + 268, + 558, + 301 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 268, + 558, + 301 + ], + "spans": [ + { + "bbox": [ + 317, + 268, + 558, + 301 + ], + "type": "text", + "content": "Bischoff, E. E.; and Ratcliff, M. S. W. 1995. 
Issues in the development of approaches to container loading. Omega-international Journal of Management Science, 23: 377-390." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 317, + 304, + 558, + 337 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 304, + 558, + 337 + ], + "spans": [ + { + "bbox": [ + 317, + 304, + 558, + 337 + ], + "type": "text", + "content": "Biskup, D.; and Feldmann, M. 2001. Benchmarks for scheduling on a single machine against restrictive and unrestricted common due dates. Comput. Oper. Res., 28: 787-801." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 317, + 339, + 558, + 384 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 339, + 558, + 384 + ], + "spans": [ + { + "bbox": [ + 317, + 339, + 558, + 384 + ], + "type": "text", + "content": "Böther, M.; Kißig, O.; Taraz, M.; Cohen, S.; Seidel, K.; and Friedrich, T. 2022. What's Wrong with Deep Learning in Tree Search for Combinatorial Optimization. In International Conference on Learning Representations." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 317, + 386, + 558, + 419 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 386, + 558, + 419 + ], + "spans": [ + { + "bbox": [ + 317, + 386, + 558, + 419 + ], + "type": "text", + "content": "Cappanera, P.; and Trubian, M. 2001. A Local-Search-Based Heuristic for the Demand-Constrained Multidimensional Knapsack Problem. INFORMS J. Comput., 17: 82-98." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 317, + 421, + 558, + 465 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 421, + 558, + 465 + ], + "spans": [ + { + "bbox": [ + 317, + 421, + 558, + 465 + ], + "type": "text", + "content": "Cappart, Q.; ChA©telat, D.; Khalil, E. B.; Lodi, A.; Morris, C.; and VeliAkoviA‡, P. 2023. Combinatorial Optimization and Reasoning with Graph Neural Networks. Journal of Machine Learning Research, 24(130): 1-61." 
+ } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 317, + 468, + 558, + 501 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 468, + 558, + 501 + ], + "spans": [ + { + "bbox": [ + 317, + 468, + 558, + 501 + ], + "type": "text", + "content": "Chakhlevitch, K.; and Glass, C. A. 2009. Scheduling reentrant jobs on parallel machines with a remote server. Comput. Oper. Res., 36: 2580-2589." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 317, + 503, + 558, + 558 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 503, + 558, + 558 + ], + "spans": [ + { + "bbox": [ + 317, + 503, + 558, + 558 + ], + "type": "text", + "content": "Chan, J. S.; Chowdhury, N.; Jaffe, O.; Aung, J.; Sherburn, D.; Mays, E.; Starace, G.; Liu, K.; Maksin, L.; Patwardhan, T. A.; Weng, L.; and Mkadry, A. 2024. MLE-bench: Evaluating Machine Learning Agents on Machine Learning Engineering. ArXiv, abs/2410.07095." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 317, + 561, + 558, + 703 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 561, + 558, + 703 + ], + "spans": [ + { + "bbox": [ + 317, + 561, + 558, + 703 + ], + "type": "text", + "content": "Chen, M.; Tworek, J.; Jun, H.; Yuan, Q.; Ponde, H.; Kaplan, J.; Edwards, H.; Burda, Y.; Joseph, N.; Brockman, G.; Ray, A.; Puri, R.; Krueger, G.; Petrov, M.; Khlaaf, H.; Sastry, G.; Mishkin, P.; Chan, B.; Gray, S.; Ryder, N.; Pavlov, M.; Power, A.; Kaiser, L.; Bavarian, M.; Winter, C.; Tillet, P.; Such, F. P.; Cummings, D. W.; Plappert, M.; Chantzis, F.; Barnes, E.; Herbert-Voss, A.; Guss, W. H.; Nichol, A.; Babuschkin, I.; Balaji, S.; Jain, S.; Carr, A.; Leike, J.; Achiam, J.; Misra, V.; Morikawa, E.; Radford, A.; Knight, M. M.; Brundage, M.; Murati, M.; Mayer, K.; Welinder, P.; McGrew, B.; Amodei, D.; McCandlish, S.; Sutskever, I.; and Zaremba, W. 2021. Evaluating Large Language Models Trained on Code. ArXiv, abs/2107.03374." 
+ } + ] + } + ], + "index": 31 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 51, + 54, + 294, + 704 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 51, + 54, + 294, + 77 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 54, + 294, + 77 + ], + "spans": [ + { + "bbox": [ + 51, + 54, + 294, + 77 + ], + "type": "text", + "content": "Christofides, N.; and Beasley, J. E. 1984. The period routing problem. Networks, 14: 237-256." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 52, + 79, + 294, + 102 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 79, + 294, + 102 + ], + "spans": [ + { + "bbox": [ + 52, + 79, + 294, + 102 + ], + "type": "text", + "content": "Christofides, N.; and Whitlock, C. 1977. An Algorithm for Two-Dimensional Cutting Problems. Oper. Res., 25: 30-44." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 51, + 103, + 294, + 137 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 103, + 294, + 137 + ], + "spans": [ + { + "bbox": [ + 51, + 103, + 294, + 137 + ], + "type": "text", + "content": "Chu, P. C.; and Beasley, J. E. 1998. Constraint Handling in Genetic Algorithms: The Set Partitioning Problem. Journal of Heuristics, 4: 323-357." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 51, + 138, + 294, + 172 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 138, + 294, + 172 + ], + "spans": [ + { + "bbox": [ + 51, + 138, + 294, + 172 + ], + "type": "text", + "content": "Crama, Y. 1997. Combinatorial optimization models for production scheduling in automated manufacturing systems. European Journal of Operational Research, 99(1): 136-153." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 51, + 173, + 294, + 207 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 173, + 294, + 207 + ], + "spans": [ + { + "bbox": [ + 51, + 173, + 294, + 207 + ], + "type": "text", + "content": "DeepMind, G. 2025. Flash Thinking: Behind the Scenes of Gemini. https://deepmind.google/technologies/gemini/flash-thinking/. Accessed: 2025-03-24." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 51, + 208, + 294, + 231 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 208, + 294, + 231 + ], + "spans": [ + { + "bbox": [ + 51, + 208, + 294, + 231 + ], + "type": "text", + "content": "DeepSeek-AI. 2024. DeepSeek-V3 Technical Report. ArXiv, abs/2412.19437." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 51, + 233, + 294, + 266 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 233, + 294, + 266 + ], + "spans": [ + { + "bbox": [ + 51, + 233, + 294, + 266 + ], + "type": "text", + "content": "DeepSeek-AI. 2025a. DeepSeek-R1: Incentivizing Reasoning Capability in LLMs via Reinforcement Learning. arXiv:2501.12948." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 51, + 268, + 294, + 300 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 268, + 294, + 300 + ], + "spans": [ + { + "bbox": [ + 51, + 268, + 294, + 300 + ], + "type": "text", + "content": "DeepSeek-AI. 2025b. DeepSeek-R1: Incentivizing Reasoning Capability in LLMs via Reinforcement Learning. ArXiv, abs/2501.12948." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 51, + 303, + 294, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 303, + 294, + 335 + ], + "spans": [ + { + "bbox": [ + 51, + 303, + 294, + 335 + ], + "type": "text", + "content": "Erdos, P. L.; and Rényi, A. 1984. On the evolution of random graphs. Transactions of the American Mathematical Society, 286: 257-257." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 51, + 338, + 294, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 338, + 294, + 361 + ], + "spans": [ + { + "bbox": [ + 51, + 338, + 294, + 361 + ], + "type": "text", + "content": "Falkenauer, E. 1996. A hybrid grouping genetic algorithm for bin packing. Journal of Heuristics, 2: 5-30." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 51, + 363, + 294, + 441 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 363, + 294, + 441 + ], + "spans": [ + { + "bbox": [ + 51, + 363, + 294, + 441 + ], + "type": "text", + "content": "Fan, L.; Hua, W.; Li, L.; Ling, H.; and Zhang, Y. 2024. NPHardEval: Dynamic Benchmark on Reasoning Ability of Large Language Models via Complexity Classes. In Ku, L.-W.; Martins, A.; and Srikumar, V., eds., Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 4092-4114. Bangkok, Thailand: Association for Computational Linguistics." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 51, + 441, + 294, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 441, + 294, + 475 + ], + "spans": [ + { + "bbox": [ + 51, + 441, + 294, + 475 + ], + "type": "text", + "content": "Fleurent, C.; and Ferland, J. A. 1996. Genetic and hybrid algorithms for graph coloring. Annals of Operations Research, 63: 437-461." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 51, + 477, + 294, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 477, + 294, + 521 + ], + "spans": [ + { + "bbox": [ + 51, + 477, + 294, + 521 + ], + "type": "text", + "content": "Gasse, M.; Chételat, D.; Ferroni, N.; Charlin, L.; and Lodi, A. 2019. Exact Combinatorial Optimization with Graph Convolutional Neural Networks. In Advances in Neural Information Processing Systems 32." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 51, + 523, + 294, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 523, + 294, + 622 + ], + "spans": [ + { + "bbox": [ + 51, + 523, + 294, + 622 + ], + "type": "text", + "content": "Gottweis, J.; Weng, W.-H.; Daryin, A.; Tu, T.; Palepu, A.; Sirkovic, P.; Myaskovsky, A.; Weissenberger, F.; Rong, K.; Tanno, R.; Saab, K.; Popovici, D.; Blum, J.; Zhang, F.; Chou, K.; Hassidim, A.; Gokturk, B.; Vahdat, A.; Kohli, P.; Matias, Y.; Carroll, A.; Kulkarni, K.; Tomaev, N.; Guan, Y.; Dhillon, V.; Vaishnav, E. D.; Lee, B.; Costa, T. R. D.; Penad'es, J. R.; Peltz, G.; Xu, Y.; Pawlosky, A.; Karthikesalingam, A.; and Natarajan, V. 2025. Towards an AI co-scientist. *ArXiv*, abs/2502.18864." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 51, + 624, + 294, + 658 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 624, + 294, + 658 + ], + "spans": [ + { + "bbox": [ + 51, + 624, + 294, + 658 + ], + "type": "text", + "content": "Gusfield, D. 1997. Algorithms on stings, trees, and sequences: Computer science and computational biology. *Acm Sigact News*, 28(4): 41-60." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 51, + 659, + 294, + 704 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 659, + 294, + 704 + ], + "spans": [ + { + "bbox": [ + 51, + 659, + 294, + 704 + ], + "type": "text", + "content": "Hui, B.; Yang, J.; Cui, Z.; Yang, J.; Liu, D.; Zhang, L.; Liu, T.; Zhang, J.; Yu, B.; Dang, K.; Yang, A.; Men, R.; Huang, F.; Quan, S.; Ren, X.; Ren, X.; Zhou, J.; and Lin, J. 2024. Qwen2.5-Coder Technical Report. ArXiv, abs/2409.12186." 
+ } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 317, + 54, + 559, + 704 + ], + "type": "list", + "angle": 0, + "index": 32, + "blocks": [ + { + "bbox": [ + 317, + 54, + 559, + 77 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 54, + 559, + 77 + ], + "spans": [ + { + "bbox": [ + 317, + 54, + 559, + 77 + ], + "type": "text", + "content": "Ivancic, N. J. 1988. An integer programming based heuristic approach to the three-dimensional packing problem." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 317, + 79, + 559, + 112 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 79, + 559, + 112 + ], + "spans": [ + { + "bbox": [ + 317, + 79, + 559, + 112 + ], + "type": "text", + "content": "Jiang, Z.; Schmidt, D.; Srikanth, D.; Xu, D.; Kaplan, I.; Jacenko, D.; and Wu, Y. 2025. AIDE: AI-Driven Exploration in the Space of Code. ArXiv, abs/2502.13138." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 317, + 114, + 559, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 114, + 559, + 158 + ], + "spans": [ + { + "bbox": [ + 317, + 114, + 559, + 158 + ], + "type": "text", + "content": "Jimenez, C. E.; Yang, J.; Wettig, A.; Yao, S.; Pei, K.; Press, O.; and Narasimhan, K. 2023. SWE-bench: Can Language Models Resolve Real-World GitHub Issues? ArXiv, abs/2310.06770." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 317, + 160, + 559, + 216 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 160, + 559, + 216 + ], + "spans": [ + { + "bbox": [ + 317, + 160, + 559, + 216 + ], + "type": "text", + "content": "Kuang, Y.; Wang, J.; Liu, H.; Zhu, F.; Li, X.; Zeng, J.; HAO, J.; Li, B.; and Wu, F. 2024a. Rethinking Branching on Exact Combinatorial Optimization Solver: The First Deep Symbolic Discovery Framework. In *The Twelfth International Conference on Learning Representations*." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 317, + 217, + 559, + 306 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 217, + 559, + 306 + ], + "spans": [ + { + "bbox": [ + 317, + 217, + 559, + 306 + ], + "type": "text", + "content": "Kuang, Y.; Wang, J.; Zhou, Y.; Li, X.; Zhu, F.; Hao, J.; and Wu, F. 2024b. Towards General Algorithm Discovery for Combinatorial Optimization: Learning Symbolic Branching Policy from Bipartite Graph. In Salakhutdinov, R.; Kolter, Z.; Heller, K.; Weller, A.; Oliver, N.; Scarlett, J.; and Berkenkamp, F., eds., Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, 25623-25641. PMLR." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 317, + 308, + 558, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 308, + 558, + 342 + ], + "spans": [ + { + "bbox": [ + 317, + 308, + 558, + 342 + ], + "type": "text", + "content": "Laporte, G. 1992. The traveling salesman problem: An overview of exact and approximate algorithms. European Journal of Operational Research, 59(2): 231-247." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 317, + 343, + 558, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 343, + 558, + 388 + ], + "spans": [ + { + "bbox": [ + 317, + 343, + 558, + 388 + ], + "type": "text", + "content": "Li, Y.; Guo, J.; Wang, R.; and Yan, J. 2023. From Distribution Learning in Training to Gradient Search in Testing for Combinatorial Optimization. In Neural Information Processing Systems." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 317, + 390, + 558, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 390, + 558, + 434 + ], + "spans": [ + { + "bbox": [ + 317, + 390, + 558, + 434 + ], + "type": "text", + "content": "Liu, F.; Tong, X.; Yuan, M.; Lin, X.; Luo, F.; Wang, Z.; Lu, Z.; and Zhang, Q. 2024. 
Evolution of Heuristics: Towards Efficient Automatic Algorithm Design Using Large Language Model. In ICML." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 317, + 436, + 558, + 470 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 436, + 558, + 470 + ], + "spans": [ + { + "bbox": [ + 317, + 436, + 558, + 470 + ], + "type": "text", + "content": "López, C. O.; and Beasley, J. E. 2016. A formulation space search heuristic for packing unequal circles in a fixed size circular container. Eur. J. Oper. Res., 251: 64-73." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 317, + 472, + 558, + 506 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 472, + 558, + 506 + ], + "spans": [ + { + "bbox": [ + 317, + 472, + 558, + 506 + ], + "type": "text", + "content": "López, C. O.; and Beasley, J. E. 2018. Packing unequal rectangles and squares in a fixed size circular container using formulation space search. Comput. Oper. Res., 94: 106-117." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 317, + 507, + 558, + 562 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 507, + 558, + 562 + ], + "spans": [ + { + "bbox": [ + 317, + 507, + 558, + 562 + ], + "type": "text", + "content": "Madaan, A.; Tandon, N.; Gupta, P.; Hallinan, S.; Gao, L.; Wegreffe, S.; Alon, U.; Dziri, N.; Prabhumoye, S.; Yang, Y.; Welleck, S.; Majumder, B. P.; Gupta, S.; Yazdanbakhsh, A.; and Clark, P. 2023. Self-Refine: Iterative Refinement with Self-Feedback. ArXiv, abs/2303.17651." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 317, + 563, + 558, + 586 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 563, + 558, + 586 + ], + "spans": [ + { + "bbox": [ + 317, + 563, + 558, + 586 + ], + "type": "text", + "content": "Meta. 2024. The Llama 3 Herd of Models. ArXiv, abs/2407.21783." 
+ } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 317, + 589, + 558, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 589, + 558, + 632 + ], + "spans": [ + { + "bbox": [ + 317, + 589, + 558, + 632 + ], + "type": "text", + "content": "Mingers, J. C.; and O'Brien, F. A. 1995. Creating student groups with similar characteristics: A heuristic approach. Omega-international Journal of Management Science, 23: 313-321." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 317, + 635, + 558, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 635, + 558, + 668 + ], + "spans": [ + { + "bbox": [ + 317, + 635, + 558, + 668 + ], + "type": "text", + "content": "Motwani, R.; and Raghavan, P. 2013. Randomized Algorithms. USA: Cambridge University Press. ISBN 0511814070." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 317, + 670, + 558, + 704 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 670, + 558, + 704 + ], + "spans": [ + { + "bbox": [ + 317, + 670, + 558, + 704 + ], + "type": "text", + "content": "Novikov, A.; V~u, N.; Eisenberger, M.; Dupont, E.; Huang, P.-S.; Wagner, A. Z.; Shirobokov, S.; Kozlovskii, B. M.; Ruiz, F. J. R.; Mehrabian, A.; Kumar, M. P.; See, A.; Chaudhuri, S.;" + } + ] + } + ], + "index": 31 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 51, + 54, + 294, + 704 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 51, + 54, + 294, + 89 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 54, + 294, + 89 + ], + "spans": [ + { + "bbox": [ + 51, + 54, + 294, + 89 + ], + "type": "text", + "content": "Holland, G.; Davies, A.; Nowozin, S.; Kohli, P.; Balog, M.; and Deepmind, G. 2025. AlphaEvolve: A coding agent for scientific and algorithmic discovery. *ArXiv*, abs/2506.13131." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 52, + 90, + 294, + 112 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 90, + 294, + 112 + ], + "spans": [ + { + "bbox": [ + 52, + 90, + 294, + 112 + ], + "type": "text", + "content": "OpenAI. 2024a. GPT-4o System Card. ArXiv, abs/2410.21276." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 114, + 294, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 114, + 294, + 127 + ], + "spans": [ + { + "bbox": [ + 52, + 114, + 294, + 127 + ], + "type": "text", + "content": "OpenAI. 2024b. OpenAI o1 System Card. arXiv:2412.16720." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 129, + 239, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 129, + 239, + 140 + ], + "spans": [ + { + "bbox": [ + 52, + 129, + 239, + 140 + ], + "type": "text", + "content": "OpenAI. 2025. OpenAI o3-mini System Card." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 51, + 143, + 294, + 176 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 143, + 294, + 176 + ], + "spans": [ + { + "bbox": [ + 51, + 143, + 294, + 176 + ], + "type": "text", + "content": "Osman, I. H. 1995. Heuristics for the generalised assignment problem: simulated annealing and tabu search approaches. Operations-Research-Spektrum, 17: 211-225." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 51, + 178, + 294, + 222 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 178, + 294, + 222 + ], + "spans": [ + { + "bbox": [ + 51, + 178, + 294, + 222 + ], + "type": "text", + "content": "Osman, I. H.; and Christofides, N. 1994. Capacitated clustering problems by hybrid simulated annealing and tabu search. International Transactions in Operational Research, 1: 317-336." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 51, + 224, + 294, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 224, + 294, + 258 + ], + "spans": [ + { + "bbox": [ + 51, + 224, + 294, + 258 + ], + "type": "text", + "content": "Papadimitriou, C.; and Steiglitz, K. 1982. Combinatorial Optimization: Algorithms and Complexity, volume 32. Courier Corporation. ISBN 0-13-152462-3." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 51, + 260, + 294, + 295 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 260, + 294, + 295 + ], + "spans": [ + { + "bbox": [ + 51, + 260, + 294, + 295 + ], + "type": "text", + "content": "Petersen, C. C. 1967. Computational Experience with Variants of the Balas Algorithm Applied to the Selection of R&D Projects. Management Science, 13: 736-750." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 51, + 297, + 294, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 297, + 294, + 342 + ], + "spans": [ + { + "bbox": [ + 51, + 297, + 294, + 342 + ], + "type": "text", + "content": "Qiu, R.; Sun, Z.; and Yang, Y. 2022. DIMES: A Differentiable Meta Solver for Combinatorial Optimization Problems. In Oh, A. H.; Agarwal, A.; Belgrave, D.; and Cho, K., eds., Advances in Neural Information Processing Systems." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 51, + 343, + 294, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 343, + 294, + 376 + ], + "spans": [ + { + "bbox": [ + 51, + 343, + 294, + 376 + ], + "type": "text", + "content": "Qwen. 2025. QwQ-32B: Embracing the Power of Reinforcement Learning. https://qwenlm.github.io/blog/qwq-32b/. Accessed: 2025-03-24." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 51, + 379, + 294, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 379, + 294, + 446 + ], + "spans": [ + { + "bbox": [ + 51, + 379, + 294, + 446 + ], + "type": "text", + "content": "Ramamonjison, R.; Yu, T. T.; Li, R.; Li, H.; Carenini, G.; Ghaddar, B.; He, S.; Mostajabdaveh, M.; Banitalebi-Dehkordi, A.; Zhou, Z.; and Zhang, Y. 2023. NL4Opt Competition: Formulating Optimization Problems Based on Their Natural Language Descriptions. In Neural Information Processing Systems." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 51, + 448, + 294, + 481 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 448, + 294, + 481 + ], + "spans": [ + { + "bbox": [ + 51, + 448, + 294, + 481 + ], + "type": "text", + "content": "Ratcliff, M. S. W.; and Bischoff, E. E. 1998. Allowing for weight considerations in container loading. Operations-Research-Spektrum, 20: 65-71." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 51, + 483, + 294, + 549 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 483, + 294, + 549 + ], + "spans": [ + { + "bbox": [ + 51, + 483, + 294, + 549 + ], + "type": "text", + "content": "Romera-Paredes, B.; Barekatain, M.; Novikov, A.; Balog, M.; Kumar, M. P.; Dupont, E.; Ruiz, F. J. R.; Ellenberg, J. S.; Wang, P.; Fawzi, O.; Kohli, P.; Fawzi, A.; Grochow, J.; Lodi, A.; Mouret, J.-B.; Ringer, T.; and Yu, T. 2023. Mathematical discoveries from program search with large language models. Nature, 625: 468 - 475." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 51, + 552, + 294, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 552, + 294, + 597 + ], + "spans": [ + { + "bbox": [ + 51, + 552, + 294, + 597 + ], + "type": "text", + "content": "Shinn, N.; Cassano, F.; Labash, B.; Gopinath, A.; Narasimhan, K.; and Yao, S. 2023. Reflexion: language agents with verbal reinforcement learning. 
In Neural Information Processing Systems." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 51, + 599, + 294, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 599, + 294, + 632 + ], + "spans": [ + { + "bbox": [ + 51, + 599, + 294, + 632 + ], + "type": "text", + "content": "Sun, Z.; and Yang, Y. 2023. DIFUSCO: Graph-based Diffusion Solvers for Combinatorial Optimization. ArXiv, abs/2302.08224." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 51, + 635, + 294, + 658 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 635, + 294, + 658 + ], + "spans": [ + { + "bbox": [ + 51, + 635, + 294, + 658 + ], + "type": "text", + "content": "Taillard, E. 1993. Benchmarks for basic scheduling problems. European Journal of Operational Research, 64(2): 278-285." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 51, + 659, + 294, + 704 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 659, + 294, + 704 + ], + "spans": [ + { + "bbox": [ + 51, + 659, + 294, + 704 + ], + "type": "text", + "content": "Tang, J.; Zhang, Q.; Li, Y.; Chen, N.; and Li, J. 2025. GraphArena: Evaluating and Improving Large Language Models on Graph Computation. In International Conference on Learning Representations." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 317, + 54, + 560, + 491 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 317, + 54, + 560, + 110 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 54, + 560, + 110 + ], + "spans": [ + { + "bbox": [ + 317, + 54, + 560, + 110 + ], + "type": "text", + "content": "Vogiatzis, C.; and Pardalos, P. 2013. Combinatorial optimization in transportation and logistics networks, volume 2-5, 673-722. Germany: Springer. ISBN 9781441979964. 
Publisher Copyright: " + }, + { + "bbox": [ + 317, + 54, + 560, + 110 + ], + "type": "inline_equation", + "content": "\\text{©}" + }, + { + "bbox": [ + 317, + 54, + 560, + 110 + ], + "type": "text", + "content": " Springer Science+Business Media New York 2013. All rights are reserved." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 317, + 111, + 559, + 133 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 111, + 559, + 133 + ], + "spans": [ + { + "bbox": [ + 317, + 111, + 559, + 133 + ], + "type": "text", + "content": "xAI. 2025. Grok-3 and the Next Phase of xAI. https://x.ai/news/grok-3. Accessed: 2025-03-24." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 317, + 136, + 559, + 191 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 136, + 559, + 191 + ], + "spans": [ + { + "bbox": [ + 317, + 136, + 559, + 191 + ], + "type": "text", + "content": "Xiao, Z.; Zhang, D.; Wu, Y.; Xu, L.; Wang, Y. J.; Han, X.; Fu, X.; Zhong, T.; Zeng, J.; Song, M.; and Chen, G. 2024a. Chain-of-Experts: When LLMs Meet Complex Operations Research Problems. In International Conference on Learning Representations." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 317, + 193, + 559, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 193, + 559, + 249 + ], + "spans": [ + { + "bbox": [ + 317, + 193, + 559, + 249 + ], + "type": "text", + "content": "Xiao, Z.; Zhang, D.; Wu, Y.; Xu, L.; Wang, Y. J.; Han, X.; Fu, X.; Zhong, T.; Zeng, J.; Song, M.; and Chen, G. 2024b. Chain-of-Experts: When LLMs Meet Complex Operations Research Problems. In The Twelfth International Conference on Learning Representations." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 317, + 251, + 559, + 306 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 251, + 559, + 306 + ], + "spans": [ + { + "bbox": [ + 317, + 251, + 559, + 306 + ], + "type": "text", + "content": "Yang, Z.; Wang, Y.; Huang, Y.; Guo, Z.; Shi, W.; Han, X.; Feng, L.; Song, L.; Liang, X.; and Tang, J. 2025a. OptiBench Meets ReSocratic: Measure and Improve LLMs for Optimization Modeling. In The Thirteenth International Conference on Learning Representations." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 317, + 308, + 559, + 364 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 308, + 559, + 364 + ], + "spans": [ + { + "bbox": [ + 317, + 308, + 559, + 364 + ], + "type": "text", + "content": "Yang, Z.; Wang, Y.; Huang, Y.; Guo, Z.; Shi, W.; Han, X.; Feng, L.; Song, L.; Liang, X.; and Tang, J. 2025b. OptiBench Meets ReSocratic: Measure and Improve LLMs for Optimization Modeling. In The Thirteenth International Conference on Learning Representations." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 317, + 365, + 558, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 365, + 558, + 399 + ], + "spans": [ + { + "bbox": [ + 317, + 365, + 558, + 399 + ], + "type": "text", + "content": "Yao, S.; Zhao, J.; Yu, D.; Du, N.; Shafran, I.; Narasimhan, K.; and Cao, Y. 2022. ReAct: Synergizing Reasoning and Acting in Language Models. ArXiv, abs/2210.03629." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 317, + 400, + 559, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 400, + 559, + 456 + ], + "spans": [ + { + "bbox": [ + 317, + 400, + 559, + 456 + ], + "type": "text", + "content": "Ye, H.; Wang, J.; Cao, Z.; Berto, F.; Hua, C.; Kim, H.; Park, J.; and Song, G. 2024. ReEvo: Large Language Models as Hyper-Heuristics with Reflective Evolution. 
In The Thirty-eighth Annual Conference on Neural Information Processing Systems." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 317, + 457, + 558, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 457, + 558, + 491 + ], + "spans": [ + { + "bbox": [ + 317, + 457, + 558, + 491 + ], + "type": "text", + "content": "Zheng, Z.; Xie, Z.; Wang, Z.; and Hooi, B. 2025. Monte Carlo Tree Search for Comprehensive Exploration in LLM-Based Automatic Heuristic Design. ArXiv, abs/2501.08603." + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 88, + 53, + 256, + 68 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 53, + 256, + 68 + ], + "spans": [ + { + "bbox": [ + 88, + 53, + 256, + 68 + ], + "type": "text", + "content": "Problem Description and Scores" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 51, + 70, + 130, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 70, + 130, + 84 + ], + "spans": [ + { + "bbox": [ + 51, + 70, + 130, + 84 + ], + "type": "text", + "content": "Aircraft landing" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 50, + 87, + 294, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 87, + 294, + 251 + ], + "spans": [ + { + "bbox": [ + 50, + 87, + 294, + 251 + ], + "type": "text", + "content": "The problem is to schedule landing times for a set of planes across one or more runways such that each landing occurs within its prescribed time window and all pairwise separation requirements are satisfied; specifically, if plane i lands at or before plane j on the same runway, then the gap between their landing times must be at least the specified separation time provided in the input. 
In a multiple-runway setting, each plane must also be assigned to one runway, and if planes land on different runways, the separation requirement (which may differ) is applied accordingly. Each plane has an earliest, target, and latest landing time, with penalties incurred proportionally for landing before (earliness) or after (lateness) its target time. The objective is to minimize the total penalty cost while ensuring that no constraints are violated—if any constraint is breached, the solution receives no score." + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 52, + 262, + 231, + 375 + ], + "blocks": [ + { + "bbox": [ + 52, + 262, + 231, + 375 + ], + "lines": [ + { + "bbox": [ + 52, + 262, + 231, + 375 + ], + "spans": [ + { + "bbox": [ + 52, + 262, + 231, + 375 + ], + "type": "table", + "html": "
MethodScore
Classical Solver0.5985295365478638
BestOfN0.8057479826999232
Refine0.7503157815146175
FunSearch0.9688863336568327
AIDE0.800637046201484
ReEvo0.9134454710810906
MCTS0.801655240273729
EoH0.8019818529389835
", + "image_path": "df632d4e73c3be9bc14ac0b13f0075642385e842b8c38ae2fbcc4eed3552d4e3.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 51, + 419, + 151, + 432 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 419, + 151, + 432 + ], + "spans": [ + { + "bbox": [ + 51, + 419, + 151, + 432 + ], + "type": "text", + "content": "Assignment problem" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 435, + 294, + 547 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 435, + 294, + 547 + ], + "spans": [ + { + "bbox": [ + 50, + 435, + 294, + 547 + ], + "type": "text", + "content": "The Assignment Problem involves optimally assigning " + }, + { + "bbox": [ + 50, + 435, + 294, + 547 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 50, + 435, + 294, + 547 + ], + "type": "text", + "content": " items to " + }, + { + "bbox": [ + 50, + 435, + 294, + 547 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 50, + 435, + 294, + 547 + ], + "type": "text", + "content": " agents based on a provided " + }, + { + "bbox": [ + 50, + 435, + 294, + 547 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 50, + 435, + 294, + 547 + ], + "type": "text", + "content": " imes " + }, + { + "bbox": [ + 50, + 435, + 294, + 547 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 50, + 435, + 294, + 547 + ], + "type": "text", + "content": " cost matrix, where each entry " + }, + { + "bbox": [ + 50, + 435, + 294, + 547 + ], + "type": "inline_equation", + "content": "extcost\\_matrix[i][j]" + }, + { + "bbox": [ + 50, + 435, + 294, + 547 + ], + "type": "text", + "content": " denotes the cost of assigning item " + }, + { + "bbox": [ + 50, + 435, + 294, + 547 + ], + "type": "inline_equation", + "content": "i + 1" + }, + { + "bbox": [ + 50, + 435, + 294, + 547 + ], + "type": "text", + "content": " to 
agent " + }, + { + "bbox": [ + 50, + 435, + 294, + 547 + ], + "type": "inline_equation", + "content": "j + 1" + }, + { + "bbox": [ + 50, + 435, + 294, + 547 + ], + "type": "text", + "content": ". The goal is to identify a permutation—each item assigned exactly one agent—that minimizes the total assignment cost. Formally, this is an optimization problem to find a permutation " + }, + { + "bbox": [ + 50, + 435, + 294, + 547 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 50, + 435, + 294, + 547 + ], + "type": "text", + "content": " of agents such that the total cost " + }, + { + "bbox": [ + 50, + 435, + 294, + 547 + ], + "type": "inline_equation", + "content": "\\sum i = 1^n extcost\\_matrix[i - 1][\\pi(i) - 1]" + }, + { + "bbox": [ + 50, + 435, + 294, + 547 + ], + "type": "text", + "content": " is minimized. The solution returned includes both the minimal total cost and the corresponding optimal assignments." + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 52, + 557, + 167, + 670 + ], + "blocks": [ + { + "bbox": [ + 121, + 382, + 223, + 395 + ], + "lines": [ + { + "bbox": [ + 121, + 382, + 223, + 395 + ], + "spans": [ + { + "bbox": [ + 121, + 382, + 223, + 395 + ], + "type": "text", + "content": "Table 3: Aircraft landing" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 52, + 557, + 167, + 670 + ], + "lines": [ + { + "bbox": [ + 52, + 557, + 167, + 670 + ], + "spans": [ + { + "bbox": [ + 52, + 557, + 167, + 670 + ], + "type": "table", + "html": "
MethodScore
Classical Solver1
BestOfN1
Refine1
FunSearch1
AIDE1
ReEvo1
MCTS1
EoH1
", + "image_path": "80e0672adbc3bc370f8f401854e7171a4d6db329894679d039400745e48eb3cb.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 317, + 54, + 416, + 67 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 54, + 416, + 67 + ], + "spans": [ + { + "bbox": [ + 317, + 54, + 416, + 67 + ], + "type": "text", + "content": "Assortment problem" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 315, + 71, + 559, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 71, + 559, + 224 + ], + "spans": [ + { + "bbox": [ + 315, + 71, + 559, + 224 + ], + "type": "text", + "content": "This optimization problem involves arranging a set of rectangular pieces within available stock rectangles to minimize the overall waste area percentage. Each stock rectangle has a defined area, and each piece—which may be rotated by " + }, + { + "bbox": [ + 315, + 71, + 559, + 224 + ], + "type": "inline_equation", + "content": "90^{\\circ}" + }, + { + "bbox": [ + 315, + 71, + 559, + 224 + ], + "type": "text", + "content": "—must be fully contained within a stock without overlapping with other pieces. Additionally, each piece type has specific total minimum and maximum placement limits. You have access to an unlimited number of stocks for each type, but you may use at most two stock types. The objective is to achieve the lowest possible waste area percentage, defined as the ratio of unused area to the total stock area. Solutions must ensure efficient resource utilization while satisfying all geometric and quantity constraints. Any violation of these constraints results in no score." 
+ } + ] + } + ], + "index": 10 + }, + { + "type": "table", + "bbox": [ + 317, + 234, + 502, + 347 + ], + "blocks": [ + { + "bbox": [ + 111, + 677, + 233, + 689 + ], + "lines": [ + { + "bbox": [ + 111, + 677, + 233, + 689 + ], + "spans": [ + { + "bbox": [ + 111, + 677, + 233, + 689 + ], + "type": "text", + "content": "Table 4: Assignment problem" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 317, + 234, + 502, + 347 + ], + "lines": [ + { + "bbox": [ + 317, + 234, + 502, + 347 + ], + "spans": [ + { + "bbox": [ + 317, + 234, + 502, + 347 + ], + "type": "table", + "html": "
MethodScore
Classical Solver0.3222852468406736
BestOfN0.36161788534475603
Refine0.10475936163370339
FunSearch0.3622886282031154
AIDE0.1698107561339298
ReEvo0.24290833308629933
MCTS0.1757439194813797
EoH0.2519474328966603
", + "image_path": "1d7a5b5a0d918fa26138e139ee5f84229a4e985dacfb1f0643e8314d50d8e7f7.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 316, + 394, + 462, + 407 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 394, + 462, + 407 + ], + "spans": [ + { + "bbox": [ + 316, + 394, + 462, + 407 + ], + "type": "text", + "content": "Bin packing - one-dimensional" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 315, + 411, + 559, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 411, + 559, + 544 + ], + "spans": [ + { + "bbox": [ + 315, + 411, + 559, + 544 + ], + "type": "text", + "content": "The **one-dimensional bin packing problem** seeks to minimize the number of bins required to pack a given set of items while ensuring that the sum of item sizes within each bin does not exceed the specified bin capacity. Given a test case with an identifier ('id'), a fixed 'bin_capacity', and a list of 'num_items' with their respective sizes ('items'), the objective is to find a packing arrangement that uses the least number of bins. The solution is evaluated based on the total 'num_bins' used, with invalid solutions (e.g., missing or duplicated items, or bins exceeding capacity) incurring a inf heavy penalty. The output must include the number of bins used and a valid assignment of item indices to bins." 
+ } + ] + } + ], + "index": 14 + }, + { + "type": "table", + "bbox": [ + 317, + 555, + 496, + 669 + ], + "blocks": [ + { + "bbox": [ + 378, + 354, + 497, + 366 + ], + "lines": [ + { + "bbox": [ + 378, + 354, + 497, + 366 + ], + "spans": [ + { + "bbox": [ + 378, + 354, + 497, + 366 + ], + "type": "text", + "content": "Table 5: Assortment problem" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 317, + 555, + 496, + 669 + ], + "lines": [ + { + "bbox": [ + 317, + 555, + 496, + 669 + ], + "spans": [ + { + "bbox": [ + 317, + 555, + 496, + 669 + ], + "type": "table", + "html": "
MethodScore
Classical Solver0.9628049317089281
BestOfN0.8933315064694979
Refine0.9870315022407082
FunSearch0.9557154223933677
AIDE0.8366913237780297
ReEvo0.9492158360156572
MCTS0.9396436307329097
EoH0.9693475618912389
", + "image_path": "ee8ea23f151b8868e90fbe35bf9062dedafc6ecd15e07eb3ea88335107dfbc49.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "table_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 357, + 675, + 518, + 688 + ], + "lines": [ + { + "bbox": [ + 357, + 675, + 518, + 688 + ], + "spans": [ + { + "bbox": [ + 357, + 675, + 518, + 688 + ], + "type": "text", + "content": "Table 6: Bin packing - one-dimensional" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 54, + 202, + 66 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 54, + 202, + 66 + ], + "spans": [ + { + "bbox": [ + 53, + 54, + 202, + 66 + ], + "type": "text", + "content": "Capacitated warehouse location" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 53, + 68, + 293, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 68, + 293, + 266 + ], + "spans": [ + { + "bbox": [ + 53, + 68, + 293, + 266 + ], + "type": "text", + "content": "The Capacitated Warehouse Location Problem with Splittable Demand aims to determine which warehouses to open and how to allocate portions of customer demands among these warehouses in order to minimize total costs. Given a set of potential warehouse locations, each with a fixed opening cost and capacity limit, and a set of customers with individual demands and associated per-unit assignment costs to each warehouse, the objective is to decide which warehouses to open and how to distribute each customer's demand among these open warehouses. The allocation must satisfy the constraint that the sum of portions assigned to each customer equals their total demand, and that the total demand allocated to any warehouse does not exceed its capacity. The optimization seeks to minimize the sum of fixed warehouse opening costs and the total per-unit assignment costs. 
However, if any solution violates these constraints (i.e., a customer's demand is not fully satisfied or a warehouse's capacity is exceeded), then no score is provided." + } + ] + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 52, + 277, + 229, + 388 + ], + "blocks": [ + { + "bbox": [ + 52, + 277, + 229, + 388 + ], + "lines": [ + { + "bbox": [ + 52, + 277, + 229, + 388 + ], + "spans": [ + { + "bbox": [ + 52, + 277, + 229, + 388 + ], + "type": "table", + "html": "
MethodScore
Classical Solver0.6976400141361688
BestOfN0.0
Refine0.7518838886310322
FunSearch0.7196713948459038
AIDE0.6647355906610447
ReEvo0.6715266955394039
MCTS0.6891495773105485
EoH0.7502493181324346
", + "image_path": "34dc9e46e81271756af4bf590f60077ed6c6035553f204712a8b8d812acc3266.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 432, + 192, + 445 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 432, + 192, + 445 + ], + "spans": [ + { + "bbox": [ + 53, + 432, + 192, + 445 + ], + "type": "text", + "content": "Common due date scheduling" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 447, + 293, + 546 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 447, + 293, + 546 + ], + "spans": [ + { + "bbox": [ + 53, + 447, + 293, + 546 + ], + "type": "text", + "content": "Given floor, where " + }, + { + "bbox": [ + 53, + 447, + 293, + 546 + ], + "type": "inline_equation", + "content": "h" + }, + { + "bbox": [ + 53, + 447, + 293, + 546 + ], + "type": "text", + "content": " is a predefined fraction (defaulting to 0.6). The goal is to determine an optimal job sequence that minimizes the penalty, calculated as follows: for each job, if its completion time " + }, + { + "bbox": [ + 53, + 447, + 293, + 546 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 53, + 447, + 293, + 546 + ], + "type": "text", + "content": " is earlier than " + }, + { + "bbox": [ + 53, + 447, + 293, + 546 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 53, + 447, + 293, + 546 + ], + "type": "text", + "content": ", an earliness penalty of " + }, + { + "bbox": [ + 53, + 447, + 293, + 546 + ], + "type": "inline_equation", + "content": "aimes(d - C)" + }, + { + "bbox": [ + 53, + 447, + 293, + 546 + ], + "type": "text", + "content": " is incurred; if " + }, + { + "bbox": [ + 53, + 447, + 293, + 546 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 53, + 447, + 293, + 546 + ], + "type": "text", + "content": " exceeds " + }, + { + "bbox": [ + 53, + 447, + 293, + 546 + ], + "type": "inline_equation", + 
"content": "d" + }, + { + "bbox": [ + 53, + 447, + 293, + 546 + ], + "type": "text", + "content": ", a tardiness penalty of " + }, + { + "bbox": [ + 53, + 447, + 293, + 546 + ], + "type": "inline_equation", + "content": "bimes(C - d)" + }, + { + "bbox": [ + 53, + 447, + 293, + 546 + ], + "type": "text", + "content": " is applied; otherwise, no penalty is incurred. The problem requires finding a permutation of job indices (1-based) that minimizes the total penalty. The evaluation metric sums these penalties for a given schedule." + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 52, + 557, + 229, + 669 + ], + "blocks": [ + { + "bbox": [ + 91, + 398, + 253, + 408 + ], + "lines": [ + { + "bbox": [ + 91, + 398, + 253, + 408 + ], + "spans": [ + { + "bbox": [ + 91, + 398, + 253, + 408 + ], + "type": "text", + "content": "Table 7: Capacitated warehouse location" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 52, + 557, + 229, + 669 + ], + "lines": [ + { + "bbox": [ + 52, + 557, + 229, + 669 + ], + "spans": [ + { + "bbox": [ + 52, + 557, + 229, + 669 + ], + "type": "table", + "html": "
MethodScore
Classical Solver0.9187662046144239
BestOfN0.97731110557282
Refine0.9776844987221935
FunSearch0.976604327923604
AIDE0.6291657473867996
ReEvo0.9743199070415761
MCTS0.8838457578182489
EoH0.9773286503168127
", + "image_path": "62a6f341da3b481b237dc66465655ab43268510b005cdc9b2fac8b1a88709108.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 318, + 54, + 459, + 66 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 54, + 459, + 66 + ], + "spans": [ + { + "bbox": [ + 318, + 54, + 459, + 66 + ], + "type": "text", + "content": "Constrained guillotine cutting" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 318, + 68, + 558, + 311 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 68, + 558, + 311 + ], + "spans": [ + { + "bbox": [ + 318, + 68, + 558, + 311 + ], + "type": "text", + "content": "The problem involves optimizing the guillotine feasible placement of a set of rectangular pieces on a given stock sheet to maximize total value. Each piece type is characterized by its length, width, an upper bound on the number of times it may appear in the final cutting pattern, and an assigned value. Orientation of the pieces is fixed (the edges of the pieces are parallel to the edges of the sheet). The task is to select and place pieces such that each lies completely within the boundaries of the stock sheet, no two pieces overlap, and the number of pieces of any type does not exceed its specified maximum. A set of placements is considered guillotine feasible if there exists at least one straight cut (vertical or horizontal) that does not slice through any rectangle, and the property holds recursively on the resulting subregions. Empty regions or regions exactly matching a placed piece are considered valid. The objective is to maximize the sum of the values of the placed pieces; however, if any spatial or count constraint is violated, the solution is deemed invalid. 
The output is defined as a dictionary reporting the total value and a list of placements, with each placement specified by the piece type index, x and y coordinates, placed dimensions, and orientation flag." + } + ] + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 318, + 321, + 495, + 433 + ], + "blocks": [ + { + "bbox": [ + 96, + 677, + 249, + 689 + ], + "lines": [ + { + "bbox": [ + 96, + 677, + 249, + 689 + ], + "spans": [ + { + "bbox": [ + 96, + 677, + 249, + 689 + ], + "type": "text", + "content": "Table 8: Common due date scheduling" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 318, + 321, + 495, + 433 + ], + "lines": [ + { + "bbox": [ + 318, + 321, + 495, + 433 + ], + "spans": [ + { + "bbox": [ + 318, + 321, + 495, + 433 + ], + "type": "table", + "html": "
MethodScore
Classical Solver0.7844900098230463
BestOfN0.0
Refine0.981513704843915
FunSearch0.956424099109148
AIDE0.9102922923098641
ReEvo0.0
MCTS0.0
EoH0.0
", + "image_path": "97261b48dee41841c44d566a07844b6ea9c1508689f3c76c552cbb394dd6373b.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 361, + 441, + 515, + 453 + ], + "lines": [ + { + "bbox": [ + 361, + 441, + 515, + 453 + ], + "spans": [ + { + "bbox": [ + 361, + 441, + 515, + 453 + ], + "type": "text", + "content": "Table 9: Constrained guillotine cutting" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 318, + 476, + 480, + 489 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 476, + 480, + 489 + ], + "spans": [ + { + "bbox": [ + 318, + 476, + 480, + 489 + ], + "type": "text", + "content": "Constrained non-guillotine cutting" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 318, + 491, + 558, + 633 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 491, + 558, + 633 + ], + "spans": [ + { + "bbox": [ + 318, + 491, + 558, + 633 + ], + "type": "text", + "content": "The constrained non-guillotine cutting problem involves optimally arranging rectangular pieces onto a single rectangular stock with fixed dimensions (stock_length and stock_width). Each piece type has defined length, width, value, and minimum and maximum usage constraints. The optimization goal is to maximize the total value of all placed pieces, subject to constraints that each piece is entirely within stock boundaries, pieces do not overlap, each piece type's usage falls within its specified [min, max] range, and pieces may optionally be rotated by " + }, + { + "bbox": [ + 318, + 491, + 558, + 633 + ], + "type": "inline_equation", + "content": "90^{\\circ}" + }, + { + "bbox": [ + 318, + 491, + 558, + 633 + ], + "type": "text", + "content": ". 
The solution returns a set of placements indicating piece type, bottom-left coordinates " + }, + { + "bbox": [ + 318, + 491, + 558, + 633 + ], + "type": "inline_equation", + "content": "(\\mathrm{x},\\mathrm{y})" + }, + { + "bbox": [ + 318, + 491, + 558, + 633 + ], + "type": "text", + "content": ", and rotation status. If any constraint is violated, the solution receives no score." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 318, + 644, + 403, + 657 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 644, + 403, + 657 + ], + "spans": [ + { + "bbox": [ + 318, + 644, + 403, + 657 + ], + "type": "text", + "content": "Container loading" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 318, + 659, + 558, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 659, + 558, + 704 + ], + "spans": [ + { + "bbox": [ + 318, + 659, + 558, + 704 + ], + "type": "text", + "content": "Solves a container loading problem: Given a 3D container of specified dimensions and multiple box types—each defined by dimensions, orientation constraints, and available quantity—the goal is to optimally place these boxes within" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 52, + 52, + 229, + 163 + ], + "blocks": [ + { + "bbox": [ + 52, + 52, + 229, + 163 + ], + "lines": [ + { + "bbox": [ + 52, + 52, + 229, + 163 + ], + "spans": [ + { + "bbox": [ + 52, + 52, + 229, + 163 + ], + "type": "table", + "html": "
MethodScore
Classical Solver0.5585076432266227
BestOfN0.8760613343780126
Refine0.99138085452391
FunSearch0.9623447685846964
AIDE0.8555320134962818
ReEvo0.9264764236682984
MCTS0.7944732650186651
EoH0.9106930512513293
", + "image_path": "4cd0765a6348b5e88b1231416b40b1a5548d74c49891fb0f535b3a488639f904.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 50, + 210, + 293, + 299 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 210, + 293, + 299 + ], + "spans": [ + { + "bbox": [ + 50, + 210, + 293, + 299 + ], + "type": "text", + "content": "the container to maximize the volume utilization ratio. Each box placement must respect orientation constraints (vertical alignment flags), fit entirely within container boundaries, and avoid overlaps. The solution returns precise coordinates and orientations for each box placement, quantified by a volume utilization score calculated as the total volume of placed boxes divided by the container volume. Invalid placements result in a score of 0.0." + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 52, + 312, + 235, + 425 + ], + "blocks": [ + { + "bbox": [ + 82, + 171, + 261, + 184 + ], + "lines": [ + { + "bbox": [ + 82, + 171, + 261, + 184 + ], + "spans": [ + { + "bbox": [ + 82, + 171, + 261, + 184 + ], + "type": "text", + "content": "Table 10: Constrained non-guillotine cutting" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 52, + 312, + 235, + 425 + ], + "lines": [ + { + "bbox": [ + 52, + 312, + 235, + 425 + ], + "spans": [ + { + "bbox": [ + 52, + 312, + 235, + 425 + ], + "type": "table", + "html": "
MethodScore
Classical Solver0.09700224776623062
BestOfN0.8163545342051534
Refine0.18895711345505883
FunSearch0.23070987019597894
AIDE0.7592850816892841
ReEvo0.716081346719743
MCTS0.5451472798828618
EoH0.7795824394970114
", + "image_path": "0978960c07d8958bf634c685b86a78f0a537e3e3f819cabf1c8daefcc47b3df5.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 51, + 483, + 251, + 496 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 483, + 251, + 496 + ], + "spans": [ + { + "bbox": [ + 51, + 483, + 251, + 496 + ], + "type": "text", + "content": "Container loading with weight restrictions" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 506, + 294, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 506, + 294, + 704 + ], + "spans": [ + { + "bbox": [ + 50, + 506, + 294, + 704 + ], + "type": "text", + "content": "The Container Loading with Weight Restrictions problem aims to maximize the utilization of a container's volume by selecting and strategically placing boxes inside it. Given a container with specified dimensions (length, width, height) and multiple types of boxes, each characterized by their dimensions, quantities, weights, and load-bearing constraints, the optimization goal is to determine the placement and orientation of these boxes (with each box allowed three possible orientations) that maximizes the ratio of total occupied box volume to container volume. The solution must strictly adhere to spatial constraints (boxes must fit entirely within the container without overlapping), load-bearing constraints (boxes must support the weight of boxes stacked above them according to given limits), and orientation restrictions. The optimization quality is evaluated by the achieved utilization metric, defined as the total volume of successfully placed boxes divided by the container volume; if any constraint is violated, the utilization score is zero." 
+ } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 318, + 52, + 506, + 164 + ], + "blocks": [ + { + "bbox": [ + 115, + 432, + 229, + 444 + ], + "lines": [ + { + "bbox": [ + 115, + 432, + 229, + 444 + ], + "spans": [ + { + "bbox": [ + 115, + 432, + 229, + 444 + ], + "type": "text", + "content": "Table 11: Container loading" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 318, + 52, + 506, + 164 + ], + "lines": [ + { + "bbox": [ + 318, + 52, + 506, + 164 + ], + "spans": [ + { + "bbox": [ + 318, + 52, + 506, + 164 + ], + "type": "table", + "html": "
MethodScore
Classical Solver0.009225308452359507
BestOfN0.13669723873453465
Refine0.07941319051933145
FunSearch0.2919729304847129
AIDE0.12860429344072807
ReEvo0.1420943670465572
MCTS0.04806324649022297
EoH0.051972410039456414
", + "image_path": "428824232b4a92188e459544fa6dda1bd13bf81b70ba1a5b7ec4cc83d9e076f1.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 316, + 209, + 424, + 223 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 209, + 424, + 223 + ], + "spans": [ + { + "bbox": [ + 316, + 209, + 424, + 223 + ], + "type": "text", + "content": "Corporate structuring" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 315, + 232, + 559, + 332 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 232, + 559, + 332 + ], + "spans": [ + { + "bbox": [ + 315, + 232, + 559, + 332 + ], + "type": "text", + "content": "Given N countries, each defined by: " + }, + { + "bbox": [ + 315, + 232, + 559, + 332 + ], + "type": "inline_equation", + "content": "\\bullet" + }, + { + "bbox": [ + 315, + 232, + 559, + 332 + ], + "type": "text", + "content": " a tax code (1: Exemption, 2: Deduction, 3: Source-by-source Pooling, 4: Worldwide Pooling), " + }, + { + "bbox": [ + 315, + 232, + 559, + 332 + ], + "type": "inline_equation", + "content": "\\bullet" + }, + { + "bbox": [ + 315, + 232, + 559, + 332 + ], + "type": "text", + "content": " a foreign income tax rate, " + }, + { + "bbox": [ + 315, + 232, + 559, + 332 + ], + "type": "inline_equation", + "content": "\\bullet" + }, + { + "bbox": [ + 315, + 232, + 559, + 332 + ], + "type": "text", + "content": " a domestic income tax rate, and " + }, + { + "bbox": [ + 315, + 232, + 559, + 332 + ], + "type": "inline_equation", + "content": "\\bullet" + }, + { + "bbox": [ + 315, + 232, + 559, + 332 + ], + "type": "text", + "content": " a profit, and a withholding tax matrix W (where W[i][j] is the rate on dividends from country i to j), construct a valid tree-structured corporate hierarchy (directed, acyclic, connected) rooted at a designated target (whose parent is 0) such that every country with profit " + }, + { + "bbox": [ + 315, + 232, + 559, + 332 
+ ], + "type": "inline_equation", + "content": ">0" + }, + { + "bbox": [ + 315, + 232, + 559, + 332 + ], + "type": "text", + "content": " appears exactly once." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 315, + 334, + 559, + 537 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 334, + 559, + 537 + ], + "spans": [ + { + "bbox": [ + 315, + 334, + 559, + 537 + ], + "type": "text", + "content": "For each country i, define S as the set of nodes in its subtree (note the subtree includes itself) with a positive profit. Also consider the set of child nodes C_i. If i is not a root country but in the tree, it will send all its income (after tax) to its parent j. Denote this amount as F[i][j]. Assume the total income after domestic tax and withholding tax for country i is: domestic_iincome_i * (1 - domestic_rate_i) + (\\sum_{k \\in C_i} F[k][i] * (1 - W[k][i])) The extra foreign tax under different tax code is defined as follows: 1. No extra tax. 2. Foreign income tax from the child nodes: foreign_iincome_rate_i * (\\sum_{k \\in C_i} F[k][i] * (1 - W[k][i])) 3. Foreign income tax computed from the source nodes in each child's subtree: " + }, + { + "bbox": [ + 315, + 334, + 559, + 537 + ], + "type": "inline_equation", + "content": "\\sum_{k \\in C_i} \\max(0, F[k][i] * (1 - W[k][i]) - (1 - foreign_iincome_rate_i) * (\\sum_{s \\in S_k} domestic_iincome_s))" + }, + { + "bbox": [ + 315, + 334, + 559, + 537 + ], + "type": "text", + "content": " 4. 
Foreign income tax from all source nodes in the subtree, excluding itself: " + }, + { + "bbox": [ + 315, + 334, + 559, + 537 + ], + "type": "inline_equation", + "content": "\\max(0, \\sum_{k \\in C_i} F[k][i] * (1 - W[k][i]) - (1 - foreign_iincome_rate_i) * (\\sum_{s \\in S_i} domestic_iincome_s) - domestic_iincome_i)" + } + ] + } + ], + "index": 11 + }, + { + "type": "table", + "bbox": [ + 318, + 552, + 495, + 663 + ], + "blocks": [ + { + "bbox": [ + 331, + 171, + 542, + 184 + ], + "lines": [ + { + "bbox": [ + 331, + 171, + 542, + 184 + ], + "spans": [ + { + "bbox": [ + 331, + 171, + 542, + 184 + ], + "type": "text", + "content": "Table 12: Container loading with weight restrictions" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 318, + 552, + 495, + 663 + ], + "lines": [ + { + "bbox": [ + 318, + 552, + 495, + 663 + ], + "spans": [ + { + "bbox": [ + 318, + 552, + 495, + 663 + ], + "type": "table", + "html": "
MethodScore
Classical Solver0.9450572839481785
BestOfN0.9450572839481785
Refine0.9726337326585759
FunSearch0.777775452943618
AIDE0.9450572839481785
ReEvo0.5014939649568603
MCTS0.9844897288603699
EoH0.9431107030735252
", + "image_path": "72f88d648949a3f5f81a832559bd20ad7ec54bf2d97f4b2400f475d88ef4a36d.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 373, + 672, + 501, + 684 + ], + "lines": [ + { + "bbox": [ + 373, + 672, + 501, + 684 + ], + "spans": [ + { + "bbox": [ + 373, + 672, + 501, + 684 + ], + "type": "text", + "content": "Table 13: Corporate structuring" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 54, + 132, + 66 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 54, + 132, + 66 + ], + "spans": [ + { + "bbox": [ + 53, + 54, + 132, + 66 + ], + "type": "text", + "content": "Crew scheduling" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 53, + 69, + 293, + 234 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 69, + 293, + 234 + ], + "spans": [ + { + "bbox": [ + 53, + 69, + 293, + 234 + ], + "type": "text", + "content": "The Crew Scheduling Problem involves assigning each task—with defined start and finish times—to exactly one crew, aiming to minimize the total transition costs between consecutive tasks. Each crew's schedule must satisfy three constraints: tasks within a crew must not overlap; valid transitions (with associated costs) must exist between every consecutive pair of tasks; and the crew's total duty time (from the start of the first task to the finish of the last) cannot exceed a specified time limit. Additionally, no more than " + }, + { + "bbox": [ + 53, + 69, + 293, + 234 + ], + "type": "inline_equation", + "content": "\\mathrm{K}" + }, + { + "bbox": [ + 53, + 69, + 293, + 234 + ], + "type": "text", + "content": " crews can be used to cover all tasks. Solutions violating any of these constraints are considered infeasible and receive no score. 
The optimization objective is therefore to determine assignments of tasks to no more than " + }, + { + "bbox": [ + 53, + 69, + 293, + 234 + ], + "type": "inline_equation", + "content": "\\mathrm{K}" + }, + { + "bbox": [ + 53, + 69, + 293, + 234 + ], + "type": "text", + "content": " crews that minimize the sum of transition costs while strictly adhering to all constraints, yielding a feasible and cost-effective scheduling solution." + } + ] + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 52, + 244, + 235, + 356 + ], + "blocks": [ + { + "bbox": [ + 52, + 244, + 235, + 356 + ], + "lines": [ + { + "bbox": [ + 52, + 244, + 235, + 356 + ], + "spans": [ + { + "bbox": [ + 52, + 244, + 235, + 356 + ], + "type": "table", + "html": "
MethodScore
Classical Solver0.45498811952880935
BestOfN0.4483461488661745
Refine0.6690343590115082
FunSearch0.5536756258756895
AIDE0.44095505708697136
ReEvo0.45225267224663634
MCTS0.4446817469828879
EoH0.5864457661923881
", + "image_path": "7544e093b87cf336b81774acd3b9ab621799e0e15a8d0ba485f081d12ce12f7e.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 399, + 200, + 412 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 399, + 200, + 412 + ], + "spans": [ + { + "bbox": [ + 53, + 399, + 200, + 412 + ], + "type": "text", + "content": "Equitable partitioning problem" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 415, + 293, + 546 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 415, + 293, + 546 + ], + "spans": [ + { + "bbox": [ + 53, + 415, + 293, + 546 + ], + "type": "text", + "content": "The task is to partition a set of individuals—each characterized by multiple binary attributes—into exactly 8 groups such that the distribution of attribute values is as balanced as possible across these groups. For each attribute, count the number of individuals with a '1' in each group. The optimization objective is to minimize the total imbalance, which is defined as follows: for each attribute, calculate the absolute differences between the count in each group and the mean count across all groups, take the average of these differences, and then sum these averages over all attributes. The goal is to determine a group assignment for each individual that achieves the lowest possible total imbalance score." 
+ } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 52, + 557, + 229, + 668 + ], + "blocks": [ + { + "bbox": [ + 119, + 365, + 226, + 376 + ], + "lines": [ + { + "bbox": [ + 119, + 365, + 226, + 376 + ], + "spans": [ + { + "bbox": [ + 119, + 365, + 226, + 376 + ], + "type": "text", + "content": "Table 14: Crew scheduling" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 52, + 557, + 229, + 668 + ], + "lines": [ + { + "bbox": [ + 52, + 557, + 229, + 668 + ], + "spans": [ + { + "bbox": [ + 52, + 557, + 229, + 668 + ], + "type": "table", + "html": "
MethodScore
Classical Solver1.0
BestOfN1.0
Refine1.0
FunSearch1.0
AIDE0.7777777777777778
ReEvo0.7777777777777778
MCTS1.0
EoH1.0
", + "image_path": "3c12705cc741d7f0c767659c343b383b159ea76d63677ef4b52e1b95e57153c7.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 318, + 54, + 444, + 66 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 54, + 444, + 66 + ], + "spans": [ + { + "bbox": [ + 318, + 54, + 444, + 66 + ], + "type": "text", + "content": "Euclidean Steiner problem" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 318, + 68, + 559, + 221 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 68, + 559, + 221 + ], + "spans": [ + { + "bbox": [ + 318, + 68, + 559, + 221 + ], + "type": "text", + "content": "Given a set of 2D points (terminals), the goal of the Euclidean Steiner Problem is to compute a tree connecting all terminals with minimum total length. The total length is measured as the sum of Euclidean distances (where the Euclidean distance between two points " + }, + { + "bbox": [ + 318, + 68, + 559, + 221 + ], + "type": "inline_equation", + "content": "(x1, y1)" + }, + { + "bbox": [ + 318, + 68, + 559, + 221 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 318, + 68, + 559, + 221 + ], + "type": "inline_equation", + "content": "(x2, y2)" + }, + { + "bbox": [ + 318, + 68, + 559, + 221 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 318, + 68, + 559, + 221 + ], + "type": "inline_equation", + "content": "sqrt((x1 - x2)^2 + (y1 - y2)^2)" + }, + { + "bbox": [ + 318, + 68, + 559, + 221 + ], + "type": "text", + "content": "). Unlike a Minimum Spanning Tree (MST) computed solely on the given terminals, a Steiner tree may introduce extra points, called Steiner points, to reduce the overall length. In this formulation, it is assumed that the final candidate tree's total length is given by the MST computed on the union of the original terminals and the reported Steiner points. 
A lower ratio (candidate_tree_length/MST ORIGINAL_length) indicates a better solution." + } + ] + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 318, + 232, + 500, + 342 + ], + "blocks": [ + { + "bbox": [ + 90, + 677, + 254, + 689 + ], + "lines": [ + { + "bbox": [ + 90, + 677, + 254, + 689 + ], + "spans": [ + { + "bbox": [ + 90, + 677, + 254, + 689 + ], + "type": "text", + "content": "Table 15: Equitable partitioning problem" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 318, + 232, + 500, + 342 + ], + "lines": [ + { + "bbox": [ + 318, + 232, + 500, + 342 + ], + "spans": [ + { + "bbox": [ + 318, + 232, + 500, + 342 + ], + "type": "table", + "html": "
MethodScore
Classical Solver0.9779703480188361
BestOfN0.6291391910535526
Refine0.688025642110573
FunSearch0.6968176110449371
AIDE0.04483890014026932
ReEvo0.5469067768233761
MCTS0.43093954323065975
EoH0.5917817000598826
", + "image_path": "287382a9ba0debd85980e8bc0ba5ea3829b4b560c6945e4ecea7dc2d6c560da7.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 318, + 388, + 419, + 400 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 388, + 419, + 400 + ], + "spans": [ + { + "bbox": [ + 318, + 388, + 419, + 400 + ], + "type": "text", + "content": "Flow shop scheduling" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 318, + 403, + 558, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 403, + 558, + 513 + ], + "spans": [ + { + "bbox": [ + 318, + 403, + 558, + 513 + ], + "type": "text", + "content": "Given " + }, + { + "bbox": [ + 318, + 403, + 558, + 513 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 318, + 403, + 558, + 513 + ], + "type": "text", + "content": " jobs and " + }, + { + "bbox": [ + 318, + 403, + 558, + 513 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 318, + 403, + 558, + 513 + ], + "type": "text", + "content": " machines, the goal of the flow shop scheduling problem is to determine the optimal job sequence that minimizes the makespan, i.e., the total time required to complete all jobs on all machines. Each job follows the same machine order, and the processing times are specified in an " + }, + { + "bbox": [ + 318, + 403, + 558, + 513 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 318, + 403, + 558, + 513 + ], + "type": "text", + "content": "imes " + }, + { + "bbox": [ + 318, + 403, + 558, + 513 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 318, + 403, + 558, + 513 + ], + "type": "text", + "content": " matrix. The output is a permutation of job indices representing the processing order. If the constraints are not satisfied (e.g., invalid job sequencing), the solution receives no score. 
The objective is to optimize the makespan using the classical flow shop recurrence." + } + ] + } + ], + "index": 13 + }, + { + "type": "table", + "bbox": [ + 318, + 523, + 495, + 635 + ], + "blocks": [ + { + "bbox": [ + 364, + 351, + 511, + 362 + ], + "lines": [ + { + "bbox": [ + 364, + 351, + 511, + 362 + ], + "spans": [ + { + "bbox": [ + 364, + 351, + 511, + 362 + ], + "type": "text", + "content": "Table 16: Euclidean Steiner problem" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 318, + 523, + 495, + 635 + ], + "lines": [ + { + "bbox": [ + 318, + 523, + 495, + 635 + ], + "spans": [ + { + "bbox": [ + 318, + 523, + 495, + 635 + ], + "type": "table", + "html": "
MethodScore
Classical Solver0.9222700445897257
BestOfN0.874217493803887
Refine0.8463439348165006
FunSearch0.8537338049420798
AIDE0.9144895115672386
ReEvo0.8424667927400846
MCTS0.9242143967817102
EoH0.940154419652199
", + "image_path": "0aa1e9e9a4cf46fb810595679d6adbf788c8a69f1e96077a330f035fb42abeff.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_body" + } + ], + "index": 14 + }, + { + "bbox": [ + 374, + 643, + 501, + 654 + ], + "lines": [ + { + "bbox": [ + 374, + 643, + 501, + 654 + ], + "spans": [ + { + "bbox": [ + 374, + 643, + 501, + 654 + ], + "type": "text", + "content": "Table 17: Flow shop scheduling" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 318, + 677, + 471, + 690 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 677, + 471, + 690 + ], + "spans": [ + { + "bbox": [ + 318, + 677, + 471, + 690 + ], + "type": "text", + "content": "Generalised assignment problem" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 318, + 693, + 482, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 693, + 482, + 704 + ], + "spans": [ + { + "bbox": [ + 318, + 693, + 482, + 704 + ], + "type": "text", + "content": "Generalized Assignment Problem (GAP)" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 54, + 294, + 209 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 54, + 294, + 209 + ], + "spans": [ + { + "bbox": [ + 50, + 54, + 294, + 209 + ], + "type": "text", + "content": "The Generalized Assignment Problem (GAP) involves assigning " + }, + { + "bbox": [ + 50, + 54, + 294, + 209 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 50, + 54, + 294, + 209 + ], + "type": "text", + "content": " jobs to " + }, + { + "bbox": [ + 50, + 54, + 294, + 209 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 50, + 54, + 294, + 209 + ], + "type": "text", + "content": " agents such that each job is assigned to exactly one agent, and the resource consumption for each agent does not exceed its 
capacity. The objective is to optimize the total cost based on the problem type. When formulated as a maximization problem, the goal is to maximize the total cost; when formulated as a minimization problem, the goal is to minimize the total cost. Given a cost matrix (representing the cost of assigning jobs to agents), a consumption matrix (indicating the resource usage per assignment), and capacities (the resource limits for each agent), the task is to find a valid assignment that meets the capacity constraints while optimizing the total cost as specified by the problem indicator." + } + ] + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 52, + 216, + 230, + 328 + ], + "blocks": [ + { + "bbox": [ + 52, + 216, + 230, + 328 + ], + "lines": [ + { + "bbox": [ + 52, + 216, + 230, + 328 + ], + "spans": [ + { + "bbox": [ + 52, + 216, + 230, + 328 + ], + "type": "table", + "html": "
MethodScore
Classical Solver1.000509368510793
BestOfN1.000152715871272
Refine0.9997973477884884
FunSearch0.9360910283983461
AIDE1.000152715871272
ReEvo1.0002083856508814
MCTS1.0001026538510593
EoH0.9793902133221158
", + "image_path": "4ae1a2d4fa564cca47a694e279dc0d48198b1277546173bf053c33584be0aa3d.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 50, + 369, + 133, + 383 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 369, + 133, + 383 + ], + "spans": [ + { + "bbox": [ + 50, + 369, + 133, + 383 + ], + "type": "text", + "content": "Graph colouring" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 384, + 294, + 473 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 384, + 294, + 473 + ], + "spans": [ + { + "bbox": [ + 50, + 384, + 294, + 473 + ], + "type": "text", + "content": "Given a graph in DIMACS format with vertices, edges, and an adjacency list, the goal is to assign a positive integer color (1..n) to each vertex while ensuring that no two adjacent vertices share the same color. The objective is to minimize the number of distinct colors used. If any two adjacent vertices have the same color, the solution is invalid and receives no score. Otherwise, the score is equal to the number of distinct colors used, with a lower score being better." + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 52, + 482, + 230, + 594 + ], + "blocks": [ + { + "bbox": [ + 85, + 335, + 259, + 348 + ], + "lines": [ + { + "bbox": [ + 85, + 335, + 259, + 348 + ], + "spans": [ + { + "bbox": [ + 85, + 335, + 259, + 348 + ], + "type": "text", + "content": "Table 18: Generalised assignment problem" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 52, + 482, + 230, + 594 + ], + "lines": [ + { + "bbox": [ + 52, + 482, + 230, + 594 + ], + "spans": [ + { + "bbox": [ + 52, + 482, + 230, + 594 + ], + "type": "table", + "html": "
MethodScore
Classical Solver0.8679121232535366
BestOfN0.7992347794550977
Refine0.9237393162393163
FunSearch0.8993461774953884
AIDE0.7992347794550977
ReEvo0.8119485901255648
MCTS0.8529682767415909
EoH0.804175457505431
", + "image_path": "fd302b72b8f21038db0e5d0441bbdf8d9a3865c61975063a5326b4986fc88f41.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 51, + 634, + 220, + 647 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 634, + 220, + 647 + ], + "spans": [ + { + "bbox": [ + 51, + 634, + 220, + 647 + ], + "type": "text", + "content": "Hybrid Reentrant Shop Scheduling" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 648, + 294, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 648, + 294, + 706 + ], + "spans": [ + { + "bbox": [ + 50, + 648, + 294, + 706 + ], + "type": "text", + "content": "The problem is a Hybrid Reentrant Shop Scheduling problem where each of " + }, + { + "bbox": [ + 50, + 648, + 294, + 706 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 50, + 648, + 294, + 706 + ], + "type": "text", + "content": " jobs must sequentially undergo three operations: an initialization phase on one of " + }, + { + "bbox": [ + 50, + 648, + 294, + 706 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 50, + 648, + 294, + 706 + ], + "type": "text", + "content": " identical primary machines, a setup phase on a single remote server, and a final main processing phase on the same primary machine used" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 315, + 54, + 560, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 54, + 560, + 177 + ], + "spans": [ + { + "bbox": [ + 315, + 54, + 560, + 177 + ], + "type": "text", + "content": "for initialization. Jobs are initialized in a fixed natural order using list scheduling, while the setup phase is processed on the remote server in an order specified by a permutation decision variable. 
Additionally, each job is assigned to a primary machine for main processing via a batch_assignment, and on each machine, jobs are processed in natural (initialization) order. The objective is to minimize the makespan, defined as the time when the last job completes its main processing, while ensuring that no machine (primary or server) processes more than one job simultaneously and that all operational precedence constraints are satisfied." + } + ] + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 318, + 186, + 495, + 299 + ], + "blocks": [ + { + "bbox": [ + 117, + 601, + 227, + 615 + ], + "lines": [ + { + "bbox": [ + 117, + 601, + 227, + 615 + ], + "spans": [ + { + "bbox": [ + 117, + 601, + 227, + 615 + ], + "type": "text", + "content": "Table 19: Graph colouring" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 318, + 186, + 495, + 299 + ], + "lines": [ + { + "bbox": [ + 318, + 186, + 495, + 299 + ], + "spans": [ + { + "bbox": [ + 318, + 186, + 495, + 299 + ], + "type": "table", + "html": "
MethodScore
Classical Solver0.9057971372430776
BestOfN0.9872450518587456
Refine0.9966666343001128
FunSearch1.0001780484032463
AIDE0.7457203947696327
ReEvo0.9820554515396009
MCTS0.9961239866411462
EoH0.9841146688046011
", + "image_path": "5819c6cc93559036bfc18a4f34659127bd440c1958315f284088a1ce262e7485.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 316, + 342, + 414, + 354 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 342, + 414, + 354 + ], + "spans": [ + { + "bbox": [ + 316, + 342, + 414, + 354 + ], + "type": "text", + "content": "Job shop scheduling" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 315, + 357, + 560, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 357, + 560, + 479 + ], + "spans": [ + { + "bbox": [ + 315, + 357, + 560, + 479 + ], + "type": "text", + "content": "The job shop scheduling problem requires assigning nonnegative integer start times to a set of operations, structured into multiple jobs, each composed of sequential operations. Each operation is processed on a specific machine for a given processing time. The optimization goal is to minimize the makespan, defined as the maximum completion time across all jobs. Constraints include (i) sequential processing of operations within each job, meaning each operation cannot start before its preceding operation finishes, and (ii) nonoverlapping scheduling of operations on the same machine. If these constraints are violated, the solution receives no score." 
+ } + ] + } + ], + "index": 13 + }, + { + "type": "table", + "bbox": [ + 318, + 498, + 495, + 609 + ], + "blocks": [ + { + "bbox": [ + 345, + 306, + 529, + 319 + ], + "lines": [ + { + "bbox": [ + 345, + 306, + 529, + 319 + ], + "spans": [ + { + "bbox": [ + 345, + 306, + 529, + 319 + ], + "type": "text", + "content": "Table 20: Hybrid Reentrant Shop Scheduling" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 318, + 498, + 495, + 609 + ], + "lines": [ + { + "bbox": [ + 318, + 498, + 495, + 609 + ], + "spans": [ + { + "bbox": [ + 318, + 498, + 495, + 609 + ], + "type": "table", + "html": "
MethodScore
Classical Solver0.8202016779421567
BestOfN0.7060712883377539
Refine0.7696287350855926
FunSearch0.8192815531664928
AIDE0.6498336005961379
ReEvo0.7982807066317813
MCTS0.7293663754433233
EoH0.7770594374788831
", + "image_path": "428741c0122b2692348448a5b913a1bde056066776cbc270958894b812f42727.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_body" + } + ], + "index": 14 + }, + { + "bbox": [ + 375, + 617, + 500, + 630 + ], + "lines": [ + { + "bbox": [ + 375, + 617, + 500, + 630 + ], + "spans": [ + { + "bbox": [ + 375, + 617, + 500, + 630 + ], + "type": "text", + "content": "Table 21: Job shop scheduling" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 316, + 654, + 340, + 666 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 654, + 340, + 666 + ], + "spans": [ + { + "bbox": [ + 316, + 654, + 340, + 666 + ], + "type": "text", + "content": "MIS" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 315, + 670, + 559, + 705 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 670, + 559, + 705 + ], + "spans": [ + { + "bbox": [ + 315, + 670, + 559, + 705 + ], + "type": "text", + "content": "The Maximum Independent Set (MIS) problem is a fundamental NP-hard optimization problem in graph theory. 
Given an undirected graph " + }, + { + "bbox": [ + 315, + 670, + 559, + 705 + ], + "type": "inline_equation", + "content": "\\mathrm{G} = (\\mathrm{V},\\mathrm{E})" + }, + { + "bbox": [ + 315, + 670, + 559, + 705 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 315, + 670, + 559, + 705 + ], + "type": "inline_equation", + "content": "\\mathrm{V}" + }, + { + "bbox": [ + 315, + 670, + 559, + 705 + ], + "type": "text", + "content": " is a set of vertices" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 54, + 295, + 89 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 54, + 295, + 89 + ], + "spans": [ + { + "bbox": [ + 50, + 54, + 295, + 89 + ], + "type": "text", + "content": "and " + }, + { + "bbox": [ + 50, + 54, + 295, + 89 + ], + "type": "inline_equation", + "content": "\\mathbf{E}" + }, + { + "bbox": [ + 50, + 54, + 295, + 89 + ], + "type": "text", + "content": " is a set of edges, the goal is to find the largest subset " + }, + { + "bbox": [ + 50, + 54, + 295, + 89 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 50, + 54, + 295, + 89 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 50, + 54, + 295, + 89 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 50, + 54, + 295, + 89 + ], + "type": "text", + "content": " such that no two vertices in " + }, + { + "bbox": [ + 50, + 54, + 295, + 89 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 50, + 54, + 295, + 89 + ], + "type": "text", + "content": " are adjacent (i.e., connected by an edge)." 
+ } + ] + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 52, + 99, + 231, + 212 + ], + "blocks": [ + { + "bbox": [ + 52, + 99, + 231, + 212 + ], + "lines": [ + { + "bbox": [ + 52, + 99, + 231, + 212 + ], + "spans": [ + { + "bbox": [ + 52, + 99, + 231, + 212 + ], + "type": "table", + "html": "
MethodScore
Classical Solver0.986
BestOfN0.8461150261004076
Refine0.9078324503859446
FunSearch0.9002038932676987
AIDE0.8425484500134511
ReEvo0.8342509729450779
MCTS0.8433127163177989
EoH0.8763795109859694
", + "image_path": "bbbdc5ff70a880fb3b8dabef315652e54d5e5b9e7c636cf64ebd852fd9bdf4fb.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 50, + 255, + 258, + 281 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 255, + 258, + 281 + ], + "spans": [ + { + "bbox": [ + 50, + 255, + 258, + 281 + ], + "type": "text", + "content": "Multi-Demand Multidimensional Knapsack problem" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 284, + 295, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 284, + 295, + 443 + ], + "spans": [ + { + "bbox": [ + 50, + 284, + 295, + 443 + ], + "type": "text", + "content": "The Multi-Demand Multidimensional Knapsack Problem (MDMKP) is a binary optimization problem that extends the classical MKP by incorporating both upper-bound " + }, + { + "bbox": [ + 50, + 284, + 295, + 443 + ], + "type": "inline_equation", + "content": "(<=)" + }, + { + "bbox": [ + 50, + 284, + 295, + 443 + ], + "type": "text", + "content": " and lower-bound " + }, + { + "bbox": [ + 50, + 284, + 295, + 443 + ], + "type": "inline_equation", + "content": "(>=)" + }, + { + "bbox": [ + 50, + 284, + 295, + 443 + ], + "type": "text", + "content": " constraints. 
Formally, given n decision variables " + }, + { + "bbox": [ + 50, + 284, + 295, + 443 + ], + "type": "inline_equation", + "content": "x_{j} \\in \\{0,1\\}" + }, + { + "bbox": [ + 50, + 284, + 295, + 443 + ], + "type": "text", + "content": ", the goal is to maximize " + }, + { + "bbox": [ + 50, + 284, + 295, + 443 + ], + "type": "inline_equation", + "content": "\\sum_{j=1}^{n} c_{j} x_{j}" + }, + { + "bbox": [ + 50, + 284, + 295, + 443 + ], + "type": "text", + "content": " subject to " + }, + { + "bbox": [ + 50, + 284, + 295, + 443 + ], + "type": "inline_equation", + "content": "\\sum_{j=1}^{n} a_{ij} x_{j} \\leq b_{i} f o r i = 1, \\ldots, m" + }, + { + "bbox": [ + 50, + 284, + 295, + 443 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 50, + 284, + 295, + 443 + ], + "type": "inline_equation", + "content": "\\sum_{j=1}^{n} a_{ij} x_{j} \\geq b_{i} f o r i = m+1, \\ldots, m+q" + }, + { + "bbox": [ + 50, + 284, + 295, + 443 + ], + "type": "text", + "content": ". Instances are generated from standard MKP problems by varying the number of " + }, + { + "bbox": [ + 50, + 284, + 295, + 443 + ], + "type": "inline_equation", + "content": ">=" + }, + { + "bbox": [ + 50, + 284, + 295, + 443 + ], + "type": "text", + "content": " constraints (with q taking values 1, m/2, or m) and by using two types of cost coefficients (positive and mixed), thereby producing six distinct variants per base instance. This formulation enables rigorous evaluation of algorithms in contexts where both resource limits and demand fulfillment must be simultaneously addressed." 
+ } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 52, + 453, + 230, + 566 + ], + "blocks": [ + { + "bbox": [ + 141, + 219, + 203, + 231 + ], + "lines": [ + { + "bbox": [ + 141, + 219, + 203, + 231 + ], + "spans": [ + { + "bbox": [ + 141, + 219, + 203, + 231 + ], + "type": "text", + "content": "Table 22: MIS" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 52, + 453, + 230, + 566 + ], + "lines": [ + { + "bbox": [ + 52, + 453, + 230, + 566 + ], + "spans": [ + { + "bbox": [ + 52, + 453, + 230, + 566 + ], + "type": "table", + "html": "
MethodScore
Classical Solver0.8957822313136857
BestOfN0.7144432351611377
Refine0.8913402342031996
FunSearch0.8354799525874899
AIDE0.8805432369541204
ReEvo0.8920786376031828
MCTS0.8994648109682947
EoH0.9082814870567889
", + "image_path": "48780e281cba669ba8e060d3cad28d70ccc1e98e257b2bbacbb65c00b55ff8d6.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 51, + 621, + 226, + 635 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 621, + 226, + 635 + ], + "spans": [ + { + "bbox": [ + 51, + 621, + 226, + 635 + ], + "type": "text", + "content": "Multidimensional knapsack problem" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 637, + 294, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 637, + 294, + 706 + ], + "spans": [ + { + "bbox": [ + 50, + 637, + 294, + 706 + ], + "type": "text", + "content": "This problem is a multidimensional knapsack optimization where the objective is to maximize the total profit by selecting decision variables, each associated with a profit and resource consumption across multiple constraints. The decision variables must be chosen such that the sum of resource usage for each constraint does not exceed its corresponding capacity." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 315, + 54, + 560, + 121 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 54, + 560, + 121 + ], + "spans": [ + { + "bbox": [ + 315, + 54, + 560, + 121 + ], + "type": "text", + "content": "Importantly, if any constraint is violated—that is, if the resource consumption for any constraint exceeds its allowed capacity—the solution is deemed infeasible and earns no score. The challenge lies in identifying the optimal combination of items that yields the highest total profit while strictly satisfying all resource constraints." 
+ } + ] + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 317, + 131, + 496, + 244 + ], + "blocks": [ + { + "bbox": [ + 50, + 573, + 295, + 595 + ], + "lines": [ + { + "bbox": [ + 50, + 573, + 295, + 595 + ], + "spans": [ + { + "bbox": [ + 50, + 573, + 295, + 595 + ], + "type": "text", + "content": "Table 23: Multi-Demand Multidimensional Knapsack problem" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 317, + 131, + 496, + 244 + ], + "lines": [ + { + "bbox": [ + 317, + 131, + 496, + 244 + ], + "spans": [ + { + "bbox": [ + 317, + 131, + 496, + 244 + ], + "type": "table", + "html": "
MethodScore
Classical Solver0.9903523477639424
BestOfN0.9401685100749627
Refine0.9947726903727786
FunSearch0.9773347714972982
AIDE0.925117898068383
ReEvo1.0018885951740353
MCTS1.0057751617808324
EoH1.0010112897238341
", + "image_path": "6f0f26aba14980dd6a4df69a7a8dfef8a18f1ea1fd60bff6626b4965f903c7cb.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 315, + 286, + 423, + 300 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 286, + 423, + 300 + ], + "spans": [ + { + "bbox": [ + 315, + 286, + 423, + 300 + ], + "type": "text", + "content": "Open shop scheduling" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 315, + 302, + 560, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 302, + 560, + 456 + ], + "spans": [ + { + "bbox": [ + 315, + 302, + 560, + 456 + ], + "type": "text", + "content": "The Open Shop Scheduling Problem involves scheduling a set of jobs across a set of machines with the goal of minimizing the total completion time (makespan). Each job consists of several operations, where each operation must be processed on a specific machine for a given duration. Unlike other scheduling problems, the Open Shop variant has no predetermined order for processing the operations of a job—operations can be scheduled in any order, but a job can only be processed on one machine at a time, and a machine can only process one job at a time. This creates a complex combinatorial optimization challenge where the scheduler must determine both the sequence of operations for each job and the timing of each operation to minimize the overall completion time while ensuring no resource conflicts." 
+ } + ] + } + ], + "index": 13 + }, + { + "type": "table", + "bbox": [ + 317, + 467, + 496, + 579 + ], + "blocks": [ + { + "bbox": [ + 342, + 251, + 533, + 264 + ], + "lines": [ + { + "bbox": [ + 342, + 251, + 533, + 264 + ], + "spans": [ + { + "bbox": [ + 342, + 251, + 533, + 264 + ], + "type": "text", + "content": "Table 24: Multidimensional knapsack problem" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 317, + 467, + 496, + 579 + ], + "lines": [ + { + "bbox": [ + 317, + 467, + 496, + 579 + ], + "spans": [ + { + "bbox": [ + 317, + 467, + 496, + 579 + ], + "type": "table", + "html": "
MethodScore
Classical Solver0.7851209868863173
BestOfN0.9017764948703829
Refine0.9930284498507208
FunSearch0.9930284498507208
AIDE0.9156437907474381
ReEvo0.9825099803205837
MCTS0.8960699709846601
EoH0.9930284498507208
", + "image_path": "c930fd160520eca9fcd102ed9b95b22f35ef4831d26d034c735305a1a74f8924.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_body" + } + ], + "index": 14 + }, + { + "bbox": [ + 372, + 586, + 503, + 599 + ], + "lines": [ + { + "bbox": [ + 372, + 586, + 503, + 599 + ], + "spans": [ + { + "bbox": [ + 372, + 586, + 503, + 599 + ], + "type": "text", + "content": "Table 25: Open shop scheduling" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 315, + 622, + 430, + 635 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 622, + 430, + 635 + ], + "spans": [ + { + "bbox": [ + 315, + 622, + 430, + 635 + ], + "type": "text", + "content": "Packing unequal circles" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 315, + 637, + 560, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 637, + 560, + 706 + ], + "spans": [ + { + "bbox": [ + 315, + 637, + 560, + 706 + ], + "type": "text", + "content": "The problem involves packing a subset of unequal circles into a fixed circular container with radius R_0 and center at the origin, where each circle i has a given radius R_i (sorted in non-decreasing order) and is associated with a binary decision variable alpha_i indicating whether it is packed. 
The goal is to maximize the number of circles packed—that is," + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 53, + 294, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 53, + 294, + 133 + ], + "spans": [ + { + "bbox": [ + 50, + 53, + 294, + 133 + ], + "type": "text", + "content": "maximize " + }, + { + "bbox": [ + 50, + 53, + 294, + 133 + ], + "type": "inline_equation", + "content": "\\sum_{i=1}^{n} \\alpha_{i}" + }, + { + "bbox": [ + 50, + 53, + 294, + 133 + ], + "type": "text", + "content": "—subject to two sets of nonlinear constraints: (1) each packed circle must lie entirely within the container, which is enforced by ensuring that the distance from its center to the container's center plus its radius does not exceed R_0; and (2) any two packed circles must not overlap, meaning the distance between their centers must be at least the sum of their radii." + } + ] + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 52, + 139, + 231, + 252 + ], + "blocks": [ + { + "bbox": [ + 52, + 139, + 231, + 252 + ], + "lines": [ + { + "bbox": [ + 52, + 139, + 231, + 252 + ], + "spans": [ + { + "bbox": [ + 52, + 139, + 231, + 252 + ], + "type": "table", + "html": "
MethodScore
Classical Solver0.9075757575757577
BestOfN0.8939393939393939
Refine0.9803030303030303
FunSearch0.9719696969696969
AIDE0.8825757575757576
ReEvo0.8825757575757576
MCTS0.9522727272727273
EoH0.8825757575757576
", + "image_path": "1088a9dabd21f9bc4a7c6245c1d52571e7ad4220d2bd766e4e5b0fc595d60be6.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 50, + 293, + 188, + 306 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 293, + 188, + 306 + ], + "spans": [ + { + "bbox": [ + 50, + 293, + 188, + 306 + ], + "type": "text", + "content": "Packing unequal circles area" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 308, + 295, + 452 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 308, + 295, + 452 + ], + "spans": [ + { + "bbox": [ + 50, + 308, + 295, + 452 + ], + "type": "text", + "content": "The problem involves packing a subset of unequal circles into a fixed circular container with radius " + }, + { + "bbox": [ + 50, + 308, + 295, + 452 + ], + "type": "inline_equation", + "content": "\\mathrm{R\\_0}" + }, + { + "bbox": [ + 50, + 308, + 295, + 452 + ], + "type": "text", + "content": " and center at the origin, where each circle " + }, + { + "bbox": [ + 50, + 308, + 295, + 452 + ], + "type": "inline_equation", + "content": "\\mathrm{i}" + }, + { + "bbox": [ + 50, + 308, + 295, + 452 + ], + "type": "text", + "content": " has a given radius " + }, + { + "bbox": [ + 50, + 308, + 295, + 452 + ], + "type": "inline_equation", + "content": "\\mathrm{R\\_i}" + }, + { + "bbox": [ + 50, + 308, + 295, + 452 + ], + "type": "text", + "content": " (sorted in non-decreasing order) and is associated with a binary decision variable alpha_i indicating whether it is packed. 
The goal is to maximize the total area of all circles packed—that is, maximize " + }, + { + "bbox": [ + 50, + 308, + 295, + 452 + ], + "type": "inline_equation", + "content": "\\sum_{i=1}^{n} \\alpha_i * p_i * R_i^2" + }, + { + "bbox": [ + 50, + 308, + 295, + 452 + ], + "type": "text", + "content": "—subject to two sets of nonlinear constraints: (1) each packed circle must lie entirely within the container, which is enforced by ensuring that the distance from its center to the container's center plus its radius does not exceed " + }, + { + "bbox": [ + 50, + 308, + 295, + 452 + ], + "type": "inline_equation", + "content": "\\mathrm{R\\_0}" + }, + { + "bbox": [ + 50, + 308, + 295, + 452 + ], + "type": "text", + "content": "; and (2) any two packed circles must not overlap, meaning the distance between their centers must be at least the sum of their radii." + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 52, + 458, + 231, + 571 + ], + "blocks": [ + { + "bbox": [ + 103, + 258, + 241, + 272 + ], + "lines": [ + { + "bbox": [ + 103, + 258, + 241, + 272 + ], + "spans": [ + { + "bbox": [ + 103, + 258, + 241, + 272 + ], + "type": "text", + "content": "Table 26: Packing unequal circles" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 52, + 458, + 231, + 571 + ], + "lines": [ + { + "bbox": [ + 52, + 458, + 231, + 571 + ], + "spans": [ + { + "bbox": [ + 52, + 458, + 231, + 571 + ], + "type": "table", + "html": "
MethodScore
Classical Solver0.8767896840297265
BestOfN0.9923476599194556
Refine1.0226692239919217
FunSearch1.0404725950195108
AIDE0.5972138868724692
ReEvo0.9101821460280035
MCTS0.9617483396206504
EoH1.0056059827170811
", + "image_path": "afd684331587462588791f64a1fc570718250bb0ea8219f611f7236ac0250ac0.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 613, + 241, + 625 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 613, + 241, + 625 + ], + "spans": [ + { + "bbox": [ + 50, + 613, + 241, + 625 + ], + "type": "text", + "content": "Packing unequal rectangles and squares" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 627, + 294, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 627, + 294, + 706 + ], + "spans": [ + { + "bbox": [ + 50, + 627, + 294, + 706 + ], + "type": "text", + "content": "We are given a set of n unequal rectangles (or squares), each with specified dimensions, and a fixed circular container of radius R centered at the origin. The problem is to decide which rectangles to pack and where to position them—by choosing binary selection variables and continuous center coordinates—so that every packed rectangle is entirely contained within the circle and no two packed rectangles overlap." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 315, + 54, + 560, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 54, + 560, + 133 + ], + "spans": [ + { + "bbox": [ + 315, + 54, + 560, + 133 + ], + "type": "text", + "content": "For each rectangle, the four corners must lie inside the circle, and if an item is not packed it is forced to a dummy position. The objective is to maximize the number of packed items, i.e., maximize " + }, + { + "bbox": [ + 315, + 54, + 560, + 133 + ], + "type": "inline_equation", + "content": "\\sum_{i=1}^{n} \\text{alpha}_i" + }, + { + "bbox": [ + 315, + 54, + 560, + 133 + ], + "type": "text", + "content": " (or a related sum when each alpha_i is binary). Note that the rotation of the rectangular (by 90 degrees) is sometimes allowed and your algorithm should take that into account." 
+ } + ] + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 317, + 140, + 496, + 254 + ], + "blocks": [ + { + "bbox": [ + 93, + 578, + 251, + 590 + ], + "lines": [ + { + "bbox": [ + 93, + 578, + 251, + 590 + ], + "spans": [ + { + "bbox": [ + 93, + 578, + 251, + 590 + ], + "type": "text", + "content": "Table 27: Packing unequal circles area" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 317, + 140, + 496, + 254 + ], + "lines": [ + { + "bbox": [ + 317, + 140, + 496, + 254 + ], + "spans": [ + { + "bbox": [ + 317, + 140, + 496, + 254 + ], + "type": "table", + "html": "
MethodScore
Classical Solver0.9134625513058007
BestOfN0.8337025039542202
Refine0.932172162950195
FunSearch0.9228828411608733
AIDE0.7950708457573447
ReEvo0.77954425754769
MCTS0.8028450160315149
EoH0.9228828411608733
", + "image_path": "16ae26d8657a16b70285bc50a759134f3021cbfb2790d4037a24eac8b541466d.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 315, + 297, + 529, + 311 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 297, + 529, + 311 + ], + "spans": [ + { + "bbox": [ + 315, + 297, + 529, + 311 + ], + "type": "text", + "content": "Packing unequal rectangles and squares area" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 315, + 312, + 560, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 312, + 560, + 447 + ], + "spans": [ + { + "bbox": [ + 315, + 312, + 560, + 447 + ], + "type": "text", + "content": "We consider the problem of selecting and placing a subset of " + }, + { + "bbox": [ + 315, + 312, + 560, + 447 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 315, + 312, + 560, + 447 + ], + "type": "text", + "content": " unequal rectangles (or squares) into a fixed-size circular container of radius " + }, + { + "bbox": [ + 315, + 312, + 560, + 447 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 315, + 312, + 560, + 447 + ], + "type": "text", + "content": " so as to maximize the total area of the packed items. 
Each item " + }, + { + "bbox": [ + 315, + 312, + 560, + 447 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 315, + 312, + 560, + 447 + ], + "type": "text", + "content": " has given dimensions " + }, + { + "bbox": [ + 315, + 312, + 560, + 447 + ], + "type": "inline_equation", + "content": "L_{i}" + }, + { + "bbox": [ + 315, + 312, + 560, + 447 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 315, + 312, + 560, + 447 + ], + "type": "inline_equation", + "content": "W_{i}" + }, + { + "bbox": [ + 315, + 312, + 560, + 447 + ], + "type": "text", + "content": " (with " + }, + { + "bbox": [ + 315, + 312, + 560, + 447 + ], + "type": "inline_equation", + "content": "L_{i} = W_{i}" + }, + { + "bbox": [ + 315, + 312, + 560, + 447 + ], + "type": "text", + "content": " for squares) and an associated area " + }, + { + "bbox": [ + 315, + 312, + 560, + 447 + ], + "type": "inline_equation", + "content": "L_{i}W_{i}" + }, + { + "bbox": [ + 315, + 312, + 560, + 447 + ], + "type": "text", + "content": ". 
The decision variables include a binary indicator " + }, + { + "bbox": [ + 315, + 312, + 560, + 447 + ], + "type": "inline_equation", + "content": "\\alpha_{i}" + }, + { + "bbox": [ + 315, + 312, + 560, + 447 + ], + "type": "text", + "content": " for whether item " + }, + { + "bbox": [ + 315, + 312, + 560, + 447 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 315, + 312, + 560, + 447 + ], + "type": "text", + "content": " is packed and continuous variables " + }, + { + "bbox": [ + 315, + 312, + 560, + 447 + ], + "type": "inline_equation", + "content": "(x_{i},y_{i})" + }, + { + "bbox": [ + 315, + 312, + 560, + 447 + ], + "type": "text", + "content": " for the placement of its center, along with a rotation angle " + }, + { + "bbox": [ + 315, + 312, + 560, + 447 + ], + "type": "inline_equation", + "content": "heta_{i}" + }, + { + "bbox": [ + 315, + 312, + 560, + 447 + ], + "type": "text", + "content": " when " + }, + { + "bbox": [ + 315, + 312, + 560, + 447 + ], + "type": "inline_equation", + "content": "90^{\\circ}" + }, + { + "bbox": [ + 315, + 312, + 560, + 447 + ], + "type": "text", + "content": " rotations are allowed. The formulation enforces that for every packed item, all four of its rotated corners must lie within the circle, and that no two packed items overlap; if an item is not packed, it is fixed at a dummy position." 
+ } + ] + } + ], + "index": 13 + }, + { + "type": "table", + "bbox": [ + 317, + 456, + 496, + 569 + ], + "blocks": [ + { + "bbox": [ + 336, + 260, + 538, + 273 + ], + "lines": [ + { + "bbox": [ + 336, + 260, + 538, + 273 + ], + "spans": [ + { + "bbox": [ + 336, + 260, + 538, + 273 + ], + "type": "text", + "content": "Table 28: Packing unequal rectangles and squares" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 317, + 456, + 496, + 569 + ], + "lines": [ + { + "bbox": [ + 317, + 456, + 496, + 569 + ], + "spans": [ + { + "bbox": [ + 317, + 456, + 496, + 569 + ], + "type": "table", + "html": "
MethodScore
Classical Solver0.8893527400499813
BestOfN0.9536806816195774
Refine1.0513451711752306
FunSearch1.0839011538182066
AIDE0.8100272732450019
ReEvo0.9435059488868657
MCTS0.995946490673633
EoH0.9566331174271511
", + "image_path": "9bad5f71e7bc3d655bcfadd2bb30a745c49b42f1ff44dc5f582d7ab3bc36ef68.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_body" + } + ], + "index": 14 + }, + { + "bbox": [ + 326, + 575, + 547, + 588 + ], + "lines": [ + { + "bbox": [ + 326, + 575, + 547, + 588 + ], + "spans": [ + { + "bbox": [ + 326, + 575, + 547, + 588 + ], + "type": "text", + "content": "Table 29: Packing unequal rectangles and squares area" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 315, + 611, + 484, + 624 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 611, + 484, + 624 + ], + "spans": [ + { + "bbox": [ + 315, + 611, + 484, + 624 + ], + "type": "text", + "content": "Resource constrained shortest path" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 315, + 626, + 559, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 626, + 559, + 706 + ], + "spans": [ + { + "bbox": [ + 315, + 626, + 559, + 706 + ], + "type": "text", + "content": "This problem involves finding the shortest path from vertex 1 to vertex " + }, + { + "bbox": [ + 315, + 626, + 559, + 706 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 315, + 626, + 559, + 706 + ], + "type": "text", + "content": " in a directed graph while satisfying resource constraints. Specifically, each vertex and arc has associated resource consumptions, and the cumulative consumption for each resource must fall within the provided lowerBounds and upperBounds. 
The input includes the number of vertices (n), arcs (m), resource types (K), resource consumption at" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 54, + 293, + 142 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 54, + 293, + 142 + ], + "spans": [ + { + "bbox": [ + 53, + 54, + 293, + 142 + ], + "type": "text", + "content": "each vertex, and a graph represented as a mapping from vertices to lists of arcs (each arc being a tuple of end vertex, cost, and arc resource consumptions). The optimization objective is to minimize the total arc cost of the path, with the condition that the path is valid—meaning it starts at vertex 1, ends at vertex " + }, + { + "bbox": [ + 53, + 54, + 293, + 142 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 53, + 54, + 293, + 142 + ], + "type": "text", + "content": ", follows defined transitions in the graph, and respects all resource bounds; if any of these constraints are not met, the solution receives no score." + } + ] + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 52, + 153, + 229, + 265 + ], + "blocks": [ + { + "bbox": [ + 52, + 153, + 229, + 265 + ], + "lines": [ + { + "bbox": [ + 52, + 153, + 229, + 265 + ], + "spans": [ + { + "bbox": [ + 52, + 153, + 229, + 265 + ], + "type": "table", + "html": "
MethodScore
Classical Solver0.7508899529136809
BestOfN0.7508899529136808
Refine0.7284494767232047
FunSearch0.7508899529136808
AIDE0.7508899529136808
ReEvo0.7508899529136808
MCTS0.7284494767232047
EoH0.7508899529136808
", + "image_path": "95920fac789b7820ecb730f44d27964ef594dfc7be07bcbcd6188d00f543279b.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 316, + 111, + 329 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 316, + 111, + 329 + ], + "spans": [ + { + "bbox": [ + 53, + 316, + 111, + 329 + ], + "type": "text", + "content": "Set covering" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 335, + 293, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 335, + 293, + 411 + ], + "spans": [ + { + "bbox": [ + 53, + 335, + 293, + 411 + ], + "type": "text", + "content": "Set Covering Problem. The goal is to select a subset of columns, each with an associated cost, such that every row is covered by at least one chosen column. For each row, the available covering columns are provided (as 1-indexed numbers). The objective is to minimize the total cost of the selected columns, and if even one row is left uncovered, then no score is awarded." + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 52, + 423, + 229, + 535 + ], + "blocks": [ + { + "bbox": [ + 83, + 274, + 261, + 285 + ], + "lines": [ + { + "bbox": [ + 83, + 274, + 261, + 285 + ], + "spans": [ + { + "bbox": [ + 83, + 274, + 261, + 285 + ], + "type": "text", + "content": "Table 30: Resource constrained shortest path" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 52, + 423, + 229, + 535 + ], + "lines": [ + { + "bbox": [ + 52, + 423, + 229, + 535 + ], + "spans": [ + { + "bbox": [ + 52, + 423, + 229, + 535 + ], + "type": "table", + "html": "
MethodScore
Classical Solver0.8883906244045974
BestOfN0.8213286754887226
Refine0.9056204467263304
FunSearch0.8887733963981322
AIDE0.8639998129016312
ReEvo0.9360686599803572
MCTS0.8672991644233662
EoH0.8843920544743958
", + "image_path": "c7678567bee486687e7a3bfc0696818b6449fe61f6ca4dfa433cb8b9cc2d7d41.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 586, + 127, + 599 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 586, + 127, + 599 + ], + "spans": [ + { + "bbox": [ + 53, + 586, + 127, + 599 + ], + "type": "text", + "content": "Set partitioning" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 604, + 293, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 604, + 293, + 704 + ], + "spans": [ + { + "bbox": [ + 53, + 604, + 293, + 704 + ], + "type": "text", + "content": "This problem involves solving a set partitioning instance where the goal is to choose a subset of columns such that each row is covered exactly once while minimizing the total cost. Each column is associated with a cost and covers a specific set of rows. The optimization problem is defined by selecting columns from a given set so that every row is covered precisely once, and the sum of the selected columns' costs is minimized. If the solution fails to cover every row exactly once, then no score is awarded." + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 318, + 53, + 494, + 163 + ], + "blocks": [ + { + "bbox": [ + 127, + 544, + 217, + 555 + ], + "lines": [ + { + "bbox": [ + 127, + 544, + 217, + 555 + ], + "spans": [ + { + "bbox": [ + 127, + 544, + 217, + 555 + ], + "type": "text", + "content": "Table 31: Set covering" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 318, + 53, + 494, + 163 + ], + "lines": [ + { + "bbox": [ + 318, + 53, + 494, + 163 + ], + "spans": [ + { + "bbox": [ + 318, + 53, + 494, + 163 + ], + "type": "table", + "html": "
MethodScore
Classical Solver0.9996401983661346
BestOfN0.8991338255841825
Refine0.7999991398515384
FunSearch0.83333333333333334
AIDE0.9
ReEvo0.8991338255841825
MCTS0.8647769492523454
EoH0.9324671589175159
", + "image_path": "4f321ae8f77c6ec6e4c0c37679e475311aa04ba6f9ffae5b1424d65d1be924bb.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 318, + 203, + 339, + 214 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 203, + 339, + 214 + ], + "spans": [ + { + "bbox": [ + 318, + 203, + 339, + 214 + ], + "type": "text", + "content": "TSP" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 318, + 217, + 559, + 317 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 217, + 559, + 317 + ], + "spans": [ + { + "bbox": [ + 318, + 217, + 559, + 317 + ], + "type": "text", + "content": "The Traveling Salesman Problem (TSP) is a classic combinatorial optimization problem where, given a set of cities with known pairwise distances, the objective is to find the shortest possible tour that visits each city exactly once and returns to the starting city. More formally, given a complete graph " + }, + { + "bbox": [ + 318, + 217, + 559, + 317 + ], + "type": "inline_equation", + "content": "\\mathrm{G} = (\\mathrm{V},\\mathrm{E})" + }, + { + "bbox": [ + 318, + 217, + 559, + 317 + ], + "type": "text", + "content": " with vertices " + }, + { + "bbox": [ + 318, + 217, + 559, + 317 + ], + "type": "inline_equation", + "content": "\\mathrm{V}" + }, + { + "bbox": [ + 318, + 217, + 559, + 317 + ], + "type": "text", + "content": " representing cities and edges " + }, + { + "bbox": [ + 318, + 217, + 559, + 317 + ], + "type": "inline_equation", + "content": "\\mathrm{E}" + }, + { + "bbox": [ + 318, + 217, + 559, + 317 + ], + "type": "text", + "content": " with weights representing distances, we seek to find a Hamiltonian cycle (a closed path visiting each vertex exactly once) of minimum total weight." 
+ } + ] + } + ], + "index": 12 + }, + { + "type": "table", + "bbox": [ + 318, + 327, + 494, + 437 + ], + "blocks": [ + { + "bbox": [ + 387, + 172, + 488, + 183 + ], + "lines": [ + { + "bbox": [ + 387, + 172, + 488, + 183 + ], + "spans": [ + { + "bbox": [ + 387, + 172, + 488, + 183 + ], + "type": "text", + "content": "Table 32: Set partitioning" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 318, + 327, + 494, + 437 + ], + "lines": [ + { + "bbox": [ + 318, + 327, + 494, + 437 + ], + "spans": [ + { + "bbox": [ + 318, + 327, + 494, + 437 + ], + "type": "table", + "html": "
MethodScore
Classical Solver0.986
BestOfN0.8590303340408165
Refine0.9399577646813952
FunSearch0.9016741050908584
AIDE0.7710495444635409
ReEvo0.8488918718349553
MCTS0.5961113158302597
EoH0.7935463156320405
", + "image_path": "d4d0b3bfe3fa404ab607b2ba34a1b69a65b540411a4cc6457131ef71821b803f.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 409, + 447, + 466, + 456 + ], + "lines": [ + { + "bbox": [ + 409, + 447, + 466, + 456 + ], + "spans": [ + { + "bbox": [ + 409, + 447, + 466, + 456 + ], + "type": "text", + "content": "Table 33: TSP" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 318, + 479, + 478, + 491 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 479, + 478, + 491 + ], + "spans": [ + { + "bbox": [ + 318, + 479, + 478, + 491 + ], + "type": "text", + "content": "Uncapacitated warehouse location" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 318, + 494, + 559, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 494, + 559, + 635 + ], + "spans": [ + { + "bbox": [ + 318, + 494, + 559, + 635 + ], + "type": "text", + "content": "The Uncapacitated Warehouse Location Problem aims to determine which warehouses to open and how to assign each customer entirely to an open warehouse in order to minimize the total cost. Given a set of potential warehouse locations, each with a fixed opening cost, and a set of customers, each with an associated assignment cost for being served by each warehouse, the objective is to select a subset of warehouses to open and assign every customer completely to one of these open warehouses. The optimization minimizes the sum of fixed warehouse opening costs and the customer assignment costs. Each customer must be assigned to exactly one warehouse; if any customer is left unassigned or assigned to more than one warehouse, the solution is considered infeasible." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 318, + 645, + 469, + 658 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 645, + 469, + 658 + ], + "spans": [ + { + "bbox": [ + 318, + 645, + 469, + 658 + ], + "type": "text", + "content": "Unconstrained guillotine cutting" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 318, + 660, + 559, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 660, + 559, + 704 + ], + "spans": [ + { + "bbox": [ + 318, + 660, + 559, + 704 + ], + "type": "text", + "content": "The unconstrained guillotine cutting problem involves selecting and placing a subset of available pieces within a fixed stock rectangle to maximize the total value of the placed pieces. Each piece, defined by its length, width, and value," + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 52, + 52, + 229, + 163 + ], + "blocks": [ + { + "bbox": [ + 52, + 52, + 229, + 163 + ], + "lines": [ + { + "bbox": [ + 52, + 52, + 229, + 163 + ], + "spans": [ + { + "bbox": [ + 52, + 52, + 229, + 163 + ], + "type": "table", + "html": "
MethodScore
Classical Solver0.9968157833494645
BestOfN0.98931916166557
Refine1.00000000000002045
FunSearch0.9978398298062331
AIDE0.9994999857664043
ReEvo0.998083746641369
MCTS0.9951604598088827
EoH0.87499999999978142
", + "image_path": "1c6900dbfa02950750e85d772fc63df86f44ea725e9af1c1346480142cafa5dd.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 53, + 205, + 293, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 205, + 293, + 315 + ], + "spans": [ + { + "bbox": [ + 53, + 205, + 293, + 315 + ], + "type": "text", + "content": "may be optionally rotated " + }, + { + "bbox": [ + 53, + 205, + 293, + 315 + ], + "type": "inline_equation", + "content": "90^{\\circ}" + }, + { + "bbox": [ + 53, + 205, + 293, + 315 + ], + "type": "text", + "content": " if allowed and used at most once. The challenge is to determine both the selection and the positioning of these pieces such that they do not overlap and lie entirely within the stock's boundaries. This optimization problem formalizes the decision variables as the x and y coordinates for the bottom-left placement of each piece and, if rotation is allowed, a binary variable indicating its orientation, while the objective function is to maximize the sum of the values of the pieces successfully placed within the stock." + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 52, + 323, + 230, + 436 + ], + "blocks": [ + { + "bbox": [ + 84, + 173, + 260, + 182 + ], + "lines": [ + { + "bbox": [ + 84, + 173, + 260, + 182 + ], + "spans": [ + { + "bbox": [ + 84, + 173, + 260, + 182 + ], + "type": "text", + "content": "Table 34: Uncapacitated warehouse location" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 52, + 323, + 230, + 436 + ], + "lines": [ + { + "bbox": [ + 52, + 323, + 230, + 436 + ], + "spans": [ + { + "bbox": [ + 52, + 323, + 230, + 436 + ], + "type": "table", + "html": "
MethodScore
Classical Solver0.9725381370960237
BestOfN0.8701275303357732
Refine0.9618177725501762
FunSearch0.9646369625362231
AIDE0.8512970128354943
ReEvo0.9828452190272524
MCTS0.8628525304460628
EoH0.9649480933563296
", + "image_path": "a006c1e5f9cdf229c2d31b9f979e80f46279a28e35df50ba90375221cbe29900.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 481, + 198, + 493 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 481, + 198, + 493 + ], + "spans": [ + { + "bbox": [ + 53, + 481, + 198, + 493 + ], + "type": "text", + "content": "Vehicle routing: period routing" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 495, + 293, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 495, + 293, + 517 + ], + "spans": [ + { + "bbox": [ + 53, + 495, + 293, + 517 + ], + "type": "text", + "content": "The Period Vehicle Routing Problem requires planning delivery routes over a multi-day planning period." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 517, + 293, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 517, + 293, + 594 + ], + "spans": [ + { + "bbox": [ + 53, + 517, + 293, + 594 + ], + "type": "text", + "content": "Each customer (other than the depot, whose id is 0) is provided with a list of candidate service schedules. A schedule is represented by a binary vector of length equal to the period (e.g., [1, 0, 1] for a 3-day period), where a 1 in a given position indicates that the customer must be visited on that day. The decision maker must select exactly one candidate schedule for each customer." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 594, + 293, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 594, + 293, + 704 + ], + "spans": [ + { + "bbox": [ + 53, + 594, + 293, + 704 + ], + "type": "text", + "content": "For every day in the planning period, if a customer's chosen schedule indicates a delivery (i.e., a 1), then exactly one vehicle must visit that customer on that day. Otherwise, the customer should not be visited. 
The decision maker must also design, for each day, the tours for the vehicles. Each tour is a continuous route that starts at the depot (id 0) and, after visiting a subset of customers, returns to the depot. Each vehicle is only allowed to visit the depot once per day—namely, as its starting and ending point—and it is not allowed to return to the depot in the middle of a tour." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 318, + 55, + 558, + 164 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 55, + 558, + 164 + ], + "spans": [ + { + "bbox": [ + 318, + 55, + 558, + 164 + ], + "type": "text", + "content": "Moreover, each vehicle route must obey a capacity constraint: the total demand of the customers visited on that tour must not exceed the vehicle capacity each day. Although multiple vehicles are available per day (as specified by the input), not all available vehicles have to be used, but the number of tours in a given day cannot exceed the provided number of vehicles. In addition, the tours on each day must cover exactly those customers who require service per the selected schedules, and no customer may be visited more than once in a given day." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 318, + 165, + 558, + 208 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 165, + 558, + 208 + ], + "spans": [ + { + "bbox": [ + 318, + 165, + 558, + 208 + ], + "type": "text", + "content": "The objective is to choose a schedule for every customer and plan the daily tours so as to minimize the overall distance traveled by all vehicles during the entire planning period. Distances are measured using Euclidean distance." 
+ } + ] + } + ], + "index": 10 + }, + { + "type": "table", + "bbox": [ + 318, + 217, + 500, + 330 + ], + "blocks": [ + { + "bbox": [ + 88, + 445, + 257, + 455 + ], + "lines": [ + { + "bbox": [ + 88, + 445, + 257, + 455 + ], + "spans": [ + { + "bbox": [ + 88, + 445, + 257, + 455 + ], + "type": "text", + "content": "Table 35: Unconstrained guillotine cutting" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 318, + 217, + 500, + 330 + ], + "lines": [ + { + "bbox": [ + 318, + 217, + 500, + 330 + ], + "spans": [ + { + "bbox": [ + 318, + 217, + 500, + 330 + ], + "type": "table", + "html": "
MethodScore
Classical Solver0.12437943290991642
BestOfN0.42032326191804853
Refine0.48371172427664344
FunSearch0.32385035648314586
AIDE0.5362363612554435
ReEvo0.0
MCTS0.0
EoH0.0
", + "image_path": "8f2560e60912e886f7dbdab354daaf81fa6b15cb2ada6a9420535898615ee0e0.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 356, + 338, + 519, + 349 + ], + "lines": [ + { + "bbox": [ + 356, + 338, + 519, + 349 + ], + "spans": [ + { + "bbox": [ + 356, + 338, + 519, + 349 + ], + "type": "text", + "content": "Table 36: Vehicle routing: period routing" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 318, + 371, + 425, + 383 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 371, + 425, + 383 + ], + "spans": [ + { + "bbox": [ + 318, + 371, + 425, + 383 + ], + "type": "text", + "content": "p-median - capacitated" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 318, + 384, + 558, + 549 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 384, + 558, + 549 + ], + "spans": [ + { + "bbox": [ + 318, + 384, + 558, + 549 + ], + "type": "text", + "content": "The Capacitated P-Median Problem is a facility location optimization problem where the objective is to select exactly " + }, + { + "bbox": [ + 318, + 384, + 558, + 549 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 318, + 384, + 558, + 549 + ], + "type": "text", + "content": " customers as medians (facility locations) and assign each customer to one of these medians to minimize the total cost, defined as the sum of the Euclidean distances (rounded down to the nearest integer) between customers and their assigned medians. 
Each median has a capacity constraint " + }, + { + "bbox": [ + 318, + 384, + 558, + 549 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 318, + 384, + 558, + 549 + ], + "type": "text", + "content": ", meaning the total demand of the customers assigned to it cannot exceed " + }, + { + "bbox": [ + 318, + 384, + 558, + 549 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 318, + 384, + 558, + 549 + ], + "type": "text", + "content": ". A feasible solution must respect this capacity constraint for all medians; otherwise, it receives a score of zero. The solution is evaluated by the ratio extscore = rac extbestknown extcomputed_total_cost, where computed_total_cost is the total assignment cost if all constraints are satisfied; otherwise, the score is zero. The output consists of the total cost (if feasible), the selected medians, and the customer assignments." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 318, + 570, + 437, + 582 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 570, + 437, + 582 + ], + "spans": [ + { + "bbox": [ + 318, + 570, + 437, + 582 + ], + "type": "text", + "content": "p-median - uncapacitated" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 318, + 583, + 558, + 703 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 583, + 558, + 703 + ], + "spans": [ + { + "bbox": [ + 318, + 583, + 558, + 703 + ], + "type": "text", + "content": "The uncapacitated p-median problem is a combinatorial optimization problem defined on a given graph " + }, + { + "bbox": [ + 318, + 583, + 558, + 703 + ], + "type": "inline_equation", + "content": "\\mathrm{G} = (\\mathrm{V},\\mathrm{E})" + }, + { + "bbox": [ + 318, + 583, + 558, + 703 + ], + "type": "text", + "content": " with n vertices and m edges. The objective is to select p medians (facility locations) from the set of vertices such that the total assignment cost is minimized. 
The assignment cost is computed as the sum of the shortest distances from each vertex to its nearest selected median, where distances are given by a precomputed complete cost matrix (obtained via Floyd's algorithm). Formally, given the cost matrix " + }, + { + "bbox": [ + 318, + 583, + 558, + 703 + ], + "type": "inline_equation", + "content": "D\\in \\mathbb{R}^{n\\times n}" + }, + { + "bbox": [ + 318, + 583, + 558, + 703 + ], + "type": "text", + "content": ", the optimization problem seeks to find a subset " + }, + { + "bbox": [ + 318, + 583, + 558, + 703 + ], + "type": "inline_equation", + "content": "S\\subseteq Vwith|S| = p" + }, + { + "bbox": [ + 318, + 583, + 558, + 703 + ], + "type": "text", + "content": " that minimizes the function:" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 52, + 53, + 229, + 163 + ], + "blocks": [ + { + "bbox": [ + 52, + 53, + 229, + 163 + ], + "lines": [ + { + "bbox": [ + 52, + 53, + 229, + 163 + ], + "spans": [ + { + "bbox": [ + 52, + 53, + 229, + 163 + ], + "type": "table", + "html": "
MethodScore
Classical Solver0.8996179560649475
BestOfN0.9892886172082498
Refine0.9737771618997864
FunSearch0.9748437166838722
AIDE0.7442228395960961
ReEvo0.9786585768154689
MCTS0.9829650705934849
EoH0.9853458094532425
", + "image_path": "e211d9279db57498735684778c25b26ba91157f8392851900afa6babc3a600a8.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 63, + 205, + 155, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 205, + 155, + 217 + ], + "spans": [ + { + "bbox": [ + 63, + 205, + 155, + 217 + ], + "type": "inline_equation", + "content": "\\sum_{v\\in V}\\min_{s\\in S}D(v,s)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 217, + 292, + 249 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 217, + 292, + 249 + ], + "spans": [ + { + "bbox": [ + 52, + 217, + 292, + 249 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 52, + 217, + 292, + 249 + ], + "type": "inline_equation", + "content": "\\mathrm{D}(\\mathrm{v},\\mathrm{s})" + }, + { + "bbox": [ + 52, + 217, + 292, + 249 + ], + "type": "text", + "content": " is the shortest-path distance between vertex v and median s. The solution consists of a list of exactly p distinct vertices representing the chosen medians." + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 52, + 259, + 229, + 371 + ], + "blocks": [ + { + "bbox": [ + 107, + 173, + 238, + 183 + ], + "lines": [ + { + "bbox": [ + 107, + 173, + 238, + 183 + ], + "spans": [ + { + "bbox": [ + 107, + 173, + 238, + 183 + ], + "type": "text", + "content": "Table 37: p-median - capacitated" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 52, + 259, + 229, + 371 + ], + "lines": [ + { + "bbox": [ + 52, + 259, + 229, + 371 + ], + "spans": [ + { + "bbox": [ + 52, + 259, + 229, + 371 + ], + "type": "table", + "html": "
MethodScore
Classical Solver0.9952341868141825
BestOfN0.9453613019698086
Refine0.9982141349797949
FunSearch0.9996783954983718
AIDE0.9847816841274486
ReEvo0.9983315585722753
MCTS0.9605290267584901
EoH0.9921177098573016
", + "image_path": "c84d31b4f0c52d71f3595153fe6a7cd0caf6c2bafccd0acab9a1f99cfdf1decd.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 102, + 380, + 243, + 391 + ], + "lines": [ + { + "bbox": [ + 102, + 380, + 243, + 391 + ], + "spans": [ + { + "bbox": [ + 102, + 380, + 243, + 391 + ], + "type": "text", + "content": "Table 38: p-median - uncapacitated" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_04xxx/2504.04315/bc01e0c9-6b51-4b3b-9b99-9ed47940a83c_content_list.json b/data/2025/2504_04xxx/2504.04315/bc01e0c9-6b51-4b3b-9b99-9ed47940a83c_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..aba50fbf4386e02da0729b7e2ce1a9178f3c3eb8 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/bc01e0c9-6b51-4b3b-9b99-9ed47940a83c_content_list.json @@ -0,0 +1,2250 @@ +[ + { + "type": "text", + "text": "Neural Parametric Mixtures for Path Guiding", + "text_level": 1, + "bbox": [ + 80, + 95, + 609, + 119 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "HONGHAO DONG, Peking University, China \nGUOPING WANG, Peking University, China \nSHENG LI*, Peking University, China", + "bbox": [ + 78, + 130, + 419, + 181 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Previous path guiding techniques typically rely on spatial subdivision structures to approximate directional target distributions, which may cause failure to capture spatio-directional correlations and introduce parallax issue. In this paper, we present Neural Parametric Mixtures (NPM), a neural formulation to encode target distributions for path guiding algorithms. 
We propose to use a continuous and compact neural implicit representation for encoding parametric models while decoding them via lightweight neural networks. We then derive a gradient-based optimization strategy to directly train the parameters of NPM with noisy Monte Carlo radiance estimates. Our approach efficiently models the target distribution (incident radiance or the product integrand) for path guiding, and outperforms previous guiding methods by capturing the spatio-directional correlations more accurately. Moreover, our approach is more training efficient and is practical for parallelization on modern GPUs.", + "bbox": [ + 78, + 188, + 482, + 364 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "CCS Concepts: Computing methodologies $\\rightarrow$ Ray tracing; Neural networks.", + "bbox": [ + 78, + 371, + 482, + 397 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Additional Key Words and Phrases: Ray Tracing, Global Illumination, Sampling and Reconstruction, Neural Networks, Mixture Models", + "bbox": [ + 78, + 402, + 482, + 428 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ACM Reference Format:", + "text_level": 1, + "bbox": [ + 78, + 434, + 227, + 446 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Honghao Dong, Guoping Wang, and Sheng Li. 2025. Neural Parametric Mixtures for Path Guiding. 1, 1 (April 2025), 10 pages. https://doi.org/10.1145/3588432.3591533", + "bbox": [ + 78, + 446, + 482, + 484 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 80, + 498, + 227, + 511 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The efficiency of path tracing relies heavily on the sampling strategy. To further improve its efficiency and robustness, path guiding algorithms leverage the knowledge gained during rendering to facilitate the process of light-path construction, thereby reducing noise. 
To acquire better importance sampling distribution, local path guiding techniques employ previous radiance estimates to learn an approximation of spatial incident radiance fields, which are then used to guide the construction of paths. In practice, current methods typically use some representation (e.g., Gaussian mixtures [Herholz et al. 2016; Vorba et al. 2014], quadtrees [Müller et al. 2017]) to approximate the directional distribution of incident radiance. A spatial subdivision structure (e.g., kd-tree [Dodik et al. 2022], or octree [Bus", + "bbox": [ + 78, + 516, + 482, + 683 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "*Corresponding author.", + "bbox": [ + 78, + 694, + 192, + 705 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Project website: https://neuropara.github.io.", + "bbox": [ + 78, + 705, + 287, + 715 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Authors' addresses: Honghao Dong, Peking University, Beijing, China, cuteday@pku.edu.cn; Guoping Wang, Peking University, Beijing, China, wgp@pku.edu.cn; Sheng Li, Peking University, Beijing, China, lisheng@pku.edu.cn.", + "bbox": [ + 78, + 728, + 482, + 760 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than ACM must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. 
Request permissions from permissions@acm.org.", + "bbox": [ + 78, + 771, + 482, + 844 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "© 2025 Association for Computing Machinery.", + "bbox": [ + 78, + 844, + 299, + 854 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "XXXX-XXXX/2025/4-ART $15.00", + "bbox": [ + 78, + 854, + 238, + 863 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "https://doi.org/10.1145/3588432.3591533", + "bbox": [ + 78, + 864, + 269, + 875 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "and Boubekeur 2017]) is then used to store these distributions, thus accounting for the spatial variations.", + "bbox": [ + 513, + 186, + 915, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "However, several key deficiencies remain in their paradigm. Most methods learn the marginalized incident radiance distribution within each subdivided spatial region. This fails to capture the spatiodirectional correlations within the spatial discretizations, and could cause artifacts (e.g., parallax error, Fig 1(a)). Moreover, their spatial subdivision structures are subject to frequent reconstruction for finer-grained spatial resolution, which needs extra overhead and require a long training time to converge. Meanwhile, it is challenging to efficiently fit these specific directional distributions from noisy samples, especially in an online manner [Ruppert et al. 2020].", + "bbox": [ + 511, + 215, + 916, + 354 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "While an adaptive and robust spatial representation is difficult to achieve with manually designed subdivision schemes, we saw the recent success of neural implicit representation in compactly modeling spatially varying functions with fine-grained and high-frequency details [Mildenhall et al. 2020]. 
In this work, we exploit the great expressiveness of neural implicit representation while preserving the desirable properties of parametric mixture models (e.g. efficient importance sampling) for path guiding algorithms. We thereby present Neural Parametric Mixtures (NPM), which use a continuous and compact implicit representation to encode spatio-directional target distributions, and decode them into PMMs with lightweight neural networks for fast importance sampling. We show that our NPM representation, without explicit spatial subdivision schemes, can be efficiently trained simply using gradient-based optimization techniques. Specifically, our method has advantages in the following aspects:", + "bbox": [ + 511, + 354, + 916, + 575 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "First, our continuous implicit representation of spatial radiance fields naturally captures the correlations between spatial positions and directional target distributions. By smoothly interpolating and decoding the implicit representations with neural networks, our method inherently avoids the issues due to spatial discretization, thus resulting in higher performance.", + "bbox": [ + 511, + 575, + 916, + 657 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Second, our compact representation avoids the extra overhead and long training time caused by the iterative reconstruction strategies applied to the explicit spatial subdivision structures. Combined with our simple optimization based on stochastic gradient descent, our method outperforms other guiding methods even with fewer training samples. In addition, our method is practical and performant for parallelization on GPU.", + "bbox": [ + 511, + 657, + 916, + 755 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Lastly, our method can learn the product distribution (i.e., multiplied by the BSDF and the cosine term). 
This further reduces the noise with a modest computational overhead while not requiring the extra effort of previous solutions (e.g., fitting each BSDF with pre-computed parametric models).", + "bbox": [ + 511, + 755, + 916, + 825 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.04315v1 [cs.GR] 6 Apr 2025", + "bbox": [ + 22, + 268, + 60, + 700 + ], + "page_idx": 0 + }, + { + "type": "footer", + "text": "Vol. 1, No. 1, Article. Publication date: April 2025.", + "bbox": [ + 671, + 893, + 915, + 905 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "2 RELATED WORK", + "text_level": 1, + "bbox": [ + 80, + 99, + 225, + 112 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Path Guiding. To achieve better sampling strategies, local path guiding techniques leverage previous radiance estimates (either online or during a pre-computation process) to build an approximation of the incident radiance fields, which is used to guide subsequent sampling. Early approaches used simple bases such as histograms for importance sampling, e.g. built from a photon map [Jensen 1995] or collected radiance estimates with 5-D tree structures [Lafortune and Willems 1995]. Subsequent work has developed various techniques to construct the guiding distribution, e.g., Gaussian mixtures [Vorba et al. 2014], quad-trees [Müller et al. 2017], which is often stored in spatial data structures (e.g., kd-tree and octree) to account for spatial variations of the distributions.", + "bbox": [ + 78, + 117, + 482, + 282 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Deep learning techniques have also been explored recently, achieving improvements while often with less practical performance. For example, convolutional networks could be used to reconstruct the learned noisy radiance field [Huo et al. 2020; Zhu et al. 2021]. Specifically designed neural networks could also model complex manifolds [Dinh et al. 
2017], while allowing samples to be drawn directly from the learned distribution [Müller et al. 2019]. However, the prohibitive computational cost prevents its practical application [Müller et al. 2019; Vorba et al. 2019]. Instead of directly importance sampling using neural networks, we encode the target distribution into implicit neural representation, and use only lightweight MLPs to decode it into parametric mixtures for efficient sampling. We show that our method can be efficiently trained (< 10s per scene on a single GPU) while being sufficiently robust and practical.", + "bbox": [ + 78, + 284, + 482, + 478 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Parametric Mixture Models. Parametric mixture models (PMMs) are convex combinations of parametric distributions, and are often used to approximate directional distributions in graphics applications. They have many desirable properties, e.g., fast sampling, and closed-form solutions for products, convolutions and integrals. Several types of PMMs (e.g., Gaussian mixtures [Dodik et al. 2022; Vorba et al. 2014] and von Mises-Fisher mixtures [Ruppert et al. 2020]) are widely used in the recently developed path guiding algorithms. Several recent works also use PMMs to fit BSDFs with precomputation [Herholz et al. 2016; Ruppert et al. 2020], and multiply them with the learned incident radiance to achieve product sampling.", + "bbox": [ + 78, + 489, + 482, + 642 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Parametric models can also be predicted by neural networks, enabling new possibilities for e.g. lighting [Currius et al. 2020] and reconstruction [Yu et al. 2021] tasks. In this work, we use neural representations to encode parametric mixtures for efficient sampling. Our method is also naturally extensible to product sampling.", + "bbox": [ + 78, + 642, + 482, + 712 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Implicit Neural Representation. 
Following the success of using neural networks to represent 3D scenes implicitly [Mildenhall et al. 2020], the concept of neural representation has been popularized and applied to various tasks. They use sparse input images to optimize the spatial radiance fields via a differentiable volume rendering procedure, thus enabling novel view synthesis. Inspired by its recent successful applications [Diolatzis et al. 2022; Müller et al. 2022], we exploit a continuous and compact implicit neural representation to encode the spatio-directional target distributions for path guiding algorithms. While the ground truth target distribution (i.e., the incident radiance or product distribution) is unknown, our NPM", + "bbox": [ + 78, + 723, + 482, + 876 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "representation can be optimized online using minibatch stochastic gradient descent (SGD), where the gradients for training are estimated by Monte Carlo integration using noisy radiance estimates.", + "bbox": [ + 513, + 99, + 916, + 142 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3 PRELIMINARY", + "text_level": 1, + "bbox": [ + 514, + 152, + 645, + 166 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Monte Carlo Integration. 
Light transport algorithms are generally based on the rendering equation [Kajiya 1986]:", + "bbox": [ + 513, + 171, + 916, + 200 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\nL _ {0} (\\mathbf {x}, \\omega_ {0}) = L _ {\\mathrm {e}} (\\mathbf {x}, \\omega_ {0}) + \\int_ {\\Omega} f _ {\\mathrm {s}} (\\mathbf {x}, \\omega_ {0}, \\omega_ {\\mathrm {i}}) L _ {\\mathrm {i}} (\\mathbf {x}, \\omega_ {\\mathrm {i}}) | \\cos \\theta_ {\\mathrm {i}} | d \\omega_ {\\mathrm {i}}, \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 521, + 204, + 916, + 233 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "which defines the relationship between the outgoing radiance $L_{\\mathrm{o}}$ , emitted radiance $L_{e}$ , and the integrated incident radiance $L_{\\mathrm{i}}$ , at shading point $\\mathbf{x}$ . Monte Carlo integration is used to obtain an estimate of the reflection integral $L_{r}$ using an average of $N$ samples. In the case where $N = 1$ :", + "bbox": [ + 513, + 234, + 916, + 304 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\langle L _ {\\mathrm {r}} \\left(\\mathbf {x}, \\omega_ {\\mathrm {o}}\\right) \\right\\rangle = \\frac {f _ {\\mathrm {s}} \\left(\\mathbf {x} , \\omega_ {\\mathrm {o}} , \\omega_ {\\mathrm {i}}\\right) L _ {\\mathrm {i}} \\left(\\mathbf {x} , \\omega_ {\\mathrm {i}}\\right) \\left| \\cos \\theta_ {\\mathrm {i}} \\right|}{p \\left(\\omega_ {\\mathrm {i}} \\mid \\mathbf {x} , \\omega_ {\\mathrm {o}}\\right)}, \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 580, + 306, + 916, + 339 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "where $\\langle L_{\\mathrm{r}}(\\mathbf{x},\\omega_0)\\rangle$ is an unbiased estimate of the outgoing radiance $L_{\\mathrm{r}}(\\mathbf{x},\\omega_0)$ , and $\\omega_{i}$ is the incident direction sampled with some directional probability distribution $p(\\omega_{\\mathrm{i}}\\mid \\mathbf{x},\\omega_{\\mathrm{o}})$ . 
The variance of this estimator $V[\\langle L_{\\mathrm{r}}\\rangle ]$ can be reduced if the sampling distribution resembles the shape of the integrand, and could even reach zero variance if being proportional to it (i.e., $p\\propto f_s\\cdot L_i\\cos \\theta_i$ ). This, however, is difficult to achieve with only BSDF importance sampling, leaving the remaining part of the integrand (i.e., the incident radiance) unknown, resulting in a relatively high variance of the MC estimator. Path guiding algorithms, on the other hand, manage to obtain better importance sampling strategies often by using previous radiance samples to approximate the incident radiance $L_{\\mathrm{i}}$ or the full integrand $f_{s}\\cdot L_{\\mathrm{i}}\\cos \\theta_{i}$ , which will be discussed later.", + "bbox": [ + 513, + 344, + 916, + 525 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Von Mises-Fisher Mixtures. We use the von Mises-Fisher (vMF) distribution as the basis of NPM. The vMF distribution is defined as:", + "bbox": [ + 513, + 531, + 916, + 559 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\nv (\\omega \\mid \\mu , \\kappa) = \\frac {\\kappa}{4 \\pi \\sinh \\kappa} \\exp \\left(\\kappa \\mu^ {T} \\omega\\right), \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 604, + 561, + 916, + 585 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "where $\\mu \\in \\mathbb{S}^2$ and $\\kappa \\in [0, +\\infty)$ defines the direction and precision (sharpness) of the vMF distribution. 
The vMF mixture model (VMM) is thus a convex combination of $K$ vMF components/lobes:", + "bbox": [ + 513, + 590, + 916, + 633 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {V} (\\omega \\mid \\Theta) = \\sum_ {i = 1} ^ {K} \\lambda_ {i} \\cdot v \\left(\\omega \\mid \\mu_ {i}, \\kappa_ {i}\\right), \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 612, + 638, + 916, + 675 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "where $\\Theta$ contains the parameters $(\\mu_i,\\kappa_i)$ and weights $(\\lambda_{i})$ of each vMF component. The vMF mixtures have many desirable properties, e.g., fewer parameters (4 floats per component), efficient importance sampling, and closed-form product and integration, which together constitute the reason for choosing it as the basis of NPM.", + "bbox": [ + 513, + 680, + 916, + 748 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our key is to encode the vMF mixtures with our implicit neural representation, then decode them with lightweight MLPs, and train them to effectively model the target distributions for path guiding algorithms. Other parametric basis functions (e.g., Gaussian mixtures) could be integrated into our method using a similar paradigm.", + "bbox": [ + 513, + 750, + 916, + 819 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "4 NEURAL PARAMETRIC MIXTURES", + "text_level": 1, + "bbox": [ + 514, + 829, + 784, + 843 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this section, we present our Neural Parametric Mixtures (NPM) technique for local path guiding. We first show how to encode/decode", + "bbox": [ + 513, + 847, + 919, + 876 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 81, + 69, + 91, + 78 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Honghao Dong, Guoping Wang, and Sheng Li", + "bbox": [ + 112, + 68, + 331, + 80 + ], + "page_idx": 1 + }, + { + "type": "footer", + "text": "Vol. 
1, No. 1, Article. Publication date: April 2025.", + "bbox": [ + 81, + 893, + 323, + 905 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "target distributions with NPM in a simple setup (i.e., learning incident radiance fields, Sec. 4.1), then we derive the optimization method for NPM based on minibatch stochastic gradient descent (Sec. 4.2). Finally, we show how our NPM could naturally benefit from learning the full integrand (to account for the BSDF term), as well as the other extensions for better learning target distributions (Sec. 4.3). An overview of our method is illustrated in Fig. 2.", + "bbox": [ + 78, + 99, + 483, + 198 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "4.1 Radiance-based NPM", + "text_level": 1, + "bbox": [ + 78, + 210, + 264, + 223 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In order to acquire a better importance sampling strategy, we should obtain an approximation of the incident radiance distribution using previous radiance estimates, known as the radiance-based local path guiding [Herholz et al. 2016; Rath et al. 2020]. Specifically, we want to use the vMF mixtures to be approximately proportional to the incident radiance, at a given shading position $\\mathbf{x}$ :", + "bbox": [ + 78, + 227, + 480, + 311 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {V} (\\omega_ {i} \\mid \\Theta (\\mathbf {x})) \\propto L _ {\\mathrm {i}} (\\mathbf {x}, \\omega_ {i}), \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 197, + 321, + 480, + 335 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\Theta$ is conditioned on $\\mathbf{x}$ to account for the spatial variation of the target distribution. Previous work achieves this with specific spatial subdivision strategies (e.g., kd-tree, octree). However, this spatial discretization introduces artifacts (e.g., resulting from parallax, Fig. 
1 (a)), and is subject to frequent reconstruction to converge to a fine grained spatial subdivision, as discussed in Sec. 1.", + "bbox": [ + 78, + 340, + 478, + 424 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Instead, we use an implicit neural representation to encode the target distribution compactly. This allows the spatial variation of the distribution to be continuously accounted for, thus better capturing spatio-directional correlations. Technically, given a shading position $\\mathbf{x}$ in the scene, our NPM would output the guiding distribution that approximates the target distribution (Eq. 5). The output guiding distribution is defined using a set of parameters $\\hat{\\Theta}(\\mathbf{x})$ :", + "bbox": [ + 78, + 429, + 480, + 527 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {N P M} (\\mathbf {x} \\mid \\Phi) = \\hat {\\Theta} (\\mathbf {x}), \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 215, + 534, + 480, + 549 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\Phi$ are the trainable parameters of the implicit representation, and $\\hat{\\Theta}$ are the output decoded parameters, defining a vMF mixture $\\mathcal{V}(\\omega_i\\mid \\hat{\\Theta} (\\mathbf{x}))$ that is trained to approximate $L_{i}(\\mathbf{x},\\omega_{i})$ (Eq. 5). By continuously conditioning the learned distribution $\\Theta$ on spatial positions $\\mathbf{x}$ , our method inherently avoids the above issues caused by spatial discretizations. We achieve the above mapping by using a lightweight network to decode this parametric distribution from the implicit neural representation. To make sure that we get a valid vMF mixture (i.e., $\\lambda_{i},\\kappa_{i} > 0,\\mu_{i}\\in \\mathbb{S}^{2}$ , and $\\sum_{j = 1}^{K}\\lambda_{j} = 1$ ), we must additionally regularize the raw network output with appropriate mapping functions (see Tab. 1). 
Specifically, we apply exponential activation to $\\lambda_{i}$ and $\\kappa_{i}$ . Logistic activation is applied to $\\theta_{i}$ and $\\varphi_{i}$ which form the spherical coordinates of $\\mu_{i}$ . Most importantly, we apply the softmax function to all $\\lambda s$ to ensure that the outputs model a valid PDF (i.e., satisfy $\\sum_{i = 1}^{K}\\lambda_{i} = 1$ ).", + "bbox": [ + 76, + 554, + 478, + 766 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Discussion. It is possible to implement different forms of implicit neural representation with trainable parameters $\\Phi$ . While it is straightforward to use a monolithic network to model $\\mathrm{NPM}_{\\Phi} : \\mathbf{x} \\rightarrow \\Theta$ , we find it difficult to fit the high-frequency variations of the target distribution. Thereby, we use a trainable multi-resolution spatial embedding for encoding the distributions, and additionally a lightweight neural network for decoding the parameters. This is", + "bbox": [ + 76, + 777, + 480, + 876 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/38e332d26df913a4036d6a6bb054d9f1724ec4e8b74fdf0ec84b6036b6735101.jpg", + "image_caption": [ + "Fig. 1. Parallax issue caused by spatial discretizations (a). For a subdivided volume $S(\\mathbf{x})$ in (a), the guiding distribution is marginalized with training samples scattered over the volume $S(\\mathbf{x})$ , and is shared by different positions (e.g., $\\mathbf{x}_1$ and $\\mathbf{x}_2$ ). Our method will not suffer from parallax due to NPM implicitly representing a monolithic function, continuously mapping from spatial positions to parametric guiding distributions, as shown in (b)." 
+ ], + "image_footnote": [], + "bbox": [ + 517, + 95, + 915, + 224 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "crucial for our method to achieve better modeling capacity while remaining performant, as will be discussed later.", + "bbox": [ + 513, + 343, + 915, + 371 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "4.2 Optimizing NPM", + "text_level": 1, + "bbox": [ + 514, + 385, + 669, + 398 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We show how to optimize the divergence between the decoded distribution $\\hat{\\Theta}(\\mathbf{x})$ and the target distribution using minibatch stochastic gradient descent. To achieve this, the gradients of a training objective (or loss function) with respect to the network parameters are necessary. However, it is non-trivial to define such a loss function, given the ground truth output parameters $\\Theta_{\\mathrm{gt}}(\\mathbf{x})$ are unknown. Previous works typically use design optimization algorithms (e.g., expectation-maximization) that iteratively use batches of samples to fit a given set of parameters $\\Theta$ , which often parameterize a marginalized distribution shared by the spatial region covering the samples [Herholz et al. 2016; Ruppert et al. 2020]. However, their methods are applied to explicitly parameterized models, and are therefore not applicable to our method, which models the implicit representation of the function $\\mathbf{NPM}_{\\Phi}: \\mathbf{x} \\rightarrow \\hat{\\Theta}$ .", + "bbox": [ + 511, + 402, + 916, + 595 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We minimize the KL divergence between the decoded vMF mixtures and the target distribution via minibatch stochastic gradient descent, where its gradients with respect to the trainable parameters are estimated using Monte Carlo integration. Other divergence metrics are also available following a similar derivation. 
Let us start by assuming that the shading position $\\mathbf{x}$ is fixed, thus omitting the dependency of $\\Theta$ on $\\mathbf{x}$ in the equations. For a given position, the KL divergence between the target distribution $\\mathcal{D}$ and our output", + "bbox": [ + 511, + 597, + 916, + 708 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/3d837b6d0b31d48d6b2f3a3a0868f883d039f51de2c3b76dc345e1b7d59a75e7.jpg", + "table_caption": [ + "Table 1. Detailed mapping functions we use to regularize network outputs, where $\\lambda^{\\prime}$ , $\\kappa^{\\prime}$ , $\\theta^{\\prime}$ , $\\varphi^{\\prime}$ denote the raw outputs, and $(\\theta, \\varphi)$ is the normalized spherical coordinate of $\\mu \\in \\mathbb{S}^2$ . Left: parameter notations and their valid ranges; middle: type of activation; right: specific mappings." + ], + "table_footnote": [], + "table_body": "
ParameterActivationMapping
κ ∈ [0,+∞)Exponentialκi = exp(κi')
λ ∈ [0,+∞)Softmaxλi = exp(λi') / ∑j=1K exp(λj')
θ, φ ∈ [0,1]Logisticθi = 1/(1 + exp(-θi'))
", + "bbox": [ + 535, + 799, + 895, + 873 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Neural Parametric Mixtures for Path Guiding", + "bbox": [ + 666, + 68, + 883, + 79 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 893, + 68, + 916, + 78 + ], + "page_idx": 2 + }, + { + "type": "footer", + "text": "Vol. 1, No. 1, Article. Publication date: April 2025.", + "bbox": [ + 671, + 893, + 915, + 905 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/012d7d72f38c99a2d6e3ef901578141684e879c1eaa6c4a2513ae3c7899fdb0e.jpg", + "image_caption": [ + "Fig. 2. High-level illustration of our Neural Parametric Mixtures (NPM). We implicitly encode the spatially varying target distributions with the multi-resolution embedding. When the distribution of a spatial location $\\mathbf{x}$ is queried, (1) the features assigned to the nearby grid points surrounding $\\mathbf{x}$ are interpolated at each level, and concatenated with other levels to obtain the spatial embedding $G(\\mathbf{x})$ . (2) the spatial embedding is then combined with other inputs to (3) feed into the lightweight MLP for (4) decoding the parameters $\\Theta$ of the vMF mixture $\\mathcal{V}(\\omega_i \\mid \\Theta)$ with $K$ components. We then (5) use this parametric distribution for importance sampling the scattering direction. The result MC radiance estimate $\\langle L_i(\\mathbf{x}, \\omega_i) \\rangle$ is used to estimate the training gradient $\\nabla_{\\Theta} D_{\\mathrm{KL}}$ (Sec. 4.2), which is then back-propagated through these differentiable stages to optimize our NPM representation (dashed lines)." 
+ ], + "image_footnote": [], + "bbox": [ + 81, + 97, + 915, + 251 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "distribution $\\mathcal{V}$ is defined as:", + "bbox": [ + 78, + 357, + 254, + 369 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nD _ {\\mathrm {K L}} (\\mathcal {D} \\| \\mathcal {V}; \\Theta) = \\int_ {\\Omega} \\mathcal {D} (\\omega) \\log \\frac {\\mathcal {D} (\\omega)}{\\mathcal {V} (\\omega | \\hat {\\Theta})} \\mathrm {d} \\omega , \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 142, + 377, + 480, + 407 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathcal{D} \\propto L_{\\mathrm{i}}$ in radiance-based path guiding. This integral could now be estimated with the Monte Carlo estimator with $N$ samples:", + "bbox": [ + 78, + 415, + 483, + 444 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nD _ {\\mathrm {K L}} (\\mathcal {D} \\| \\mathcal {V}; \\Theta) \\approx \\frac {1}{N} \\sum_ {j = 1} ^ {N} \\frac {\\mathcal {D} (\\omega_ {j})}{\\tilde {p} (\\omega_ {j} \\mid \\hat {\\Theta})} \\log \\frac {\\mathcal {D} (\\omega_ {j})}{\\mathcal {V} (\\omega_ {j} \\mid \\hat {\\Theta})}, \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 125, + 453, + 480, + 491 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\tilde{p}$ is the distribution from which the samples are drawn, which in our case is a combination of the BSDF importance sampling and guiding distribution. 
By taking its derivative with respect to $\\Theta$ , we obtain the MC estimate of the gradient $\\nabla_{\\Theta}D_{\\mathrm{KL}}(\\mathcal{D}\\| \\mathcal{V};\\Theta)$ :", + "bbox": [ + 78, + 500, + 480, + 556 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\nabla_ {\\Theta} D _ {\\mathrm {K L}} (\\mathcal {D} \\| \\mathcal {V}; \\Theta) \\approx - \\frac {1}{N} \\sum_ {j = 1} ^ {N} \\frac {\\mathcal {D} \\left(\\omega_ {j}\\right) \\nabla_ {\\Theta} \\mathcal {V} \\left(\\omega_ {j} \\mid \\hat {\\Theta}\\right)}{\\tilde {p} \\left(\\omega_ {j} \\mid \\hat {\\Theta}\\right) \\mathcal {V} \\left(\\omega_ {j} \\mid \\hat {\\Theta}\\right)}, \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 124, + 565, + 480, + 604 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where the derivatives of the vMF mixtures $\\mathcal{V}$ with respect to their parameters $\\Theta$ are straightforward. The gradients for the trainable NPM parameters $\\Phi$ could then be obtained via back propagation. Since we use the unbiased MC estimate of the training gradients, the parameters are guaranteed to converge to a local minimum.", + "bbox": [ + 78, + 612, + 480, + 680 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In practice, our training sample pairs $(\\mathbf{x},\\omega_{i})\\rightarrow L_{\\mathrm{i}}$ are distributed in different spatial positions $\\mathbf{x}$ , efficiently learning a spatially varying target distribution $\\mathcal{D}(\\mathbf{x})$ . This results in the training objective accounting for the divergence of multiple positions. The expected solution for $\\Phi$ is thus:", + "bbox": [ + 78, + 681, + 480, + 750 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\Phi^ {*} = \\underset {\\Phi} {\\arg \\min } \\mathbb {E} _ {\\mathbf {x}} \\left[ D _ {\\mathrm {K L}} \\left(\\mathcal {D} (\\mathbf {x}) \\| \\mathcal {V}; \\Theta (\\mathbf {x})\\right) \\right]. 
\\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 148, + 756, + 480, + 784 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "For our implicit spatial embedding (i.e., grids of latent features, discussed later), this results in the embedding being optimized with all (and only) its nearby samples. When using the gradient descent method, the samples with the largest gradients (i.e., the most important ones for reducing divergence) would dominate, forming a reasonable design choice for better adaptivity.", + "bbox": [ + 78, + 792, + 480, + 876 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.3 Full Integrand Learning", + "text_level": 1, + "bbox": [ + 514, + 357, + 714, + 372 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Using path guiding to sample the full integrand $f_{s} \\cdot L_{i} \\cos \\theta_{i}$ can achieve even better performance, which should incorporate the BSDF term and the cosine term into the target distribution. This is challenging since the guiding distribution is now conditioned on 5D inputs (i.e., outgoing direction $\\omega_{0}$ and spatial coordinate $\\mathbf{x}$ ). Previous works fit BSDFs with precomputed parametric models and multiply them with the learned incident radiance distribution to achieve product sampling. However, this often relies on scene-dependent precomputation, discretization over $\\omega_{0}$ , and extra computational overhead [Herholz et al. 2016; Ruppert et al. 2020].", + "bbox": [ + 511, + 375, + 916, + 513 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Our neural design can naturally handle the conditions with the extra input of $\\omega_{i}$ . This is essential since a neural network could approximate arbitrary conditional models if being expressive enough. We later show this improves performance through learning a better guiding distribution, with only modest performance overhead. 
For clarity, we denote the previous radiance-based method as NPM-radiance, and this version as NPM-product.", + "bbox": [ + 513, + 513, + 916, + 609 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Specifically, by supplementing input $\\omega_{0}$ , we reformulate the learned distribution (Eq. 6) with the outgoing directions. This enables learning the full integrated as:", + "bbox": [ + 513, + 609, + 923, + 651 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {N P M} _ {\\text {p r o d u c t}} (\\mathbf {x}, \\omega_ {\\mathrm {o}} \\mid \\Phi) = \\hat {\\Theta} (\\mathbf {x}, \\omega_ {\\mathrm {o}}), \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 609, + 657, + 916, + 674 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\hat{\\Theta}$ now parameterizes the vMF mixture $\\mathcal{V}$ that is trained to approximate the full integrand in Eq. 1, i.e.,", + "bbox": [ + 513, + 681, + 915, + 710 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\left. \\mathcal {V} \\left(\\omega_ {i} \\mid \\hat {\\Theta} (\\mathbf {x}, \\omega_ {0})\\right) \\propto f _ {\\mathrm {s}} \\left(\\mathbf {x}, \\omega_ {0}, \\omega_ {\\mathrm {i}}\\right) L _ {\\mathrm {i}} \\left(\\mathbf {x}, \\omega_ {\\mathrm {i}}\\right) \\left| \\cos \\theta_ {\\mathrm {i}} \\right|, \\right. \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 560, + 715, + 916, + 738 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where the cosine term could be approximated with a constant vMF lobe [Ruppert et al. 2020], leaving NPM to focus on the remaining part of the integral. Nonetheless, it is still challenging for neural networks to model a 2D directional distribution conditioned on 5D spatio-directional inputs. 
We further use the following simple extensions to help the network learn these spatially varying distributions:", + "bbox": [ + 511, + 742, + 916, + 827 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Auxiliary Feature Inputs. Following the practices in prior work [Hadadan et al. 2021; Müller et al. 2021], we additionally input the surface normal and roughness as auxiliary features to help", + "bbox": [ + 513, + 834, + 916, + 876 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 81, + 69, + 91, + 78 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Honghao Dong, Guoping Wang, and Sheng Li", + "bbox": [ + 112, + 68, + 331, + 80 + ], + "page_idx": 3 + }, + { + "type": "footer", + "text": "Vol. 1, No. 1, Article. Publication date: April 2025.", + "bbox": [ + 81, + 893, + 323, + 905 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "the network better correlate the target distribution with e.g., local shading frame (normal) and spatially varying BSDFs (roughness). Experimentally, we find this helps the network to better capture the spatio-directional correlations, while with a small computational overhead due to additional memory traffic.", + "bbox": [ + 78, + 99, + 482, + 170 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Input Encoding. It is challenging for a neural network to model the non-linearity between multidimensional inputs and outputs, especially when our outputs are distributions with high-frequency spatial variations. Therefore, we replace the spatial input $\\mathbf{x}$ with our trainable multi-resolution spatial embedding (discussed in Sec. 5.1). For the other inputs (e.g., outgoing direction $\\omega_{0}$ and surface normals $\\mathbf{n}(\\mathbf{x})$ ), we encode them using the spherical harmonics basis, which is previously established in NeRF [Verbin et al. 
2022].", + "bbox": [ + 78, + 178, + 482, + 289 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5 IMPLEMENTATION", + "text_level": 1, + "bbox": [ + 80, + 301, + 243, + 316 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In this section, we provide the technical details that are crucial to the performance and practicality of our NPM implementation.", + "bbox": [ + 78, + 320, + 480, + 349 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5.1 Multi-resolution Spatial Embedding", + "text_level": 1, + "bbox": [ + 80, + 361, + 359, + 376 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Our implicit NPM representation learns a continuous mapping $\\mathrm{NPM}_{\\Phi}:\\mathbf{x}\\rightarrow \\hat{\\Theta}$ (with the additional input $\\omega_{0}\\in \\mathbb{S}^{2}$ in the extended version), where $\\Theta \\in \\mathbb{R}^{4\\times K}$ defines the learned target distribution. While a straightforward solution would be using a multi-layer perceptron (MLP) as the universal function approximator to model $\\mathrm{NPM}_{\\Phi}$ , we experimentally found it difficult to capture the high-frequency spatial variations of the target distributions.", + "bbox": [ + 78, + 378, + 482, + 476 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Therefore, we use a learnable spatial embedding to implicitly encode the learned parametric mixtures. Similar approaches are found successful in recent NeRF-like applications [Müller et al. 2022; Munkberg et al. 2022]. Specifically, we define $L$ 3D uniform grids $G_{l}$ , each covering the entire scene with a spatial resolution of $D_l^3$ , where $G_{l}$ denotes the $l$ -th embedding grid. $D_{l}$ grows exponentially, resulting in multiple resolutions of the embedding. We then assign a learnable embedding (a latent feature vector $\\boldsymbol{v} \\in \\mathbb{R}^{F}$ ) to each lattice point of $G_{l}$ . 
To query the spatial embedding for $\\mathbf{x}$ , we bilinearly interpolate the features nearby $\\mathbf{x}$ for each resolution, and concatenate them to obtain the final embedding $G(\\mathbf{x})$ . More formally:", + "bbox": [ + 78, + 477, + 482, + 630 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nG (\\mathbf {x} \\mid \\Phi_ {\\mathrm {E}}) = \\underset {l = 1} {\\overset {L} {\\oplus}} \\operatorname {b i l i n e a r} \\left(\\mathbf {x}, V _ {l} [ \\mathbf {x} ]\\right), G: \\mathbb {R} ^ {3} \\rightarrow \\mathbb {R} ^ {L \\times F}, \\tag {13}\n$$\n", + "text_format": "latex", + "bbox": [ + 124, + 637, + 480, + 665 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $V_{l}[\\mathbf{x}]$ is the set of features at the eight corners of the cell enclosing $\\mathbf{x}$ within $G_{l}$ . The spatial embedding $G(\\mathbf{x})$ is then concatenated with other inputs (e.g., $\\omega_0$ and auxiliary features) to the MLP for decoding the parameters $\\Theta$ . We thus formulate the desired mapping (taking Eq. 6 for example) as a two-step procedure:", + "bbox": [ + 78, + 671, + 482, + 742 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {M L P} \\left(G (\\mathbf {x} \\mid \\Phi_ {\\mathrm {E}}) \\mid \\Phi_ {\\mathrm {M}}\\right) = \\hat {\\Theta} (\\mathbf {x}), \\tag {14}\n$$\n", + "text_format": "latex", + "bbox": [ + 186, + 750, + 480, + 771 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where the parameters of the spatial embedding $(\\Phi_{\\mathrm{E}})$ and the MLP $(\\Phi_{\\mathrm{M}})$ together constitute the trainable parameters $\\Phi$ of our implicit representation for NPM. Intuitively, a spatial embedding implicitly encodes the target distribution within a specific spatial region, while the multi-resolution design efficiently accounts for different levels of detail (LOD). 
By smoothly interpolating between the spatial embedding around positions and decoding them using neural networks,", + "bbox": [ + 78, + 777, + 482, + 876 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "we naturally account for the spatial variations of the target distribution. This also lessens the burden of using a single monolithic MLP as the implicit representation, leaving it mainly focusing on decoding it into parametric models $\\Theta$ . This significantly accelerates training/inference with a larger memory footprint.", + "bbox": [ + 513, + 99, + 916, + 170 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5.2 Online Training Scheme", + "text_level": 1, + "bbox": [ + 514, + 181, + 718, + 196 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "**Renderer Integration.** We implement our method on a custom GPU-accelerated renderer based on OptiX [Parker et al. 2010], where the training and inference procedures are integrated into a wavefront-style path tracer [Laine et al. 2013]. This design choice allows ray casting, importance sampling, and BSDF evaluation to be performed in coherent chunks over large sets of traced paths by splitting the traditional megakernel path tracer into multiple specialized kernels. This improves GPU thread utilization by reducing the control flow divergence. Most importantly, this allows us to efficiently sample and evaluate the guiding distributions at each vertex along the path in parallel, thus significantly accelerating network training/inference.", + "bbox": [ + 513, + 199, + 916, + 366 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Specifically, we place the training/inference samples into queues, where the structure-of-arrays (SoA) memory layout is applied to improve memory locality. At each ray intersection of the chunk of traced paths, the queries for guiding distributions within the queue are processed via batched network inference. 
The sampling and evaluation procedures are then performed, also using specialized kernels, before entering the next ray-cast kernel. This provides our method with maximum parallelism through large-batch training and inference, minimizing the latency caused by waiting network queries, while avoiding inefficient single-sample inference.", + "bbox": [ + 513, + 366, + 916, + 505 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Training Scheme. We use the same configuration to train each scene online during rendering, without any scene-specific fine-tuning or pre-computation. During training, we collect MC radiance estimates along each traced path, and split them into mini-batches for training. The optimization step is performed for each spp, which allows drawing samples to be drawn from the latest guiding distribution. The distribution of the samples (for both rendering and training) is thus gets refined as training proceeds. We stop the training process after a fixed fraction of the total rendering budget (either time or sample count). While we always set this to $25\\%$ in our experiments, we find our NPM technique converges quickly during training, generally reaching a local minimum after about 150spp, which amounts to about 1000 training steps/batches and 15s (including the runtimes of both training and rendering) on GPU.", + "bbox": [ + 513, + 513, + 916, + 708 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5.3 Guiding Network", + "text_level": 1, + "bbox": [ + 514, + 719, + 671, + 734 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We implement our network on the tiny-cuda-nn framework [Müller 2021] and integrate it into our renderer. The MLP we used (for both NPM-radiance and NPM-product) contains 3 linear layers of width 64. Each layer with ReLU activation, except for the last layer with our custom mapping functions (Tab. 1). We let the network output $K = 8$ vMF components, i.e., $\\Theta \\in \\mathbb{R}^{8 \\times 4}$ . 
For the multi-resolution spatial embedding, we use $L = 8$ grids with increasing resolutions for each level. The coarsest level has a resolution of $D_{1} = 8$ while the finest level has $D_{8} = 86$ . The feature of each level contains $F = 4$ floats, resulting in the final spatial embedding $G(\\mathbf{x}) \\in \\mathbb{R}^{8 \\times 4}$ . In practice,", + "bbox": [ + 513, + 737, + 916, + 876 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Neural Parametric Mixtures for Path Guiding", + "bbox": [ + 666, + 68, + 883, + 79 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 901, + 69, + 916, + 78 + ], + "page_idx": 4 + }, + { + "type": "footer", + "text": "Vol. 1, No. 1, Article. Publication date: April 2025.", + "bbox": [ + 671, + 893, + 916, + 905 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/7791fafefea912ff395714d5a429e4ef823d518892e47681632a7d70725cece0.jpg", + "image_caption": [ + "Fig. 3. Equal-sample-count (750spp) comparisons for two scenes. We show the error (for both the zoom-in areas and whole images) and time cost of different methods. The yellow plots (as well as the other figures) refer to the results obtained by unidirectional path tracing." + ], + "image_footnote": [], + "bbox": [ + 81, + 94, + 919, + 344 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "we find that the performance of the network could be improved by enlarging the capacity of the MLP or the spatial embedding, leaving this a trade-off between quality and speed.", + "bbox": [ + 78, + 402, + 480, + 444 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For training, we use a fixed learning rate of 0.005 that is large enough to acquire a fast convergence speed. Adaptive momentum techniques like Adam [Kingma and Ba 2015] are used for more robust training and better convergence. For importance sampling the decoded mixtures, we use the numerically stable strategy for vMF [Jakob 2012]. 
When inference, we also apply exponential moving average (EMA) to the weights of previous training steps, which better reduces the noise of the MC estimated gradients (Eq. 9).", + "bbox": [ + 78, + 444, + 480, + 555 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "6 RESULTS AND DISCUSSION", + "text_level": 1, + "bbox": [ + 78, + 571, + 305, + 585 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We run all the experiments on an Intel Core i9-11900 CPU and an NVIDIA RTX3070 GPU. Following the similar practices of previous works [Müller 2019; Rath et al. 2020], we disable NEE and Russian roulette for all methods and set the maximum path length to 10. All methods are implemented upon a GPU path tracing renderer.", + "bbox": [ + 78, + 590, + 480, + 659 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We render all images at the resolution of $1280 \\times 720$ , and evaluate image quality using mean relative squared error (relMSE). All the images, additional metrics (MAPE and MRSE), and the false-color maps can be interactively inspected with our supplementary viewer.", + "bbox": [ + 78, + 660, + 480, + 715 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "6.1 Comparisons", + "text_level": 1, + "bbox": [ + 78, + 733, + 209, + 748 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Our method is compared against improved PPG [Müller 2019] (an enhanced version of Practical Path Guiding [Müller et al. 2017]), and Variance-aware Path Guiding [Rath et al. 2020]. For the experimental configuration of the compared methods, we use the same as [Rath et al. 2020], except for fixing the BSDF selection probability to $50\\%$ (for both ours and the compared methods). Both compared methods used an iteratively reconstructed subdivision structure (i.e., the spatio-directional trees) to account for spatial variations. 
A total of 10 different scenes were tested.", + "bbox": [ + 78, + 750, + 482, + 875 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We first show equal-spp comparisons on two representative scenes. The VEACH Door scene features strong indirect illumination that is difficult to handle with BSDF importance sampling, while the BATHROOM scene contains many specular and glossy surfaces. As shown in Fig. 3, our proposed method outperforms the other two methods even when only learning incident radiance $L_{\\mathrm{i}}$ (NPM-radiance). The noise is alleviated further with our full integrand learning method (NPM-product), since both of the scenes contain glossy surfaces, where the contribution of samples is strongly influenced by the BSDF term. We also note that our method quickly becomes effective at the very beginning of the training process (see the convergence plots in Fig. 3). This indicates a better training efficiency over classical guiding methods, which will be discussed later. Additional results on more test scenes are shown in Fig. 4 and Tab. 2, as well as the convergence plots in Fig. 5.", + "bbox": [ + 511, + 402, + 916, + 609 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We then show the results of equal-time comparisons between our method and [Rath et al. 2020] in Fig. 6. Since they do not explicitly learn the product sampling distribution (i.e., conditioned on 5D inputs $\\omega_0$ and $\\mathbf{x}$ ), we only use our radiance-based method (NPM-radiance) for fair comparisons. Instead of simply learning the incident radiance distribution $(L_{\\mathrm{i}})$ , they use an improved target distribution to account for the variance and BSDF (marginalized over $\\omega_0$ ). Our method, on the other hand, achieves better performance by learning $L_{\\mathrm{i}}$ only. 
We attribute this superiority of our method to both the better capacity of capturing spatio-directional correlation and more parallelism.", + "bbox": [ + 511, + 609, + 916, + 763 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "6.2 Evaluation", + "text_level": 1, + "bbox": [ + 514, + 773, + 627, + 787 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Trainable Spatial Embedding. We analyze the performance of different forms of spatial input encoding in terms of convergence and quality (Fig. 8). The spatial embedding (i.e. parametric encoding) uses trainable latent vector grids to model the spatially-varying target distributions, leaving the MLP to focus on decoding this implicit representation into valid vMF mixtures. The other two variants", + "bbox": [ + 511, + 792, + 916, + 876 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 81, + 69, + 91, + 78 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Honghao Dong, Guoping Wang, and Sheng Li", + "bbox": [ + 112, + 68, + 331, + 80 + ], + "page_idx": 5 + }, + { + "type": "footer", + "text": "Vol. 1, No. 1, Article. Publication date: April 2025.", + "bbox": [ + 81, + 893, + 323, + 905 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/040ea9ba4caf98b087626d13b7305651b3dff2f6bf994b937ecc1d79ac7b96c9.jpg", + "table_caption": [ + "Table 2. Practical Path Guiding (PPG) [Müller 2019], Variance-aware Path Guiding [Rath et al. 2020], unidirectional path tracing and our method on 10 test scenes. We report relMSE, render time, and speedup using PPG as the baseline. Our NPM technique consistently reduces the error in the test scenes." + ], + "table_footnote": [], + "table_body": "
PT (BSDF)[Müller 2019][Rath et al. 2020]Ours
PPG (baseline)Variance. PGNPM (radiance)NPM (product)
BATHROOM0.090548s0.05301.0 ×106s0.04851.09 ×107s0.02512.11 ×101s0.02032.61 ×108s
BEDROOM0.038340s0.02011.0 ×105s0.01611.26 ×109s0.01501.35 ×84s0.01461.38 ×90s
BREAKFAST ROOM0.009448s0.00691.0 ×100s0.00471.46 ×103s0.00381.80 ×63s0.00351.96 ×71s
LIVING ROOM0.027332s0.01841.0 ×74s0.01461.26 ×80s0.01571.17 ×47s0.01321.39 ×54s
PINK ROOM0.004637s0.00821.0 ×74s0.00611.34 ×76s0.00332.42 ×53s0.00263.21 ×62s
SALLE DE BAIN0.081938s0.02231.0 ×116s0.03460.64 ×116s0.01961.14 ×79s0.01401.59 ×86s
STAIRCASE0.181234s0.02981.0 ×80s0.02611.14 ×86s0.01941.54 ×72s0.01721.74 ×76s
VEACH DOOR0.620833s0.21671.0 ×82s0.19451.11 ×91s0.07502.89 ×65s0.04614.69 ×77s
VEACH EGG8.291833s0.83791.0 ×82s0.78701.07 ×85s0.59841.40 ×62s0.53521.56 ×69s
WHITE ROOM0.030138s0.02781.0 ×107s0.02531.10 ×103s0.01242.25 ×76s0.01002.75 ×87s
", + "bbox": [ + 93, + 136, + 903, + 323 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "do not explicitly separate these two tasks by using a monolithic MLP. The addition of spatial embedding significantly improves convergence, and the multi-resolution design further reduces error by better modeling finer-grained spatio-directional correlations. Furthermore, this does not introduce noticeable computational overhead, as only a small fraction of parameters are involved in each training/inference.", + "bbox": [ + 83, + 344, + 480, + 440 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Training Efficiency. The effectiveness of guiding methods under small training budgets is important, especially for applications such as preview rendering or even interactive rendering. We analyze the training efficiency of different guiding methods by comparing their performance under different training budgets (31 spp, 63 spp, 127 spp, respectively) in Fig. 7. Our method quickly converges to a good sampling distribution with only a few training samples and less training time cost (e.g., 31 spp with about 3s), thus outperforming previous guiding methods even with much fewer training samples.", + "bbox": [ + 83, + 449, + 480, + 574 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "6.3 Discussion", + "text_level": 1, + "bbox": [ + 83, + 587, + 189, + 599 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Path Guiding Extensions. Our method can be extended with many well-established extensions suggested by previous path guiding algorithms. They are straightforward to be integrated and are promising to further improve our performance. For example: (1) the BSDF selection probability could also be learned by our network or by some other caching strategies [Müller et al. 2020], thus better handling the near-specular surfaces; and (2) the improved variance-aware target distribution [Rath et al. 
2020] could be learned to account for the variance within the noisy MC estimates.", + "bbox": [ + 83, + 604, + 480, + 728 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Performance Analysis. Our method serves effective means for path guiding while remaining performance practical. Specifically, the measured time cost per NPM evaluation (including both network inference and importance sampling the decoded mixture models) at $1280 \\times 720$ is about 3ms. Meanwhile, a training step (i.e., a batch of $2^{18}$ samples) costs about 10ms, indicating that a typical training process (about 1000 training steps) takes about 10s to converge on a single GPU. NPM contains a total of about 2M learnable parameters, resulting in a memory consumption of $< 10\\mathrm{MB}$ . The compact design of our implicit NPM representation results in less control", + "bbox": [ + 83, + 737, + 480, + 875 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "flow divergence, better memory locality, and better caching performance. Together, this makes our method practical for modern GPU parallelization, which is often harder to achieve with the tree-like spatial subdivision schemes used by most of the previous guiding methods.", + "bbox": [ + 517, + 344, + 915, + 411 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Alternative Solutions. Several studies also aim to tackle the parallel issue. Dodik et al. [2022] use spatio-directional mixtures (i.e., conditioned on $\\mathbf{x}$ and $\\omega_0$ ) to correlate target distributions with spatial positions. Ruppert et al. [2020] design strategies to warp the guiding distributions in the spatial subdivisions to resemble the true distribution. However, these methods adopt sophisticated strategies that are difficult to parallelize efficiently on GPUs (e.g., batched expectation-maximization (EM) applied to a varying number of mixtures) while requiring extra efforts to fit scene BSDFs for product sampling. 
In contrast, our method exploits trainable spatial embedding to encode the target distributions while using a decoder MLP to model the non-linearity between spatial features and PMMs in a GPU-friendly manner. Nevertheless, incorporating ideas from these studies, such as adaptively controlling the granularity of learned distributions, may further enhance our method.", + "bbox": [ + 517, + 426, + 915, + 632 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "7 CONCLUSION, LIMITATIONS AND FUTURE WORK", + "text_level": 1, + "bbox": [ + 517, + 650, + 900, + 662 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We present Neural Parametric Mixtures, a novel method for learning the target distributions for path guiding techniques. We use a compact implicit neural representation to encode the spatio-directional parametric distributions. Compared to previous non-neural methods that use explicit spatial subdivision structures to store directional distributions, our continuous implicit representation is simpler and more efficient while naturally avoiding the artifacts (e.g., parallax) caused by their discretized subdivision schemes. Our NPM technique could be efficiently trained with stochastic gradient descent to minimize the divergence from the target distribution.", + "bbox": [ + 517, + 667, + 915, + 805 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Despite the simplicity and effectiveness of our method, the main limitation resides in the lack of flexibility of our directional distribution representation, i.e., a fixed number of vMF components. While a similar issue exists in classical methods using PMMs [Dodik et al. 2022; Herholz et al. 
2016], recent methods achieve more accurate", + "bbox": [ + 517, + 806, + 915, + 875 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Neural Parametric Mixtures for Path Guiding", + "bbox": [ + 666, + 68, + 883, + 79 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 901, + 69, + 916, + 78 + ], + "page_idx": 6 + }, + { + "type": "footer", + "text": "Vol. 1, No. 1, Article. Publication date: April 2025.", + "bbox": [ + 674, + 893, + 915, + 904 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/4de0ae7607601ea031f8b26980ba8e4d0c7d3cc17f48b8484fc9982a6dbb12a4.jpg", + "image_caption": [ + "Fig. 4. Visual comparisons using the same experimental setup with Fig. 3, all are rendered with 750spp at $1280 \\times 720$ . We use the online training setup for all the guiding methods, i.e., all the samples are included in the final rendering. Our method exhibits better performance than other guiding methods in most scenes by only learning the incident radiance term while further reducing the error by incorporating the BSDF term (i.e., product sampling). More results on other test scenes, additional error metrics and false-color visualizations are provided in our supplementary interactive viewer." + ], + "image_footnote": [], + "bbox": [ + 91, + 95, + 890, + 801 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 81, + 69, + 91, + 78 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Honghao Dong, Guoping Wang, and Sheng Li", + "bbox": [ + 112, + 68, + 331, + 80 + ], + "page_idx": 7 + }, + { + "type": "footer", + "text": "Vol. 1, No. 1, Article. 
Publication date: April 2025.", + "bbox": [ + 81, + 893, + 323, + 905 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/181fb606997372bc36fe02a725b3aeead1f4cfe80019ae1ba38494763dbe1a09.jpg", + "image_caption": [ + "VEACH DOOR" + ], + "image_footnote": [], + "bbox": [ + 86, + 94, + 261, + 191 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/e6fd5634de776096702072077954764b1f851b04c67b47deb142edfa71811598.jpg", + "image_caption": [ + "LIVING ROOM" + ], + "image_footnote": [], + "bbox": [ + 263, + 94, + 419, + 191 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/0dab85787ebf57ece3d409ae13042efbe88173d0445016fac2fde3b91dfca16d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 421, + 94, + 578, + 191 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/9eca55ab6a658160878a3506294517e5cce89b945cfdf85fec532d3cee6b242a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 580, + 94, + 733, + 191 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/9deff805ba49e53de948b70b54dc5f1ad8b4f9c77c5599195f5046879dc11f7a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 735, + 95, + 893, + 191 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/08c2c9d51bb87f869a84fb7ff37be2f1cb560bc73017e4d1f4e3a4637eecad26.jpg", + "image_caption": [ + "VEACH EGG" + ], + "image_footnote": [], + "bbox": [ + 88, + 204, + 263, + 301 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/14fec7d9f02364f9e1b3cc469dbbe763bb1b753ae5ada4e828afb851b65ca47c.jpg", + "image_caption": [ + "SALLE DE BAIN" + ], + "image_footnote": [], + "bbox": [ + 264, + 204, + 419, + 301 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/459f119a1e6f73caa5bd490548bb83413ea566cd62800bcd3a50b97f94ad6164.jpg", + "image_caption": [ + "BATHROOM", + "BREAKFAST ROOM" + ], + "image_footnote": [], + "bbox": [ + 419, + 204, + 576, + 301 + 
], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/374aa6caa361e40fa1e2fa0c59419a8d5806a2cf71fb18ce126bd34ae3352425.jpg", + "image_caption": [ + "BEDROOM", + "WHITE ROOM" + ], + "image_footnote": [], + "bbox": [ + 578, + 204, + 733, + 301 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/ac207f2b0dc08e9ca00b1f7fd2da77c0830570cdc5793049f393be18285a629d.jpg", + "image_caption": [ + "STAIRCASE", + "PINK ROOM" + ], + "image_footnote": [], + "bbox": [ + 735, + 204, + 893, + 301 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/34654e72da7e2e9ef43f854cd70959d1671f0b7ceb629f08d155c24032847c36.jpg", + "image_caption": [ + "Fig. 5. Convergence plots correspond to Fig. 3 and Fig. 4. Unidirectional path tracing with BSDF importance sampling (PT-BSDF), Practical Path Guiding [Muller 2019], Variance-aware Path Guiding [Rath et al. 2020] and our method with different target distributions (NPM-radiance and NPM-product). Our methods consistently outperform these classical guiding methods, and quickly become effective even with a few training samples and short training time (e.g., 30spp, amounting to about 3 seconds on GPU), indicating practicality for preview or even interactive rendering. We attribute this success to the compact implicit representation and better spatial resolution of our method. The image results and detailed statistics could be inspected in the supplemental materials." + ], + "image_footnote": [], + "bbox": [ + 228, + 316, + 781, + 333 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "directional distributions by adaptively merging and splitting the vMF components [Ruppert et al. 2020]. This, however, is non-trivial to apply to our NPM technique.", + "bbox": [ + 78, + 431, + 480, + 473 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In future work, we will investigate more accurate approaches to implicitly encode parametric distributions while remaining efficient. 
Finding better basis functions or adaptively controlling the number of output components are two possible but challenging directions. Meanwhile, we would like to improve the efficiency of our method by using either novel architectural designs for neural networks, optimized implementation, or adapting previous extensions to path guiding algorithms. We believe these are important steps to make our method more practical for interactive or even real-time rendering pipelines, as well as other related applications that require", + "bbox": [ + 78, + 474, + 482, + 613 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "fitting distributions with high-frequency spatial variations. In addition, applying our method to bidirectional path tracing [Popov et al. 2015], especially subspace probabilistic connections [Su et al. 2022], will also be an interesting future avenue.", + "bbox": [ + 513, + 431, + 916, + 488 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "ACKNOWLEDGMENTS", + "text_level": 1, + "bbox": [ + 514, + 503, + 683, + 517 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "This project was supported by the National Key R&D Program of China (No.2022YFB3303400) and NSFC of China (No. 62172013). 
We also thank the test scenes providers: Mareck (BATHROOM), Slyk-Drako (BEDROOM), Wig42 (BREAKFAST ROOM, LIVING ROOM, PINK ROOM, STAIRCASE), nacinus (SALLE DE BAIN), Jaakko Lehtinen (VEACH DOOR), Jay-Artist (WHITE ROOM), as well as the efforts for converting scene formats by Benedikt Bitterli [2016].", + "bbox": [ + 513, + 522, + 919, + 619 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/a9e3e0bca254a0b92485bb2f58af12c621eb6de5a42b0e3fcc3070347e0f8bf4.jpg", + "image_caption": [ + "SALLE DE BAIN" + ], + "image_footnote": [], + "bbox": [ + 81, + 633, + 267, + 724 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/888d6d0406b223320ab0974c9477a53a96ee8d8238c7d8fd9369753542c3a598.jpg", + "image_caption": [ + "Rath et al." + ], + "image_footnote": [], + "bbox": [ + 269, + 643, + 336, + 684 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/7dae49ea469d37eb159d984887468aa069cec61a542793b609cae33e2b5f8746.jpg", + "image_caption": [ + "0.05407" + ], + "image_footnote": [], + "bbox": [ + 269, + 685, + 336, + 724 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/721cfcdc5d3a0ac27c38adce4f26b3428fb63c7de4fc576c2cfea3db865be04d.jpg", + "image_caption": [ + "NPM (rad.)" + ], + "image_footnote": [], + "bbox": [ + 339, + 643, + 406, + 684 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/e21985f77ddb52baeef3fabd66b4f32fc6341b6a7d5b8db27786557bde91e4dc.jpg", + "image_caption": [ + "0.04926" + ], + "image_footnote": [], + "bbox": [ + 339, + 685, + 406, + 724 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/5101dc19c079bc740e073f41696f91e4b45adb66d30b0904a6def2725004c356.jpg", + "image_caption": [ + "Reference" + ], + "image_footnote": [], + "bbox": [ + 408, + 643, + 475, + 684 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/909a175a2fcc3697baabec10f44413f5aa481dea166ea26fc95636d89070df0c.jpg", + "image_caption": [ + "relMSE" + ], + 
"image_footnote": [], + "bbox": [ + 408, + 685, + 475, + 724 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/3f1102130cd97c4c939dd9fa09ed8f97c36f686554ea57ef2748057833451595.jpg", + "image_caption": [ + "BEDROOM", + "Fig. 6. Equal-time comparisons (80s) on two test scenes between NPM(radiance) and Variance-aware Path Guiding [Rath et al. 2020]." + ], + "image_footnote": [], + "bbox": [ + 81, + 738, + 267, + 820 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/eb53553187bc0b187fd83239885ae0a52d57bdcadfd1a255b9daabdc747058c5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 269, + 738, + 336, + 779 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/52f1ca1d2c54273a68d3507fd7757099974cc8b5df5f4e725bd2131d4425e717.jpg", + "image_caption": [ + "0.02176" + ], + "image_footnote": [], + "bbox": [ + 269, + 780, + 336, + 820 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/d38b0cf0ea9559e51984689d857719730f7ec7c4438a69b2938c81644d2b5f63.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 339, + 738, + 408, + 779 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/f62aaec39bdfee8ed9c26f61ed59b7134f4b5214059c55a13757e21a964bad39.jpg", + "image_caption": [ + "0.01324" + ], + "image_footnote": [], + "bbox": [ + 339, + 780, + 408, + 820 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/b37e2d6558a7080380e4c1b42a42cc24b1dc2668cf0b1cc5ffb8a3a703e156f5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 408, + 738, + 477, + 779 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/261222e88bdfcbe95254dde56f97fce4ee0aab79da762475cb7d753ac46716c4.jpg", + "image_caption": [ + "relMSE" + ], + "image_footnote": [], + "bbox": [ + 410, + 780, + 475, + 819 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/7c235d11fcbe63ea0cd6d21d4a1760468d38923fc545f357e03dd55a228e84b9.jpg", + 
"image_caption": [ + "Fig. 7. We train each guiding method with small training budgets (31 spp, 63 spp, 127 spp, respectively) and render the scene with 500 spp. Our method outperforms previous methods even with much fewer training samples." + ], + "image_footnote": [], + "bbox": [ + 517, + 657, + 915, + 820 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Neural Parametric Mixtures for Path Guiding", + "bbox": [ + 666, + 68, + 882, + 79 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 908, + 69, + 915, + 78 + ], + "page_idx": 8 + }, + { + "type": "footer", + "text": "Vol. 1, No. 1, Article. Publication date: April 2025.", + "bbox": [ + 671, + 893, + 915, + 905 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/c72949cfaa907720f948811ab1f1c468fcd58c869731f256eaa07f814824b055.jpg", + "image_caption": [ + "Fig. 8. Equal-time comparison (50s) of different input encoding. We report the sample count and error (relMSE) of each method. The dashed line in the plot marks the end of the training phase. The multi-resolution spatial embedding outperforms other methods while remaining training-efficient. Yellow plot refers to path tracing with BSDF importance sampling." 
+ ], + "image_footnote": [], + "bbox": [ + 81, + 97, + 197, + 222 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/877c0d7fc45351dbafd802d190c15c900b000d30915cae9165790ac96d5238e7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 197, + 97, + 313, + 222 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/f4055e7418c0dce471dcaa70b30e4d70e2af9a7c639208ff5065e16660bec2b8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 315, + 95, + 483, + 222 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 81, + 324, + 178, + 335 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Benedikt Bitterli. 2016. Rendering resources. https://benedikt-bitterli.me/resources/. Norbert Bus and Tamy Boubekeur. 2017. Double Hierarchies for Directional Importance Sampling in Monte Carlo Rendering. Journal of Computer Graphics Techniques (JCGT) 6, 3 (28 August 2017), 25-37. http://jcgt.org/published/0006/03/02", + "R. R. Currius, D. Dolonius, U. Assarsson, and E. Sintorn. 2020. Spherical Gaussian Light-field Textures for Fast Precomputed Global Illumination. Computer Graphics Forum 39, 2 (2020), 133-146.", + "Laurent Dinh, Jascha Sohl-Dickstein, and Samy Bengio. 2017. Density estimation using Real NVP. In International Conference on Learning Representations.", + "Stavros Diolatzis, Julien Philip, and George Drettakis. 2022. Active Exploration for Neural Global Illumination of Variable Scenes. ACM Transactions on Graphics (2022).", + "Ana Dodik, Marios Papas, Cengiz Öztireli, and Thomas Müller. 2022. Path Guiding Using Spatio-Directional Mixture Models. In Computer Graphics Forum, Vol. 41. Wiley Online Library, 172-189.", + "Saeed Hadadan, Shuhong Chen, and Matthias Zwicker. 2021. Neural radiosity. 
ACM Transactions on Graphics (TOG) 40, 6 (2021), 1-11.", + "Sebastian Herholz, Oskar Elek, Jiří Vorba, Hendrik Lensch, and Jaroslav Krivánek. 2016. Product importance sampling for light transport path guiding. In Computer Graphics Forum, Vol. 35. Wiley Online Library, 67-77.", + "Yuchi Huo, Rui Wang, Ruzahng Zheng, Hualin Xu, Hujun Bao, and Sung-Eui Yoon. 2020. Adaptive incident radiance field sampling and reconstruction using deep reinforcement learning. ACM Transactions on Graphics (TOG) 39, 1 (2020), 1-17.", + "Wenzel Jakob. 2012. Numerically stable sampling of the von Mises-Fisher distribution on $S^{\\wedge}2$ (and other tricks). Interactive Geometry Lab, ETH Zürich, Tech. Rep (2012), 6.", + "Henrik Wann Jensen. 1995. Importance driven path tracing using the photon map. In Eurographics Workshop on Rendering Techniques. Springer, 326-335.", + "James T. Kajiya. 1986. The Rendering Equation. SIGGRAPH Comput. Graph. (1986).", + "Diederik P. Kingma and Jimmy Ba. 2015. Adam: A Method for Stochastic Optimization. *ICLR* (2015).", + "Eric P Lafortune and Yves D Willems. 1995. A 5D tree to reduce the variance of Monte Carlo ray tracing. In Eurographics Workshop on Rendering Techniques. Springer, 11-20.", + "Samuli Laine, Tero Karras, and Timo Aila. 2013. Megakernels considered harmful: Wavefront path tracing on GPUs. In Proceedings of the 5th High-Performance Graphics Conference, 137-143.", + "Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. 2020. NeRF: Representing Scenes as Neural Radiance Fields for View Synthesis. in ECCV.", + "Thomas Müller. 2019. \"Practical Path Guiding\" in Production. In ACM SIGGRAPH 2019 Courses (SIGGRAPH '19). ACM, New York, NY, USA, Article 18, 77 pages.", + "Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. 2022. Instant Neural Graphics Primitives with a Multiresolution Hash Encoding. ACM Trans. Graph. 
41, 4, Article 102 (July 2022), 15 pages.", + "Thomas Müller, Markus Gross, and Jan Novák. 2017. Practical path guiding for efficient light-transport simulation. In Computer Graphics Forum, Vol. 36. Wiley Online Library, 91-100.", + "Thomas Müller, Brian McWilliams, Fabrice Rousselle, Markus Gross, and Jan Novák. 2019. Neural importance sampling. ACM Transactions on Graphics (TOG) 38, 5 (2019), 1-19.", + "Thomas Müller, Fabrice Rousselle, Alexander Keller, and Jan Novák. 2020. Neural control variates. ACM Transactions on Graphics (TOG) 39, 6 (2020), 1-19.", + "Thomas Müller, Fabrice Rousselle, Jan Novák, and Alexander Keller. 2021. Real-Time Neural Radiance Caching for Path Tracing. ACM Trans. Graph. 40, 4, Article 36 (jul" + ], + "bbox": [ + 81, + 339, + 480, + 875 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "2021), 16 pages.", + "Jacob Munkberg, Jon Hasselgren, Tianchang Shen, Jun Gao, Wenzheng Chen, Alex Evans, Thomas Mueller, and Sanja Fidler. 2022. Extracting Triangular 3D Models, Materials, and Lighting From Images. CVPR (2022).", + "Thomas Muller. 2021. tiny-cuda-nn. https://github.com/NVlabs/tiny-cuda-nn", + "Steven G Parker, James Bigler, Andreas Dietrich, Heiko Friedrich, Jared Hoberock, David Luebke, David McAllister, Morgan McGuire, Keith Morley, Austin Robison, et al. 2010. Optix: a general purpose ray tracing engine. ACM Transactions on Graphics (TOG) 29, 4 (2010), 1-13.", + "S. Popov, R. Ramamoorthi, F. Durand, and G. Drettakis. 2015. Probabilistic Connections for Bidirectional Path Tracing. Computer Graphics Forum 34, 4 (07 2015), 75-86.", + "Alexander Rath, Pascal Grittmann, Sebastian Herholz, Petr Vévoda, Philipp Slusallek, and Jaroslav Křivánek. 2020. Variance-aware path guiding. ACM Transactions on Graphics (TOG) 39, 4 (2020), 151-1.", + "Lukas Ruppert, Sebastian Herholz, and Hendrik PA Lensch. 2020. Robust fitting of parallax-aware mixtures for path guiding. 
ACM Transactions on Graphics (TOG) 39, 4 (2020), 147-1.", + "Fujia Su, Sheng Li, and Guoping Wang. 2022. SPCBPT: Subspace-Based Probabilistic Connections for Bidirectional Path Tracing. ACM Trans. Graph. 41, 4, Article 77 (jul 2022), 14 pages. https://doi.org/10.1145/3528223.3530183", + "Dor Verbin, Peter Hedman, Ben Mildenhall, Todd Zickler, Jonathan T. Barron, and Pratul P. Srinivasan. 2022. Ref-NeRF: Structured View-Dependent Appearance for Neural Radiance Fields. CVPR (2022).", + "Jiri Vorba, Johannes Hanika, Sebastian Herholz, Thomas Müller, Jaroslav Krivánek, and Alexander Keller. 2019. Path Guiding in Production. In ACM SIGGRAPH 2019 Courses (Los Angeles, California) (SIGGRAPH '19). ACM, New York, NY, USA, Article 18, 77 pages.", + "Jiri Vorba, Ondrej Karlik, Martin Sik, Tobias Ritschel, and Jaroslav Krivanek. 2014. On-line learning of parametric mixture models for light transport simulation. ACM Transactions on Graphics (TOG) 33, 4 (2014), 1-11.", + "Alex Yu, Ruilong Li, Matthew Tancik, Hao Li, Ren Ng, and Angjoo Kanazawa. 2021. PlenOctrees for Real-time Rendering of Neural Radiance Fields. In ICCV.", + "Shilin Zhu, Zexiang Xu, Tiancheng Sun, Alexandr Kuznetsov, Mark Meyer, Henrik Wann Jensen, Hao Su, and Ravi Ramamoorthi. 2021. Hierarchical neural reconstruction for path guiding using hybrid path and photon samples. ACM Transactions on Graphics (TOG) 40, 4 (2021), 1-16." + ], + "bbox": [ + 514, + 103, + 916, + 465 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 83, + 69, + 94, + 78 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "- Honghao Dong, Guoping Wang, and Sheng Li", + "bbox": [ + 99, + 68, + 336, + 80 + ], + "page_idx": 9 + }, + { + "type": "footer", + "text": "Vol. 1, No. 1, Article. 
Publication date: April 2025.", + "bbox": [ + 81, + 893, + 323, + 905 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/data/2025/2504_04xxx/2504.04315/bc01e0c9-6b51-4b3b-9b99-9ed47940a83c_model.json b/data/2025/2504_04xxx/2504.04315/bc01e0c9-6b51-4b3b-9b99-9ed47940a83c_model.json new file mode 100644 index 0000000000000000000000000000000000000000..b9b8fcfa2f5bdfe43e2cb6dffd74fc22ef5bf49c --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/bc01e0c9-6b51-4b3b-9b99-9ed47940a83c_model.json @@ -0,0 +1,2783 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.269, + 0.061, + 0.701 + ], + "angle": 270, + "content": "arXiv:2504.04315v1 [cs.GR] 6 Apr 2025" + }, + { + "type": "title", + "bbox": [ + 0.081, + 0.096, + 0.611, + 0.12 + ], + "angle": 0, + "content": "Neural Parametric Mixtures for Path Guiding" + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.131, + 0.42, + 0.183 + ], + "angle": 0, + "content": "HONGHAO DONG, Peking University, China \nGUOPING WANG, Peking University, China \nSHENG LI*, Peking University, China" + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.189, + 0.483, + 0.365 + ], + "angle": 0, + "content": "Previous path guiding techniques typically rely on spatial subdivision structures to approximate directional target distributions, which may cause failure to capture spatio-directional correlations and introduce parallax issue. In this paper, we present Neural Parametric Mixtures (NPM), a neural formulation to encode target distributions for path guiding algorithms. We propose to use a continuous and compact neural implicit representation for encoding parametric models while decoding them via lightweight neural networks. We then derive a gradient-based optimization strategy to directly train the parameters of NPM with noisy Monte Carlo radiance estimates. 
Our approach efficiently models the target distribution (incident radiance or the product integrand) for path guiding, and outperforms previous guiding methods by capturing the spatio-directional correlations more accurately. Moreover, our approach is more training efficient and is practical for parallelization on modern GPUs." + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.372, + 0.483, + 0.398 + ], + "angle": 0, + "content": "CCS Concepts: Computing methodologies \\(\\rightarrow\\) Ray tracing; Neural networks." + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.403, + 0.483, + 0.429 + ], + "angle": 0, + "content": "Additional Key Words and Phrases: Ray Tracing, Global Illumination, Sampling and Reconstruction, Neural Networks, Mixture Models" + }, + { + "type": "title", + "bbox": [ + 0.08, + 0.435, + 0.228, + 0.447 + ], + "angle": 0, + "content": "ACM Reference Format:" + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.448, + 0.483, + 0.485 + ], + "angle": 0, + "content": "Honghao Dong, Guoping Wang, and Sheng Li. 2025. Neural Parametric Mixtures for Path Guiding. 1, 1 (April 2025), 10 pages. https://doi.org/10.1145/3588432.3591533" + }, + { + "type": "title", + "bbox": [ + 0.081, + 0.499, + 0.228, + 0.512 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.517, + 0.483, + 0.684 + ], + "angle": 0, + "content": "The efficiency of path tracing relies heavily on the sampling strategy. To further improve its efficiency and robustness, path guiding algorithms leverage the knowledge gained during rendering to facilitate the process of light-path construction, thereby reducing noise. To acquire better importance sampling distribution, local path guiding techniques employ previous radiance estimates to learn an approximation of spatial incident radiance fields, which are then used to guide the construction of paths. In practice, current methods typically use some representation (e.g., Gaussian mixtures [Herholz et al. 
2016; Vorba et al. 2014], quadtrees [Müller et al. 2017]) to approximate the directional distribution of incident radiance. A spatial subdivision structure (e.g., kd-tree [Dodik et al. 2022], or octree [Bus" + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.695, + 0.194, + 0.706 + ], + "angle": 0, + "content": "*Corresponding author." + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.706, + 0.289, + 0.716 + ], + "angle": 0, + "content": "Project website: https://neuropara.github.io." + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.729, + 0.483, + 0.761 + ], + "angle": 0, + "content": "Authors' addresses: Honghao Dong, Peking University, Beijing, China, cuteday@pku.edu.cn; Guoping Wang, Peking University, Beijing, China, wgp@pku.edu.cn; Sheng Li, Peking University, Beijing, China, lisheng@pku.edu.cn." + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.772, + 0.483, + 0.845 + ], + "angle": 0, + "content": "Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than ACM must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org." + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.845, + 0.3, + 0.855 + ], + "angle": 0, + "content": "© 2025 Association for Computing Machinery." 
+ }, + { + "type": "text", + "bbox": [ + 0.08, + 0.855, + 0.239, + 0.864 + ], + "angle": 0, + "content": "XXXX-XXXX/2025/4-ART $15.00" + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.865, + 0.27, + 0.876 + ], + "angle": 0, + "content": "https://doi.org/10.1145/3588432.3591533" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.188, + 0.916, + 0.216 + ], + "angle": 0, + "content": "and Boubekeur 2017]) is then used to store these distributions, thus accounting for the spatial variations." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.217, + 0.917, + 0.355 + ], + "angle": 0, + "content": "However, several key deficiencies remain in their paradigm. Most methods learn the marginalized incident radiance distribution within each subdivided spatial region. This fails to capture the spatiodirectional correlations within the spatial discretizations, and could cause artifacts (e.g., parallax error, Fig 1(a)). Moreover, their spatial subdivision structures are subject to frequent reconstruction for finer-grained spatial resolution, which needs extra overhead and require a long training time to converge. Meanwhile, it is challenging to efficiently fit these specific directional distributions from noisy samples, especially in an online manner [Ruppert et al. 2020]." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.355, + 0.917, + 0.576 + ], + "angle": 0, + "content": "While an adaptive and robust spatial representation is difficult to achieve with manually designed subdivision schemes, we saw the recent success of neural implicit representation in compactly modeling spatially varying functions with fine-grained and high-frequency details [Mildenhall et al. 2020]. In this work, we exploit the great expressiveness of neural implicit representation while preserving the desirable properties of parametric mixture models (e.g. efficient importance sampling) for path guiding algorithms. 
We thereby present Neural Parametric Mixtures (NPM), which use a continuous and compact implicit representation to encode spati-directional target distributions, and decode them into PMMs with lightweight neural networks for fast importance sampling. We show that our NPM representation, without explicit spatial subdivision schemes, can be efficiently trained simply using gradient-based optimization techniques. Specifically, our method has advantages in the following aspects:" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.576, + 0.917, + 0.659 + ], + "angle": 0, + "content": "First, our continuous implicit representation of spatial radiance fields naturally captures the correlations between spatial positions and directional target distributions. By smoothly interpolating and decoding the implicit representations with neural networks, our method inherently avoids the issues due to spatial discretization, thus resulting in higher performance." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.659, + 0.917, + 0.756 + ], + "angle": 0, + "content": "Second, our compact representation avoids the extra overhead and long training time caused by the iterative reconstruction strategies applied to the explicit spatial subdivision structures. Combined with our simple optimization based on stochastic gradient descent, our method outperforms other guiding methods even with fewer training samples. In addition, our method is practical and performant for parallelization on GPU." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.756, + 0.917, + 0.826 + ], + "angle": 0, + "content": "Lastly, our method can learn the product distribution (i.e., multiplied by the BSDF and the cosine term). This further reduces the noise with a modest computational overhead while not requiring the extra effort of previous solutions (e.g., fitting each BSDF with pre-computed parametric models)." 
+ }, + { + "type": "footer", + "bbox": [ + 0.673, + 0.894, + 0.916, + 0.906 + ], + "angle": 0, + "content": "Vol. 1, No. 1, Article. Publication date: April 2025." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.082, + 0.07, + 0.092, + 0.079 + ], + "angle": 0, + "content": "2" + }, + { + "type": "header", + "bbox": [ + 0.114, + 0.069, + 0.333, + 0.081 + ], + "angle": 0, + "content": "Honghao Dong, Guoping Wang, and Sheng Li" + }, + { + "type": "title", + "bbox": [ + 0.081, + 0.101, + 0.226, + 0.113 + ], + "angle": 0, + "content": "2 RELATED WORK" + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.118, + 0.483, + 0.284 + ], + "angle": 0, + "content": "Path Guiding. To achieve better sampling strategies, local path guiding techniques leverage previous radiance estimates (either online or during a pre-computation process) to build an approximation of the incident radiance fields, which is used to guide subsequent sampling. Early approaches used simple bases such as histograms for importance sampling, e.g. built from a photon map [Jensen 1995] or collected radiance estimates with 5-D tree structures [Lafortune and Willems 1995]. Subsequent work has developed various techniques to construct the guiding distribution, e.g., Gaussian mixtures [Vorba et al. 2014], quad-trees [Müller et al. 2017], which is often stored in spatial data structures (e.g., kd-tree and octree) to account for spatial variations of the distributions." + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.285, + 0.483, + 0.479 + ], + "angle": 0, + "content": "Deep learning techniques have also been explored recently, achieving improvements while often with less practical performance. For example, convolutional networks could be used to reconstruct the learned noisy radiance field [Huo et al. 2020; Zhu et al. 2021]. Specifically designed neural networks could also model complex manifolds [Dinh et al. 
2017], while allowing samples to be drawn directly from the learned distribution [Müller et al. 2019]. However, the prohibitive computational cost prevents its practical application [Müller et al. 2019; Vorba et al. 2019]. Instead of directly importance sampling using neural networks, we encode the target distribution into implicit neural representation, and use only lightweight MLPs to decode it into parametric mixtures for efficient sampling. We show that our method can be efficiently trained (< 10s per scene on a single GPU) while being sufficiently robust and practical." + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.49, + 0.483, + 0.643 + ], + "angle": 0, + "content": "Parametric Mixture Models. Parametric mixture models (PMMs) are convex combinations of parametric distributions, and are often used to approximate directional distributions in graphics applications. They have many desirable properties, e.g., fast sampling, and closed-form solutions for products, convolutions and integrals. Several types of PMMs (e.g., Gaussian mixtures [Dodik et al. 2022; Vorba et al. 2014] and von Mises-Fisher mixtures [Ruppert et al. 2020]) are widely used in the recently developed path guiding algorithms. Several recent works also use PMMs to fit BSDFs with precomputation [Herholz et al. 2016; Ruppert et al. 2020], and multiply them with the learned incident radiance to achieve product sampling." + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.643, + 0.483, + 0.713 + ], + "angle": 0, + "content": "Parametric models can also be predicted by neural networks, enabling new possibilities for e.g. lighting [Currius et al. 2020] and reconstruction [Yu et al. 2021] tasks. In this work, we use neural representations to encode parametric mixtures for efficient sampling. Our method is also naturally extensible to product sampling." + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.724, + 0.483, + 0.877 + ], + "angle": 0, + "content": "Implicit Neural Representation. 
Following the success of using neural networks to represent 3D scenes implicitly [Mildenhall et al. 2020], the concept of neural representation has been popularized and applied to various tasks. They use sparse input images to optimize the spatial radiance fields via a differentiable volume rendering procedure, thus enabling novel view synthesis. Inspired by its recent successful applications [Diolatzis et al. 2022; Müller et al. 2022], we exploit a continuous and compact implicit neural representation to encode the spatio-directional target distributions for path guiding algorithms. While the ground truth target distribution (i.e., the incident radiance or product distribution) is unknown, our NPM" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.101, + 0.918, + 0.143 + ], + "angle": 0, + "content": "representation can be optimized online using minibatch stochastic gradient descent (SGD), where the gradients for training are estimated by Monte Carlo integration using noisy radiance estimates." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.154, + 0.646, + 0.167 + ], + "angle": 0, + "content": "3 PRELIMINARY" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.172, + 0.918, + 0.201 + ], + "angle": 0, + "content": "Monte Carlo Integration. 
Light transport algorithms are generally based on the rendering equation [Kajiya 1986]:" + }, + { + "type": "equation", + "bbox": [ + 0.522, + 0.205, + 0.918, + 0.234 + ], + "angle": 0, + "content": "\\[\nL _ {0} (\\mathbf {x}, \\omega_ {0}) = L _ {\\mathrm {e}} (\\mathbf {x}, \\omega_ {0}) + \\int_ {\\Omega} f _ {\\mathrm {s}} (\\mathbf {x}, \\omega_ {0}, \\omega_ {\\mathrm {i}}) L _ {\\mathrm {i}} (\\mathbf {x}, \\omega_ {\\mathrm {i}}) | \\cos \\theta_ {\\mathrm {i}} | d \\omega_ {\\mathrm {i}}, \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.236, + 0.918, + 0.305 + ], + "angle": 0, + "content": "which defines the relationship between the outgoing radiance \\( L_{\\mathrm{o}} \\), emitted radiance \\( L_{e} \\), and the integrated incident radiance \\( L_{\\mathrm{i}} \\), at shading point \\( \\mathbf{x} \\). Monte Carlo integration is used to obtain an estimate of the reflection integral \\( L_{r} \\) using an average of \\( N \\) samples. In the case where \\( N = 1 \\):" + }, + { + "type": "equation", + "bbox": [ + 0.581, + 0.308, + 0.918, + 0.34 + ], + "angle": 0, + "content": "\\[\n\\left\\langle L _ {\\mathrm {r}} \\left(\\mathbf {x}, \\omega_ {\\mathrm {o}}\\right) \\right\\rangle = \\frac {f _ {\\mathrm {s}} \\left(\\mathbf {x} , \\omega_ {\\mathrm {o}} , \\omega_ {\\mathrm {i}}\\right) L _ {\\mathrm {i}} \\left(\\mathbf {x} , \\omega_ {\\mathrm {i}}\\right) \\left| \\cos \\theta_ {\\mathrm {i}} \\right|}{p \\left(\\omega_ {\\mathrm {i}} \\mid \\mathbf {x} , \\omega_ {\\mathrm {o}}\\right)}, \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.345, + 0.918, + 0.526 + ], + "angle": 0, + "content": "where \\(\\langle L_{\\mathrm{r}}(\\mathbf{x},\\omega_0)\\rangle\\) is an unbiased estimate of the outgoing radiance \\(L_{\\mathrm{r}}(\\mathbf{x},\\omega_0)\\), and \\(\\omega_{i}\\) is the incident direction sampled with some directional probability distribution \\(p(\\omega_{\\mathrm{i}}\\mid 
\\mathbf{x},\\omega_{\\mathrm{o}})\\). The variance of this estimator \\(V[\\langle L_{\\mathrm{r}}\\rangle ]\\) can be reduced if the sampling distribution resembles the shape of the integrand, and could even reach zero variance if being proportional to it (i.e., \\(p\\propto f_s\\cdot L_i\\cos \\theta_i\\)). This, however, is difficult to achieve with only BSDF importance sampling, leaving the remaining part of the integrand (i.e., the incident radiance) unknown, resulting in a relatively high variance of the MC estimator. Path guiding algorithms, on the other hand, manage to obtain better importance sampling strategies often by using previous radiance samples to approximate the incident radiance \\(L_{\\mathrm{i}}\\) or the full integrand \\(f_{s}\\cdot L_{\\mathrm{i}}\\cos \\theta_{i}\\), which will be discussed later." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.532, + 0.918, + 0.56 + ], + "angle": 0, + "content": "Von Mises-Fisher Mixtures. We use the von Mises-Fisher (vMF) distribution as the basis of NPM. The vMF distribution is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.605, + 0.562, + 0.918, + 0.587 + ], + "angle": 0, + "content": "\\[\nv (\\omega \\mid \\mu , \\kappa) = \\frac {\\kappa}{4 \\pi \\sinh \\kappa} \\exp \\left(\\kappa \\mu^ {T} \\omega\\right), \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.592, + 0.918, + 0.634 + ], + "angle": 0, + "content": "where \\(\\mu \\in \\mathbb{S}^2\\) and \\(\\kappa \\in [0, +\\infty)\\) defines the direction and precision (sharpness) of the vMF distribution. 
The vMF mixture model (VMM) is thus a convex combination of \\(K\\) vMF components/lobes:" + }, + { + "type": "equation", + "bbox": [ + 0.614, + 0.639, + 0.918, + 0.676 + ], + "angle": 0, + "content": "\\[\n\\mathcal {V} (\\omega \\mid \\Theta) = \\sum_ {i = 1} ^ {K} \\lambda_ {i} \\cdot v \\left(\\omega \\mid \\mu_ {i}, \\kappa_ {i}\\right), \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.681, + 0.918, + 0.749 + ], + "angle": 0, + "content": "where \\(\\Theta\\) contains the parameters \\((\\mu_i,\\kappa_i)\\) and weights \\((\\lambda_{i})\\) of each vMF component. The vMF mixtures have many desirable properties, e.g., fewer parameters (4 floats per component), efficient importance sampling, and closed-form product and integration, which together constitute the reason for choosing it as the basis of NPM." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.75, + 0.918, + 0.82 + ], + "angle": 0, + "content": "Our key is to encode the vMF mixtures with our implicit neural representation, then decode them with lightweight MLPs, and train them to effectively model the target distributions for path guiding algorithms. Other parametric basis functions (e.g., Gaussian mixtures) could be integrated into our method using a similar paradigm." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.83, + 0.785, + 0.844 + ], + "angle": 0, + "content": "4 NEURAL PARAMETRIC MIXTURES" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.848, + 0.92, + 0.877 + ], + "angle": 0, + "content": "In this section, we present our Neural Parametric Mixtures (NPM) technique for local path guiding. We first show how to encode/decode" + }, + { + "type": "footer", + "bbox": [ + 0.082, + 0.894, + 0.325, + 0.906 + ], + "angle": 0, + "content": "Vol. 1, No. 1, Article. Publication date: April 2025." 
+ } + ], + [ + { + "type": "header", + "bbox": [ + 0.668, + 0.069, + 0.884, + 0.08 + ], + "angle": 0, + "content": "Neural Parametric Mixtures for Path Guiding" + }, + { + "type": "page_number", + "bbox": [ + 0.895, + 0.069, + 0.917, + 0.079 + ], + "angle": 0, + "content": "3" + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.1, + 0.484, + 0.199 + ], + "angle": 0, + "content": "target distributions with NPM in a simple setup (i.e., learning incident radiance fields, Sec. 4.1), then we derive the optimization method for NPM based on minibatch stochastic gradient descent (Sec. 4.2). Finally, we show how our NPM could naturally benefit from learning the full integrand (to account for the BSDF term), as well as the other extensions for better learning target distributions (Sec. 4.3). An overview of our method is illustrated in Fig. 2." + }, + { + "type": "title", + "bbox": [ + 0.08, + 0.211, + 0.265, + 0.224 + ], + "angle": 0, + "content": "4.1 Radiance-based NPM" + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.228, + 0.482, + 0.312 + ], + "angle": 0, + "content": "In order to acquire a better importance sampling strategy, we should obtain an approximation of the incident radiance distribution using previous radiance estimates, known as the radiance-based local path guiding [Herholz et al. 2016; Rath et al. 2020]. Specifically, we want to use the vMF mixtures to be approximately proportional to the incident radiance, at a given shading position \\(\\mathbf{x}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.199, + 0.322, + 0.482, + 0.337 + ], + "angle": 0, + "content": "\\[\n\\mathcal {V} (\\omega_ {i} \\mid \\Theta (\\mathbf {x})) \\propto L _ {\\mathrm {i}} (\\mathbf {x}, \\omega_ {i}), \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.341, + 0.48, + 0.425 + ], + "angle": 0, + "content": "where \\(\\Theta\\) is conditioned on \\(\\mathbf{x}\\) to account for the spatial variation of the target distribution. 
Previous work achieves this with specific spatial subdivision strategies (e.g., kd-tree, octree). However, this spatial discretization introduces artifacts (e.g., resulting from parallax, Fig. 1 (a)), and is subject to frequent reconstruction to converge to a fine grained spatial subdivision, as discussed in Sec. 1." + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.43, + 0.481, + 0.528 + ], + "angle": 0, + "content": "Instead, we use an implicit neural representation to encode the target distribution compactly. This allows the spatial variation of the distribution to be continuously accounted for, thus better capturing spatio-directional correlations. Technically, given a shading position \\(\\mathbf{x}\\) in the scene, our NPM would output the guiding distribution that approximates the target distribution (Eq. 5). The output guiding distribution is defined using a set of parameters \\(\\hat{\\Theta}(\\mathbf{x})\\):" + }, + { + "type": "equation", + "bbox": [ + 0.217, + 0.535, + 0.482, + 0.55 + ], + "angle": 0, + "content": "\\[\n\\mathrm {N P M} (\\mathbf {x} \\mid \\Phi) = \\hat {\\Theta} (\\mathbf {x}), \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.555, + 0.48, + 0.767 + ], + "angle": 0, + "content": "where \\(\\Phi\\) are the trainable parameters of the implicit representation, and \\(\\hat{\\Theta}\\) are the output decoded parameters, defining a vMF mixture \\(\\mathcal{V}(\\omega_i\\mid \\hat{\\Theta} (\\mathbf{x}))\\) that is trained to approximate \\(L_{i}(\\mathbf{x},\\omega_{i})\\) (Eq. 5). By continuously conditioning the learned distribution \\(\\Theta\\) on spatial positions \\(\\mathbf{x}\\), our method inherently avoids the above issues caused by spatial discretizations. We achieve the above mapping by using a lightweight network to decode this parametric distribution from the implicit neural representation. 
To make sure that we get a valid vMF mixture (i.e., \\(\\lambda_{i},\\kappa_{i} > 0,\\mu_{i}\\in \\mathbb{S}^{2}\\), and \\(\\sum_{j = 1}^{K}\\lambda_{j} = 1\\)), we must additionally regularize the raw network output with appropriate mapping functions (see Tab. 1). Specifically, we apply exponential activation to \\(\\lambda_{i}\\) and \\(\\kappa_{i}\\). Logistic activation is applied to \\(\\theta_{i}\\) and \\(\\varphi_{i}\\) which form the spherical coordinates of \\(\\mu_{i}\\). Most importantly, we apply the softmax function to all \\(\\lambda s\\) to ensure that the outputs model a valid PDF (i.e., satisfy \\(\\sum_{i = 1}^{K}\\lambda_{i} = 1\\))." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.779, + 0.482, + 0.877 + ], + "angle": 0, + "content": "Discussion. It is possible to implement different forms of implicit neural representation with trainable parameters \\(\\Phi\\). While it is straightforward to use a monolithic network to model \\(\\mathrm{NPM}_{\\Phi} : \\mathbf{x} \\rightarrow \\Theta\\), we find it difficult to fit the high-frequency variations of the target distribution. Thereby, we use a trainable multi-resolution spatial embedding for encoding the distributions, and additionally a lightweight neural network for decoding the parameters. This is" + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.097, + 0.916, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.514, + 0.236, + 0.918, + 0.313 + ], + "angle": 0, + "content": "Fig. 1. Parallax issue caused by spatial discretizations (a). For a subdivided volume \\( S(\\mathbf{x}) \\) in (a), the guiding distribution is marginalized with training samples scattered over the volume \\( S(\\mathbf{x}) \\), and is shared by different positions (e.g., \\( \\mathbf{x}_1 \\) and \\( \\mathbf{x}_2 \\)). 
Our method will not suffer from parallax due to NPM implicitly representing a monolithic function, continuously mapping from spatial positions to parametric guiding distributions, as shown in (b)." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.344, + 0.916, + 0.372 + ], + "angle": 0, + "content": "crucial for our method to achieve better modeling capacity while remaining performant, as will be discussed later." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.386, + 0.671, + 0.4 + ], + "angle": 0, + "content": "4.2 Optimizing NPM" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.403, + 0.918, + 0.597 + ], + "angle": 0, + "content": "We show how to optimize the divergence between the decoded distribution \\(\\hat{\\Theta}(\\mathbf{x})\\) and the target distribution using minibatch stochastic gradient descent. To achieve this, the gradients of a training objective (or loss function) with respect to the network parameters are necessary. However, it is non-trivial to define such a loss function, given the ground truth output parameters \\(\\Theta_{\\mathrm{gt}}(\\mathbf{x})\\) are unknown. Previous works typically use design optimization algorithms (e.g., expectation-maximization) that iteratively use batches of samples to fit a given set of parameters \\(\\Theta\\), which often parameterize a marginalized distribution shared by the spatial region covering the samples [Herholz et al. 2016; Ruppert et al. 2020]. However, their methods are applied to explicitly parameterized models, and are therefore not applicable to our method, which models the implicit representation of the function \\(\\mathbf{NPM}_{\\Phi}: \\mathbf{x} \\rightarrow \\hat{\\Theta}\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.513, + 0.598, + 0.918, + 0.709 + ], + "angle": 0, + "content": "We minimize the KL divergence between the decoded vMF mixtures and the target distribution via minibatch stochastic gradient descent, where its gradients with respect to the trainable parameters are estimated using Monte Carlo integration. Other divergence metrics are also available following a similar derivation. Let us start by assuming that the shading position \\(\\mathbf{x}\\) is fixed, thus omitting the dependency of \\(\\Theta\\) on \\(\\mathbf{x}\\) in the equations. For a given position, the KL divergence between the target distribution \\(\\mathcal{D}\\) and our output" + }, + { + "type": "table_caption", + "bbox": [ + 0.514, + 0.736, + 0.918, + 0.788 + ], + "angle": 0, + "content": "Table 1. Detailed mapping functions we use to regularize network outputs, where \\(\\lambda^{\\prime}\\), \\(\\kappa^{\\prime}\\), \\(\\theta^{\\prime}\\), \\(\\varphi^{\\prime}\\) denote the raw outputs, and \\((\\theta, \\varphi)\\) is the normalized spherical coordinate of \\(\\mu \\in \\mathbb{S}^2\\). Left: parameter notations and their valid ranges; middle: type of activation; right: specific mappings." + }, + { + "type": "table", + "bbox": [ + 0.536, + 0.8, + 0.897, + 0.874 + ], + "angle": 0, + "content": "
ParameterActivationMapping
κ ∈ [0,+∞)Exponentialκi = exp(κi')
λ ∈ [0,+∞)Softmaxλi = exp(λi') / ∑j=1K exp(λj')
θ, φ ∈ [0,1]Logisticθi = 1/(1 + exp(-θi'))
" + }, + { + "type": "footer", + "bbox": [ + 0.673, + 0.894, + 0.916, + 0.906 + ], + "angle": 0, + "content": "Vol. 1, No. 1, Article. Publication date: April 2025." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.082, + 0.07, + 0.092, + 0.079 + ], + "angle": 0, + "content": "4" + }, + { + "type": "header", + "bbox": [ + 0.114, + 0.069, + 0.332, + 0.081 + ], + "angle": 0, + "content": "Honghao Dong, Guoping Wang, and Sheng Li" + }, + { + "type": "image", + "bbox": [ + 0.082, + 0.098, + 0.916, + 0.252 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.079, + 0.261, + 0.917, + 0.337 + ], + "angle": 0, + "content": "Fig. 2. High-level illustration of our Neural Parametric Mixtures (NPM). We implicitly encode the spatially varying target distributions with the multi-resolution embedding. When the distribution of a spatial location \\(\\mathbf{x}\\) is queried, (1) the features assigned to the nearby grid points surrounding \\(\\mathbf{x}\\) are interpolated at each level, and concatenated with other levels to obtain the spatial embedding \\(G(\\mathbf{x})\\). (2) the spatial embedding is then combined with other inputs to (3) feed into the lightweight MLP for (4) decoding the parameters \\(\\Theta\\) of the vMF mixture \\(\\mathcal{V}(\\omega_i \\mid \\Theta)\\) with \\(K\\) components. We then (5) use this parametric distribution for importance sampling the scattering direction. The result MC radiance estimate \\(\\langle L_i(\\mathbf{x}, \\omega_i) \\rangle\\) is used to estimate the training gradient \\(\\nabla_{\\Theta} D_{\\mathrm{KL}}\\) (Sec. 4.2), which is then back-propagated through these differentiable stages to optimize our NPM representation (dashed lines)." 
+ }, + { + "type": "text", + "bbox": [ + 0.08, + 0.358, + 0.255, + 0.371 + ], + "angle": 0, + "content": "distribution \\(\\mathcal{V}\\) is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.143, + 0.378, + 0.482, + 0.409 + ], + "angle": 0, + "content": "\\[\nD _ {\\mathrm {K L}} (\\mathcal {D} \\| \\mathcal {V}; \\Theta) = \\int_ {\\Omega} \\mathcal {D} (\\omega) \\log \\frac {\\mathcal {D} (\\omega)}{\\mathcal {V} (\\omega | \\hat {\\Theta})} \\mathrm {d} \\omega , \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.416, + 0.484, + 0.445 + ], + "angle": 0, + "content": "where \\(\\mathcal{D} \\propto L_{\\mathrm{i}}\\) in radiance-based path guiding. This integral could now be estimated with the Monte Carlo estimator with \\(N\\) samples:" + }, + { + "type": "equation", + "bbox": [ + 0.126, + 0.454, + 0.482, + 0.492 + ], + "angle": 0, + "content": "\\[\nD _ {\\mathrm {K L}} (\\mathcal {D} \\| \\mathcal {V}; \\Theta) \\approx \\frac {1}{N} \\sum_ {j = 1} ^ {N} \\frac {\\mathcal {D} (\\omega_ {j})}{\\tilde {p} (\\omega_ {j} \\mid \\hat {\\Theta})} \\log \\frac {\\mathcal {D} (\\omega_ {j})}{\\mathcal {V} (\\omega_ {j} \\mid \\hat {\\Theta})}, \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.501, + 0.482, + 0.557 + ], + "angle": 0, + "content": "where \\(\\tilde{p}\\) is the distribution from which the samples are drawn, which in our case is a combination of the BSDF importance sampling and guiding distribution. 
By taking its derivative with respect to \\(\\Theta\\), we obtain the MC estimate of the gradient \\(\\nabla_{\\Theta}D_{\\mathrm{KL}}(\\mathcal{D}\\| \\mathcal{V};\\Theta)\\):" + }, + { + "type": "equation", + "bbox": [ + 0.125, + 0.566, + 0.482, + 0.605 + ], + "angle": 0, + "content": "\\[\n\\nabla_ {\\Theta} D _ {\\mathrm {K L}} (\\mathcal {D} \\| \\mathcal {V}; \\Theta) \\approx - \\frac {1}{N} \\sum_ {j = 1} ^ {N} \\frac {\\mathcal {D} \\left(\\omega_ {j}\\right) \\nabla_ {\\Theta} \\mathcal {V} \\left(\\omega_ {j} \\mid \\hat {\\Theta}\\right)}{\\tilde {p} \\left(\\omega_ {j} \\mid \\hat {\\Theta}\\right) \\mathcal {V} \\left(\\omega_ {j} \\mid \\hat {\\Theta}\\right)}, \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.613, + 0.482, + 0.681 + ], + "angle": 0, + "content": "where the derivatives of the vMF mixtures \\(\\mathcal{V}\\) with respect to their parameters \\(\\Theta\\) are straightforward. The gradients for the trainable NPM parameters \\(\\Phi\\) could then be obtained via back propagation. Since we use the unbiased MC estimate of the training gradients, the parameters are guaranteed to converge to a local minimum." + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.682, + 0.482, + 0.751 + ], + "angle": 0, + "content": "In practice, our training sample pairs \\((\\mathbf{x},\\omega_{i})\\rightarrow L_{\\mathrm{i}}\\) are distributed in different spatial positions \\(\\mathbf{x}\\), efficiently learning a spatially varying target distribution \\(\\mathcal{D}(\\mathbf{x})\\). This results in the training objective accounting for the divergence of multiple positions. The expected solution for \\(\\Phi\\) is thus:" + }, + { + "type": "equation", + "bbox": [ + 0.15, + 0.757, + 0.482, + 0.785 + ], + "angle": 0, + "content": "\\[\n\\Phi^ {*} = \\underset {\\Phi} {\\arg \\min } \\mathbb {E} _ {\\mathbf {x}} \\left[ D _ {\\mathrm {K L}} \\left(\\mathcal {D} (\\mathbf {x}) \\| \\mathcal {V}; \\Theta (\\mathbf {x})\\right) \\right]. 
\\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.793, + 0.482, + 0.877 + ], + "angle": 0, + "content": "For our implicit spatial embedding (i.e., grids of latent features, discussed later), this results in the embedding being optimized with all (and only) its nearby samples. When using the gradient descent method, the samples with the largest gradients (i.e., the most important ones for reducing divergence) would dominate, forming a reasonable design choice for better adaptivity." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.358, + 0.715, + 0.373 + ], + "angle": 0, + "content": "4.3 Full Integrand Learning" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.375, + 0.917, + 0.514 + ], + "angle": 0, + "content": "Using path guiding to sample the full integrand \\( f_{s} \\cdot L_{i} \\cos \\theta_{i} \\) can achieve even better performance, which should incorporate the BSDF term and the cosine term into the target distribution. This is challenging since the guiding distribution is now conditioned on 5D inputs (i.e., outgoing direction \\( \\omega_{0} \\) and spatial coordinate \\( \\mathbf{x} \\)). Previous works fit BSDFs with precomputed parametric models and multiply them with the learned incident radiance distribution to achieve product sampling. However, this often relies on scene-dependent precomputation, discretization over \\( \\omega_{0} \\), and extra computational overhead [Herholz et al. 2016; Ruppert et al. 2020]." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.514, + 0.918, + 0.61 + ], + "angle": 0, + "content": "Our neural design can naturally handle the conditions with the extra input of \\(\\omega_{i}\\). This is essential since a neural network could approximate arbitrary conditional models if being expressive enough. We later show this improves performance through learning a better guiding distribution, with only modest performance overhead. 
For clarity, we denote the previous radiance-based method as NPM-radiance, and this version as NPM-product." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.611, + 0.924, + 0.652 + ], + "angle": 0, + "content": "Specifically, by supplementing input \\(\\omega_{0}\\), we reformulate the learned distribution (Eq. 6) with the outgoing directions. This enables learning the full integrated as:" + }, + { + "type": "equation", + "bbox": [ + 0.61, + 0.658, + 0.917, + 0.675 + ], + "angle": 0, + "content": "\\[\n\\mathrm {N P M} _ {\\text {p r o d u c t}} (\\mathbf {x}, \\omega_ {\\mathrm {o}} \\mid \\Phi) = \\hat {\\Theta} (\\mathbf {x}, \\omega_ {\\mathrm {o}}), \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.682, + 0.916, + 0.711 + ], + "angle": 0, + "content": "where \\(\\hat{\\Theta}\\) now parameterizes the vMF mixture \\(\\mathcal{V}\\) that is trained to approximate the full integrand in Eq. 1, i.e.," + }, + { + "type": "equation", + "bbox": [ + 0.561, + 0.717, + 0.917, + 0.739 + ], + "angle": 0, + "content": "\\[\n\\left. \\mathcal {V} \\left(\\omega_ {i} \\mid \\hat {\\Theta} (\\mathbf {x}, \\omega_ {0})\\right) \\propto f _ {\\mathrm {s}} \\left(\\mathbf {x}, \\omega_ {0}, \\omega_ {\\mathrm {i}}\\right) L _ {\\mathrm {i}} \\left(\\mathbf {x}, \\omega_ {\\mathrm {i}}\\right) \\left| \\cos \\theta_ {\\mathrm {i}} \\right|, \\right. \\tag {12}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.743, + 0.918, + 0.828 + ], + "angle": 0, + "content": "where the cosine term could be approximated with a constant vMF lobe [Ruppert et al. 2020], leaving NPM to focus on the remaining part of the integral. Nonetheless, it is still challenging for neural networks to model a 2D directional distribution conditioned on 5D spatio-directional inputs. 
We further use the following simple extensions to help the network learn these spatially varying distributions:" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.835, + 0.918, + 0.877 + ], + "angle": 0, + "content": "Auxiliary Feature Inputs. Following the practices in prior work [Hadadan et al. 2021; Müller et al. 2021], we additionally input the surface normal and roughness as auxiliary features to help" + }, + { + "type": "footer", + "bbox": [ + 0.082, + 0.894, + 0.325, + 0.906 + ], + "angle": 0, + "content": "Vol. 1, No. 1, Article. Publication date: April 2025." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.668, + 0.069, + 0.884, + 0.08 + ], + "angle": 0, + "content": "Neural Parametric Mixtures for Path Guiding" + }, + { + "type": "page_number", + "bbox": [ + 0.902, + 0.07, + 0.917, + 0.079 + ], + "angle": 0, + "content": "5" + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.101, + 0.483, + 0.171 + ], + "angle": 0, + "content": "the network better correlate the target distribution with e.g., local shading frame (normal) and spatially varying BSDFs (roughness). Experimentally, we find this helps the network to better capture the spatio-directional correlations, while with a small computational overhead due to additional memory traffic." + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.179, + 0.483, + 0.29 + ], + "angle": 0, + "content": "Input Encoding. It is challenging for a neural network to model the non-linearity between multidimensional inputs and outputs, especially when our outputs are distributions with high-frequency spatial variations. Therefore, we replace the spatial input \\(\\mathbf{x}\\) with our trainable multi-resolution spatial embedding (discussed in Sec. 5.1). For the other inputs (e.g., outgoing direction \\(\\omega_{0}\\) and surface normals \\(\\mathbf{n}(\\mathbf{x})\\)), we encode them using the spherical harmonics basis, which is previously established in NeRF [Verbin et al. 2022]." 
+ }, + { + "type": "title", + "bbox": [ + 0.081, + 0.303, + 0.245, + 0.317 + ], + "angle": 0, + "content": "5 IMPLEMENTATION" + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.321, + 0.482, + 0.35 + ], + "angle": 0, + "content": "In this section, we provide the technical details that are crucial to the performance and practicality of our NPM implementation." + }, + { + "type": "title", + "bbox": [ + 0.081, + 0.362, + 0.361, + 0.377 + ], + "angle": 0, + "content": "5.1 Multi-resolution Spatial Embedding" + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.38, + 0.483, + 0.477 + ], + "angle": 0, + "content": "Our implicit NPM representation learns a continuous mapping \\(\\mathrm{NPM}_{\\Phi}:\\mathbf{x}\\rightarrow \\hat{\\Theta}\\) (with the additional input \\(\\omega_{0}\\in \\mathbb{S}^{2}\\) in the extended version), where \\(\\Theta \\in \\mathbb{R}^{4\\times K}\\) defines the learned target distribution. While a straightforward solution would be using a multi-layer perceptron (MLP) as the universal function approximator to model \\(\\mathrm{NPM}_{\\Phi}\\), we experimentally found it difficult to capture the high-frequency spatial variations of the target distributions." + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.478, + 0.483, + 0.631 + ], + "angle": 0, + "content": "Therefore, we use a learnable spatial embedding to implicitly encode the learned parametric mixtures. Similar approaches are found successful in recent NeRF-like applications [Müller et al. 2022; Munkberg et al. 2022]. Specifically, we define \\(L\\) 3D uniform grids \\(G_{l}\\), each covering the entire scene with a spatial resolution of \\(D_l^3\\), where \\(G_{l}\\) denotes the \\(l\\)-th embedding grid. \\(D_{l}\\) grows exponentially, resulting in multiple resolutions of the embedding. We then assign a learnable embedding (a latent feature vector \\(\\boldsymbol{v} \\in \\mathbb{R}^{F}\\)) to each lattice point of \\(G_{l}\\). 
To query the spatial embedding for \\(\\mathbf{x}\\), we bilinearly interpolate the features nearby \\(\\mathbf{x}\\) for each resolution, and concatenate them to obtain the final embedding \\(G(\\mathbf{x})\\). More formally:" + }, + { + "type": "equation", + "bbox": [ + 0.125, + 0.638, + 0.482, + 0.666 + ], + "angle": 0, + "content": "\\[\nG (\\mathbf {x} \\mid \\Phi_ {\\mathrm {E}}) = \\underset {l = 1} {\\overset {L} {\\oplus}} \\operatorname {b i l i n e a r} \\left(\\mathbf {x}, V _ {l} [ \\mathbf {x} ]\\right), G: \\mathbb {R} ^ {3} \\rightarrow \\mathbb {R} ^ {L \\times F}, \\tag {13}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.672, + 0.483, + 0.743 + ], + "angle": 0, + "content": "where \\( V_{l}[\\mathbf{x}] \\) is the set of features at the eight corners of the cell enclosing \\( \\mathbf{x} \\) within \\( G_{l} \\). The spatial embedding \\( G(\\mathbf{x}) \\) is then concatenated with other inputs (e.g., \\( \\omega_0 \\) and auxiliary features) to the MLP for decoding the parameters \\( \\Theta \\). We thus formulate the desired mapping (taking Eq. 6 for example) as a two-step procedure:" + }, + { + "type": "equation", + "bbox": [ + 0.187, + 0.75, + 0.482, + 0.772 + ], + "angle": 0, + "content": "\\[\n\\mathbf {M L P} \\left(G (\\mathbf {x} \\mid \\Phi_ {\\mathrm {E}}) \\mid \\Phi_ {\\mathrm {M}}\\right) = \\hat {\\Theta} (\\mathbf {x}), \\tag {14}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.779, + 0.483, + 0.877 + ], + "angle": 0, + "content": "where the parameters of the spatial embedding \\((\\Phi_{\\mathrm{E}})\\) and the MLP \\((\\Phi_{\\mathrm{M}})\\) together constitute the trainable parameters \\(\\Phi\\) of our implicit representation for NPM. Intuitively, a spatial embedding implicitly encodes the target distribution within a specific spatial region, while the multi-resolution design efficiently accounts for different levels of detail (LOD). 
By smoothly interpolating between the spatial embedding around positions and decoding them using neural networks," + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.101, + 0.918, + 0.171 + ], + "angle": 0, + "content": "we naturally account for the spatial variations of the target distribution. This also lessens the burden of using a single monolithic MLP as the implicit representation, leaving it mainly focusing on decoding it into parametric models \\(\\Theta\\). This significantly accelerates training/inference with a larger memory footprint." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.183, + 0.719, + 0.198 + ], + "angle": 0, + "content": "5.2 Online Training Scheme" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.2, + 0.918, + 0.367 + ], + "angle": 0, + "content": "**Renderer Integration.** We implement our method on a custom GPU-accelerated renderer based on OptiX [Parker et al. 2010], where the training and inference procedures are integrated into a wavefront-style path tracer [Laine et al. 2013]. This design choice allows ray casting, importance sampling, and BSDF evaluation to be performed in coherent chunks over large sets of traced paths by splitting the traditional megakernel path tracer into multiple specialized kernels. This improves GPU thread utilization by reducing the control flow divergence. Most importantly, this allows us to efficiently sample and evaluate the guiding distributions at each vertex along the path in parallel, thus significantly accelerating network training/inference." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.367, + 0.918, + 0.506 + ], + "angle": 0, + "content": "Specifically, we place the training/inference samples into queues, where the structure-of-arrays (SoA) memory layout is applied to improve memory locality. At each ray intersection of the chunk of traced paths, the queries for guiding distributions within the queue are processed via batched network inference. 
The sampling and evaluation procedures are then performed, also using specialized kernels, before entering the next ray-cast kernel. This provides our method with maximum parallelism through large-batch training and inference, minimizing the latency caused by waiting network queries, while avoiding inefficient single-sample inference." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.514, + 0.918, + 0.709 + ], + "angle": 0, + "content": "Training Scheme. We use the same configuration to train each scene online during rendering, without any scene-specific fine-tuning or pre-computation. During training, we collect MC radiance estimates along each traced path, and split them into mini-batches for training. The optimization step is performed for each spp, which allows drawing samples to be drawn from the latest guiding distribution. The distribution of the samples (for both rendering and training) is thus gets refined as training proceeds. We stop the training process after a fixed fraction of the total rendering budget (either time or sample count). While we always set this to \\(25\\%\\) in our experiments, we find our NPM technique converges quickly during training, generally reaching a local minimum after about 150spp, which amounts to about 1000 training steps/batches and 15s (including the runtimes of both training and rendering) on GPU." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.72, + 0.673, + 0.735 + ], + "angle": 0, + "content": "5.3 Guiding Network" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.738, + 0.918, + 0.877 + ], + "angle": 0, + "content": "We implement our network on the tiny-cuda-nn framework [Müller 2021] and integrate it into our renderer. The MLP we used (for both NPM-radiance and NPM-product) contains 3 linear layers of width 64. Each layer with ReLU activation, except for the last layer with our custom mapping functions (Tab. 1). 
We let the network output \\( K = 8 \\) vMF components, i.e., \\( \\Theta \\in \\mathbb{R}^{8 \\times 4} \\). For the multi-resolution spatial embedding, we use \\( L = 8 \\) grids with increasing resolutions for each level. The coarsest level has a resolution of \\( D_{1} = 8 \\) while the finest level has \\( D_{8} = 86 \\). The feature of each level contains \\( F = 4 \\) floats, resulting in the final spatial embedding \\( G(\\mathbf{x}) \\in \\mathbb{R}^{8 \\times 4} \\). In practice," + }, + { + "type": "footer", + "bbox": [ + 0.673, + 0.894, + 0.917, + 0.906 + ], + "angle": 0, + "content": "Vol. 1, No. 1, Article. Publication date: April 2025." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.082, + 0.07, + 0.092, + 0.079 + ], + "angle": 0, + "content": "6" + }, + { + "type": "header", + "bbox": [ + 0.114, + 0.069, + 0.333, + 0.081 + ], + "angle": 0, + "content": "Honghao Dong, Guoping Wang, and Sheng Li" + }, + { + "type": "image", + "bbox": [ + 0.082, + 0.095, + 0.92, + 0.345 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.079, + 0.356, + 0.916, + 0.385 + ], + "angle": 0, + "content": "Fig. 3. Equal-sample-count (750spp) comparisons for two scenes. We show the error (for both the zoom-in areas and whole images) and time cost of different methods. The yellow plots (as well as the other figures) refer to the results obtained by unidirectional path tracing." + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.403, + 0.482, + 0.445 + ], + "angle": 0, + "content": "we find that the performance of the network could be improved by enlarging the capacity of the MLP or the spatial embedding, leaving this a trade-off between quality and speed." + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.445, + 0.482, + 0.556 + ], + "angle": 0, + "content": "For training, we use a fixed learning rate of 0.005 that is large enough to acquire a fast convergence speed. 
Adaptive momentum techniques like Adam [Kingma and Ba 2015] are used for more robust training and better convergence. For importance sampling the decoded mixtures, we use the numerically stable strategy for vMF [Jakob 2012]. At inference time, we also apply exponential moving average (EMA) to the weights of previous training steps, which better reduces the noise of the MC estimated gradients (Eq. 9)." + }, + { + "type": "title", + "bbox": [ + 0.08, + 0.573, + 0.307, + 0.587 + ], + "angle": 0, + "content": "6 RESULTS AND DISCUSSION" + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.591, + 0.482, + 0.66 + ], + "angle": 0, + "content": "We run all the experiments on an Intel Core i9-11900 CPU and an NVIDIA RTX3070 GPU. Following the similar practices of previous works [Müller 2019; Rath et al. 2020], we disable NEE and Russian roulette for all methods and set the maximum path length to 10. All methods are implemented upon a GPU path tracing renderer." + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.661, + 0.482, + 0.716 + ], + "angle": 0, + "content": "We render all images at the resolution of \\(1280 \\times 720\\), and evaluate image quality using mean relative squared error (relMSE). All the images, additional metrics (MAPE and MRSE), and the false-color maps can be interactively inspected with our supplementary viewer." + }, + { + "type": "title", + "bbox": [ + 0.08, + 0.734, + 0.21, + 0.749 + ], + "angle": 0, + "content": "6.1 Comparisons" + }, + { + "type": "text", + "bbox": [ + 0.079, + 0.751, + 0.483, + 0.876 + ], + "angle": 0, + "content": "Our method is compared against improved PPG [Müller 2019] (an enhanced version of Practical Path Guiding [Müller et al. 2017]), and Variance-aware Path Guiding [Rath et al. 2020]. For the experimental configuration of the compared methods, we use the same configuration as [Rath et al. 2020], except for fixing the BSDF selection probability to \\(50\\%\\) (for both ours and the compared methods). 
Both compared methods used an iteratively reconstructed subdivision structure (i.e., the spatio-directional trees) to account for spatial variations. A total of 10 different scenes were tested." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.403, + 0.918, + 0.611 + ], + "angle": 0, + "content": "We first show equal-spp comparisons on two representative scenes. The VEACH Door scene features strong indirect illumination that is difficult to handle with BSDF importance sampling, while the BATHROOM scene contains many specular and glossy surfaces. As shown in Fig. 3, our proposed method outperforms the other two methods even when only learning incident radiance \\( L_{\\mathrm{i}} \\) (NPM-radiance). The noise is alleviated further with our full integrand learning method (NPM-product), since both of the scenes contain glossy surfaces, where the contribution of samples is strongly influenced by the BSDF term. We also note that our method quickly becomes effective at the very beginning of the training process (see the convergence plots in Fig. 3). This indicates a better training efficiency over classical guiding methods, which will be discussed later. Additional results on more test scenes are shown in Fig. 4 and Tab. 2, as well as the convergence plots in Fig. 5." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.611, + 0.918, + 0.764 + ], + "angle": 0, + "content": "We then show the results of equal-time comparisons between our method and [Rath et al. 2020] in Fig. 6. Since they do not explicitly learn the product sampling distribution (i.e., conditioned on 5D inputs \\(\\omega_0\\) and \\(\\mathbf{x}\\)), we only use our radiance-based method (NPM-radiance) for fair comparisons. Instead of simply learning the incident radiance distribution \\((L_{\\mathrm{i}})\\), they use an improved target distribution to account for the variance and BSDF (marginalized over \\(\\omega_0\\)). 
Our method, on the other hand, achieves better performance by learning \\(L_{\\mathrm{i}}\\) only. We attribute this superiority of our method to both the better capacity of capturing spatio-directional correlation and more parallelism." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.775, + 0.628, + 0.789 + ], + "angle": 0, + "content": "6.2 Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.793, + 0.918, + 0.877 + ], + "angle": 0, + "content": "Trainable Spatial Embedding. We analyze the performance of different forms of spatial input encoding in terms of convergence and quality (Fig. 8). The spatial embedding (i.e. parametric encoding) uses trainable latent vector grids to model the spatially-varying target distributions, leaving the MLP to focus on decoding this implicit representation into valid vMF mixtures. The other two variants" + }, + { + "type": "footer", + "bbox": [ + 0.082, + 0.894, + 0.325, + 0.906 + ], + "angle": 0, + "content": "Vol. 1, No. 1, Article. Publication date: April 2025." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.668, + 0.069, + 0.884, + 0.08 + ], + "angle": 0, + "content": "Neural Parametric Mixtures for Path Guiding" + }, + { + "type": "page_number", + "bbox": [ + 0.902, + 0.07, + 0.917, + 0.079 + ], + "angle": 0, + "content": "7" + }, + { + "type": "table_caption", + "bbox": [ + 0.082, + 0.099, + 0.916, + 0.123 + ], + "angle": 0, + "content": "Table 2. Practical Path Guiding (PPG) [Müller 2019], Variance-aware Path Guiding [Rath et al. 2020], unidirectional path tracing and our method on 10 test scenes. We report relMSE, render time, and speedup using PPG as the baseline. Our NPM technique consistently reduces the error in the test scenes." + }, + { + "type": "table", + "bbox": [ + 0.094, + 0.137, + 0.905, + 0.324 + ], + "angle": 0, + "content": "
PT (BSDF)[Müller 2019][Rath et al. 2020]Ours
PPG (baseline)Variance. PGNPM (radiance)NPM (product)
BATHROOM0.090548s0.05301.0 ×106s0.04851.09 ×107s0.02512.11 ×101s0.02032.61 ×108s
BEDROOM0.038340s0.02011.0 ×105s0.01611.26 ×109s0.01501.35 ×84s0.01461.38 ×90s
BREAKFAST ROOM0.009448s0.00691.0 ×100s0.00471.46 ×103s0.00381.80 ×63s0.00351.96 ×71s
LIVING ROOM0.027332s0.01841.0 ×74s0.01461.26 ×80s0.01571.17 ×47s0.01321.39 ×54s
PINK ROOM0.004637s0.00821.0 ×74s0.00611.34 ×76s0.00332.42 ×53s0.00263.21 ×62s
SALLE DE BAIN0.081938s0.02231.0 ×116s0.03460.64 ×116s0.01961.14 ×79s0.01401.59 ×86s
STAIRCASE0.181234s0.02981.0 ×80s0.02611.14 ×86s0.01941.54 ×72s0.01721.74 ×76s
VEACH DOOR0.620833s0.21671.0 ×82s0.19451.11 ×91s0.07502.89 ×65s0.04614.69 ×77s
VEACH EGG8.291833s0.83791.0 ×82s0.78701.07 ×85s0.59841.40 ×62s0.53521.56 ×69s
WHITE ROOM0.030138s0.02781.0 ×107s0.02531.10 ×103s0.01242.25 ×76s0.01002.75 ×87s
" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.345, + 0.482, + 0.441 + ], + "angle": 0, + "content": "do not explicitly separate these two tasks by using a monolithic MLP. The addition of spatial embedding significantly improves convergence, and the multi-resolution design further reduces error by better modeling finer-grained spatio-directional correlations. Furthermore, this does not introduce noticeable computational overhead, as only a small fraction of parameters are involved in each training/inference." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.45, + 0.482, + 0.575 + ], + "angle": 0, + "content": "Training Efficiency. The effectiveness of guiding methods under small training budgets is important, especially for applications such as preview rendering or even interactive rendering. We analyze the training efficiency of different guiding methods by comparing their performance under different training budgets (31 spp, 63 spp, 127 spp, respectively) in Fig. 7. Our method quickly converges to a good sampling distribution with only a few training samples and less training time cost (e.g., 31 spp with about 3s), thus outperforming previous guiding methods even with much fewer training samples." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.588, + 0.191, + 0.6 + ], + "angle": 0, + "content": "6.3 Discussion" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.605, + 0.482, + 0.729 + ], + "angle": 0, + "content": "Path Guiding Extensions. Our method can be extended with many well-established extensions suggested by previous path guiding algorithms. They are straightforward to be integrated and are promising to further improve our performance. For example: (1) the BSDF selection probability could also be learned by our network or by some other caching strategies [Müller et al. 2020], thus better handling the near-specular surfaces; and (2) the improved variance-aware target distribution [Rath et al. 
2020] could be learned to account for the variance within the noisy MC estimates." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.738, + 0.482, + 0.876 + ], + "angle": 0, + "content": "Performance Analysis. Our method serves as an effective means for path guiding while remaining practical in performance. Specifically, the measured time cost per NPM evaluation (including both network inference and importance sampling the decoded mixture models) at \\(1280 \\times 720\\) is about 3ms. Meanwhile, a training step (i.e., a batch of \\(2^{18}\\) samples) costs about 10ms, indicating that a typical training process (about 1000 training steps) takes about 10s to converge on a single GPU. NPM contains a total of about 2M learnable parameters, resulting in a memory consumption of \\(< 10\\mathrm{MB}\\). The compact design of our implicit NPM representation results in less control" + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.345, + 0.916, + 0.412 + ], + "angle": 0, + "content": "flow divergence, better memory locality, and better caching performance. Together, this makes our method practical for modern GPU parallelization, which is often harder to achieve with the tree-like spatial subdivision schemes used by most of the previous guiding methods." + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.427, + 0.916, + 0.633 + ], + "angle": 0, + "content": "Alternative Solutions. Several studies also aim to tackle the parallelism issue. Dodik et al. [2022] use spatio-directional mixtures (i.e., conditioned on \\(\\mathbf{x}\\) and \\(\\omega_0\\)) to correlate target distributions with spatial positions. Ruppert et al. [2020] design strategies to warp the guiding distributions in the spatial subdivisions to resemble the true distribution. 
However, these methods adopt sophisticated strategies that are difficult to parallelize efficiently on GPUs (e.g., batched expectation-maximization (EM) applied to a varying number of mixtures) while requiring extra efforts to fit scene BSDFs for product sampling. In contrast, our method exploits trainable spatial embedding to encode the target distributions while using a decoder MLP to model the non-linearity between spatial features and PMMs in a GPU-friendly manner. Nevertheless, incorporating ideas from these studies, such as adaptively controlling the granularity of learned distributions, may further enhance our method." + }, + { + "type": "title", + "bbox": [ + 0.519, + 0.651, + 0.901, + 0.664 + ], + "angle": 0, + "content": "7 CONCLUSION, LIMITATIONS AND FUTURE WORK" + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.668, + 0.916, + 0.806 + ], + "angle": 0, + "content": "We present Neural Parametric Mixtures, a novel method for learning the target distributions for path guiding techniques. We use a compact implicit neural representation to encode the spatio-directional parametric distributions. Compared to previous non-neural methods that use explicit spatial subdivision structures to store directional distributions, our continuous implicit representation is simpler and more efficient while naturally avoiding the artifacts (e.g., parallax) caused by their discretized subdivision schemes. Our NPM technique could be efficiently trained with stochastic gradient descent to minimize the divergence from the target distribution." + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.808, + 0.916, + 0.875 + ], + "angle": 0, + "content": "Despite the simplicity and effectiveness of our method, the main limitation resides in the lack of flexibility of our directional distribution representation, i.e., a fixed number of vMF components. While a similar issue exists in classical methods using PMMs [Dodik et al. 2022; Herholz et al. 
2016], recent methods achieve more accurate" + }, + { + "type": "footer", + "bbox": [ + 0.675, + 0.895, + 0.916, + 0.905 + ], + "angle": 0, + "content": "Vol. 1, No. 1, Article. Publication date: April 2025." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.082, + 0.07, + 0.092, + 0.079 + ], + "angle": 0, + "content": "8" + }, + { + "type": "header", + "bbox": [ + 0.114, + 0.069, + 0.333, + 0.081 + ], + "angle": 0, + "content": "Honghao Dong, Guoping Wang, and Sheng Li" + }, + { + "type": "image", + "bbox": [ + 0.092, + 0.097, + 0.891, + 0.803 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.079, + 0.816, + 0.919, + 0.868 + ], + "angle": 0, + "content": "Fig. 4. Visual comparisons using the same experimental setup with Fig. 3, all are rendered with 750spp at \\(1280 \\times 720\\). We use the online training setup for all the guiding methods, i.e., all the samples are included in the final rendering. Our method exhibits better performance than other guiding methods in most scenes by only learning the incident radiance term while further reducing the error by incorporating the BSDF term (i.e., product sampling). More results on other test scenes, additional error metrics and false-color visualizations are provided in our supplementary interactive viewer." + }, + { + "type": "footer", + "bbox": [ + 0.082, + 0.894, + 0.325, + 0.906 + ], + "angle": 0, + "content": "Vol. 1, No. 1, Article. Publication date: April 2025." 
+ } + ], + [ + { + "type": "header", + "bbox": [ + 0.668, + 0.069, + 0.883, + 0.08 + ], + "angle": 0, + "content": "Neural Parametric Mixtures for Path Guiding" + }, + { + "type": "page_number", + "bbox": [ + 0.909, + 0.07, + 0.916, + 0.079 + ], + "angle": 0, + "content": "9" + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.095, + 0.263, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.154, + 0.195, + 0.222, + 0.204 + ], + "angle": 0, + "content": "VEACH DOOR" + }, + { + "type": "image", + "bbox": [ + 0.264, + 0.095, + 0.421, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.307, + 0.195, + 0.379, + 0.204 + ], + "angle": 0, + "content": "LIVING ROOM" + }, + { + "type": "image", + "bbox": [ + 0.423, + 0.095, + 0.579, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.471, + 0.195, + 0.532, + 0.204 + ], + "angle": 0, + "content": "BATHROOM" + }, + { + "type": "image", + "bbox": [ + 0.581, + 0.095, + 0.735, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.631, + 0.195, + 0.683, + 0.204 + ], + "angle": 0, + "content": "BEDROOM" + }, + { + "type": "image", + "bbox": [ + 0.736, + 0.096, + 0.894, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.789, + 0.195, + 0.846, + 0.204 + ], + "angle": 0, + "content": "STAIRCASE" + }, + { + "type": "image", + "bbox": [ + 0.089, + 0.205, + 0.264, + 0.302 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.156, + 0.303, + 0.217, + 0.312 + ], + "angle": 0, + "content": "VEACH EGG" + }, + { + "type": "image", + "bbox": [ + 0.266, + 0.205, + 0.421, + 0.302 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.307, + 0.303, + 0.384, + 0.312 + ], + "angle": 0, + "content": "SALLE DE BAIN" + }, + { + "type": "image", + "bbox": [ + 
0.421, + 0.205, + 0.578, + 0.302 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.458, + 0.303, + 0.55, + 0.312 + ], + "angle": 0, + "content": "BREAKFAST ROOM" + }, + { + "type": "image", + "bbox": [ + 0.58, + 0.205, + 0.735, + 0.302 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.621, + 0.303, + 0.694, + 0.312 + ], + "angle": 0, + "content": "WHITE ROOM" + }, + { + "type": "image", + "bbox": [ + 0.736, + 0.205, + 0.894, + 0.302 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.783, + 0.303, + 0.844, + 0.312 + ], + "angle": 0, + "content": "PINK ROOM" + }, + { + "type": "image", + "bbox": [ + 0.23, + 0.318, + 0.782, + 0.334 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.08, + 0.348, + 0.916, + 0.412 + ], + "angle": 0, + "content": "Fig. 5. Convergence plots correspond to Fig. 3 and Fig. 4. Unidirectional path tracing with BSDF importance sampling (PT-BSDF), Practical Path Guiding [Muller 2019], Variance-aware Path Guiding [Rath et al. 2020] and our method with different target distributions (NPM-radiance and NPM-product). Our methods consistently outperform these classical guiding methods, and quickly become effective even with a few training samples and short training time (e.g., 30spp, amounting to about 3 seconds on GPU), indicating practicality for preview or even interactive rendering. We attribute this success to the compact implicit representation and better spatial resolution of our method. The image results and detailed statistics could be inspected in the supplemental materials." + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.433, + 0.481, + 0.474 + ], + "angle": 0, + "content": "directional distributions by adaptively merging and splitting the vMF components [Ruppert et al. 2020]. This, however, is non-trivial to apply to our NPM technique." 
+ }, + { + "type": "text", + "bbox": [ + 0.08, + 0.475, + 0.483, + 0.614 + ], + "angle": 0, + "content": "In future work, we will investigate more accurate approaches to implicitly encode parametric distributions while remaining efficient. Finding better basis functions or adaptively controlling the number of output components are two possible but challenging directions. Meanwhile, we would like to improve the efficiency of our method by using either novel architectural designs for neural networks, optimized implementation, or adapting previous extensions to path guiding algorithms. We believe these are important steps to make our method more practical for interactive or even real-time rendering pipelines, as well as other related applications that require" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.433, + 0.918, + 0.489 + ], + "angle": 0, + "content": "fitting distributions with high-frequency spatial variations. In addition, applying our method to bidirectional path tracing [Popov et al. 2015], especially subspace probabilistic connections [Su et al. 2022], will also be an interesting future avenue." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.505, + 0.684, + 0.518 + ], + "angle": 0, + "content": "ACKNOWLEDGMENTS" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.523, + 0.92, + 0.62 + ], + "angle": 0, + "content": "This project was supported by the National Key R&D Program of China (No.2022YFB3303400) and NSFC of China (No. 62172013). We also thank the test scenes providers: Mareck (BATHROOM), Slyk-Drako (BEDROOM), Wig42 (BREAKFAST ROOM, LIVING ROOM, PINK ROOM, STAIRCASE), nacinus (SALLE DE BAIN), Jaakko Lehtinen (VEACH DOOR), Jay-Artist (WHITE ROOM), as well as the efforts for converting scene formats by Benedikt Bitterli [2016]." 
+ }, + { + "type": "image", + "bbox": [ + 0.082, + 0.634, + 0.268, + 0.726 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.142, + 0.728, + 0.218, + 0.737 + ], + "angle": 0, + "content": "SALLE DE BAIN" + }, + { + "type": "image_caption", + "bbox": [ + 0.271, + 0.634, + 0.33, + 0.644 + ], + "angle": 0, + "content": "Rath et al." + }, + { + "type": "image", + "bbox": [ + 0.27, + 0.645, + 0.338, + 0.685 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.27, + 0.686, + 0.338, + 0.726 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.283, + 0.728, + 0.323, + 0.737 + ], + "angle": 0, + "content": "0.05407" + }, + { + "type": "image_caption", + "bbox": [ + 0.343, + 0.634, + 0.402, + 0.644 + ], + "angle": 0, + "content": "NPM (rad.)" + }, + { + "type": "image", + "bbox": [ + 0.34, + 0.645, + 0.408, + 0.685 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.34, + 0.686, + 0.408, + 0.726 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.354, + 0.728, + 0.395, + 0.737 + ], + "angle": 0, + "content": "0.04926" + }, + { + "type": "image_caption", + "bbox": [ + 0.418, + 0.634, + 0.47, + 0.644 + ], + "angle": 0, + "content": "Reference" + }, + { + "type": "image", + "bbox": [ + 0.41, + 0.645, + 0.477, + 0.685 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.41, + 0.686, + 0.477, + 0.726 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.424, + 0.728, + 0.463, + 0.737 + ], + "angle": 0, + "content": "relMSE" + }, + { + "type": "image", + "bbox": [ + 0.082, + 0.739, + 0.268, + 0.821 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.152, + 0.823, + 0.202, + 0.832 + ], + "angle": 0, + "content": "BEDROOM" + }, + { + "type": "image", + "bbox": [ + 0.27, + 0.739, + 0.338, + 0.78 + ], + "angle": 0, + 
"content": null + }, + { + "type": "image", + "bbox": [ + 0.27, + 0.781, + 0.338, + 0.821 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.283, + 0.823, + 0.323, + 0.832 + ], + "angle": 0, + "content": "0.02176" + }, + { + "type": "image", + "bbox": [ + 0.34, + 0.739, + 0.409, + 0.78 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.34, + 0.781, + 0.409, + 0.821 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.353, + 0.823, + 0.395, + 0.832 + ], + "angle": 0, + "content": "0.01324" + }, + { + "type": "image", + "bbox": [ + 0.41, + 0.739, + 0.478, + 0.78 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.411, + 0.781, + 0.477, + 0.82 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.424, + 0.823, + 0.463, + 0.832 + ], + "angle": 0, + "content": "relMSE" + }, + { + "type": "image_caption", + "bbox": [ + 0.08, + 0.847, + 0.481, + 0.872 + ], + "angle": 0, + "content": "Fig. 6. Equal-time comparisons (80s) on two test scenes between NPM(radiance) and Variance-aware Path Guiding [Rath et al. 2020]." + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.658, + 0.916, + 0.821 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.514, + 0.834, + 0.918, + 0.872 + ], + "angle": 0, + "content": "Fig. 7. We train each guiding method with small training budgets (31 spp, 63 spp, 127 spp, respectively) and render the scene with 500 spp. Our method outperforms previous methods even with much fewer training samples." + }, + { + "type": "footer", + "bbox": [ + 0.673, + 0.894, + 0.916, + 0.906 + ], + "angle": 0, + "content": "Vol. 1, No. 1, Article. Publication date: April 2025." 
+ } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.084, + 0.07, + 0.095, + 0.079 + ], + "angle": 0, + "content": "10" + }, + { + "type": "header", + "bbox": [ + 0.1, + 0.069, + 0.338, + 0.081 + ], + "angle": 0, + "content": "- Honghao Dong, Guoping Wang, and Sheng Li" + }, + { + "type": "image", + "bbox": [ + 0.082, + 0.098, + 0.199, + 0.223 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.199, + 0.098, + 0.315, + 0.223 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.316, + 0.097, + 0.484, + 0.223 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.08, + 0.237, + 0.483, + 0.301 + ], + "angle": 0, + "content": "Fig. 8. Equal-time comparison (50s) of different input encoding. We report the sample count and error (relMSE) of each method. The dashed line in the plot marks the end of the training phase. The multi-resolution spatial embedding outperforms other methods while remaining training-efficient. Yellow plot refers to path tracing with BSDF importance sampling." + }, + { + "type": "title", + "bbox": [ + 0.082, + 0.325, + 0.179, + 0.337 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.082, + 0.34, + 0.481, + 0.382 + ], + "angle": 0, + "content": "Benedikt Bitterli. 2016. Rendering resources. https://benedikt-bitterli.me/resources/. Norbert Bus and Tamy Boubekeur. 2017. Double Hierarchies for Directional Importance Sampling in Monte Carlo Rendering. Journal of Computer Graphics Techniques (JCGT) 6, 3 (28 August 2017), 25-37. http://jcgt.org/published/0006/03/02" + }, + { + "type": "ref_text", + "bbox": [ + 0.082, + 0.382, + 0.482, + 0.412 + ], + "angle": 0, + "content": "R. R. Currius, D. Dolonius, U. Assarsson, and E. Sintorn. 2020. Spherical Gaussian Light-field Textures for Fast Precomputed Global Illumination. Computer Graphics Forum 39, 2 (2020), 133-146." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.082, + 0.412, + 0.482, + 0.432 + ], + "angle": 0, + "content": "Laurent Dinh, Jascha Sohl-Dickstein, and Samy Bengio. 2017. Density estimation using Real NVP. In International Conference on Learning Representations." + }, + { + "type": "ref_text", + "bbox": [ + 0.082, + 0.432, + 0.482, + 0.453 + ], + "angle": 0, + "content": "Stavros Diolatzis, Julien Philip, and George Drettakis. 2022. Active Exploration for Neural Global Illumination of Variable Scenes. ACM Transactions on Graphics (2022)." + }, + { + "type": "ref_text", + "bbox": [ + 0.082, + 0.453, + 0.482, + 0.482 + ], + "angle": 0, + "content": "Ana Dodik, Marios Papas, Cengiz Öztireli, and Thomas Müller. 2022. Path Guiding Using Spatio-Directional Mixture Models. In Computer Graphics Forum, Vol. 41. Wiley Online Library, 172-189." + }, + { + "type": "ref_text", + "bbox": [ + 0.082, + 0.482, + 0.482, + 0.502 + ], + "angle": 0, + "content": "Saeed Hadadan, Shuhong Chen, and Matthias Zwicker. 2021. Neural radiosity. ACM Transactions on Graphics (TOG) 40, 6 (2021), 1-11." + }, + { + "type": "ref_text", + "bbox": [ + 0.082, + 0.502, + 0.482, + 0.533 + ], + "angle": 0, + "content": "Sebastian Herholz, Oskar Elek, Jiří Vorba, Hendrik Lensch, and Jaroslav Krivánek. 2016. Product importance sampling for light transport path guiding. In Computer Graphics Forum, Vol. 35. Wiley Online Library, 67-77." + }, + { + "type": "ref_text", + "bbox": [ + 0.082, + 0.533, + 0.482, + 0.563 + ], + "angle": 0, + "content": "Yuchi Huo, Rui Wang, Ruzahng Zheng, Hualin Xu, Hujun Bao, and Sung-Eui Yoon. 2020. Adaptive incident radiance field sampling and reconstruction using deep reinforcement learning. ACM Transactions on Graphics (TOG) 39, 1 (2020), 1-17." + }, + { + "type": "ref_text", + "bbox": [ + 0.082, + 0.563, + 0.482, + 0.583 + ], + "angle": 0, + "content": "Wenzel Jakob. 2012. 
Numerically stable sampling of the von Mises-Fisher distribution on \\( S^{\\wedge}2 \\) (and other tricks). Interactive Geometry Lab, ETH Zürich, Tech. Rep (2012), 6." + }, + { + "type": "ref_text", + "bbox": [ + 0.082, + 0.583, + 0.482, + 0.603 + ], + "angle": 0, + "content": "Henrik Wann Jensen. 1995. Importance driven path tracing using the photon map. In Eurographics Workshop on Rendering Techniques. Springer, 326-335." + }, + { + "type": "ref_text", + "bbox": [ + 0.082, + 0.603, + 0.47, + 0.613 + ], + "angle": 0, + "content": "James T. Kajiya. 1986. The Rendering Equation. SIGGRAPH Comput. Graph. (1986)." + }, + { + "type": "ref_text", + "bbox": [ + 0.082, + 0.613, + 0.482, + 0.633 + ], + "angle": 0, + "content": "Diederik P. Kingma and Jimmy Ba. 2015. Adam: A Method for Stochastic Optimization. *ICLR* (2015)." + }, + { + "type": "ref_text", + "bbox": [ + 0.082, + 0.633, + 0.482, + 0.663 + ], + "angle": 0, + "content": "Eric P Lafortune and Yves D Willems. 1995. A 5D tree to reduce the variance of Monte Carlo ray tracing. In Eurographics Workshop on Rendering Techniques. Springer, 11-20." + }, + { + "type": "ref_text", + "bbox": [ + 0.082, + 0.663, + 0.482, + 0.693 + ], + "angle": 0, + "content": "Samuli Laine, Tero Karras, and Timo Aila. 2013. Megakernels considered harmful: Wavefront path tracing on GPUs. In Proceedings of the 5th High-Performance Graphics Conference, 137-143." + }, + { + "type": "ref_text", + "bbox": [ + 0.082, + 0.693, + 0.482, + 0.723 + ], + "angle": 0, + "content": "Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. 2020. NeRF: Representing Scenes as Neural Radiance Fields for View Synthesis. in ECCV." + }, + { + "type": "ref_text", + "bbox": [ + 0.082, + 0.723, + 0.482, + 0.744 + ], + "angle": 0, + "content": "Thomas Müller. 2019. \"Practical Path Guiding\" in Production. In ACM SIGGRAPH 2019 Courses (SIGGRAPH '19). ACM, New York, NY, USA, Article 18, 77 pages." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.082, + 0.744, + 0.482, + 0.774 + ], + "angle": 0, + "content": "Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. 2022. Instant Neural Graphics Primitives with a Multiresolution Hash Encoding. ACM Trans. Graph. 41, 4, Article 102 (July 2022), 15 pages." + }, + { + "type": "ref_text", + "bbox": [ + 0.082, + 0.774, + 0.482, + 0.804 + ], + "angle": 0, + "content": "Thomas Müller, Markus Gross, and Jan Novák. 2017. Practical path guiding for efficient light-transport simulation. In Computer Graphics Forum, Vol. 36. Wiley Online Library, 91-100." + }, + { + "type": "ref_text", + "bbox": [ + 0.082, + 0.804, + 0.482, + 0.834 + ], + "angle": 0, + "content": "Thomas Müller, Brian McWilliams, Fabrice Rousselle, Markus Gross, and Jan Novák. 2019. Neural importance sampling. ACM Transactions on Graphics (TOG) 38, 5 (2019), 1-19." + }, + { + "type": "ref_text", + "bbox": [ + 0.082, + 0.834, + 0.482, + 0.854 + ], + "angle": 0, + "content": "Thomas Müller, Fabrice Rousselle, Alexander Keller, and Jan Novák. 2020. Neural control variates. ACM Transactions on Graphics (TOG) 39, 6 (2020), 1-19." + }, + { + "type": "ref_text", + "bbox": [ + 0.082, + 0.854, + 0.482, + 0.876 + ], + "angle": 0, + "content": "Thomas Müller, Fabrice Rousselle, Jan Novák, and Alexander Keller. 2021. Real-Time Neural Radiance Caching for Path Tracing. ACM Trans. Graph. 40, 4, Article 36 (jul" + }, + { + "type": "list", + "bbox": [ + 0.082, + 0.34, + 0.482, + 0.876 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.533, + 0.104, + 0.607, + 0.114 + ], + "angle": 0, + "content": "2021), 16 pages." + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.114, + 0.917, + 0.144 + ], + "angle": 0, + "content": "Jacob Munkberg, Jon Hasselgren, Tianchang Shen, Jun Gao, Wenzheng Chen, Alex Evans, Thomas Mueller, and Sanja Fidler. 2022. Extracting Triangular 3D Models, Materials, and Lighting From Images. CVPR (2022)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.144, + 0.884, + 0.154 + ], + "angle": 0, + "content": "Thomas Muller. 2021. tiny-cuda-nn. https://github.com/NVlabs/tiny-cuda-nn" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.154, + 0.917, + 0.194 + ], + "angle": 0, + "content": "Steven G Parker, James Bigler, Andreas Dietrich, Heiko Friedrich, Jared Hoberock, David Luebke, David McAllister, Morgan McGuire, Keith Morley, Austin Robison, et al. 2010. Optix: a general purpose ray tracing engine. ACM Transactions on Graphics (TOG) 29, 4 (2010), 1-13." + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.194, + 0.917, + 0.215 + ], + "angle": 0, + "content": "S. Popov, R. Ramamoorthi, F. Durand, and G. Drettakis. 2015. Probabilistic Connections for Bidirectional Path Tracing. Computer Graphics Forum 34, 4 (07 2015), 75-86." + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.215, + 0.917, + 0.244 + ], + "angle": 0, + "content": "Alexander Rath, Pascal Grittmann, Sebastian Herholz, Petr Vévoda, Philipp Slusallek, and Jaroslav Křivánek. 2020. Variance-aware path guiding. ACM Transactions on Graphics (TOG) 39, 4 (2020), 151-1." + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.244, + 0.917, + 0.274 + ], + "angle": 0, + "content": "Lukas Ruppert, Sebastian Herholz, and Hendrik PA Lensch. 2020. Robust fitting of parallax-aware mixtures for path guiding. ACM Transactions on Graphics (TOG) 39, 4 (2020), 147-1." + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.274, + 0.917, + 0.305 + ], + "angle": 0, + "content": "Fujia Su, Sheng Li, and Guoping Wang. 2022. SPCBPT: Subspace-Based Probabilistic Connections for Bidirectional Path Tracing. ACM Trans. Graph. 41, 4, Article 77 (jul 2022), 14 pages. https://doi.org/10.1145/3528223.3530183" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.305, + 0.917, + 0.335 + ], + "angle": 0, + "content": "Dor Verbin, Peter Hedman, Ben Mildenhall, Todd Zickler, Jonathan T. Barron, and Pratul P. Srinivasan. 
2022. Ref-NeRF: Structured View-Dependent Appearance for Neural Radiance Fields. CVPR (2022)." + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.335, + 0.917, + 0.376 + ], + "angle": 0, + "content": "Jiri Vorba, Johannes Hanika, Sebastian Herholz, Thomas Müller, Jaroslav Krivánek, and Alexander Keller. 2019. Path Guiding in Production. In ACM SIGGRAPH 2019 Courses (Los Angeles, California) (SIGGRAPH '19). ACM, New York, NY, USA, Article 18, 77 pages." + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.376, + 0.917, + 0.405 + ], + "angle": 0, + "content": "Jiri Vorba, Ondrej Karlik, Martin Sik, Tobias Ritschel, and Jaroslav Krivanek. 2014. On-line learning of parametric mixture models for light transport simulation. ACM Transactions on Graphics (TOG) 33, 4 (2014), 1-11." + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.405, + 0.917, + 0.426 + ], + "angle": 0, + "content": "Alex Yu, Ruilong Li, Matthew Tancik, Hao Li, Ren Ng, and Angjoo Kanazawa. 2021. PlenOctrees for Real-time Rendering of Neural Radiance Fields. In ICCV." + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.426, + 0.917, + 0.466 + ], + "angle": 0, + "content": "Shilin Zhu, Zexiang Xu, Tiancheng Sun, Alexandr Kuznetsov, Mark Meyer, Henrik Wann Jensen, Hao Su, and Ravi Ramamoorthi. 2021. Hierarchical neural reconstruction for path guiding using hybrid path and photon samples. ACM Transactions on Graphics (TOG) 40, 4 (2021), 1-16." + }, + { + "type": "list", + "bbox": [ + 0.516, + 0.104, + 0.917, + 0.466 + ], + "angle": 0, + "content": null + }, + { + "type": "footer", + "bbox": [ + 0.082, + 0.894, + 0.325, + 0.906 + ], + "angle": 0, + "content": "Vol. 1, No. 1, Article. Publication date: April 2025." 
+ } + ] +] \ No newline at end of file diff --git a/data/2025/2504_04xxx/2504.04315/bc01e0c9-6b51-4b3b-9b99-9ed47940a83c_origin.pdf b/data/2025/2504_04xxx/2504.04315/bc01e0c9-6b51-4b3b-9b99-9ed47940a83c_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..bbdfc6f70ca9ef3e61acf54feeb481e524318772 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/bc01e0c9-6b51-4b3b-9b99-9ed47940a83c_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ae309d6a13476dd095bf14117e33c21db87fa1000de31520fbf47a7411d88777 +size 19989329 diff --git a/data/2025/2504_04xxx/2504.04315/full.md b/data/2025/2504_04xxx/2504.04315/full.md new file mode 100644 index 0000000000000000000000000000000000000000..ee95dcfe930204ac659838757ff7d37f38d9b225 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/full.md @@ -0,0 +1,408 @@ +# Neural Parametric Mixtures for Path Guiding + +HONGHAO DONG, Peking University, China +GUOPING WANG, Peking University, China +SHENG LI*, Peking University, China + +Previous path guiding techniques typically rely on spatial subdivision structures to approximate directional target distributions, which may cause failure to capture spatio-directional correlations and introduce parallax issue. In this paper, we present Neural Parametric Mixtures (NPM), a neural formulation to encode target distributions for path guiding algorithms. We propose to use a continuous and compact neural implicit representation for encoding parametric models while decoding them via lightweight neural networks. We then derive a gradient-based optimization strategy to directly train the parameters of NPM with noisy Monte Carlo radiance estimates. Our approach efficiently models the target distribution (incident radiance or the product integrand) for path guiding, and outperforms previous guiding methods by capturing the spatio-directional correlations more accurately. 
Moreover, our approach is more training efficient and is practical for parallelization on modern GPUs. + +CCS Concepts: Computing methodologies $\rightarrow$ Ray tracing; Neural networks. + +Additional Key Words and Phrases: Ray Tracing, Global Illumination, Sampling and Reconstruction, Neural Networks, Mixture Models + +# ACM Reference Format: + +Honghao Dong, Guoping Wang, and Sheng Li. 2025. Neural Parametric Mixtures for Path Guiding. 1, 1 (April 2025), 10 pages. https://doi.org/10.1145/3588432.3591533 + +# 1 INTRODUCTION + +The efficiency of path tracing relies heavily on the sampling strategy. To further improve its efficiency and robustness, path guiding algorithms leverage the knowledge gained during rendering to facilitate the process of light-path construction, thereby reducing noise. To acquire better importance sampling distribution, local path guiding techniques employ previous radiance estimates to learn an approximation of spatial incident radiance fields, which are then used to guide the construction of paths. In practice, current methods typically use some representation (e.g., Gaussian mixtures [Herholz et al. 2016; Vorba et al. 2014], quadtrees [Müller et al. 2017]) to approximate the directional distribution of incident radiance. A spatial subdivision structure (e.g., kd-tree [Dodik et al. 2022], or octree [Bus + +*Corresponding author. + +Project website: https://neuropara.github.io. + +Authors' addresses: Honghao Dong, Peking University, Beijing, China, cuteday@pku.edu.cn; Guoping Wang, Peking University, Beijing, China, wgp@pku.edu.cn; Sheng Li, Peking University, Beijing, China, lisheng@pku.edu.cn. + +Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. 
Copyrights for components of this work owned by others than ACM must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org. + +© 2025 Association for Computing Machinery. + +XXXX-XXXX/2025/4-ART $15.00 + +https://doi.org/10.1145/3588432.3591533 + +and Boubekeur 2017]) is then used to store these distributions, thus accounting for the spatial variations. + +However, several key deficiencies remain in their paradigm. Most methods learn the marginalized incident radiance distribution within each subdivided spatial region. This fails to capture the spatio-directional correlations within the spatial discretizations, and could cause artifacts (e.g., parallax error, Fig 1(a)). Moreover, their spatial subdivision structures are subject to frequent reconstruction for finer-grained spatial resolution, which needs extra overhead and require a long training time to converge. Meanwhile, it is challenging to efficiently fit these specific directional distributions from noisy samples, especially in an online manner [Ruppert et al. 2020]. + +While an adaptive and robust spatial representation is difficult to achieve with manually designed subdivision schemes, we saw the recent success of neural implicit representation in compactly modeling spatially varying functions with fine-grained and high-frequency details [Mildenhall et al. 2020]. In this work, we exploit the great expressiveness of neural implicit representation while preserving the desirable properties of parametric mixture models (e.g. efficient importance sampling) for path guiding algorithms. We thereby present Neural Parametric Mixtures (NPM), which use a continuous and compact implicit representation to encode spatio-directional target distributions, and decode them into PMMs with lightweight neural networks for fast importance sampling. 
We show that our NPM representation, without explicit spatial subdivision schemes, can be efficiently trained simply using gradient-based optimization techniques. Specifically, our method has advantages in the following aspects: + +First, our continuous implicit representation of spatial radiance fields naturally captures the correlations between spatial positions and directional target distributions. By smoothly interpolating and decoding the implicit representations with neural networks, our method inherently avoids the issues due to spatial discretization, thus resulting in higher performance. + +Second, our compact representation avoids the extra overhead and long training time caused by the iterative reconstruction strategies applied to the explicit spatial subdivision structures. Combined with our simple optimization based on stochastic gradient descent, our method outperforms other guiding methods even with fewer training samples. In addition, our method is practical and performant for parallelization on GPU. + +Lastly, our method can learn the product distribution (i.e., multiplied by the BSDF and the cosine term). This further reduces the noise with a modest computational overhead while not requiring the extra effort of previous solutions (e.g., fitting each BSDF with pre-computed parametric models). + +# 2 RELATED WORK + +Path Guiding. To achieve better sampling strategies, local path guiding techniques leverage previous radiance estimates (either online or during a pre-computation process) to build an approximation of the incident radiance fields, which is used to guide subsequent sampling. Early approaches used simple bases such as histograms for importance sampling, e.g. built from a photon map [Jensen 1995] or collected radiance estimates with 5-D tree structures [Lafortune and Willems 1995]. Subsequent work has developed various techniques to construct the guiding distribution, e.g., Gaussian mixtures [Vorba et al. 2014], quad-trees [Müller et al. 
2017], which is often stored in spatial data structures (e.g., kd-tree and octree) to account for spatial variations of the distributions. + +Deep learning techniques have also been explored recently, achieving improvements while often with less practical performance. For example, convolutional networks could be used to reconstruct the learned noisy radiance field [Huo et al. 2020; Zhu et al. 2021]. Specifically designed neural networks could also model complex manifolds [Dinh et al. 2017], while allowing samples to be drawn directly from the learned distribution [Müller et al. 2019]. However, the prohibitive computational cost prevents its practical application [Müller et al. 2019; Vorba et al. 2019]. Instead of directly importance sampling using neural networks, we encode the target distribution into implicit neural representation, and use only lightweight MLPs to decode it into parametric mixtures for efficient sampling. We show that our method can be efficiently trained (< 10s per scene on a single GPU) while being sufficiently robust and practical. + +Parametric Mixture Models. Parametric mixture models (PMMs) are convex combinations of parametric distributions, and are often used to approximate directional distributions in graphics applications. They have many desirable properties, e.g., fast sampling, and closed-form solutions for products, convolutions and integrals. Several types of PMMs (e.g., Gaussian mixtures [Dodik et al. 2022; Vorba et al. 2014] and von Mises-Fisher mixtures [Ruppert et al. 2020]) are widely used in the recently developed path guiding algorithms. Several recent works also use PMMs to fit BSDFs with precomputation [Herholz et al. 2016; Ruppert et al. 2020], and multiply them with the learned incident radiance to achieve product sampling. + +Parametric models can also be predicted by neural networks, enabling new possibilities for e.g. lighting [Currius et al. 2020] and reconstruction [Yu et al. 2021] tasks. 
In this work, we use neural representations to encode parametric mixtures for efficient sampling. Our method is also naturally extensible to product sampling. + +Implicit Neural Representation. Following the success of using neural networks to represent 3D scenes implicitly [Mildenhall et al. 2020], the concept of neural representation has been popularized and applied to various tasks. They use sparse input images to optimize the spatial radiance fields via a differentiable volume rendering procedure, thus enabling novel view synthesis. Inspired by its recent successful applications [Diolatzis et al. 2022; Müller et al. 2022], we exploit a continuous and compact implicit neural representation to encode the spatio-directional target distributions for path guiding algorithms. While the ground truth target distribution (i.e., the incident radiance or product distribution) is unknown, our NPM + +representation can be optimized online using minibatch stochastic gradient descent (SGD), where the gradients for training are estimated by Monte Carlo integration using noisy radiance estimates. + +# 3 PRELIMINARY + +Monte Carlo Integration. Light transport algorithms are generally based on the rendering equation [Kajiya 1986]: + +$$ +L _ {0} (\mathbf {x}, \omega_ {0}) = L _ {\mathrm {e}} (\mathbf {x}, \omega_ {0}) + \int_ {\Omega} f _ {\mathrm {s}} (\mathbf {x}, \omega_ {0}, \omega_ {\mathrm {i}}) L _ {\mathrm {i}} (\mathbf {x}, \omega_ {\mathrm {i}}) | \cos \theta_ {\mathrm {i}} | d \omega_ {\mathrm {i}}, \tag {1} +$$ + +which defines the relationship between the outgoing radiance $L_{\mathrm{o}}$ , emitted radiance $L_{e}$ , and the integrated incident radiance $L_{\mathrm{i}}$ , at shading point $\mathbf{x}$ . Monte Carlo integration is used to obtain an estimate of the reflection integral $L_{r}$ using an average of $N$ samples. 
In the case where $N = 1$ : + +$$ +\left\langle L _ {\mathrm {r}} \left(\mathbf {x}, \omega_ {\mathrm {o}}\right) \right\rangle = \frac {f _ {\mathrm {s}} \left(\mathbf {x} , \omega_ {\mathrm {o}} , \omega_ {\mathrm {i}}\right) L _ {\mathrm {i}} \left(\mathbf {x} , \omega_ {\mathrm {i}}\right) \left| \cos \theta_ {\mathrm {i}} \right|}{p \left(\omega_ {\mathrm {i}} \mid \mathbf {x} , \omega_ {\mathrm {o}}\right)}, \tag {2} +$$ + +where $\langle L_{\mathrm{r}}(\mathbf{x},\omega_0)\rangle$ is an unbiased estimate of the outgoing radiance $L_{\mathrm{r}}(\mathbf{x},\omega_0)$ , and $\omega_{i}$ is the incident direction sampled with some directional probability distribution $p(\omega_{\mathrm{i}}\mid \mathbf{x},\omega_{\mathrm{o}})$ . The variance of this estimator $V[\langle L_{\mathrm{r}}\rangle ]$ can be reduced if the sampling distribution resembles the shape of the integrand, and could even reach zero variance if being proportional to it (i.e., $p\propto f_s\cdot L_i\cos \theta_i$ ). This, however, is difficult to achieve with only BSDF importance sampling, leaving the remaining part of the integrand (i.e., the incident radiance) unknown, resulting in a relatively high variance of the MC estimator. Path guiding algorithms, on the other hand, manage to obtain better importance sampling strategies often by using previous radiance samples to approximate the incident radiance $L_{\mathrm{i}}$ or the full integrand $f_{s}\cdot L_{\mathrm{i}}\cos \theta_{i}$ , which will be discussed later. + +Von Mises-Fisher Mixtures. We use the von Mises-Fisher (vMF) distribution as the basis of NPM. The vMF distribution is defined as: + +$$ +v (\omega \mid \mu , \kappa) = \frac {\kappa}{4 \pi \sinh \kappa} \exp \left(\kappa \mu^ {T} \omega\right), \tag {3} +$$ + +where $\mu \in \mathbb{S}^2$ and $\kappa \in [0, +\infty)$ defines the direction and precision (sharpness) of the vMF distribution. 
The vMF mixture model (VMM) is thus a convex combination of $K$ vMF components/lobes: + +$$ +\mathcal {V} (\omega \mid \Theta) = \sum_ {i = 1} ^ {K} \lambda_ {i} \cdot v \left(\omega \mid \mu_ {i}, \kappa_ {i}\right), \tag {4} +$$ + +where $\Theta$ contains the parameters $(\mu_i,\kappa_i)$ and weights $(\lambda_{i})$ of each vMF component. The vMF mixtures have many desirable properties, e.g., fewer parameters (4 floats per component), efficient importance sampling, and closed-form product and integration, which together constitute the reason for choosing it as the basis of NPM. + +Our key is to encode the vMF mixtures with our implicit neural representation, then decode them with lightweight MLPs, and train them to effectively model the target distributions for path guiding algorithms. Other parametric basis functions (e.g., Gaussian mixtures) could be integrated into our method using a similar paradigm. + +# 4 NEURAL PARAMETRIC MIXTURES + +In this section, we present our Neural Parametric Mixtures (NPM) technique for local path guiding. We first show how to encode/decode + +target distributions with NPM in a simple setup (i.e., learning incident radiance fields, Sec. 4.1), then we derive the optimization method for NPM based on minibatch stochastic gradient descent (Sec. 4.2). Finally, we show how our NPM could naturally benefit from learning the full integrand (to account for the BSDF term), as well as the other extensions for better learning target distributions (Sec. 4.3). An overview of our method is illustrated in Fig. 2. + +# 4.1 Radiance-based NPM + +In order to acquire a better importance sampling strategy, we should obtain an approximation of the incident radiance distribution using previous radiance estimates, known as the radiance-based local path guiding [Herholz et al. 2016; Rath et al. 2020]. 
Specifically, we want to use the vMF mixtures to be approximately proportional to the incident radiance, at a given shading position $\mathbf{x}$ : + +$$ +\mathcal {V} (\omega_ {i} \mid \Theta (\mathbf {x})) \propto L _ {\mathrm {i}} (\mathbf {x}, \omega_ {i}), \tag {5} +$$ + +where $\Theta$ is conditioned on $\mathbf{x}$ to account for the spatial variation of the target distribution. Previous work achieves this with specific spatial subdivision strategies (e.g., kd-tree, octree). However, this spatial discretization introduces artifacts (e.g., resulting from parallax, Fig. 1 (a)), and is subject to frequent reconstruction to converge to a fine grained spatial subdivision, as discussed in Sec. 1. + +Instead, we use an implicit neural representation to encode the target distribution compactly. This allows the spatial variation of the distribution to be continuously accounted for, thus better capturing spatio-directional correlations. Technically, given a shading position $\mathbf{x}$ in the scene, our NPM would output the guiding distribution that approximates the target distribution (Eq. 5). The output guiding distribution is defined using a set of parameters $\hat{\Theta}(\mathbf{x})$ : + +$$ +\mathrm {N P M} (\mathbf {x} \mid \Phi) = \hat {\Theta} (\mathbf {x}), \tag {6} +$$ + +where $\Phi$ are the trainable parameters of the implicit representation, and $\hat{\Theta}$ are the output decoded parameters, defining a vMF mixture $\mathcal{V}(\omega_i\mid \hat{\Theta} (\mathbf{x}))$ that is trained to approximate $L_{i}(\mathbf{x},\omega_{i})$ (Eq. 5). By continuously conditioning the learned distribution $\Theta$ on spatial positions $\mathbf{x}$ , our method inherently avoids the above issues caused by spatial discretizations. We achieve the above mapping by using a lightweight network to decode this parametric distribution from the implicit neural representation. 
To make sure that we get a valid vMF mixture (i.e., $\lambda_{i},\kappa_{i} > 0,\mu_{i}\in \mathbb{S}^{2}$ , and $\sum_{j = 1}^{K}\lambda_{j} = 1$ ), we must additionally regularize the raw network output with appropriate mapping functions (see Tab. 1). Specifically, we apply exponential activation to $\lambda_{i}$ and $\kappa_{i}$ . Logistic activation is applied to $\theta_{i}$ and $\varphi_{i}$ which form the spherical coordinates of $\mu_{i}$ . Most importantly, we apply the softmax function to all $\lambda s$ to ensure that the outputs model a valid PDF (i.e., satisfy $\sum_{i = 1}^{K}\lambda_{i} = 1$ ). + +Discussion. It is possible to implement different forms of implicit neural representation with trainable parameters $\Phi$ . While it is straightforward to use a monolithic network to model $\mathrm{NPM}_{\Phi} : \mathbf{x} \rightarrow \Theta$ , we find it difficult to fit the high-frequency variations of the target distribution. Thereby, we use a trainable multi-resolution spatial embedding for encoding the distributions, and additionally a lightweight neural network for decoding the parameters. This is + +![](images/38e332d26df913a4036d6a6bb054d9f1724ec4e8b74fdf0ec84b6036b6735101.jpg) +Fig. 1. Parallax issue caused by spatial discretizations (a). For a subdivided volume $S(\mathbf{x})$ in (a), the guiding distribution is marginalized with training samples scattered over the volume $S(\mathbf{x})$ , and is shared by different positions (e.g., $\mathbf{x}_1$ and $\mathbf{x}_2$ ). Our method will not suffer from parallax due to NPM implicitly representing a monolithic function, continuously mapping from spatial positions to parametric guiding distributions, as shown in (b). + +crucial for our method to achieve better modeling capacity while remaining performant, as will be discussed later. 
+ +# 4.2 Optimizing NPM + +We show how to optimize the divergence between the decoded distribution $\hat{\Theta}(\mathbf{x})$ and the target distribution using minibatch stochastic gradient descent. To achieve this, the gradients of a training objective (or loss function) with respect to the network parameters are necessary. However, it is non-trivial to define such a loss function, given the ground truth output parameters $\Theta_{\mathrm{gt}}(\mathbf{x})$ are unknown. Previous works typically use design optimization algorithms (e.g., expectation-maximization) that iteratively use batches of samples to fit a given set of parameters $\Theta$ , which often parameterize a marginalized distribution shared by the spatial region covering the samples [Herholz et al. 2016; Ruppert et al. 2020]. However, their methods are applied to explicitly parameterized models, and are therefore not applicable to our method, which models the implicit representation of the function $\mathbf{NPM}_{\Phi}: \mathbf{x} \rightarrow \hat{\Theta}$ . + +We minimize the KL divergence between the decoded vMF mixtures and the target distribution via minibatch stochastic gradient descent, where its gradients with respect to the trainable parameters are estimated using Monte Carlo integration. Other divergence metrics are also available following a similar derivation. Let us start by assuming that the shading position $\mathbf{x}$ is fixed, thus omitting the dependency of $\Theta$ on $\mathbf{x}$ in the equations. For a given position, the KL divergence between the target distribution $\mathcal{D}$ and our output + +Table 1. Detailed mapping functions we use to regularize network outputs, where $\lambda^{\prime}$ , $\kappa^{\prime}$ , $\theta^{\prime}$ , $\varphi^{\prime}$ denote the raw outputs, and $(\theta, \varphi)$ is the normalized spherical coordinate of $\mu \in \mathbb{S}^2$ . Left: parameter notations and their valid ranges; middle: type of activation; right: specific mappings. + +
ParameterActivationMapping
κ ∈ [0,+∞)Exponentialκi = exp(κi')
λ ∈ [0,+∞)Softmaxλi = exp(λi') / ∑j=1K exp(λj')
θ, φ ∈ [0,1]Logisticθi = 1/(1 + exp(-θi'))
+ +![](images/012d7d72f38c99a2d6e3ef901578141684e879c1eaa6c4a2513ae3c7899fdb0e.jpg) +Fig. 2. High-level illustration of our Neural Parametric Mixtures (NPM). We implicitly encode the spatially varying target distributions with the multi-resolution embedding. When the distribution of a spatial location $\mathbf{x}$ is queried, (1) the features assigned to the nearby grid points surrounding $\mathbf{x}$ are interpolated at each level, and concatenated with other levels to obtain the spatial embedding $G(\mathbf{x})$ . (2) the spatial embedding is then combined with other inputs to (3) feed into the lightweight MLP for (4) decoding the parameters $\Theta$ of the vMF mixture $\mathcal{V}(\omega_i \mid \Theta)$ with $K$ components. We then (5) use this parametric distribution for importance sampling the scattering direction. The result MC radiance estimate $\langle L_i(\mathbf{x}, \omega_i) \rangle$ is used to estimate the training gradient $\nabla_{\Theta} D_{\mathrm{KL}}$ (Sec. 4.2), which is then back-propagated through these differentiable stages to optimize our NPM representation (dashed lines). + +distribution $\mathcal{V}$ is defined as: + +$$ +D _ {\mathrm {K L}} (\mathcal {D} \| \mathcal {V}; \Theta) = \int_ {\Omega} \mathcal {D} (\omega) \log \frac {\mathcal {D} (\omega)}{\mathcal {V} (\omega | \hat {\Theta})} \mathrm {d} \omega , \tag {7} +$$ + +where $\mathcal{D} \propto L_{\mathrm{i}}$ in radiance-based path guiding. This integral could now be estimated with the Monte Carlo estimator with $N$ samples: + +$$ +D _ {\mathrm {K L}} (\mathcal {D} \| \mathcal {V}; \Theta) \approx \frac {1}{N} \sum_ {j = 1} ^ {N} \frac {\mathcal {D} (\omega_ {j})}{\tilde {p} (\omega_ {j} \mid \hat {\Theta})} \log \frac {\mathcal {D} (\omega_ {j})}{\mathcal {V} (\omega_ {j} \mid \hat {\Theta})}, \tag {8} +$$ + +where $\tilde{p}$ is the distribution from which the samples are drawn, which in our case is a combination of the BSDF importance sampling and guiding distribution. 
By taking its derivative with respect to $\Theta$ , we obtain the MC estimate of the gradient $\nabla_{\Theta}D_{\mathrm{KL}}(\mathcal{D}\| \mathcal{V};\Theta)$ : + +$$ +\nabla_ {\Theta} D _ {\mathrm {K L}} (\mathcal {D} \| \mathcal {V}; \Theta) \approx - \frac {1}{N} \sum_ {j = 1} ^ {N} \frac {\mathcal {D} \left(\omega_ {j}\right) \nabla_ {\Theta} \mathcal {V} \left(\omega_ {j} \mid \hat {\Theta}\right)}{\tilde {p} \left(\omega_ {j} \mid \hat {\Theta}\right) \mathcal {V} \left(\omega_ {j} \mid \hat {\Theta}\right)}, \tag {9} +$$ + +where the derivatives of the vMF mixtures $\mathcal{V}$ with respect to their parameters $\Theta$ are straightforward. The gradients for the trainable NPM parameters $\Phi$ could then be obtained via back propagation. Since we use the unbiased MC estimate of the training gradients, the parameters are guaranteed to converge to a local minimum. + +In practice, our training sample pairs $(\mathbf{x},\omega_{i})\rightarrow L_{\mathrm{i}}$ are distributed in different spatial positions $\mathbf{x}$ , efficiently learning a spatially varying target distribution $\mathcal{D}(\mathbf{x})$ . This results in the training objective accounting for the divergence of multiple positions. The expected solution for $\Phi$ is thus: + +$$ +\Phi^ {*} = \underset {\Phi} {\arg \min } \mathbb {E} _ {\mathbf {x}} \left[ D _ {\mathrm {K L}} \left(\mathcal {D} (\mathbf {x}) \| \mathcal {V}; \Theta (\mathbf {x})\right) \right]. \tag {10} +$$ + +For our implicit spatial embedding (i.e., grids of latent features, discussed later), this results in the embedding being optimized with all (and only) its nearby samples. When using the gradient descent method, the samples with the largest gradients (i.e., the most important ones for reducing divergence) would dominate, forming a reasonable design choice for better adaptivity. 

# 4.3 Full Integrand Learning

Using path guiding to sample the full integrand $f_{s} \cdot L_{i} \cos \theta_{i}$ can achieve even better performance, which should incorporate the BSDF term and the cosine term into the target distribution. This is challenging since the guiding distribution is now conditioned on 5D inputs (i.e., outgoing direction $\omega_{0}$ and spatial coordinate $\mathbf{x}$ ). Previous works fit BSDFs with precomputed parametric models and multiply them with the learned incident radiance distribution to achieve product sampling. However, this often relies on scene-dependent precomputation, discretization over $\omega_{0}$ , and extra computational overhead [Herholz et al. 2016; Ruppert et al. 2020]. + +Our neural design can naturally handle the conditions with the extra input of $\omega_{i}$ . This is essential since a neural network could approximate arbitrary conditional models if being expressive enough. We later show this improves performance through learning a better guiding distribution, with only modest performance overhead. For clarity, we denote the previous radiance-based method as NPM-radiance, and this version as NPM-product. + +Specifically, by supplementing input $\omega_{0}$ , we reformulate the learned distribution (Eq. 6) with the outgoing directions. This enables learning the full integrand as: + +$$ +\mathrm {N P M} _ {\text {p r o d u c t}} (\mathbf {x}, \omega_ {\mathrm {o}} \mid \Phi) = \hat {\Theta} (\mathbf {x}, \omega_ {\mathrm {o}}), \tag {11} +$$ + +where $\hat{\Theta}$ now parameterizes the vMF mixture $\mathcal{V}$ that is trained to approximate the full integrand in Eq. 1, i.e., + +$$ +\left. \mathcal {V} \left(\omega_ {i} \mid \hat {\Theta} (\mathbf {x}, \omega_ {0})\right) \propto f _ {\mathrm {s}} \left(\mathbf {x}, \omega_ {0}, \omega_ {\mathrm {i}}\right) L _ {\mathrm {i}} \left(\mathbf {x}, \omega_ {\mathrm {i}}\right) \left| \cos \theta_ {\mathrm {i}} \right|, \right. 
\tag {12} +$$ + +where the cosine term could be approximated with a constant vMF lobe [Ruppert et al. 2020], leaving NPM to focus on the remaining part of the integral. Nonetheless, it is still challenging for neural networks to model a 2D directional distribution conditioned on 5D spatio-directional inputs. We further use the following simple extensions to help the network learn these spatially varying distributions: + +Auxiliary Feature Inputs. Following the practices in prior work [Hadadan et al. 2021; Müller et al. 2021], we additionally input the surface normal and roughness as auxiliary features to help + +the network better correlate the target distribution with e.g., local shading frame (normal) and spatially varying BSDFs (roughness). Experimentally, we find this helps the network to better capture the spatio-directional correlations, while with a small computational overhead due to additional memory traffic. + +Input Encoding. It is challenging for a neural network to model the non-linearity between multidimensional inputs and outputs, especially when our outputs are distributions with high-frequency spatial variations. Therefore, we replace the spatial input $\mathbf{x}$ with our trainable multi-resolution spatial embedding (discussed in Sec. 5.1). For the other inputs (e.g., outgoing direction $\omega_{0}$ and surface normals $\mathbf{n}(\mathbf{x})$ ), we encode them using the spherical harmonics basis, which is previously established in NeRF [Verbin et al. 2022]. + +# 5 IMPLEMENTATION + +In this section, we provide the technical details that are crucial to the performance and practicality of our NPM implementation. + +# 5.1 Multi-resolution Spatial Embedding + +Our implicit NPM representation learns a continuous mapping $\mathrm{NPM}_{\Phi}:\mathbf{x}\rightarrow \hat{\Theta}$ (with the additional input $\omega_{0}\in \mathbb{S}^{2}$ in the extended version), where $\Theta \in \mathbb{R}^{4\times K}$ defines the learned target distribution. 
While a straightforward solution would be using a multi-layer perceptron (MLP) as the universal function approximator to model $\mathrm{NPM}_{\Phi}$ , we experimentally found it difficult to capture the high-frequency spatial variations of the target distributions. + +Therefore, we use a learnable spatial embedding to implicitly encode the learned parametric mixtures. Similar approaches are found successful in recent NeRF-like applications [Müller et al. 2022; Munkberg et al. 2022]. Specifically, we define $L$ 3D uniform grids $G_{l}$ , each covering the entire scene with a spatial resolution of $D_l^3$ , where $G_{l}$ denotes the $l$ -th embedding grid. $D_{l}$ grows exponentially, resulting in multiple resolutions of the embedding. We then assign a learnable embedding (a latent feature vector $\boldsymbol{v} \in \mathbb{R}^{F}$ ) to each lattice point of $G_{l}$ . To query the spatial embedding for $\mathbf{x}$ , we bilinearly interpolate the features nearby $\mathbf{x}$ for each resolution, and concatenate them to obtain the final embedding $G(\mathbf{x})$ . More formally: + +$$ +G (\mathbf {x} \mid \Phi_ {\mathrm {E}}) = \underset {l = 1} {\overset {L} {\oplus}} \operatorname {b i l i n e a r} \left(\mathbf {x}, V _ {l} [ \mathbf {x} ]\right), G: \mathbb {R} ^ {3} \rightarrow \mathbb {R} ^ {L \times F}, \tag {13} +$$ + +where $V_{l}[\mathbf{x}]$ is the set of features at the eight corners of the cell enclosing $\mathbf{x}$ within $G_{l}$ . The spatial embedding $G(\mathbf{x})$ is then concatenated with other inputs (e.g., $\omega_0$ and auxiliary features) to the MLP for decoding the parameters $\Theta$ . We thus formulate the desired mapping (taking Eq. 
6 for example) as a two-step procedure: + +$$ +\mathbf {M L P} \left(G (\mathbf {x} \mid \Phi_ {\mathrm {E}}) \mid \Phi_ {\mathrm {M}}\right) = \hat {\Theta} (\mathbf {x}), \tag {14} +$$ + +where the parameters of the spatial embedding $(\Phi_{\mathrm{E}})$ and the MLP $(\Phi_{\mathrm{M}})$ together constitute the trainable parameters $\Phi$ of our implicit representation for NPM. Intuitively, a spatial embedding implicitly encodes the target distribution within a specific spatial region, while the multi-resolution design efficiently accounts for different levels of detail (LOD). By smoothly interpolating between the spatial embedding around positions and decoding them using neural networks, + +we naturally account for the spatial variations of the target distribution. This also lessens the burden of using a single monolithic MLP as the implicit representation, leaving it mainly focusing on decoding it into parametric models $\Theta$ . This significantly accelerates training/inference with a larger memory footprint. + +# 5.2 Online Training Scheme + +**Renderer Integration.** We implement our method on a custom GPU-accelerated renderer based on OptiX [Parker et al. 2010], where the training and inference procedures are integrated into a wavefront-style path tracer [Laine et al. 2013]. This design choice allows ray casting, importance sampling, and BSDF evaluation to be performed in coherent chunks over large sets of traced paths by splitting the traditional megakernel path tracer into multiple specialized kernels. This improves GPU thread utilization by reducing the control flow divergence. Most importantly, this allows us to efficiently sample and evaluate the guiding distributions at each vertex along the path in parallel, thus significantly accelerating network training/inference. + +Specifically, we place the training/inference samples into queues, where the structure-of-arrays (SoA) memory layout is applied to improve memory locality. 
At each ray intersection of the chunk of traced paths, the queries for guiding distributions within the queue are processed via batched network inference. The sampling and evaluation procedures are then performed, also using specialized kernels, before entering the next ray-cast kernel. This provides our method with maximum parallelism through large-batch training and inference, minimizing the latency caused by waiting for network queries, while avoiding inefficient single-sample inference. + +Training Scheme. We use the same configuration to train each scene online during rendering, without any scene-specific fine-tuning or pre-computation. During training, we collect MC radiance estimates along each traced path, and split them into mini-batches for training. The optimization step is performed for each spp, which allows samples to be drawn from the latest guiding distribution. The distribution of the samples (for both rendering and training) thus gets refined as training proceeds. We stop the training process after a fixed fraction of the total rendering budget (either time or sample count). While we always set this to $25\%$ in our experiments, we find our NPM technique converges quickly during training, generally reaching a local minimum after about 150spp, which amounts to about 1000 training steps/batches and 15s (including the runtimes of both training and rendering) on GPU. + +# 5.3 Guiding Network + +We implement our network on the tiny-cuda-nn framework [Müller 2021] and integrate it into our renderer. The MLP we used (for both NPM-radiance and NPM-product) contains 3 linear layers of width 64. Each layer uses ReLU activation, except for the last layer, which uses our custom mapping functions (Tab. 1). We let the network output $K = 8$ vMF components, i.e., $\Theta \in \mathbb{R}^{8 \times 4}$ . For the multi-resolution spatial embedding, we use $L = 8$ grids with increasing resolutions for each level. 
The coarsest level has a resolution of $D_{1} = 8$ while the finest level has $D_{8} = 86$ . The feature of each level contains $F = 4$ floats, resulting in the final spatial embedding $G(\mathbf{x}) \in \mathbb{R}^{8 \times 4}$ . In practice, + +![](images/7791fafefea912ff395714d5a429e4ef823d518892e47681632a7d70725cece0.jpg) +Fig. 3. Equal-sample-count (750spp) comparisons for two scenes. We show the error (for both the zoom-in areas and whole images) and time cost of different methods. The yellow plots (as well as the other figures) refer to the results obtained by unidirectional path tracing. + +we find that the performance of the network could be improved by enlarging the capacity of the MLP or the spatial embedding, leaving this as a trade-off between quality and speed. + +For training, we use a fixed learning rate of 0.005 that is large enough to acquire a fast convergence speed. Adaptive momentum techniques like Adam [Kingma and Ba 2015] are used for more robust training and better convergence. For importance sampling the decoded mixtures, we use the numerically stable strategy for vMF [Jakob 2012]. At inference time, we also apply exponential moving average (EMA) to the weights of previous training steps, which better reduces the noise of the MC estimated gradients (Eq. 9). + +# 6 RESULTS AND DISCUSSION + +We run all the experiments on an Intel Core i9-11900 CPU and an NVIDIA RTX3070 GPU. Following similar practices of previous works [Müller 2019; Rath et al. 2020], we disable NEE and Russian roulette for all methods and set the maximum path length to 10. All methods are implemented upon a GPU path tracing renderer. + +We render all images at the resolution of $1280 \times 720$ , and evaluate image quality using mean relative squared error (relMSE). All the images, additional metrics (MAPE and MRSE), and the false-color maps can be interactively inspected with our supplementary viewer. 
+ +# 6.1 Comparisons + +Our method is compared against improved PPG [Müller 2019] (an enhanced version of Practical Path Guiding [Müller et al. 2017]), and Variance-aware Path Guiding [Rath et al. 2020]. For the experimental configuration of the compared methods, we use the same as [Rath et al. 2020], except for fixing the BSDF selection probability to $50\%$ (for both ours and the compared methods). Both compared methods used an iteratively reconstructed subdivision structure (i.e., the spatio-directional trees) to account for spatial variations. A total of 10 different scenes were tested. + +We first show equal-spp comparisons on two representative scenes. The VEACH Door scene features strong indirect illumination that is difficult to handle with BSDF importance sampling, while the BATHROOM scene contains many specular and glossy surfaces. As shown in Fig. 3, our proposed method outperforms the other two methods even when only learning incident radiance $L_{\mathrm{i}}$ (NPM-radiance). The noise is alleviated further with our full integrand learning method (NPM-product), since both of the scenes contain glossy surfaces, where the contribution of samples is strongly influenced by the BSDF term. We also note that our method quickly becomes effective at the very beginning of the training process (see the convergence plots in Fig. 3). This indicates a better training efficiency over classical guiding methods, which will be discussed later. Additional results on more test scenes are shown in Fig. 4 and Tab. 2, as well as the convergence plots in Fig. 5. + +We then show the results of equal-time comparisons between our method and [Rath et al. 2020] in Fig. 6. Since they do not explicitly learn the product sampling distribution (i.e., conditioned on 5D inputs $\omega_0$ and $\mathbf{x}$ ), we only use our radiance-based method (NPM-radiance) for fair comparisons. 
Instead of simply learning the incident radiance distribution $(L_{\mathrm{i}})$ , they use an improved target distribution to account for the variance and BSDF (marginalized over $\omega_0$ ). Our method, on the other hand, achieves better performance by learning $L_{\mathrm{i}}$ only. We attribute this superiority of our method to both the better capacity of capturing spatio-directional correlation and more parallelism. + +# 6.2 Evaluation + +Trainable Spatial Embedding. We analyze the performance of different forms of spatial input encoding in terms of convergence and quality (Fig. 8). The spatial embedding (i.e. parametric encoding) uses trainable latent vector grids to model the spatially-varying target distributions, leaving the MLP to focus on decoding this implicit representation into valid vMF mixtures. The other two variants + +Table 2. Practical Path Guiding (PPG) [Müller 2019], Variance-aware Path Guiding [Rath et al. 2020], unidirectional path tracing and our method on 10 test scenes. We report relMSE, render time, and speedup using PPG as the baseline. Our NPM technique consistently reduces the error in the test scenes. + +
PT (BSDF)[Müller 2019][Rath et al. 2020]Ours
PPG (baseline)Variance. PGNPM (radiance)NPM (product)
BATHROOM0.090548s0.05301.0 ×106s0.04851.09 ×107s0.02512.11 ×101s0.02032.61 ×108s
BEDROOM0.038340s0.02011.0 ×105s0.01611.26 ×109s0.01501.35 ×84s0.01461.38 ×90s
BREAKFAST ROOM0.009448s0.00691.0 ×100s0.00471.46 ×103s0.00381.80 ×63s0.00351.96 ×71s
LIVING ROOM0.027332s0.01841.0 ×74s0.01461.26 ×80s0.01571.17 ×47s0.01321.39 ×54s
PINK ROOM0.004637s0.00821.0 ×74s0.00611.34 ×76s0.00332.42 ×53s0.00263.21 ×62s
SALLE DE BAIN0.081938s0.02231.0 ×116s0.03460.64 ×116s0.01961.14 ×79s0.01401.59 ×86s
STAIRCASE0.181234s0.02981.0 ×80s0.02611.14 ×86s0.01941.54 ×72s0.01721.74 ×76s
VEACH DOOR0.620833s0.21671.0 ×82s0.19451.11 ×91s0.07502.89 ×65s0.04614.69 ×77s
VEACH EGG8.291833s0.83791.0 ×82s0.78701.07 ×85s0.59841.40 ×62s0.53521.56 ×69s
WHITE ROOM0.030138s0.02781.0 ×107s0.02531.10 ×103s0.01242.25 ×76s0.01002.75 ×87s
+ +do not explicitly separate these two tasks by using a monolithic MLP. The addition of spatial embedding significantly improves convergence, and the multi-resolution design further reduces error by better modeling finer-grained spatio-directional correlations. Furthermore, this does not introduce noticeable computational overhead, as only a small fraction of parameters are involved in each training/inference. + +Training Efficiency. The effectiveness of guiding methods under small training budgets is important, especially for applications such as preview rendering or even interactive rendering. We analyze the training efficiency of different guiding methods by comparing their performance under different training budgets (31 spp, 63 spp, 127 spp, respectively) in Fig. 7. Our method quickly converges to a good sampling distribution with only a few training samples and less training time cost (e.g., 31 spp with about 3s), thus outperforming previous guiding methods even with much fewer training samples. + +# 6.3 Discussion + +Path Guiding Extensions. Our method can be extended with many well-established extensions suggested by previous path guiding algorithms. They are straightforward to integrate and are promising to further improve our performance. For example: (1) the BSDF selection probability could also be learned by our network or by some other caching strategies [Müller et al. 2020], thus better handling the near-specular surfaces; and (2) the improved variance-aware target distribution [Rath et al. 2020] could be learned to account for the variance within the noisy MC estimates. + +Performance Analysis. Our method serves as an effective means for path guiding while remaining practical in terms of performance. Specifically, the measured time cost per NPM evaluation (including both network inference and importance sampling the decoded mixture models) at $1280 \times 720$ is about 3ms. 
Meanwhile, a training step (i.e., a batch of $2^{18}$ samples) costs about 10ms, indicating that a typical training process (about 1000 training steps) takes about 10s to converge on a single GPU. NPM contains a total of about 2M learnable parameters, resulting in a memory consumption of $< 10\mathrm{MB}$ . The compact design of our implicit NPM representation results in less control + +flow divergence, better memory locality, and better caching performance. Together, this makes our method practical for modern GPU parallelization, which is often harder to achieve with the tree-like spatial subdivision schemes used by most of the previous guiding methods. + +Alternative Solutions. Several studies also aim to tackle the parallel issue. Dodik et al. [2022] use spatio-directional mixtures (i.e., conditioned on $\mathbf{x}$ and $\omega_0$ ) to correlate target distributions with spatial positions. Ruppert et al. [2020] design strategies to warp the guiding distributions in the spatial subdivisions to resemble the true distribution. However, these methods adopt sophisticated strategies that are difficult to parallelize efficiently on GPUs (e.g., batched expectation-maximization (EM) applied to a varying number of mixtures) while requiring extra efforts to fit scene BSDFs for product sampling. In contrast, our method exploits trainable spatial embedding to encode the target distributions while using a decoder MLP to model the non-linearity between spatial features and PMMs in a GPU-friendly manner. Nevertheless, incorporating ideas from these studies, such as adaptively controlling the granularity of learned distributions, may further enhance our method. + +# 7 CONCLUSION, LIMITATIONS AND FUTURE WORK + +We present Neural Parametric Mixtures, a novel method for learning the target distributions for path guiding techniques. We use a compact implicit neural representation to encode the spatio-directional parametric distributions. 
Compared to previous non-neural methods that use explicit spatial subdivision structures to store directional distributions, our continuous implicit representation is simpler and more efficient while naturally avoiding the artifacts (e.g., parallax) caused by their discretized subdivision schemes. Our NPM technique could be efficiently trained with stochastic gradient descent to minimize the divergence from the target distribution. + +Despite the simplicity and effectiveness of our method, the main limitation resides in the lack of flexibility of our directional distribution representation, i.e., a fixed number of vMF components. While a similar issue exists in classical methods using PMMs [Dodik et al. 2022; Herholz et al. 2016], recent methods achieve more accurate + +![](images/4de0ae7607601ea031f8b26980ba8e4d0c7d3cc17f48b8484fc9982a6dbb12a4.jpg) +Fig. 4. Visual comparisons using the same experimental setup with Fig. 3, all are rendered with 750spp at $1280 \times 720$ . We use the online training setup for all the guiding methods, i.e., all the samples are included in the final rendering. Our method exhibits better performance than other guiding methods in most scenes by only learning the incident radiance term while further reducing the error by incorporating the BSDF term (i.e., product sampling). More results on other test scenes, additional error metrics and false-color visualizations are provided in our supplementary interactive viewer. 
+ +![](images/181fb606997372bc36fe02a725b3aeead1f4cfe80019ae1ba38494763dbe1a09.jpg) +VEACH DOOR + +![](images/e6fd5634de776096702072077954764b1f851b04c67b47deb142edfa71811598.jpg) +LIVING ROOM + +![](images/0dab85787ebf57ece3d409ae13042efbe88173d0445016fac2fde3b91dfca16d.jpg) + +![](images/9eca55ab6a658160878a3506294517e5cce89b945cfdf85fec532d3cee6b242a.jpg) + +![](images/9deff805ba49e53de948b70b54dc5f1ad8b4f9c77c5599195f5046879dc11f7a.jpg) + +![](images/08c2c9d51bb87f869a84fb7ff37be2f1cb560bc73017e4d1f4e3a4637eecad26.jpg) +VEACH EGG + +![](images/14fec7d9f02364f9e1b3cc469dbbe763bb1b753ae5ada4e828afb851b65ca47c.jpg) +SALLE DE BAIN + +![](images/459f119a1e6f73caa5bd490548bb83413ea566cd62800bcd3a50b97f94ad6164.jpg) +BATHROOM +BREAKFAST ROOM + +![](images/374aa6caa361e40fa1e2fa0c59419a8d5806a2cf71fb18ce126bd34ae3352425.jpg) +BEDROOM +WHITE ROOM + +![](images/ac207f2b0dc08e9ca00b1f7fd2da77c0830570cdc5793049f393be18285a629d.jpg) +STAIRCASE +PINK ROOM + +![](images/34654e72da7e2e9ef43f854cd70959d1671f0b7ceb629f08d155c24032847c36.jpg) +Fig. 5. Convergence plots correspond to Fig. 3 and Fig. 4. Unidirectional path tracing with BSDF importance sampling (PT-BSDF), Practical Path Guiding [Muller 2019], Variance-aware Path Guiding [Rath et al. 2020] and our method with different target distributions (NPM-radiance and NPM-product). Our methods consistently outperform these classical guiding methods, and quickly become effective even with a few training samples and short training time (e.g., 30spp, amounting to about 3 seconds on GPU), indicating practicality for preview or even interactive rendering. We attribute this success to the compact implicit representation and better spatial resolution of our method. The image results and detailed statistics could be inspected in the supplemental materials. + +directional distributions by adaptively merging and splitting the vMF components [Ruppert et al. 2020]. This, however, is non-trivial to apply to our NPM technique. 
+ +In future work, we will investigate more accurate approaches to implicitly encode parametric distributions while remaining efficient. Finding better basis functions or adaptively controlling the number of output components are two possible but challenging directions. Meanwhile, we would like to improve the efficiency of our method by using either novel architectural designs for neural networks, optimized implementation, or adapting previous extensions to path guiding algorithms. We believe these are important steps to make our method more practical for interactive or even real-time rendering pipelines, as well as other related applications that require + +fitting distributions with high-frequency spatial variations. In addition, applying our method to bidirectional path tracing [Popov et al. 2015], especially subspace probabilistic connections [Su et al. 2022], will also be an interesting future avenue. + +# ACKNOWLEDGMENTS + +This project was supported by the National Key R&D Program of China (No.2022YFB3303400) and NSFC of China (No. 62172013). We also thank the test scenes providers: Mareck (BATHROOM), Slyk-Drako (BEDROOM), Wig42 (BREAKFAST ROOM, LIVING ROOM, PINK ROOM, STAIRCASE), nacinus (SALLE DE BAIN), Jaakko Lehtinen (VEACH DOOR), Jay-Artist (WHITE ROOM), as well as the efforts for converting scene formats by Benedikt Bitterli [2016]. + +![](images/a9e3e0bca254a0b92485bb2f58af12c621eb6de5a42b0e3fcc3070347e0f8bf4.jpg) +SALLE DE BAIN + +![](images/888d6d0406b223320ab0974c9477a53a96ee8d8238c7d8fd9369753542c3a598.jpg) +Rath et al. + +![](images/7dae49ea469d37eb159d984887468aa069cec61a542793b609cae33e2b5f8746.jpg) +0.05407 + +![](images/721cfcdc5d3a0ac27c38adce4f26b3428fb63c7de4fc576c2cfea3db865be04d.jpg) +NPM (rad.) 
+ +![](images/e21985f77ddb52baeef3fabd66b4f32fc6341b6a7d5b8db27786557bde91e4dc.jpg) +0.04926 + +![](images/5101dc19c079bc740e073f41696f91e4b45adb66d30b0904a6def2725004c356.jpg) +Reference + +![](images/909a175a2fcc3697baabec10f44413f5aa481dea166ea26fc95636d89070df0c.jpg) +relMSE + +![](images/3f1102130cd97c4c939dd9fa09ed8f97c36f686554ea57ef2748057833451595.jpg) +BEDROOM +Fig. 6. Equal-time comparisons (80s) on two test scenes between NPM(radiance) and Variance-aware Path Guiding [Rath et al. 2020]. + +![](images/eb53553187bc0b187fd83239885ae0a52d57bdcadfd1a255b9daabdc747058c5.jpg) + +![](images/52f1ca1d2c54273a68d3507fd7757099974cc8b5df5f4e725bd2131d4425e717.jpg) +0.02176 + +![](images/d38b0cf0ea9559e51984689d857719730f7ec7c4438a69b2938c81644d2b5f63.jpg) + +![](images/f62aaec39bdfee8ed9c26f61ed59b7134f4b5214059c55a13757e21a964bad39.jpg) +0.01324 + +![](images/b37e2d6558a7080380e4c1b42a42cc24b1dc2668cf0b1cc5ffb8a3a703e156f5.jpg) + +![](images/261222e88bdfcbe95254dde56f97fce4ee0aab79da762475cb7d753ac46716c4.jpg) +relMSE + +![](images/7c235d11fcbe63ea0cd6d21d4a1760468d38923fc545f357e03dd55a228e84b9.jpg) +Fig. 7. We train each guiding method with small training budgets (31 spp, 63 spp, 127 spp, respectively) and render the scene with 500 spp. Our method outperforms previous methods even with much fewer training samples. + +![](images/c72949cfaa907720f948811ab1f1c468fcd58c869731f256eaa07f814824b055.jpg) +Fig. 8. Equal-time comparison (50s) of different input encoding. We report the sample count and error (relMSE) of each method. The dashed line in the plot marks the end of the training phase. The multi-resolution spatial embedding outperforms other methods while remaining training-efficient. Yellow plot refers to path tracing with BSDF importance sampling. + +![](images/877c0d7fc45351dbafd802d190c15c900b000d30915cae9165790ac96d5238e7.jpg) + +![](images/f4055e7418c0dce471dcaa70b30e4d70e2af9a7c639208ff5065e16660bec2b8.jpg) + +# REFERENCES + +Benedikt Bitterli. 2016. 
Rendering resources. https://benedikt-bitterli.me/resources/. Norbert Bus and Tamy Boubekeur. 2017. Double Hierarchies for Directional Importance Sampling in Monte Carlo Rendering. Journal of Computer Graphics Techniques (JCGT) 6, 3 (28 August 2017), 25-37. http://jcgt.org/published/0006/03/02 +R. R. Currius, D. Dolonius, U. Assarsson, and E. Sintorn. 2020. Spherical Gaussian Light-field Textures for Fast Precomputed Global Illumination. Computer Graphics Forum 39, 2 (2020), 133-146. +Laurent Dinh, Jascha Sohl-Dickstein, and Samy Bengio. 2017. Density estimation using Real NVP. In International Conference on Learning Representations. +Stavros Diolatzis, Julien Philip, and George Drettakis. 2022. Active Exploration for Neural Global Illumination of Variable Scenes. ACM Transactions on Graphics (2022). +Ana Dodik, Marios Papas, Cengiz Öztireli, and Thomas Müller. 2022. Path Guiding Using Spatio-Directional Mixture Models. In Computer Graphics Forum, Vol. 41. Wiley Online Library, 172-189. +Saeed Hadadan, Shuhong Chen, and Matthias Zwicker. 2021. Neural radiosity. ACM Transactions on Graphics (TOG) 40, 6 (2021), 1-11. +Sebastian Herholz, Oskar Elek, Jiří Vorba, Hendrik Lensch, and Jaroslav Krivánek. 2016. Product importance sampling for light transport path guiding. In Computer Graphics Forum, Vol. 35. Wiley Online Library, 67-77. +Yuchi Huo, Rui Wang, Ruzahng Zheng, Hualin Xu, Hujun Bao, and Sung-Eui Yoon. 2020. Adaptive incident radiance field sampling and reconstruction using deep reinforcement learning. ACM Transactions on Graphics (TOG) 39, 1 (2020), 1-17. +Wenzel Jakob. 2012. Numerically stable sampling of the von Mises-Fisher distribution on $S^{\wedge}2$ (and other tricks). Interactive Geometry Lab, ETH Zürich, Tech. Rep (2012), 6. +Henrik Wann Jensen. 1995. Importance driven path tracing using the photon map. In Eurographics Workshop on Rendering Techniques. Springer, 326-335. +James T. Kajiya. 1986. The Rendering Equation. SIGGRAPH Comput. Graph. (1986). 
+Diederik P. Kingma and Jimmy Ba. 2015. Adam: A Method for Stochastic Optimization. *ICLR* (2015). +Eric P Lafortune and Yves D Willems. 1995. A 5D tree to reduce the variance of Monte Carlo ray tracing. In Eurographics Workshop on Rendering Techniques. Springer, 11-20. +Samuli Laine, Tero Karras, and Timo Aila. 2013. Megakernels considered harmful: Wavefront path tracing on GPUs. In Proceedings of the 5th High-Performance Graphics Conference, 137-143. +Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. 2020. NeRF: Representing Scenes as Neural Radiance Fields for View Synthesis. in ECCV. +Thomas Müller. 2019. "Practical Path Guiding" in Production. In ACM SIGGRAPH 2019 Courses (SIGGRAPH '19). ACM, New York, NY, USA, Article 18, 77 pages. +Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. 2022. Instant Neural Graphics Primitives with a Multiresolution Hash Encoding. ACM Trans. Graph. 41, 4, Article 102 (July 2022), 15 pages. +Thomas Müller, Markus Gross, and Jan Novák. 2017. Practical path guiding for efficient light-transport simulation. In Computer Graphics Forum, Vol. 36. Wiley Online Library, 91-100. +Thomas Müller, Brian McWilliams, Fabrice Rousselle, Markus Gross, and Jan Novák. 2019. Neural importance sampling. ACM Transactions on Graphics (TOG) 38, 5 (2019), 1-19. +Thomas Müller, Fabrice Rousselle, Alexander Keller, and Jan Novák. 2020. Neural control variates. ACM Transactions on Graphics (TOG) 39, 6 (2020), 1-19. +Thomas Müller, Fabrice Rousselle, Jan Novák, and Alexander Keller. 2021. Real-Time Neural Radiance Caching for Path Tracing. ACM Trans. Graph. 40, 4, Article 36 (jul + +2021), 16 pages. +Jacob Munkberg, Jon Hasselgren, Tianchang Shen, Jun Gao, Wenzheng Chen, Alex Evans, Thomas Mueller, and Sanja Fidler. 2022. Extracting Triangular 3D Models, Materials, and Lighting From Images. CVPR (2022). +Thomas Muller. 2021. tiny-cuda-nn. 
https://github.com/NVlabs/tiny-cuda-nn +Steven G Parker, James Bigler, Andreas Dietrich, Heiko Friedrich, Jared Hoberock, David Luebke, David McAllister, Morgan McGuire, Keith Morley, Austin Robison, et al. 2010. Optix: a general purpose ray tracing engine. ACM Transactions on Graphics (TOG) 29, 4 (2010), 1-13. +S. Popov, R. Ramamoorthi, F. Durand, and G. Drettakis. 2015. Probabilistic Connections for Bidirectional Path Tracing. Computer Graphics Forum 34, 4 (07 2015), 75-86. +Alexander Rath, Pascal Grittmann, Sebastian Herholz, Petr Vévoda, Philipp Slusallek, and Jaroslav Křivánek. 2020. Variance-aware path guiding. ACM Transactions on Graphics (TOG) 39, 4 (2020), 151-1. +Lukas Ruppert, Sebastian Herholz, and Hendrik PA Lensch. 2020. Robust fitting of parallax-aware mixtures for path guiding. ACM Transactions on Graphics (TOG) 39, 4 (2020), 147-1. +Fujia Su, Sheng Li, and Guoping Wang. 2022. SPCBPT: Subspace-Based Probabilistic Connections for Bidirectional Path Tracing. ACM Trans. Graph. 41, 4, Article 77 (jul 2022), 14 pages. https://doi.org/10.1145/3528223.3530183 +Dor Verbin, Peter Hedman, Ben Mildenhall, Todd Zickler, Jonathan T. Barron, and Pratul P. Srinivasan. 2022. Ref-NeRF: Structured View-Dependent Appearance for Neural Radiance Fields. CVPR (2022). +Jiri Vorba, Johannes Hanika, Sebastian Herholz, Thomas Müller, Jaroslav Krivánek, and Alexander Keller. 2019. Path Guiding in Production. In ACM SIGGRAPH 2019 Courses (Los Angeles, California) (SIGGRAPH '19). ACM, New York, NY, USA, Article 18, 77 pages. +Jiri Vorba, Ondrej Karlik, Martin Sik, Tobias Ritschel, and Jaroslav Krivanek. 2014. On-line learning of parametric mixture models for light transport simulation. ACM Transactions on Graphics (TOG) 33, 4 (2014), 1-11. +Alex Yu, Ruilong Li, Matthew Tancik, Hao Li, Ren Ng, and Angjoo Kanazawa. 2021. PlenOctrees for Real-time Rendering of Neural Radiance Fields. In ICCV. 
+Shilin Zhu, Zexiang Xu, Tiancheng Sun, Alexandr Kuznetsov, Mark Meyer, Henrik Wann Jensen, Hao Su, and Ravi Ramamoorthi. 2021. Hierarchical neural reconstruction for path guiding using hybrid path and photon samples. ACM Transactions on Graphics (TOG) 40, 4 (2021), 1-16. \ No newline at end of file diff --git a/data/2025/2504_04xxx/2504.04315/images/012d7d72f38c99a2d6e3ef901578141684e879c1eaa6c4a2513ae3c7899fdb0e.jpg b/data/2025/2504_04xxx/2504.04315/images/012d7d72f38c99a2d6e3ef901578141684e879c1eaa6c4a2513ae3c7899fdb0e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3ad1e291d252297327572c602c4827c8ac8c69cf --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/012d7d72f38c99a2d6e3ef901578141684e879c1eaa6c4a2513ae3c7899fdb0e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7829620183323abb529ec6aca0ba3a3184b00b4105c34075b7642e942e680f97 +size 52336 diff --git a/data/2025/2504_04xxx/2504.04315/images/040ea9ba4caf98b087626d13b7305651b3dff2f6bf994b937ecc1d79ac7b96c9.jpg b/data/2025/2504_04xxx/2504.04315/images/040ea9ba4caf98b087626d13b7305651b3dff2f6bf994b937ecc1d79ac7b96c9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e296e91bc9c256ae2fe20ee85f4a69b2c285156c --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/040ea9ba4caf98b087626d13b7305651b3dff2f6bf994b937ecc1d79ac7b96c9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aab414fb1527edaa638e4d6941a0d61b72990a4bb2410eefd4d9f52e48386bb8 +size 119232 diff --git a/data/2025/2504_04xxx/2504.04315/images/06c9af7395206a5ca355fde269d48d174d47438dd29d458f5a62c727d51e3ab4.jpg b/data/2025/2504_04xxx/2504.04315/images/06c9af7395206a5ca355fde269d48d174d47438dd29d458f5a62c727d51e3ab4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..676b2e3cc5358b7fc31265066ac898f3a0b1dccf --- /dev/null +++ 
b/data/2025/2504_04xxx/2504.04315/images/06c9af7395206a5ca355fde269d48d174d47438dd29d458f5a62c727d51e3ab4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a530f0c9abafd8f7ec534b9fda03b75248924d7786da772b4804ec669da90d9e +size 9657 diff --git a/data/2025/2504_04xxx/2504.04315/images/08c2c9d51bb87f869a84fb7ff37be2f1cb560bc73017e4d1f4e3a4637eecad26.jpg b/data/2025/2504_04xxx/2504.04315/images/08c2c9d51bb87f869a84fb7ff37be2f1cb560bc73017e4d1f4e3a4637eecad26.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cf380f41c07d80f31fff4295e4518f65730df6f1 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/08c2c9d51bb87f869a84fb7ff37be2f1cb560bc73017e4d1f4e3a4637eecad26.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1cc0ae72c9037abebafa45c8e0c267c9bf9e4945ec4eb58b50d4be5d0ad86a02 +size 8964 diff --git a/data/2025/2504_04xxx/2504.04315/images/0dab85787ebf57ece3d409ae13042efbe88173d0445016fac2fde3b91dfca16d.jpg b/data/2025/2504_04xxx/2504.04315/images/0dab85787ebf57ece3d409ae13042efbe88173d0445016fac2fde3b91dfca16d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6d6cbf967a57fced14d5b5bc1bff361993c4c78c --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/0dab85787ebf57ece3d409ae13042efbe88173d0445016fac2fde3b91dfca16d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5992f720c2ca54b6321d19c64f067b1ccc46cf4864832b0e27af4867a96b10d1 +size 9040 diff --git a/data/2025/2504_04xxx/2504.04315/images/114f3498be4d0dc6542f3f5af73eb485cc7817dc6f779e6be1392b8d55dcfbea.jpg b/data/2025/2504_04xxx/2504.04315/images/114f3498be4d0dc6542f3f5af73eb485cc7817dc6f779e6be1392b8d55dcfbea.jpg new file mode 100644 index 0000000000000000000000000000000000000000..53beff9c9250a8d8bfbc03fbdd12f884315ee1ae --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/114f3498be4d0dc6542f3f5af73eb485cc7817dc6f779e6be1392b8d55dcfbea.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:98a20229f6fc054c8b7b18f4f9fe7573a9b6f669a1ae4236c153e137207187f8 +size 3548 diff --git a/data/2025/2504_04xxx/2504.04315/images/13804e2693ba41499fff7a99503e9702a3c96d40a68e57a29c67b9e74ebe2169.jpg b/data/2025/2504_04xxx/2504.04315/images/13804e2693ba41499fff7a99503e9702a3c96d40a68e57a29c67b9e74ebe2169.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f8c38bf73964b982405db0b95432c19056ee3b43 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/13804e2693ba41499fff7a99503e9702a3c96d40a68e57a29c67b9e74ebe2169.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8f2d0be44b9be46f13bc8e28f7d77ddda105caa08312ea0311f58a57632855c4 +size 4527 diff --git a/data/2025/2504_04xxx/2504.04315/images/14fec7d9f02364f9e1b3cc469dbbe763bb1b753ae5ada4e828afb851b65ca47c.jpg b/data/2025/2504_04xxx/2504.04315/images/14fec7d9f02364f9e1b3cc469dbbe763bb1b753ae5ada4e828afb851b65ca47c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5a1e75c049deabbae79368e2fc16ae54e6aceb89 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/14fec7d9f02364f9e1b3cc469dbbe763bb1b753ae5ada4e828afb851b65ca47c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:29cafc0ef15344c67bc0e5de0a68290e4414c88798ecef09b7099b1371f8c886 +size 8790 diff --git a/data/2025/2504_04xxx/2504.04315/images/181fb606997372bc36fe02a725b3aeead1f4cfe80019ae1ba38494763dbe1a09.jpg b/data/2025/2504_04xxx/2504.04315/images/181fb606997372bc36fe02a725b3aeead1f4cfe80019ae1ba38494763dbe1a09.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fa7b8fa4ee69bd5d7e9a4efc2c08a876b52d2b90 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/181fb606997372bc36fe02a725b3aeead1f4cfe80019ae1ba38494763dbe1a09.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:898e2ccee0ba0842ff4ed923c205abb1ed267f1a5c932bbf1796132059c6adf9 +size 9939 diff --git 
a/data/2025/2504_04xxx/2504.04315/images/261222e88bdfcbe95254dde56f97fce4ee0aab79da762475cb7d753ac46716c4.jpg b/data/2025/2504_04xxx/2504.04315/images/261222e88bdfcbe95254dde56f97fce4ee0aab79da762475cb7d753ac46716c4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2b8ab336e8a9fff316c476e05785baa643938696 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/261222e88bdfcbe95254dde56f97fce4ee0aab79da762475cb7d753ac46716c4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0fdcff526195216e2bad82682283c4c61800603775ca618890b66070c8ad7899 +size 2615 diff --git a/data/2025/2504_04xxx/2504.04315/images/336d47376ee5000dcbf36c6bc39487599aaa992dc6d6b3207ccf8dad24c840a5.jpg b/data/2025/2504_04xxx/2504.04315/images/336d47376ee5000dcbf36c6bc39487599aaa992dc6d6b3207ccf8dad24c840a5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d1c8a4d340de1a41010630ad587d1aa6ec63160a --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/336d47376ee5000dcbf36c6bc39487599aaa992dc6d6b3207ccf8dad24c840a5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c46a1485cd590fcacdd6b04e06cf0e93fbde62fff57273c96c9714ecbb0546f +size 6619 diff --git a/data/2025/2504_04xxx/2504.04315/images/34654e72da7e2e9ef43f854cd70959d1671f0b7ceb629f08d155c24032847c36.jpg b/data/2025/2504_04xxx/2504.04315/images/34654e72da7e2e9ef43f854cd70959d1671f0b7ceb629f08d155c24032847c36.jpg new file mode 100644 index 0000000000000000000000000000000000000000..efc9f37a8fae3145158d7422c15a6890d07a212a --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/34654e72da7e2e9ef43f854cd70959d1671f0b7ceb629f08d155c24032847c36.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:267818b705d3628f2110664f16791874a747bb255833452c26f6ebd7a3d2fb7a +size 8140 diff --git a/data/2025/2504_04xxx/2504.04315/images/374aa6caa361e40fa1e2fa0c59419a8d5806a2cf71fb18ce126bd34ae3352425.jpg 
b/data/2025/2504_04xxx/2504.04315/images/374aa6caa361e40fa1e2fa0c59419a8d5806a2cf71fb18ce126bd34ae3352425.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7a9381c69f342f66992610969f290d0244a22f4a --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/374aa6caa361e40fa1e2fa0c59419a8d5806a2cf71fb18ce126bd34ae3352425.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d9acaa4baf011183296f584573bcfa4de6e8b6c0983fd6e6bc0429b30feb573 +size 9020 diff --git a/data/2025/2504_04xxx/2504.04315/images/38e332d26df913a4036d6a6bb054d9f1724ec4e8b74fdf0ec84b6036b6735101.jpg b/data/2025/2504_04xxx/2504.04315/images/38e332d26df913a4036d6a6bb054d9f1724ec4e8b74fdf0ec84b6036b6735101.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0079a90c00ade104d2af55dd9c181ef25fea2871 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/38e332d26df913a4036d6a6bb054d9f1724ec4e8b74fdf0ec84b6036b6735101.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:52ecc40413b8b971e29c32018a8ce9d6710a61a334123e19746de1dc3f86a79d +size 24632 diff --git a/data/2025/2504_04xxx/2504.04315/images/3d837b6d0b31d48d6b2f3a3a0868f883d039f51de2c3b76dc345e1b7d59a75e7.jpg b/data/2025/2504_04xxx/2504.04315/images/3d837b6d0b31d48d6b2f3a3a0868f883d039f51de2c3b76dc345e1b7d59a75e7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8ce635178c687c0e644ffa791a2f4f37190edcb2 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/3d837b6d0b31d48d6b2f3a3a0868f883d039f51de2c3b76dc345e1b7d59a75e7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2548b4127c4c60beeec65c925133ab4a91689370ab298b26277d4d3f3a771e4c +size 21356 diff --git a/data/2025/2504_04xxx/2504.04315/images/3f1102130cd97c4c939dd9fa09ed8f97c36f686554ea57ef2748057833451595.jpg b/data/2025/2504_04xxx/2504.04315/images/3f1102130cd97c4c939dd9fa09ed8f97c36f686554ea57ef2748057833451595.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..ced80fae300e19cb18b9a05602032481c362804b --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/3f1102130cd97c4c939dd9fa09ed8f97c36f686554ea57ef2748057833451595.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:32ef44a39c185f2eb5600ca0dffdffb0eaeb8789e0cfbb472a5f4bb9bc96604b +size 10128 diff --git a/data/2025/2504_04xxx/2504.04315/images/440acf2b5615533f8d43abb25281699d9d75db99cadf0d5cfb45fa8a9e6d8ae0.jpg b/data/2025/2504_04xxx/2504.04315/images/440acf2b5615533f8d43abb25281699d9d75db99cadf0d5cfb45fa8a9e6d8ae0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3123fae54817c25a7377d39b474de9c7f633fb89 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/440acf2b5615533f8d43abb25281699d9d75db99cadf0d5cfb45fa8a9e6d8ae0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b09af5dc0be884e9978b393ffad11e82a4676890392d1c92bcd55abcdadff66e +size 6002 diff --git a/data/2025/2504_04xxx/2504.04315/images/459f119a1e6f73caa5bd490548bb83413ea566cd62800bcd3a50b97f94ad6164.jpg b/data/2025/2504_04xxx/2504.04315/images/459f119a1e6f73caa5bd490548bb83413ea566cd62800bcd3a50b97f94ad6164.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5ae3e235ec7fc9025a40da48f494e7d2d672d12e --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/459f119a1e6f73caa5bd490548bb83413ea566cd62800bcd3a50b97f94ad6164.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9d24938bcc0c6af38864e360bc93c6996555bdb8b4919dc3d4d011c033c206fa +size 8707 diff --git a/data/2025/2504_04xxx/2504.04315/images/4de0ae7607601ea031f8b26980ba8e4d0c7d3cc17f48b8484fc9982a6dbb12a4.jpg b/data/2025/2504_04xxx/2504.04315/images/4de0ae7607601ea031f8b26980ba8e4d0c7d3cc17f48b8484fc9982a6dbb12a4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c7ce9cc0afb2e996e26e87645a4bd529a18ab3ae --- /dev/null +++ 
b/data/2025/2504_04xxx/2504.04315/images/4de0ae7607601ea031f8b26980ba8e4d0c7d3cc17f48b8484fc9982a6dbb12a4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1cafe3befa89289ede7ad7eb0e1456813f542d2f3af425a5931f9fdbf175f6dc +size 390237 diff --git a/data/2025/2504_04xxx/2504.04315/images/5101dc19c079bc740e073f41696f91e4b45adb66d30b0904a6def2725004c356.jpg b/data/2025/2504_04xxx/2504.04315/images/5101dc19c079bc740e073f41696f91e4b45adb66d30b0904a6def2725004c356.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c0365f56bed18f98fae6e0925477898469c39277 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/5101dc19c079bc740e073f41696f91e4b45adb66d30b0904a6def2725004c356.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:38187a81e864920d57c9036f607eed73268cab29232d39c5eb8e65b13c9b236a +size 2507 diff --git a/data/2025/2504_04xxx/2504.04315/images/52f1ca1d2c54273a68d3507fd7757099974cc8b5df5f4e725bd2131d4425e717.jpg b/data/2025/2504_04xxx/2504.04315/images/52f1ca1d2c54273a68d3507fd7757099974cc8b5df5f4e725bd2131d4425e717.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e15df9bfaece34689e9f1493f1f6756aa6ca5c10 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/52f1ca1d2c54273a68d3507fd7757099974cc8b5df5f4e725bd2131d4425e717.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:242227fd963fae1dd09a505fbbd0ff09436fd8978e2e8262e8bcd73dcf99324d +size 3003 diff --git a/data/2025/2504_04xxx/2504.04315/images/70557cae169bc7f6f85ff903873133db7e1d46307a229ddb566e0a499f5c8651.jpg b/data/2025/2504_04xxx/2504.04315/images/70557cae169bc7f6f85ff903873133db7e1d46307a229ddb566e0a499f5c8651.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f09cfa2d2005653f2acf90cf803246433e3d3aa0 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/70557cae169bc7f6f85ff903873133db7e1d46307a229ddb566e0a499f5c8651.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:075ab3976179a0a05d4592ea2a6f4823b8d65d0654cd84cad13360e9a5789a48 +size 9112 diff --git a/data/2025/2504_04xxx/2504.04315/images/721cfcdc5d3a0ac27c38adce4f26b3428fb63c7de4fc576c2cfea3db865be04d.jpg b/data/2025/2504_04xxx/2504.04315/images/721cfcdc5d3a0ac27c38adce4f26b3428fb63c7de4fc576c2cfea3db865be04d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c8c7d7fd2cb8ad070e5e2c3d6ab9b479ce318981 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/721cfcdc5d3a0ac27c38adce4f26b3428fb63c7de4fc576c2cfea3db865be04d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:50349ba67aaec37fff25bf98e07315820286bc12e6b4b588d9984b4931775d3a +size 2730 diff --git a/data/2025/2504_04xxx/2504.04315/images/7791fafefea912ff395714d5a429e4ef823d518892e47681632a7d70725cece0.jpg b/data/2025/2504_04xxx/2504.04315/images/7791fafefea912ff395714d5a429e4ef823d518892e47681632a7d70725cece0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4cd72691b7572a6e761b6e71fbce8081d1bf02dd --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/7791fafefea912ff395714d5a429e4ef823d518892e47681632a7d70725cece0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8866c0394e30224eeca89fe69ee808e6d00737abc312457d112f82363237fae5 +size 123808 diff --git a/data/2025/2504_04xxx/2504.04315/images/7c235d11fcbe63ea0cd6d21d4a1760468d38923fc545f357e03dd55a228e84b9.jpg b/data/2025/2504_04xxx/2504.04315/images/7c235d11fcbe63ea0cd6d21d4a1760468d38923fc545f357e03dd55a228e84b9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..873d2eb6e88217b2aa19c07d469d29afa6a483f4 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/7c235d11fcbe63ea0cd6d21d4a1760468d38923fc545f357e03dd55a228e84b9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:74c7fe55d2953fe6a029829b301cc4c764fb20150a87a23d49a2964e3c58be59 +size 45881 diff --git 
a/data/2025/2504_04xxx/2504.04315/images/7dae49ea469d37eb159d984887468aa069cec61a542793b609cae33e2b5f8746.jpg b/data/2025/2504_04xxx/2504.04315/images/7dae49ea469d37eb159d984887468aa069cec61a542793b609cae33e2b5f8746.jpg new file mode 100644 index 0000000000000000000000000000000000000000..984b25f25b4dc172103312d2f44b51166100a5c6 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/7dae49ea469d37eb159d984887468aa069cec61a542793b609cae33e2b5f8746.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:393ed07ebcebd3d72a684263ac4cd8b5f08fb8a4e4e685b1b272ca331366963c +size 2889 diff --git a/data/2025/2504_04xxx/2504.04315/images/877c0d7fc45351dbafd802d190c15c900b000d30915cae9165790ac96d5238e7.jpg b/data/2025/2504_04xxx/2504.04315/images/877c0d7fc45351dbafd802d190c15c900b000d30915cae9165790ac96d5238e7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e65e2a5914c0c9fb3e2d0231d0befd58e41a9833 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/877c0d7fc45351dbafd802d190c15c900b000d30915cae9165790ac96d5238e7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5a267b394c0200b3ac5dceb18cd67ecaba057973787d8e74dff4a9f177ba20fd +size 12520 diff --git a/data/2025/2504_04xxx/2504.04315/images/888d6d0406b223320ab0974c9477a53a96ee8d8238c7d8fd9369753542c3a598.jpg b/data/2025/2504_04xxx/2504.04315/images/888d6d0406b223320ab0974c9477a53a96ee8d8238c7d8fd9369753542c3a598.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8c9862fc66b70ac265cdb92bc3936e7c2b873dd3 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/888d6d0406b223320ab0974c9477a53a96ee8d8238c7d8fd9369753542c3a598.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b624135a0955eeeb43e0cbf29cc4156b0af4c275f9f8e399d430b874283aa51b +size 3001 diff --git a/data/2025/2504_04xxx/2504.04315/images/8e2773c3b9f41b1d1f0a888ea191830ca3880040a24fbbf6b2bba09bdf4a08d2.jpg 
b/data/2025/2504_04xxx/2504.04315/images/8e2773c3b9f41b1d1f0a888ea191830ca3880040a24fbbf6b2bba09bdf4a08d2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..83a586241ce61ecac44a1a48d244a95cdc282ee3 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/8e2773c3b9f41b1d1f0a888ea191830ca3880040a24fbbf6b2bba09bdf4a08d2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:713ec0aebe64426351ed9c81651589db68f97f86b253b14779cf98d25f199e87 +size 6464 diff --git a/data/2025/2504_04xxx/2504.04315/images/909a175a2fcc3697baabec10f44413f5aa481dea166ea26fc95636d89070df0c.jpg b/data/2025/2504_04xxx/2504.04315/images/909a175a2fcc3697baabec10f44413f5aa481dea166ea26fc95636d89070df0c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9ddd45a748fc3005d73f2a9baabef87c95e9a8cf --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/909a175a2fcc3697baabec10f44413f5aa481dea166ea26fc95636d89070df0c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7fa6d877fd263e0fcda341ac2772846d7d379950a3584939eae3cf5b3662e642 +size 2558 diff --git a/data/2025/2504_04xxx/2504.04315/images/986c320f1e4cfbcb1abe6476fb55b4d185b09327f49c6d2c948d2cd40e7b2fb4.jpg b/data/2025/2504_04xxx/2504.04315/images/986c320f1e4cfbcb1abe6476fb55b4d185b09327f49c6d2c948d2cd40e7b2fb4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cc4c6dc38dcff0b50383d7c5061f210bf185f3b5 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/986c320f1e4cfbcb1abe6476fb55b4d185b09327f49c6d2c948d2cd40e7b2fb4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cb026d2a8ebb092fb973b00ca300fd9546de6b06f790e363016c239dbb213c30 +size 4828 diff --git a/data/2025/2504_04xxx/2504.04315/images/9deff805ba49e53de948b70b54dc5f1ad8b4f9c77c5599195f5046879dc11f7a.jpg b/data/2025/2504_04xxx/2504.04315/images/9deff805ba49e53de948b70b54dc5f1ad8b4f9c77c5599195f5046879dc11f7a.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..8784dc6854e2ae538a48802547337761b4d62b9e --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/9deff805ba49e53de948b70b54dc5f1ad8b4f9c77c5599195f5046879dc11f7a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:00f5cd4fc1c8994b1258f53e4a1f6c453ce572d4f22856a1f46f852016197a53 +size 8888 diff --git a/data/2025/2504_04xxx/2504.04315/images/9eca55ab6a658160878a3506294517e5cce89b945cfdf85fec532d3cee6b242a.jpg b/data/2025/2504_04xxx/2504.04315/images/9eca55ab6a658160878a3506294517e5cce89b945cfdf85fec532d3cee6b242a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..abe4bf3304341737b68668e12e4e9a6c13dd9fb1 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/9eca55ab6a658160878a3506294517e5cce89b945cfdf85fec532d3cee6b242a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b48262ea74656810851bc69003e7932ea1f87e80f9faf8122f398046ac0d6b2 +size 8163 diff --git a/data/2025/2504_04xxx/2504.04315/images/a8cf85955cee9b1cabbf151d19f4dd028a439f2aa721b530832c8cf383f8faac.jpg b/data/2025/2504_04xxx/2504.04315/images/a8cf85955cee9b1cabbf151d19f4dd028a439f2aa721b530832c8cf383f8faac.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d9ef4a6c29d7bdb4c8d68d7e514bddd97312a4c2 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/a8cf85955cee9b1cabbf151d19f4dd028a439f2aa721b530832c8cf383f8faac.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a627a42d85ff89c3bb1bcd0294698f091fcf80a1c9154c4bb62eb4c225c94e10 +size 3331 diff --git a/data/2025/2504_04xxx/2504.04315/images/a9e3e0bca254a0b92485bb2f58af12c621eb6de5a42b0e3fcc3070347e0f8bf4.jpg b/data/2025/2504_04xxx/2504.04315/images/a9e3e0bca254a0b92485bb2f58af12c621eb6de5a42b0e3fcc3070347e0f8bf4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..22f528a8735e65690392fc1aa4c50c3e26bd6724 --- /dev/null +++ 
b/data/2025/2504_04xxx/2504.04315/images/a9e3e0bca254a0b92485bb2f58af12c621eb6de5a42b0e3fcc3070347e0f8bf4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6e9a153e6de753c6e384c140591ca06c296dd74b5d77cd0f8128451a5f1b69db +size 9845 diff --git a/data/2025/2504_04xxx/2504.04315/images/ac207f2b0dc08e9ca00b1f7fd2da77c0830570cdc5793049f393be18285a629d.jpg b/data/2025/2504_04xxx/2504.04315/images/ac207f2b0dc08e9ca00b1f7fd2da77c0830570cdc5793049f393be18285a629d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e88dd73a7cae8d74bb774105e584bb185f0b5241 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/ac207f2b0dc08e9ca00b1f7fd2da77c0830570cdc5793049f393be18285a629d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cb9d270f8cd8065f559b41b15cf4b898151ef192802507aa0b4f8f968a4c301d +size 9271 diff --git a/data/2025/2504_04xxx/2504.04315/images/b19615e5a3ede50f569434550e990b805b884c04382bfcf3f74269025fb14c49.jpg b/data/2025/2504_04xxx/2504.04315/images/b19615e5a3ede50f569434550e990b805b884c04382bfcf3f74269025fb14c49.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d09d3130fc560c3572c6ce4aff9b3fd94896a9b7 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/b19615e5a3ede50f569434550e990b805b884c04382bfcf3f74269025fb14c49.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4c7c476c2b3c04f2399463a08e3c8a25731d0dcd6a9ef744c7ae66cfe6db4c47 +size 6778 diff --git a/data/2025/2504_04xxx/2504.04315/images/b37e2d6558a7080380e4c1b42a42cc24b1dc2668cf0b1cc5ffb8a3a703e156f5.jpg b/data/2025/2504_04xxx/2504.04315/images/b37e2d6558a7080380e4c1b42a42cc24b1dc2668cf0b1cc5ffb8a3a703e156f5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2c3287d683e70954be83142005d3cbb50af34922 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/b37e2d6558a7080380e4c1b42a42cc24b1dc2668cf0b1cc5ffb8a3a703e156f5.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:af800eaa97976c31a54a51fec6a7ad9216f6e4177ae5241a3b10897db341a497 +size 2777 diff --git a/data/2025/2504_04xxx/2504.04315/images/bb3e8c4f8883ed1a419dd0791f442f644ccdbf4d0d08c9d0fee74d1545d4ee4c.jpg b/data/2025/2504_04xxx/2504.04315/images/bb3e8c4f8883ed1a419dd0791f442f644ccdbf4d0d08c9d0fee74d1545d4ee4c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9f007cb3dea43a357b99d07c7a4d0872cb8d0a80 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/bb3e8c4f8883ed1a419dd0791f442f644ccdbf4d0d08c9d0fee74d1545d4ee4c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8edb0a2d13b8cdccee10f36246761ee57f36803b9fe2117a65c947db433f4293 +size 4953 diff --git a/data/2025/2504_04xxx/2504.04315/images/c72949cfaa907720f948811ab1f1c468fcd58c869731f256eaa07f814824b055.jpg b/data/2025/2504_04xxx/2504.04315/images/c72949cfaa907720f948811ab1f1c468fcd58c869731f256eaa07f814824b055.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0ad35bf00ff231a8ad546216916f15bc834bcea7 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/c72949cfaa907720f948811ab1f1c468fcd58c869731f256eaa07f814824b055.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:17e3c8af1e8e690653b157d06250506b7be0ba8a8909b9361be12ca77d5fd2bc +size 12279 diff --git a/data/2025/2504_04xxx/2504.04315/images/c8f2e9942956186ed669ad52cd26876b7478f8b87e302b3e711bf9fba9be9b65.jpg b/data/2025/2504_04xxx/2504.04315/images/c8f2e9942956186ed669ad52cd26876b7478f8b87e302b3e711bf9fba9be9b65.jpg new file mode 100644 index 0000000000000000000000000000000000000000..782dd369239dca9c7decab1cd216fd05950ef9d5 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/c8f2e9942956186ed669ad52cd26876b7478f8b87e302b3e711bf9fba9be9b65.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9fe2739db3ea95d2111f756589fad41f2f4997c69ba62e917200723b3299efc7 +size 7192 diff --git 
a/data/2025/2504_04xxx/2504.04315/images/d38b0cf0ea9559e51984689d857719730f7ec7c4438a69b2938c81644d2b5f63.jpg b/data/2025/2504_04xxx/2504.04315/images/d38b0cf0ea9559e51984689d857719730f7ec7c4438a69b2938c81644d2b5f63.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0d43fe6139d1e745245aab474d6aa681d5f44638 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/d38b0cf0ea9559e51984689d857719730f7ec7c4438a69b2938c81644d2b5f63.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c04d7d0b973152dfb109c11241331812cafd7145b0c98410f3e629362f9e018f +size 2832 diff --git a/data/2025/2504_04xxx/2504.04315/images/dd07904043260f0a009dc49ec665e10ee10b78a2c27644e203ca8e7682aa49a3.jpg b/data/2025/2504_04xxx/2504.04315/images/dd07904043260f0a009dc49ec665e10ee10b78a2c27644e203ca8e7682aa49a3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9ac73c50bea7bed8401929cc25248199c26dc341 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/dd07904043260f0a009dc49ec665e10ee10b78a2c27644e203ca8e7682aa49a3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d38c9fabece2d24172709bc50786d24c9a9ee5765f153e2b9625663a9036db30 +size 4310 diff --git a/data/2025/2504_04xxx/2504.04315/images/e21985f77ddb52baeef3fabd66b4f32fc6341b6a7d5b8db27786557bde91e4dc.jpg b/data/2025/2504_04xxx/2504.04315/images/e21985f77ddb52baeef3fabd66b4f32fc6341b6a7d5b8db27786557bde91e4dc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..934bbe8ca1bae2100d60b2b3650c027bd2c38d04 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/e21985f77ddb52baeef3fabd66b4f32fc6341b6a7d5b8db27786557bde91e4dc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7f8fe59f2d6ac9ef339b2a9185ebd27b1d2673ca0e7240bf01024a808f58cca1 +size 2659 diff --git a/data/2025/2504_04xxx/2504.04315/images/e6fd5634de776096702072077954764b1f851b04c67b47deb142edfa71811598.jpg 
b/data/2025/2504_04xxx/2504.04315/images/e6fd5634de776096702072077954764b1f851b04c67b47deb142edfa71811598.jpg new file mode 100644 index 0000000000000000000000000000000000000000..21cdaf43f5c688b07b26cb6d3a2d48d4dbb0821e --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/e6fd5634de776096702072077954764b1f851b04c67b47deb142edfa71811598.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cb7908ea4c6cc88e7dfa69dc6bd5b7e8c6c610093b2c29047913b41783d2d025 +size 8781 diff --git a/data/2025/2504_04xxx/2504.04315/images/eb53553187bc0b187fd83239885ae0a52d57bdcadfd1a255b9daabdc747058c5.jpg b/data/2025/2504_04xxx/2504.04315/images/eb53553187bc0b187fd83239885ae0a52d57bdcadfd1a255b9daabdc747058c5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1f2d80b534c4d3055af08fda207b0c9373e5e6aa --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/eb53553187bc0b187fd83239885ae0a52d57bdcadfd1a255b9daabdc747058c5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e3118fd96c6eba6410553e553902b17038360a981deb3481746417b5cea1c5cd +size 3016 diff --git a/data/2025/2504_04xxx/2504.04315/images/ed320d36144b5356fcf647125f5d2db6245585542c764e5086e388579fd22675.jpg b/data/2025/2504_04xxx/2504.04315/images/ed320d36144b5356fcf647125f5d2db6245585542c764e5086e388579fd22675.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c8ac7b2fb8ff75f03b10a148a8c86c4e5d636398 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/ed320d36144b5356fcf647125f5d2db6245585542c764e5086e388579fd22675.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ba9e7a17a31328caefd1789003839c8ae8d61d8bc19ccdcc623b5823823b1826 +size 7544 diff --git a/data/2025/2504_04xxx/2504.04315/images/f4055e7418c0dce471dcaa70b30e4d70e2af9a7c639208ff5065e16660bec2b8.jpg b/data/2025/2504_04xxx/2504.04315/images/f4055e7418c0dce471dcaa70b30e4d70e2af9a7c639208ff5065e16660bec2b8.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..192f4798b3b396209ecfb6e9dee64d69d3e99042 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/f4055e7418c0dce471dcaa70b30e4d70e2af9a7c639208ff5065e16660bec2b8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6bbdb896206f94d85c5e2d028fc9301fc9bd8348be247aea6987a7ccd01fb44b +size 10617 diff --git a/data/2025/2504_04xxx/2504.04315/images/f62aaec39bdfee8ed9c26f61ed59b7134f4b5214059c55a13757e21a964bad39.jpg b/data/2025/2504_04xxx/2504.04315/images/f62aaec39bdfee8ed9c26f61ed59b7134f4b5214059c55a13757e21a964bad39.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1d63da2603a6b2c7800b0aad2b3fda1728b7bc68 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/images/f62aaec39bdfee8ed9c26f61ed59b7134f4b5214059c55a13757e21a964bad39.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e21aa068084428e2d89245d54a53c8d793d79f52572b091358c4730739218e33 +size 2987 diff --git a/data/2025/2504_04xxx/2504.04315/layout.json b/data/2025/2504_04xxx/2504.04315/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..a4faa31dd24df35ea7f79c6ce224202540398336 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04315/layout.json @@ -0,0 +1,11637 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 49, + 76, + 373, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 76, + 373, + 95 + ], + "spans": [ + { + "bbox": [ + 49, + 76, + 373, + 95 + ], + "type": "text", + "content": "Neural Parametric Mixtures for Path Guiding" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 103, + 257, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 103, + 257, + 144 + ], + "spans": [ + { + "bbox": [ + 48, + 103, + 257, + 144 + ], + "type": "text", + "content": "HONGHAO DONG, Peking University, China \nGUOPING WANG, Peking University, China \nSHENG LI*, Peking University, China" + } + ] + } + ], + "index": 
2 + }, + { + "bbox": [ + 48, + 149, + 295, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 149, + 295, + 289 + ], + "spans": [ + { + "bbox": [ + 48, + 149, + 295, + 289 + ], + "type": "text", + "content": "Previous path guiding techniques typically rely on spatial subdivision structures to approximate directional target distributions, which may cause failure to capture spatio-directional correlations and introduce parallax issue. In this paper, we present Neural Parametric Mixtures (NPM), a neural formulation to encode target distributions for path guiding algorithms. We propose to use a continuous and compact neural implicit representation for encoding parametric models while decoding them via lightweight neural networks. We then derive a gradient-based optimization strategy to directly train the parameters of NPM with noisy Monte Carlo radiance estimates. Our approach efficiently models the target distribution (incident radiance or the product integrand) for path guiding, and outperforms previous guiding methods by capturing the spatio-directional correlations more accurately. Moreover, our approach is more training efficient and is practical for parallelization on modern GPUs." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 294, + 295, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 294, + 295, + 315 + ], + "spans": [ + { + "bbox": [ + 48, + 294, + 295, + 315 + ], + "type": "text", + "content": "CCS Concepts: Computing methodologies " + }, + { + "bbox": [ + 48, + 294, + 295, + 315 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 48, + 294, + 295, + 315 + ], + "type": "text", + "content": " Ray tracing; Neural networks." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 319, + 295, + 339 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 319, + 295, + 339 + ], + "spans": [ + { + "bbox": [ + 48, + 319, + 295, + 339 + ], + "type": "text", + "content": "Additional Key Words and Phrases: Ray Tracing, Global Illumination, Sampling and Reconstruction, Neural Networks, Mixture Models" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 344, + 139, + 354 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 344, + 139, + 354 + ], + "spans": [ + { + "bbox": [ + 48, + 344, + 139, + 354 + ], + "type": "text", + "content": "ACM Reference Format:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 354, + 295, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 354, + 295, + 384 + ], + "spans": [ + { + "bbox": [ + 48, + 354, + 295, + 384 + ], + "type": "text", + "content": "Honghao Dong, Guoping Wang, and Sheng Li. 2025. Neural Parametric Mixtures for Path Guiding. 1, 1 (April 2025), 10 pages. https://doi.org/10.1145/3588432.3591533" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 49, + 395, + 139, + 405 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 395, + 139, + 405 + ], + "spans": [ + { + "bbox": [ + 49, + 395, + 139, + 405 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 409, + 295, + 541 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 409, + 295, + 541 + ], + "spans": [ + { + "bbox": [ + 48, + 409, + 295, + 541 + ], + "type": "text", + "content": "The efficiency of path tracing relies heavily on the sampling strategy. To further improve its efficiency and robustness, path guiding algorithms leverage the knowledge gained during rendering to facilitate the process of light-path construction, thereby reducing noise. 
To acquire better importance sampling distribution, local path guiding techniques employ previous radiance estimates to learn an approximation of spatial incident radiance fields, which are then used to guide the construction of paths. In practice, current methods typically use some representation (e.g., Gaussian mixtures [Herholz et al. 2016; Vorba et al. 2014], quadtrees [Müller et al. 2017]) to approximate the directional distribution of incident radiance. A spatial subdivision structure (e.g., kd-tree [Dodik et al. 2022], or octree [Bus" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 550, + 118, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 550, + 118, + 559 + ], + "spans": [ + { + "bbox": [ + 48, + 550, + 118, + 559 + ], + "type": "text", + "content": "*Corresponding author." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 559, + 176, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 559, + 176, + 567 + ], + "spans": [ + { + "bbox": [ + 48, + 559, + 176, + 567 + ], + "type": "text", + "content": "Project website: https://neuropara.github.io." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 577, + 295, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 577, + 295, + 602 + ], + "spans": [ + { + "bbox": [ + 48, + 577, + 295, + 602 + ], + "type": "text", + "content": "Authors' addresses: Honghao Dong, Peking University, Beijing, China, cuteday@pku.edu.cn; Guoping Wang, Peking University, Beijing, China, wgp@pku.edu.cn; Sheng Li, Peking University, Beijing, China, lisheng@pku.edu.cn." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 611, + 295, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 611, + 295, + 669 + ], + "spans": [ + { + "bbox": [ + 48, + 611, + 295, + 669 + ], + "type": "text", + "content": "Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than ACM must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 48, + 669, + 183, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 669, + 183, + 677 + ], + "spans": [ + { + "bbox": [ + 48, + 669, + 183, + 677 + ], + "type": "text", + "content": "© 2025 Association for Computing Machinery." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 48, + 677, + 146, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 677, + 146, + 684 + ], + "spans": [ + { + "bbox": [ + 48, + 677, + 146, + 684 + ], + "type": "text", + "content": "XXXX-XXXX/2025/4-ART $15.00" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 48, + 685, + 165, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 685, + 165, + 693 + ], + "spans": [ + { + "bbox": [ + 48, + 685, + 165, + 693 + ], + "type": "text", + "content": "https://doi.org/10.1145/3588432.3591533" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 314, + 148, + 560, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 148, + 560, + 171 + ], + "spans": [ + { + "bbox": [ + 314, + 148, + 560, + 171 + ], + "type": "text", + "content": "and Boubekeur 2017]) is then used to store these distributions, thus accounting for the spatial variations." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 171, + 561, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 171, + 561, + 281 + ], + "spans": [ + { + "bbox": [ + 313, + 171, + 561, + 281 + ], + "type": "text", + "content": "However, several key deficiencies remain in their paradigm. Most methods learn the marginalized incident radiance distribution within each subdivided spatial region. This fails to capture the spatiodirectional correlations within the spatial discretizations, and could cause artifacts (e.g., parallax error, Fig 1(a)). Moreover, their spatial subdivision structures are subject to frequent reconstruction for finer-grained spatial resolution, which needs extra overhead and require a long training time to converge. Meanwhile, it is challenging to efficiently fit these specific directional distributions from noisy samples, especially in an online manner [Ruppert et al. 2020]." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 281, + 561, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 281, + 561, + 456 + ], + "spans": [ + { + "bbox": [ + 313, + 281, + 561, + 456 + ], + "type": "text", + "content": "While an adaptive and robust spatial representation is difficult to achieve with manually designed subdivision schemes, we saw the recent success of neural implicit representation in compactly modeling spatially varying functions with fine-grained and high-frequency details [Mildenhall et al. 2020]. In this work, we exploit the great expressiveness of neural implicit representation while preserving the desirable properties of parametric mixture models (e.g. efficient importance sampling) for path guiding algorithms. We thereby present Neural Parametric Mixtures (NPM), which use a continuous and compact implicit representation to encode spati-directional target distributions, and decode them into PMMs with lightweight neural networks for fast importance sampling. We show that our NPM representation, without explicit spatial subdivision schemes, can be efficiently trained simply using gradient-based optimization techniques. Specifically, our method has advantages in the following aspects:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 456, + 561, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 456, + 561, + 521 + ], + "spans": [ + { + "bbox": [ + 313, + 456, + 561, + 521 + ], + "type": "text", + "content": "First, our continuous implicit representation of spatial radiance fields naturally captures the correlations between spatial positions and directional target distributions. By smoothly interpolating and decoding the implicit representations with neural networks, our method inherently avoids the issues due to spatial discretization, thus resulting in higher performance." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 313, + 521, + 561, + 598 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 521, + 561, + 598 + ], + "spans": [ + { + "bbox": [ + 313, + 521, + 561, + 598 + ], + "type": "text", + "content": "Second, our compact representation avoids the extra overhead and long training time caused by the iterative reconstruction strategies applied to the explicit spatial subdivision structures. Combined with our simple optimization based on stochastic gradient descent, our method outperforms other guiding methods even with fewer training samples. In addition, our method is practical and performant for parallelization on GPU." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 313, + 598, + 561, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 598, + 561, + 654 + ], + "spans": [ + { + "bbox": [ + 313, + 598, + 561, + 654 + ], + "type": "text", + "content": "Lastly, our method can learn the product distribution (i.e., multiplied by the BSDF and the cosine term). This further reduces the noise with a modest computational overhead while not requiring the extra effort of previous solutions (e.g., fitting each BSDF with pre-computed parametric models)." + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 213, + 37, + 555 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 213, + 37, + 555 + ], + "spans": [ + { + "bbox": [ + 14, + 213, + 37, + 555 + ], + "type": "text", + "content": "arXiv:2504.04315v1 [cs.GR] 6 Apr 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 411, + 708, + 560, + 717 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 411, + 708, + 560, + 717 + ], + "spans": [ + { + "bbox": [ + 411, + 708, + 560, + 717 + ], + "type": "text", + "content": "Vol. 1, No. 1, Article. Publication date: April 2025." 
+ } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 49, + 79, + 138, + 89 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 79, + 138, + 89 + ], + "spans": [ + { + "bbox": [ + 49, + 79, + 138, + 89 + ], + "type": "text", + "content": "2 RELATED WORK" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 93, + 295, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 93, + 295, + 224 + ], + "spans": [ + { + "bbox": [ + 48, + 93, + 295, + 224 + ], + "type": "text", + "content": "Path Guiding. To achieve better sampling strategies, local path guiding techniques leverage previous radiance estimates (either online or during a pre-computation process) to build an approximation of the incident radiance fields, which is used to guide subsequent sampling. Early approaches used simple bases such as histograms for importance sampling, e.g. built from a photon map [Jensen 1995] or collected radiance estimates with 5-D tree structures [Lafortune and Willems 1995]. Subsequent work has developed various techniques to construct the guiding distribution, e.g., Gaussian mixtures [Vorba et al. 2014], quad-trees [Müller et al. 2017], which is often stored in spatial data structures (e.g., kd-tree and octree) to account for spatial variations of the distributions." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 225, + 295, + 379 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 225, + 295, + 379 + ], + "spans": [ + { + "bbox": [ + 48, + 225, + 295, + 379 + ], + "type": "text", + "content": "Deep learning techniques have also been explored recently, achieving improvements while often with less practical performance. For example, convolutional networks could be used to reconstruct the learned noisy radiance field [Huo et al. 2020; Zhu et al. 2021]. 
Specifically designed neural networks could also model complex manifolds [Dinh et al. 2017], while allowing samples to be drawn directly from the learned distribution [Müller et al. 2019]. However, the prohibitive computational cost prevents its practical application [Müller et al. 2019; Vorba et al. 2019]. Instead of directly importance sampling using neural networks, we encode the target distribution into implicit neural representation, and use only lightweight MLPs to decode it into parametric mixtures for efficient sampling. We show that our method can be efficiently trained (< 10s per scene on a single GPU) while being sufficiently robust and practical." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 388, + 295, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 388, + 295, + 509 + ], + "spans": [ + { + "bbox": [ + 48, + 388, + 295, + 509 + ], + "type": "text", + "content": "Parametric Mixture Models. Parametric mixture models (PMMs) are convex combinations of parametric distributions, and are often used to approximate directional distributions in graphics applications. They have many desirable properties, e.g., fast sampling, and closed-form solutions for products, convolutions and integrals. Several types of PMMs (e.g., Gaussian mixtures [Dodik et al. 2022; Vorba et al. 2014] and von Mises-Fisher mixtures [Ruppert et al. 2020]) are widely used in the recently developed path guiding algorithms. Several recent works also use PMMs to fit BSDFs with precomputation [Herholz et al. 2016; Ruppert et al. 2020], and multiply them with the learned incident radiance to achieve product sampling." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 509, + 295, + 564 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 509, + 295, + 564 + ], + "spans": [ + { + "bbox": [ + 48, + 509, + 295, + 564 + ], + "type": "text", + "content": "Parametric models can also be predicted by neural networks, enabling new possibilities for e.g. lighting [Currius et al. 2020] and reconstruction [Yu et al. 2021] tasks. In this work, we use neural representations to encode parametric mixtures for efficient sampling. Our method is also naturally extensible to product sampling." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 573, + 295, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 573, + 295, + 694 + ], + "spans": [ + { + "bbox": [ + 48, + 573, + 295, + 694 + ], + "type": "text", + "content": "Implicit Neural Representation. Following the success of using neural networks to represent 3D scenes implicitly [Mildenhall et al. 2020], the concept of neural representation has been popularized and applied to various tasks. They use sparse input images to optimize the spatial radiance fields via a differentiable volume rendering procedure, thus enabling novel view synthesis. Inspired by its recent successful applications [Diolatzis et al. 2022; Müller et al. 2022], we exploit a continuous and compact implicit neural representation to encode the spatio-directional target distributions for path guiding algorithms. 
While the ground truth target distribution (i.e., the incident radiance or product distribution) is unknown, our NPM" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 314, + 79, + 561, + 113 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 79, + 561, + 113 + ], + "spans": [ + { + "bbox": [ + 314, + 79, + 561, + 113 + ], + "type": "text", + "content": "representation can be optimized online using minibatch stochastic gradient descent (SGD), where the gradients for training are estimated by Monte Carlo integration using noisy radiance estimates." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 315, + 121, + 395, + 132 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 121, + 395, + 132 + ], + "spans": [ + { + "bbox": [ + 315, + 121, + 395, + 132 + ], + "type": "text", + "content": "3 PRELIMINARY" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 314, + 136, + 561, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 136, + 561, + 159 + ], + "spans": [ + { + "bbox": [ + 314, + 136, + 561, + 159 + ], + "type": "text", + "content": "Monte Carlo Integration. 
Light transport algorithms are generally based on the rendering equation [Kajiya 1986]:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 319, + 162, + 561, + 185 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 162, + 561, + 185 + ], + "spans": [ + { + "bbox": [ + 319, + 162, + 561, + 185 + ], + "type": "interline_equation", + "content": "L _ {0} (\\mathbf {x}, \\omega_ {0}) = L _ {\\mathrm {e}} (\\mathbf {x}, \\omega_ {0}) + \\int_ {\\Omega} f _ {\\mathrm {s}} (\\mathbf {x}, \\omega_ {0}, \\omega_ {\\mathrm {i}}) L _ {\\mathrm {i}} (\\mathbf {x}, \\omega_ {\\mathrm {i}}) | \\cos \\theta_ {\\mathrm {i}} | d \\omega_ {\\mathrm {i}}, \\tag {1}", + "image_path": "c8f2e9942956186ed669ad52cd26876b7478f8b87e302b3e711bf9fba9be9b65.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 314, + 186, + 561, + 241 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 186, + 561, + 241 + ], + "spans": [ + { + "bbox": [ + 314, + 186, + 561, + 241 + ], + "type": "text", + "content": "which defines the relationship between the outgoing radiance " + }, + { + "bbox": [ + 314, + 186, + 561, + 241 + ], + "type": "inline_equation", + "content": "L_{\\mathrm{o}}" + }, + { + "bbox": [ + 314, + 186, + 561, + 241 + ], + "type": "text", + "content": ", emitted radiance " + }, + { + "bbox": [ + 314, + 186, + 561, + 241 + ], + "type": "inline_equation", + "content": "L_{e}" + }, + { + "bbox": [ + 314, + 186, + 561, + 241 + ], + "type": "text", + "content": ", and the integrated incident radiance " + }, + { + "bbox": [ + 314, + 186, + 561, + 241 + ], + "type": "inline_equation", + "content": "L_{\\mathrm{i}}" + }, + { + "bbox": [ + 314, + 186, + 561, + 241 + ], + "type": "text", + "content": ", at shading point " + }, + { + "bbox": [ + 314, + 186, + 561, + 241 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 314, + 186, + 561, + 241 + ], + "type": "text", + "content": ". 
Monte Carlo integration is used to obtain an estimate of the reflection integral " + }, + { + "bbox": [ + 314, + 186, + 561, + 241 + ], + "type": "inline_equation", + "content": "L_{r}" + }, + { + "bbox": [ + 314, + 186, + 561, + 241 + ], + "type": "text", + "content": " using an average of " + }, + { + "bbox": [ + 314, + 186, + 561, + 241 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 314, + 186, + 561, + 241 + ], + "type": "text", + "content": " samples. In the case where " + }, + { + "bbox": [ + 314, + 186, + 561, + 241 + ], + "type": "inline_equation", + "content": "N = 1" + }, + { + "bbox": [ + 314, + 186, + 561, + 241 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 355, + 243, + 561, + 269 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 355, + 243, + 561, + 269 + ], + "spans": [ + { + "bbox": [ + 355, + 243, + 561, + 269 + ], + "type": "interline_equation", + "content": "\\left\\langle L _ {\\mathrm {r}} \\left(\\mathbf {x}, \\omega_ {\\mathrm {o}}\\right) \\right\\rangle = \\frac {f _ {\\mathrm {s}} \\left(\\mathbf {x} , \\omega_ {\\mathrm {o}} , \\omega_ {\\mathrm {i}}\\right) L _ {\\mathrm {i}} \\left(\\mathbf {x} , \\omega_ {\\mathrm {i}}\\right) \\left| \\cos \\theta_ {\\mathrm {i}} \\right|}{p \\left(\\omega_ {\\mathrm {i}} \\mid \\mathbf {x} , \\omega_ {\\mathrm {o}}\\right)}, \\tag {2}", + "image_path": "ed320d36144b5356fcf647125f5d2db6245585542c764e5086e388579fd22675.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 314, + 273, + 561, + 416 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 273, + 561, + 416 + ], + "spans": [ + { + "bbox": [ + 314, + 273, + 561, + 416 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 314, + 273, + 561, + 416 + ], + "type": "inline_equation", + "content": "\\langle L_{\\mathrm{r}}(\\mathbf{x},\\omega_0)\\rangle" + }, + { + "bbox": [ + 314, + 273, + 561, + 416 + 
], + "type": "text", + "content": " is an unbiased estimate of the outgoing radiance " + }, + { + "bbox": [ + 314, + 273, + 561, + 416 + ], + "type": "inline_equation", + "content": "L_{\\mathrm{r}}(\\mathbf{x},\\omega_0)" + }, + { + "bbox": [ + 314, + 273, + 561, + 416 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 314, + 273, + 561, + 416 + ], + "type": "inline_equation", + "content": "\\omega_{i}" + }, + { + "bbox": [ + 314, + 273, + 561, + 416 + ], + "type": "text", + "content": " is the incident direction sampled with some directional probability distribution " + }, + { + "bbox": [ + 314, + 273, + 561, + 416 + ], + "type": "inline_equation", + "content": "p(\\omega_{\\mathrm{i}}\\mid \\mathbf{x},\\omega_{\\mathrm{o}})" + }, + { + "bbox": [ + 314, + 273, + 561, + 416 + ], + "type": "text", + "content": ". The variance of this estimator " + }, + { + "bbox": [ + 314, + 273, + 561, + 416 + ], + "type": "inline_equation", + "content": "V[\\langle L_{\\mathrm{r}}\\rangle ]" + }, + { + "bbox": [ + 314, + 273, + 561, + 416 + ], + "type": "text", + "content": " can be reduced if the sampling distribution resembles the shape of the integrand, and could even reach zero variance if being proportional to it (i.e., " + }, + { + "bbox": [ + 314, + 273, + 561, + 416 + ], + "type": "inline_equation", + "content": "p\\propto f_s\\cdot L_i\\cos \\theta_i" + }, + { + "bbox": [ + 314, + 273, + 561, + 416 + ], + "type": "text", + "content": "). This, however, is difficult to achieve with only BSDF importance sampling, leaving the remaining part of the integrand (i.e., the incident radiance) unknown, resulting in a relatively high variance of the MC estimator. 
Path guiding algorithms, on the other hand, manage to obtain better importance sampling strategies often by using previous radiance samples to approximate the incident radiance " + }, + { + "bbox": [ + 314, + 273, + 561, + 416 + ], + "type": "inline_equation", + "content": "L_{\\mathrm{i}}" + }, + { + "bbox": [ + 314, + 273, + 561, + 416 + ], + "type": "text", + "content": " or the full integrand " + }, + { + "bbox": [ + 314, + 273, + 561, + 416 + ], + "type": "inline_equation", + "content": "f_{s}\\cdot L_{\\mathrm{i}}\\cos \\theta_{i}" + }, + { + "bbox": [ + 314, + 273, + 561, + 416 + ], + "type": "text", + "content": ", which will be discussed later." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 314, + 421, + 561, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 421, + 561, + 443 + ], + "spans": [ + { + "bbox": [ + 314, + 421, + 561, + 443 + ], + "type": "text", + "content": "Von Mises-Fisher Mixtures. We use the von Mises-Fisher (vMF) distribution as the basis of NPM. 
The vMF distribution is defined as:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 370, + 445, + 561, + 464 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 370, + 445, + 561, + 464 + ], + "spans": [ + { + "bbox": [ + 370, + 445, + 561, + 464 + ], + "type": "interline_equation", + "content": "v (\\omega \\mid \\mu , \\kappa) = \\frac {\\kappa}{4 \\pi \\sinh \\kappa} \\exp \\left(\\kappa \\mu^ {T} \\omega\\right), \\tag {3}", + "image_path": "986c320f1e4cfbcb1abe6476fb55b4d185b09327f49c6d2c948d2cd40e7b2fb4.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 314, + 468, + 561, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 468, + 561, + 502 + ], + "spans": [ + { + "bbox": [ + 314, + 468, + 561, + 502 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 314, + 468, + 561, + 502 + ], + "type": "inline_equation", + "content": "\\mu \\in \\mathbb{S}^2" + }, + { + "bbox": [ + 314, + 468, + 561, + 502 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 314, + 468, + 561, + 502 + ], + "type": "inline_equation", + "content": "\\kappa \\in [0, +\\infty)" + }, + { + "bbox": [ + 314, + 468, + 561, + 502 + ], + "type": "text", + "content": " defines the direction and precision (sharpness) of the vMF distribution. 
The vMF mixture model (VMM) is thus a convex combination of " + }, + { + "bbox": [ + 314, + 468, + 561, + 502 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 314, + 468, + 561, + 502 + ], + "type": "text", + "content": " vMF components/lobes:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 375, + 506, + 561, + 535 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 375, + 506, + 561, + 535 + ], + "spans": [ + { + "bbox": [ + 375, + 506, + 561, + 535 + ], + "type": "interline_equation", + "content": "\\mathcal {V} (\\omega \\mid \\Theta) = \\sum_ {i = 1} ^ {K} \\lambda_ {i} \\cdot v \\left(\\omega \\mid \\mu_ {i}, \\kappa_ {i}\\right), \\tag {4}", + "image_path": "bb3e8c4f8883ed1a419dd0791f442f644ccdbf4d0d08c9d0fee74d1545d4ee4c.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 314, + 539, + 561, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 539, + 561, + 593 + ], + "spans": [ + { + "bbox": [ + 314, + 539, + 561, + 593 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 314, + 539, + 561, + 593 + ], + "type": "inline_equation", + "content": "\\Theta" + }, + { + "bbox": [ + 314, + 539, + 561, + 593 + ], + "type": "text", + "content": " contains the parameters " + }, + { + "bbox": [ + 314, + 539, + 561, + 593 + ], + "type": "inline_equation", + "content": "(\\mu_i,\\kappa_i)" + }, + { + "bbox": [ + 314, + 539, + 561, + 593 + ], + "type": "text", + "content": " and weights " + }, + { + "bbox": [ + 314, + 539, + 561, + 593 + ], + "type": "inline_equation", + "content": "(\\lambda_{i})" + }, + { + "bbox": [ + 314, + 539, + 561, + 593 + ], + "type": "text", + "content": " of each vMF component. The vMF mixtures have many desirable properties, e.g., fewer parameters (4 floats per component), efficient importance sampling, and closed-form product and integration, which together constitute the reason for choosing it as the basis of NPM." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 314, + 594, + 561, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 594, + 561, + 649 + ], + "spans": [ + { + "bbox": [ + 314, + 594, + 561, + 649 + ], + "type": "text", + "content": "Our key is to encode the vMF mixtures with our implicit neural representation, then decode them with lightweight MLPs, and train them to effectively model the target distributions for path guiding algorithms. Other parametric basis functions (e.g., Gaussian mixtures) could be integrated into our method using a similar paradigm." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 315, + 657, + 480, + 668 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 657, + 480, + 668 + ], + "spans": [ + { + "bbox": [ + 315, + 657, + 480, + 668 + ], + "type": "text", + "content": "4 NEURAL PARAMETRIC MIXTURES" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 314, + 671, + 563, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 671, + 563, + 694 + ], + "spans": [ + { + "bbox": [ + 314, + 671, + 563, + 694 + ], + "type": "text", + "content": "In this section, we present our Neural Parametric Mixtures (NPM) technique for local path guiding. 
We first show how to encode/decode" + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 50, + 55, + 56, + 62 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 55, + 56, + 62 + ], + "spans": [ + { + "bbox": [ + 50, + 55, + 56, + 62 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 54, + 203, + 64 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 54, + 203, + 64 + ], + "spans": [ + { + "bbox": [ + 69, + 54, + 203, + 64 + ], + "type": "text", + "content": "Honghao Dong, Guoping Wang, and Sheng Li" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 50, + 708, + 198, + 717 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 708, + 198, + 717 + ], + "spans": [ + { + "bbox": [ + 50, + 708, + 198, + 717 + ], + "type": "text", + "content": "Vol. 1, No. 1, Article. Publication date: April 2025." + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 79, + 296, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 79, + 296, + 157 + ], + "spans": [ + { + "bbox": [ + 48, + 79, + 296, + 157 + ], + "type": "text", + "content": "target distributions with NPM in a simple setup (i.e., learning incident radiance fields, Sec. 4.1), then we derive the optimization method for NPM based on minibatch stochastic gradient descent (Sec. 4.2). Finally, we show how our NPM could naturally benefit from learning the full integrand (to account for the BSDF term), as well as the other extensions for better learning target distributions (Sec. 4.3). An overview of our method is illustrated in Fig. 2." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 167, + 162, + 177 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 167, + 162, + 177 + ], + "spans": [ + { + "bbox": [ + 48, + 167, + 162, + 177 + ], + "type": "text", + "content": "4.1 Radiance-based NPM" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 180, + 294, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 180, + 294, + 247 + ], + "spans": [ + { + "bbox": [ + 48, + 180, + 294, + 247 + ], + "type": "text", + "content": "In order to acquire a better importance sampling strategy, we should obtain an approximation of the incident radiance distribution using previous radiance estimates, known as the radiance-based local path guiding [Herholz et al. 2016; Rath et al. 2020]. Specifically, we want to use the vMF mixtures to be approximately proportional to the incident radiance, at a given shading position " + }, + { + "bbox": [ + 48, + 180, + 294, + 247 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 48, + 180, + 294, + 247 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 121, + 255, + 294, + 266 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 255, + 294, + 266 + ], + "spans": [ + { + "bbox": [ + 121, + 255, + 294, + 266 + ], + "type": "interline_equation", + "content": "\\mathcal {V} (\\omega_ {i} \\mid \\Theta (\\mathbf {x})) \\propto L _ {\\mathrm {i}} (\\mathbf {x}, \\omega_ {i}), \\tag {5}", + "image_path": "114f3498be4d0dc6542f3f5af73eb485cc7817dc6f779e6be1392b8d55dcfbea.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 270, + 293, + 336 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 270, + 293, + 336 + ], + "spans": [ + { + "bbox": [ + 48, + 270, + 293, + 336 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 48, + 270, + 293, + 336 + ], + "type": 
"inline_equation", + "content": "\\Theta" + }, + { + "bbox": [ + 48, + 270, + 293, + 336 + ], + "type": "text", + "content": " is conditioned on " + }, + { + "bbox": [ + 48, + 270, + 293, + 336 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 48, + 270, + 293, + 336 + ], + "type": "text", + "content": " to account for the spatial variation of the target distribution. Previous work achieves this with specific spatial subdivision strategies (e.g., kd-tree, octree). However, this spatial discretization introduces artifacts (e.g., resulting from parallax, Fig. 1 (a)), and is subject to frequent reconstruction to converge to a fine grained spatial subdivision, as discussed in Sec. 1." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 340, + 294, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 340, + 294, + 418 + ], + "spans": [ + { + "bbox": [ + 48, + 340, + 294, + 418 + ], + "type": "text", + "content": "Instead, we use an implicit neural representation to encode the target distribution compactly. This allows the spatial variation of the distribution to be continuously accounted for, thus better capturing spatio-directional correlations. Technically, given a shading position " + }, + { + "bbox": [ + 48, + 340, + 294, + 418 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 48, + 340, + 294, + 418 + ], + "type": "text", + "content": " in the scene, our NPM would output the guiding distribution that approximates the target distribution (Eq. 5). 
The output guiding distribution is defined using a set of parameters " + }, + { + "bbox": [ + 48, + 340, + 294, + 418 + ], + "type": "inline_equation", + "content": "\\hat{\\Theta}(\\mathbf{x})" + }, + { + "bbox": [ + 48, + 340, + 294, + 418 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 423, + 294, + 435 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 423, + 294, + 435 + ], + "spans": [ + { + "bbox": [ + 132, + 423, + 294, + 435 + ], + "type": "interline_equation", + "content": "\\mathrm {N P M} (\\mathbf {x} \\mid \\Phi) = \\hat {\\Theta} (\\mathbf {x}), \\tag {6}", + "image_path": "a8cf85955cee9b1cabbf151d19f4dd028a439f2aa721b530832c8cf383f8faac.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 439, + 293, + 607 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 439, + 293, + 607 + ], + "spans": [ + { + "bbox": [ + 47, + 439, + 293, + 607 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 47, + 439, + 293, + 607 + ], + "type": "inline_equation", + "content": "\\Phi" + }, + { + "bbox": [ + 47, + 439, + 293, + 607 + ], + "type": "text", + "content": " are the trainable parameters of the implicit representation, and " + }, + { + "bbox": [ + 47, + 439, + 293, + 607 + ], + "type": "inline_equation", + "content": "\\hat{\\Theta}" + }, + { + "bbox": [ + 47, + 439, + 293, + 607 + ], + "type": "text", + "content": " are the output decoded parameters, defining a vMF mixture " + }, + { + "bbox": [ + 47, + 439, + 293, + 607 + ], + "type": "inline_equation", + "content": "\\mathcal{V}(\\omega_i\\mid \\hat{\\Theta} (\\mathbf{x}))" + }, + { + "bbox": [ + 47, + 439, + 293, + 607 + ], + "type": "text", + "content": " that is trained to approximate " + }, + { + "bbox": [ + 47, + 439, + 293, + 607 + ], + "type": "inline_equation", + "content": "L_{i}(\\mathbf{x},\\omega_{i})" + }, + { + "bbox": [ + 47, + 439, + 293, + 607 + ], + 
"type": "text", + "content": " (Eq. 5). By continuously conditioning the learned distribution " + }, + { + "bbox": [ + 47, + 439, + 293, + 607 + ], + "type": "inline_equation", + "content": "\\Theta" + }, + { + "bbox": [ + 47, + 439, + 293, + 607 + ], + "type": "text", + "content": " on spatial positions " + }, + { + "bbox": [ + 47, + 439, + 293, + 607 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 47, + 439, + 293, + 607 + ], + "type": "text", + "content": ", our method inherently avoids the above issues caused by spatial discretizations. We achieve the above mapping by using a lightweight network to decode this parametric distribution from the implicit neural representation. To make sure that we get a valid vMF mixture (i.e., " + }, + { + "bbox": [ + 47, + 439, + 293, + 607 + ], + "type": "inline_equation", + "content": "\\lambda_{i},\\kappa_{i} > 0,\\mu_{i}\\in \\mathbb{S}^{2}" + }, + { + "bbox": [ + 47, + 439, + 293, + 607 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 47, + 439, + 293, + 607 + ], + "type": "inline_equation", + "content": "\\sum_{j = 1}^{K}\\lambda_{j} = 1" + }, + { + "bbox": [ + 47, + 439, + 293, + 607 + ], + "type": "text", + "content": "), we must additionally regularize the raw network output with appropriate mapping functions (see Tab. 1). Specifically, we apply exponential activation to " + }, + { + "bbox": [ + 47, + 439, + 293, + 607 + ], + "type": "inline_equation", + "content": "\\lambda_{i}" + }, + { + "bbox": [ + 47, + 439, + 293, + 607 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 439, + 293, + 607 + ], + "type": "inline_equation", + "content": "\\kappa_{i}" + }, + { + "bbox": [ + 47, + 439, + 293, + 607 + ], + "type": "text", + "content": ". 
Logistic activation is applied to " + }, + { + "bbox": [ + 47, + 439, + 293, + 607 + ], + "type": "inline_equation", + "content": "\\theta_{i}" + }, + { + "bbox": [ + 47, + 439, + 293, + 607 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 439, + 293, + 607 + ], + "type": "inline_equation", + "content": "\\varphi_{i}" + }, + { + "bbox": [ + 47, + 439, + 293, + 607 + ], + "type": "text", + "content": " which form the spherical coordinates of " + }, + { + "bbox": [ + 47, + 439, + 293, + 607 + ], + "type": "inline_equation", + "content": "\\mu_{i}" + }, + { + "bbox": [ + 47, + 439, + 293, + 607 + ], + "type": "text", + "content": ". Most importantly, we apply the softmax function to all " + }, + { + "bbox": [ + 47, + 439, + 293, + 607 + ], + "type": "inline_equation", + "content": "\\lambda s" + }, + { + "bbox": [ + 47, + 439, + 293, + 607 + ], + "type": "text", + "content": " to ensure that the outputs model a valid PDF (i.e., satisfy " + }, + { + "bbox": [ + 47, + 439, + 293, + 607 + ], + "type": "inline_equation", + "content": "\\sum_{i = 1}^{K}\\lambda_{i} = 1" + }, + { + "bbox": [ + 47, + 439, + 293, + 607 + ], + "type": "text", + "content": ")." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 616, + 294, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 616, + 294, + 694 + ], + "spans": [ + { + "bbox": [ + 47, + 616, + 294, + 694 + ], + "type": "text", + "content": "Discussion. It is possible to implement different forms of implicit neural representation with trainable parameters " + }, + { + "bbox": [ + 47, + 616, + 294, + 694 + ], + "type": "inline_equation", + "content": "\\Phi" + }, + { + "bbox": [ + 47, + 616, + 294, + 694 + ], + "type": "text", + "content": ". 
While it is straightforward to use a monolithic network to model " + }, + { + "bbox": [ + 47, + 616, + 294, + 694 + ], + "type": "inline_equation", + "content": "\\mathrm{NPM}_{\\Phi} : \\mathbf{x} \\rightarrow \\Theta" + }, + { + "bbox": [ + 47, + 616, + 294, + 694 + ], + "type": "text", + "content": ", we find it difficult to fit the high-frequency variations of the target distribution. Thereby, we use a trainable multi-resolution spatial embedding for encoding the distributions, and additionally a lightweight neural network for decoding the parameters. This is" + } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 317, + 76, + 560, + 178 + ], + "blocks": [ + { + "bbox": [ + 317, + 76, + 560, + 178 + ], + "lines": [ + { + "bbox": [ + 317, + 76, + 560, + 178 + ], + "spans": [ + { + "bbox": [ + 317, + 76, + 560, + 178 + ], + "type": "image", + "image_path": "38e332d26df913a4036d6a6bb054d9f1724ec4e8b74fdf0ec84b6036b6735101.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 314, + 186, + 561, + 247 + ], + "lines": [ + { + "bbox": [ + 314, + 186, + 561, + 247 + ], + "spans": [ + { + "bbox": [ + 314, + 186, + 561, + 247 + ], + "type": "text", + "content": "Fig. 1. Parallax issue caused by spatial discretizations (a). 
For a subdivided volume " + }, + { + "bbox": [ + 314, + 186, + 561, + 247 + ], + "type": "inline_equation", + "content": "S(\\mathbf{x})" + }, + { + "bbox": [ + 314, + 186, + 561, + 247 + ], + "type": "text", + "content": " in (a), the guiding distribution is marginalized with training samples scattered over the volume " + }, + { + "bbox": [ + 314, + 186, + 561, + 247 + ], + "type": "inline_equation", + "content": "S(\\mathbf{x})" + }, + { + "bbox": [ + 314, + 186, + 561, + 247 + ], + "type": "text", + "content": ", and is shared by different positions (e.g., " + }, + { + "bbox": [ + 314, + 186, + 561, + 247 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_1" + }, + { + "bbox": [ + 314, + 186, + 561, + 247 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 314, + 186, + 561, + 247 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_2" + }, + { + "bbox": [ + 314, + 186, + 561, + 247 + ], + "type": "text", + "content": "). Our method will not suffer from parallax due to NPM implicitly representing a monolithic function, continuously mapping from spatial positions to parametric guiding distributions, as shown in (b)." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "bbox": [ + 314, + 272, + 560, + 294 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 272, + 560, + 294 + ], + "spans": [ + { + "bbox": [ + 314, + 272, + 560, + 294 + ], + "type": "text", + "content": "crucial for our method to achieve better modeling capacity while remaining performant, as will be discussed later." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 315, + 305, + 410, + 316 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 305, + 410, + 316 + ], + "spans": [ + { + "bbox": [ + 315, + 305, + 410, + 316 + ], + "type": "text", + "content": "4.2 Optimizing NPM" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 319, + 561, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 319, + 561, + 472 + ], + "spans": [ + { + "bbox": [ + 313, + 319, + 561, + 472 + ], + "type": "text", + "content": "We show how to optimize the divergence between the decoded distribution " + }, + { + "bbox": [ + 313, + 319, + 561, + 472 + ], + "type": "inline_equation", + "content": "\\hat{\\Theta}(\\mathbf{x})" + }, + { + "bbox": [ + 313, + 319, + 561, + 472 + ], + "type": "text", + "content": " and the target distribution using minibatch stochastic gradient descent. To achieve this, the gradients of a training objective (or loss function) with respect to the network parameters are necessary. However, it is non-trivial to define such a loss function, given the ground truth output parameters " + }, + { + "bbox": [ + 313, + 319, + 561, + 472 + ], + "type": "inline_equation", + "content": "\\Theta_{\\mathrm{gt}}(\\mathbf{x})" + }, + { + "bbox": [ + 313, + 319, + 561, + 472 + ], + "type": "text", + "content": " are unknown. Previous works typically use design optimization algorithms (e.g., expectation-maximization) that iteratively use batches of samples to fit a given set of parameters " + }, + { + "bbox": [ + 313, + 319, + 561, + 472 + ], + "type": "inline_equation", + "content": "\\Theta" + }, + { + "bbox": [ + 313, + 319, + 561, + 472 + ], + "type": "text", + "content": ", which often parameterize a marginalized distribution shared by the spatial region covering the samples [Herholz et al. 2016; Ruppert et al. 2020]. 
However, their methods are applied to explicitly parameterized models, and are therefore not applicable to our method, which models the implicit representation of the function " + }, + { + "bbox": [ + 313, + 319, + 561, + 472 + ], + "type": "inline_equation", + "content": "\\mathbf{NPM}_{\\Phi}: \\mathbf{x} \\rightarrow \\hat{\\Theta}" + }, + { + "bbox": [ + 313, + 319, + 561, + 472 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 473, + 561, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 473, + 561, + 561 + ], + "spans": [ + { + "bbox": [ + 313, + 473, + 561, + 561 + ], + "type": "text", + "content": "We minimize the KL divergence between the decoded vMF mixtures and the target distribution via minibatch stochastic gradient descent, where its gradients with respect to the trainable parameters are estimated using Monte Carlo integration. Other divergence metrics are also available following a similar derivation. Let us start by assuming that the shading position " + }, + { + "bbox": [ + 313, + 473, + 561, + 561 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 313, + 473, + 561, + 561 + ], + "type": "text", + "content": " is fixed, thus omitting the dependency of " + }, + { + "bbox": [ + 313, + 473, + 561, + 561 + ], + "type": "inline_equation", + "content": "\\Theta" + }, + { + "bbox": [ + 313, + 473, + 561, + 561 + ], + "type": "text", + "content": " on " + }, + { + "bbox": [ + 313, + 473, + 561, + 561 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 313, + 473, + 561, + 561 + ], + "type": "text", + "content": " in the equations. 
For a given position, the KL divergence between the target distribution " + }, + { + "bbox": [ + 313, + 473, + 561, + 561 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 313, + 473, + 561, + 561 + ], + "type": "text", + "content": " and our output" + } + ] + } + ], + "index": 16 + }, + { + "type": "table", + "bbox": [ + 328, + 633, + 548, + 692 + ], + "blocks": [ + { + "bbox": [ + 314, + 582, + 561, + 624 + ], + "lines": [ + { + "bbox": [ + 314, + 582, + 561, + 624 + ], + "spans": [ + { + "bbox": [ + 314, + 582, + 561, + 624 + ], + "type": "text", + "content": "Table 1. Detailed mapping functions we use to regularize network outputs, where " + }, + { + "bbox": [ + 314, + 582, + 561, + 624 + ], + "type": "inline_equation", + "content": "\\lambda^{\\prime}" + }, + { + "bbox": [ + 314, + 582, + 561, + 624 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 314, + 582, + 561, + 624 + ], + "type": "inline_equation", + "content": "\\kappa^{\\prime}" + }, + { + "bbox": [ + 314, + 582, + 561, + 624 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 314, + 582, + 561, + 624 + ], + "type": "inline_equation", + "content": "\\theta^{\\prime}" + }, + { + "bbox": [ + 314, + 582, + 561, + 624 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 314, + 582, + 561, + 624 + ], + "type": "inline_equation", + "content": "\\varphi^{\\prime}" + }, + { + "bbox": [ + 314, + 582, + 561, + 624 + ], + "type": "text", + "content": " denote the raw outputs, and " + }, + { + "bbox": [ + 314, + 582, + 561, + 624 + ], + "type": "inline_equation", + "content": "(\\theta, \\varphi)" + }, + { + "bbox": [ + 314, + 582, + 561, + 624 + ], + "type": "text", + "content": " is the normalized spherical coordinate of " + }, + { + "bbox": [ + 314, + 582, + 561, + 624 + ], + "type": "inline_equation", + "content": "\\mu \\in \\mathbb{S}^2" + }, + { + "bbox": [ + 314, + 582, + 561, + 624 + ], + "type": "text", + "content": ". 
Left: parameter notations and their valid ranges; middle: type of activation; right: specific mappings." + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 328, + 633, + 548, + 692 + ], + "lines": [ + { + "bbox": [ + 328, + 633, + 548, + 692 + ], + "spans": [ + { + "bbox": [ + 328, + 633, + 548, + 692 + ], + "type": "table", + "html": "
ParameterActivationMapping
κ ∈ [0,+∞)Exponentialκi = exp(κi')
λ ∈ [0,+∞)Softmaxλi = exp(λi') / ∑j=1K exp(λj')
θ, φ ∈ [0,1]Logisticθi = 1/(1 + exp(-θi'))
", + "image_path": "3d837b6d0b31d48d6b2f3a3a0868f883d039f51de2c3b76dc345e1b7d59a75e7.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "table_body" + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 408, + 54, + 541, + 63 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 408, + 54, + 541, + 63 + ], + "spans": [ + { + "bbox": [ + 408, + 54, + 541, + 63 + ], + "type": "text", + "content": "Neural Parametric Mixtures for Path Guiding" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 547, + 54, + 561, + 62 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 547, + 54, + 561, + 62 + ], + "spans": [ + { + "bbox": [ + 547, + 54, + 561, + 62 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 411, + 708, + 560, + 717 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 411, + 708, + 560, + 717 + ], + "spans": [ + { + "bbox": [ + 411, + 708, + 560, + 717 + ], + "type": "text", + "content": "Vol. 1, No. 1, Article. Publication date: April 2025." + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 77, + 560, + 199 + ], + "blocks": [ + { + "bbox": [ + 50, + 77, + 560, + 199 + ], + "lines": [ + { + "bbox": [ + 50, + 77, + 560, + 199 + ], + "spans": [ + { + "bbox": [ + 50, + 77, + 560, + 199 + ], + "type": "image", + "image_path": "012d7d72f38c99a2d6e3ef901578141684e879c1eaa6c4a2513ae3c7899fdb0e.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 48, + 206, + 561, + 266 + ], + "lines": [ + { + "bbox": [ + 48, + 206, + 561, + 266 + ], + "spans": [ + { + "bbox": [ + 48, + 206, + 561, + 266 + ], + "type": "text", + "content": "Fig. 2. High-level illustration of our Neural Parametric Mixtures (NPM). 
We implicitly encode the spatially varying target distributions with the multi-resolution embedding. When the distribution of a spatial location " + }, + { + "bbox": [ + 48, + 206, + 561, + 266 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 48, + 206, + 561, + 266 + ], + "type": "text", + "content": " is queried, (1) the features assigned to the nearby grid points surrounding " + }, + { + "bbox": [ + 48, + 206, + 561, + 266 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 48, + 206, + 561, + 266 + ], + "type": "text", + "content": " are interpolated at each level, and concatenated with other levels to obtain the spatial embedding " + }, + { + "bbox": [ + 48, + 206, + 561, + 266 + ], + "type": "inline_equation", + "content": "G(\\mathbf{x})" + }, + { + "bbox": [ + 48, + 206, + 561, + 266 + ], + "type": "text", + "content": ". (2) the spatial embedding is then combined with other inputs to (3) feed into the lightweight MLP for (4) decoding the parameters " + }, + { + "bbox": [ + 48, + 206, + 561, + 266 + ], + "type": "inline_equation", + "content": "\\Theta" + }, + { + "bbox": [ + 48, + 206, + 561, + 266 + ], + "type": "text", + "content": " of the vMF mixture " + }, + { + "bbox": [ + 48, + 206, + 561, + 266 + ], + "type": "inline_equation", + "content": "\\mathcal{V}(\\omega_i \\mid \\Theta)" + }, + { + "bbox": [ + 48, + 206, + 561, + 266 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 48, + 206, + 561, + 266 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 48, + 206, + 561, + 266 + ], + "type": "text", + "content": " components. We then (5) use this parametric distribution for importance sampling the scattering direction. 
The result MC radiance estimate " + }, + { + "bbox": [ + 48, + 206, + 561, + 266 + ], + "type": "inline_equation", + "content": "\\langle L_i(\\mathbf{x}, \\omega_i) \\rangle" + }, + { + "bbox": [ + 48, + 206, + 561, + 266 + ], + "type": "text", + "content": " is used to estimate the training gradient " + }, + { + "bbox": [ + 48, + 206, + 561, + 266 + ], + "type": "inline_equation", + "content": "\\nabla_{\\Theta} D_{\\mathrm{KL}}" + }, + { + "bbox": [ + 48, + 206, + 561, + 266 + ], + "type": "text", + "content": " (Sec. 4.2), which is then back-propagated through these differentiable stages to optimize our NPM representation (dashed lines)." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 283, + 156, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 283, + 156, + 293 + ], + "spans": [ + { + "bbox": [ + 48, + 283, + 156, + 293 + ], + "type": "text", + "content": "distribution " + }, + { + "bbox": [ + 48, + 283, + 156, + 293 + ], + "type": "inline_equation", + "content": "\\mathcal{V}" + }, + { + "bbox": [ + 48, + 283, + 156, + 293 + ], + "type": "text", + "content": " is defined as:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 87, + 299, + 294, + 323 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 299, + 294, + 323 + ], + "spans": [ + { + "bbox": [ + 87, + 299, + 294, + 323 + ], + "type": "interline_equation", + "content": "D _ {\\mathrm {K L}} (\\mathcal {D} \\| \\mathcal {V}; \\Theta) = \\int_ {\\Omega} \\mathcal {D} (\\omega) \\log \\frac {\\mathcal {D} (\\omega)}{\\mathcal {V} (\\omega | \\hat {\\Theta})} \\mathrm {d} \\omega , \\tag {7}", + "image_path": "336d47376ee5000dcbf36c6bc39487599aaa992dc6d6b3207ccf8dad24c840a5.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 329, + 296, + 352 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 329, + 296, + 352 + ], + "spans": [ + 
{ + "bbox": [ + 48, + 329, + 296, + 352 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 48, + 329, + 296, + 352 + ], + "type": "inline_equation", + "content": "\\mathcal{D} \\propto L_{\\mathrm{i}}" + }, + { + "bbox": [ + 48, + 329, + 296, + 352 + ], + "type": "text", + "content": " in radiance-based path guiding. This integral could now be estimated with the Monte Carlo estimator with " + }, + { + "bbox": [ + 48, + 329, + 296, + 352 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 48, + 329, + 296, + 352 + ], + "type": "text", + "content": " samples:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 77, + 359, + 294, + 389 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 359, + 294, + 389 + ], + "spans": [ + { + "bbox": [ + 77, + 359, + 294, + 389 + ], + "type": "interline_equation", + "content": "D _ {\\mathrm {K L}} (\\mathcal {D} \\| \\mathcal {V}; \\Theta) \\approx \\frac {1}{N} \\sum_ {j = 1} ^ {N} \\frac {\\mathcal {D} (\\omega_ {j})}{\\tilde {p} (\\omega_ {j} \\mid \\hat {\\Theta})} \\log \\frac {\\mathcal {D} (\\omega_ {j})}{\\mathcal {V} (\\omega_ {j} \\mid \\hat {\\Theta})}, \\tag {8}", + "image_path": "70557cae169bc7f6f85ff903873133db7e1d46307a229ddb566e0a499f5c8651.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 396, + 294, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 396, + 294, + 441 + ], + "spans": [ + { + "bbox": [ + 48, + 396, + 294, + 441 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 48, + 396, + 294, + 441 + ], + "type": "inline_equation", + "content": "\\tilde{p}" + }, + { + "bbox": [ + 48, + 396, + 294, + 441 + ], + "type": "text", + "content": " is the distribution from which the samples are drawn, which in our case is a combination of the BSDF importance sampling and guiding distribution. 
By taking its derivative with respect to " + }, + { + "bbox": [ + 48, + 396, + 294, + 441 + ], + "type": "inline_equation", + "content": "\\Theta" + }, + { + "bbox": [ + 48, + 396, + 294, + 441 + ], + "type": "text", + "content": ", we obtain the MC estimate of the gradient " + }, + { + "bbox": [ + 48, + 396, + 294, + 441 + ], + "type": "inline_equation", + "content": "\\nabla_{\\Theta}D_{\\mathrm{KL}}(\\mathcal{D}\\| \\mathcal{V};\\Theta)" + }, + { + "bbox": [ + 48, + 396, + 294, + 441 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 76, + 448, + 294, + 479 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 448, + 294, + 479 + ], + "spans": [ + { + "bbox": [ + 76, + 448, + 294, + 479 + ], + "type": "interline_equation", + "content": "\\nabla_ {\\Theta} D _ {\\mathrm {K L}} (\\mathcal {D} \\| \\mathcal {V}; \\Theta) \\approx - \\frac {1}{N} \\sum_ {j = 1} ^ {N} \\frac {\\mathcal {D} \\left(\\omega_ {j}\\right) \\nabla_ {\\Theta} \\mathcal {V} \\left(\\omega_ {j} \\mid \\hat {\\Theta}\\right)}{\\tilde {p} \\left(\\omega_ {j} \\mid \\hat {\\Theta}\\right) \\mathcal {V} \\left(\\omega_ {j} \\mid \\hat {\\Theta}\\right)}, \\tag {9}", + "image_path": "06c9af7395206a5ca355fde269d48d174d47438dd29d458f5a62c727d51e3ab4.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 485, + 294, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 485, + 294, + 539 + ], + "spans": [ + { + "bbox": [ + 48, + 485, + 294, + 539 + ], + "type": "text", + "content": "where the derivatives of the vMF mixtures " + }, + { + "bbox": [ + 48, + 485, + 294, + 539 + ], + "type": "inline_equation", + "content": "\\mathcal{V}" + }, + { + "bbox": [ + 48, + 485, + 294, + 539 + ], + "type": "text", + "content": " with respect to their parameters " + }, + { + "bbox": [ + 48, + 485, + 294, + 539 + ], + "type": "inline_equation", + "content": "\\Theta" + }, + { + "bbox": [ + 48, + 485, + 294, 
+ 539 + ], + "type": "text", + "content": " are straightforward. The gradients for the trainable NPM parameters " + }, + { + "bbox": [ + 48, + 485, + 294, + 539 + ], + "type": "inline_equation", + "content": "\\Phi" + }, + { + "bbox": [ + 48, + 485, + 294, + 539 + ], + "type": "text", + "content": " could then be obtained via back propagation. Since we use the unbiased MC estimate of the training gradients, the parameters are guaranteed to converge to a local minimum." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 540, + 294, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 540, + 294, + 594 + ], + "spans": [ + { + "bbox": [ + 48, + 540, + 294, + 594 + ], + "type": "text", + "content": "In practice, our training sample pairs " + }, + { + "bbox": [ + 48, + 540, + 294, + 594 + ], + "type": "inline_equation", + "content": "(\\mathbf{x},\\omega_{i})\\rightarrow L_{\\mathrm{i}}" + }, + { + "bbox": [ + 48, + 540, + 294, + 594 + ], + "type": "text", + "content": " are distributed in different spatial positions " + }, + { + "bbox": [ + 48, + 540, + 294, + 594 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 48, + 540, + 294, + 594 + ], + "type": "text", + "content": ", efficiently learning a spatially varying target distribution " + }, + { + "bbox": [ + 48, + 540, + 294, + 594 + ], + "type": "inline_equation", + "content": "\\mathcal{D}(\\mathbf{x})" + }, + { + "bbox": [ + 48, + 540, + 294, + 594 + ], + "type": "text", + "content": ". This results in the training objective accounting for the divergence of multiple positions. 
The expected solution for " + }, + { + "bbox": [ + 48, + 540, + 294, + 594 + ], + "type": "inline_equation", + "content": "\\Phi" + }, + { + "bbox": [ + 48, + 540, + 294, + 594 + ], + "type": "text", + "content": " is thus:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 91, + 599, + 294, + 621 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 599, + 294, + 621 + ], + "spans": [ + { + "bbox": [ + 91, + 599, + 294, + 621 + ], + "type": "interline_equation", + "content": "\\Phi^ {*} = \\underset {\\Phi} {\\arg \\min } \\mathbb {E} _ {\\mathbf {x}} \\left[ D _ {\\mathrm {K L}} \\left(\\mathcal {D} (\\mathbf {x}) \\| \\mathcal {V}; \\Theta (\\mathbf {x})\\right) \\right]. \\tag {10}", + "image_path": "440acf2b5615533f8d43abb25281699d9d75db99cadf0d5cfb45fa8a9e6d8ae0.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 48, + 628, + 294, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 628, + 294, + 694 + ], + "spans": [ + { + "bbox": [ + 48, + 628, + 294, + 694 + ], + "type": "text", + "content": "For our implicit spatial embedding (i.e., grids of latent features, discussed later), this results in the embedding being optimized with all (and only) its nearby samples. When using the gradient descent method, the samples with the largest gradients (i.e., the most important ones for reducing divergence) would dominate, forming a reasonable design choice for better adaptivity." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 315, + 283, + 437, + 295 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 283, + 437, + 295 + ], + "spans": [ + { + "bbox": [ + 315, + 283, + 437, + 295 + ], + "type": "text", + "content": "4.3 Full Integrand Learning" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 297, + 561, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 297, + 561, + 407 + ], + "spans": [ + { + "bbox": [ + 313, + 297, + 561, + 407 + ], + "type": "text", + "content": "Using path guiding to sample the full integrand " + }, + { + "bbox": [ + 313, + 297, + 561, + 407 + ], + "type": "inline_equation", + "content": "f_{s} \\cdot L_{i} \\cos \\theta_{i}" + }, + { + "bbox": [ + 313, + 297, + 561, + 407 + ], + "type": "text", + "content": " can achieve even better performance, which should incorporate the BSDF term and the cosine term into the target distribution. This is challenging since the guiding distribution is now conditioned on 5D inputs (i.e., outgoing direction " + }, + { + "bbox": [ + 313, + 297, + 561, + 407 + ], + "type": "inline_equation", + "content": "\\omega_{0}" + }, + { + "bbox": [ + 313, + 297, + 561, + 407 + ], + "type": "text", + "content": " and spatial coordinate " + }, + { + "bbox": [ + 313, + 297, + 561, + 407 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 313, + 297, + 561, + 407 + ], + "type": "text", + "content": "). Previous works fit BSDFs with precomputed parametric models and multiply them with the learned incident radiance distribution to achieve product sampling. However, this often relies on scene-dependent precomputation, discretization over " + }, + { + "bbox": [ + 313, + 297, + 561, + 407 + ], + "type": "inline_equation", + "content": "\\omega_{0}" + }, + { + "bbox": [ + 313, + 297, + 561, + 407 + ], + "type": "text", + "content": ", and extra computational overhead [Herholz et al. 2016; Ruppert et al. 
2020]." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 314, + 407, + 561, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 407, + 561, + 483 + ], + "spans": [ + { + "bbox": [ + 314, + 407, + 561, + 483 + ], + "type": "text", + "content": "Our neural design can naturally handle the conditions with the extra input of " + }, + { + "bbox": [ + 314, + 407, + 561, + 483 + ], + "type": "inline_equation", + "content": "\\omega_{i}" + }, + { + "bbox": [ + 314, + 407, + 561, + 483 + ], + "type": "text", + "content": ". This is essential since a neural network could approximate arbitrary conditional models if being expressive enough. We later show this improves performance through learning a better guiding distribution, with only modest performance overhead. For clarity, we denote the previous radiance-based method as NPM-radiance, and this version as NPM-product." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 314, + 483, + 565, + 516 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 483, + 565, + 516 + ], + "spans": [ + { + "bbox": [ + 314, + 483, + 565, + 516 + ], + "type": "text", + "content": "Specifically, by supplementing input " + }, + { + "bbox": [ + 314, + 483, + 565, + 516 + ], + "type": "inline_equation", + "content": "\\omega_{0}" + }, + { + "bbox": [ + 314, + 483, + 565, + 516 + ], + "type": "text", + "content": ", we reformulate the learned distribution (Eq. 6) with the outgoing directions. 
This enables learning the full integrated as:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 373, + 521, + 561, + 534 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 373, + 521, + 561, + 534 + ], + "spans": [ + { + "bbox": [ + 373, + 521, + 561, + 534 + ], + "type": "interline_equation", + "content": "\\mathrm {N P M} _ {\\text {p r o d u c t}} (\\mathbf {x}, \\omega_ {\\mathrm {o}} \\mid \\Phi) = \\hat {\\Theta} (\\mathbf {x}, \\omega_ {\\mathrm {o}}), \\tag {11}", + "image_path": "13804e2693ba41499fff7a99503e9702a3c96d40a68e57a29c67b9e74ebe2169.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 314, + 540, + 560, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 540, + 560, + 563 + ], + "spans": [ + { + "bbox": [ + 314, + 540, + 560, + 563 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 314, + 540, + 560, + 563 + ], + "type": "inline_equation", + "content": "\\hat{\\Theta}" + }, + { + "bbox": [ + 314, + 540, + 560, + 563 + ], + "type": "text", + "content": " now parameterizes the vMF mixture " + }, + { + "bbox": [ + 314, + 540, + 560, + 563 + ], + "type": "inline_equation", + "content": "\\mathcal{V}" + }, + { + "bbox": [ + 314, + 540, + 560, + 563 + ], + "type": "text", + "content": " that is trained to approximate the full integrand in Eq. 1, i.e.," + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 343, + 567, + 561, + 585 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 343, + 567, + 561, + 585 + ], + "spans": [ + { + "bbox": [ + 343, + 567, + 561, + 585 + ], + "type": "interline_equation", + "content": "\\left. \\mathcal {V} \\left(\\omega_ {i} \\mid \\hat {\\Theta} (\\mathbf {x}, \\omega_ {0})\\right) \\propto f _ {\\mathrm {s}} \\left(\\mathbf {x}, \\omega_ {0}, \\omega_ {\\mathrm {i}}\\right) L _ {\\mathrm {i}} \\left(\\mathbf {x}, \\omega_ {\\mathrm {i}}\\right) \\left| \\cos \\theta_ {\\mathrm {i}} \\right|, \\right. 
\\tag {12}", + "image_path": "b19615e5a3ede50f569434550e990b805b884c04382bfcf3f74269025fb14c49.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 313, + 588, + 561, + 655 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 588, + 561, + 655 + ], + "spans": [ + { + "bbox": [ + 313, + 588, + 561, + 655 + ], + "type": "text", + "content": "where the cosine term could be approximated with a constant vMF lobe [Ruppert et al. 2020], leaving NPM to focus on the remaining part of the integral. Nonetheless, it is still challenging for neural networks to model a 2D directional distribution conditioned on 5D spatio-directional inputs. We further use the following simple extensions to help the network learn these spatially varying distributions:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 314, + 661, + 561, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 661, + 561, + 694 + ], + "spans": [ + { + "bbox": [ + 314, + 661, + 561, + 694 + ], + "type": "text", + "content": "Auxiliary Feature Inputs. Following the practices in prior work [Hadadan et al. 2021; Müller et al. 
2021], we additionally input the surface normal and roughness as auxiliary features to help" + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 50, + 55, + 56, + 62 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 55, + 56, + 62 + ], + "spans": [ + { + "bbox": [ + 50, + 55, + 56, + 62 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 54, + 203, + 64 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 54, + 203, + 64 + ], + "spans": [ + { + "bbox": [ + 69, + 54, + 203, + 64 + ], + "type": "text", + "content": "Honghao Dong, Guoping Wang, and Sheng Li" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 50, + 708, + 198, + 717 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 708, + 198, + 717 + ], + "spans": [ + { + "bbox": [ + 50, + 708, + 198, + 717 + ], + "type": "text", + "content": "Vol. 1, No. 1, Article. Publication date: April 2025." + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 79, + 295, + 135 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 79, + 295, + 135 + ], + "spans": [ + { + "bbox": [ + 48, + 79, + 295, + 135 + ], + "type": "text", + "content": "the network better correlate the target distribution with e.g., local shading frame (normal) and spatially varying BSDFs (roughness). Experimentally, we find this helps the network to better capture the spatio-directional correlations, while with a small computational overhead due to additional memory traffic." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 141, + 295, + 229 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 141, + 295, + 229 + ], + "spans": [ + { + "bbox": [ + 48, + 141, + 295, + 229 + ], + "type": "text", + "content": "Input Encoding. 
It is challenging for a neural network to model the non-linearity between multidimensional inputs and outputs, especially when our outputs are distributions with high-frequency spatial variations. Therefore, we replace the spatial input " + }, + { + "bbox": [ + 48, + 141, + 295, + 229 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 48, + 141, + 295, + 229 + ], + "type": "text", + "content": " with our trainable multi-resolution spatial embedding (discussed in Sec. 5.1). For the other inputs (e.g., outgoing direction " + }, + { + "bbox": [ + 48, + 141, + 295, + 229 + ], + "type": "inline_equation", + "content": "\\omega_{0}" + }, + { + "bbox": [ + 48, + 141, + 295, + 229 + ], + "type": "text", + "content": " and surface normals " + }, + { + "bbox": [ + 48, + 141, + 295, + 229 + ], + "type": "inline_equation", + "content": "\\mathbf{n}(\\mathbf{x})" + }, + { + "bbox": [ + 48, + 141, + 295, + 229 + ], + "type": "text", + "content": "), we encode them using the spherical harmonics basis, which is previously established in NeRF [Verbin et al. 2022]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 49, + 239, + 149, + 251 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 239, + 149, + 251 + ], + "spans": [ + { + "bbox": [ + 49, + 239, + 149, + 251 + ], + "type": "text", + "content": "5 IMPLEMENTATION" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 254, + 294, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 254, + 294, + 277 + ], + "spans": [ + { + "bbox": [ + 48, + 254, + 294, + 277 + ], + "type": "text", + "content": "In this section, we provide the technical details that are crucial to the performance and practicality of our NPM implementation." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 49, + 286, + 220, + 298 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 286, + 220, + 298 + ], + "spans": [ + { + "bbox": [ + 49, + 286, + 220, + 298 + ], + "type": "text", + "content": "5.1 Multi-resolution Spatial Embedding" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 300, + 295, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 300, + 295, + 377 + ], + "spans": [ + { + "bbox": [ + 48, + 300, + 295, + 377 + ], + "type": "text", + "content": "Our implicit NPM representation learns a continuous mapping " + }, + { + "bbox": [ + 48, + 300, + 295, + 377 + ], + "type": "inline_equation", + "content": "\\mathrm{NPM}_{\\Phi}:\\mathbf{x}\\rightarrow \\hat{\\Theta}" + }, + { + "bbox": [ + 48, + 300, + 295, + 377 + ], + "type": "text", + "content": " (with the additional input " + }, + { + "bbox": [ + 48, + 300, + 295, + 377 + ], + "type": "inline_equation", + "content": "\\omega_{0}\\in \\mathbb{S}^{2}" + }, + { + "bbox": [ + 48, + 300, + 295, + 377 + ], + "type": "text", + "content": " in the extended version), where " + }, + { + "bbox": [ + 48, + 300, + 295, + 377 + ], + "type": "inline_equation", + "content": "\\Theta \\in \\mathbb{R}^{4\\times K}" + }, + { + "bbox": [ + 48, + 300, + 295, + 377 + ], + "type": "text", + "content": " defines the learned target distribution. While a straightforward solution would be using a multi-layer perceptron (MLP) as the universal function approximator to model " + }, + { + "bbox": [ + 48, + 300, + 295, + 377 + ], + "type": "inline_equation", + "content": "\\mathrm{NPM}_{\\Phi}" + }, + { + "bbox": [ + 48, + 300, + 295, + 377 + ], + "type": "text", + "content": ", we experimentally found it difficult to capture the high-frequency spatial variations of the target distributions." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 378, + 295, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 378, + 295, + 499 + ], + "spans": [ + { + "bbox": [ + 48, + 378, + 295, + 499 + ], + "type": "text", + "content": "Therefore, we use a learnable spatial embedding to implicitly encode the learned parametric mixtures. Similar approaches are found successful in recent NeRF-like applications [Müller et al. 2022; Munkberg et al. 2022]. Specifically, we define " + }, + { + "bbox": [ + 48, + 378, + 295, + 499 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 48, + 378, + 295, + 499 + ], + "type": "text", + "content": " 3D uniform grids " + }, + { + "bbox": [ + 48, + 378, + 295, + 499 + ], + "type": "inline_equation", + "content": "G_{l}" + }, + { + "bbox": [ + 48, + 378, + 295, + 499 + ], + "type": "text", + "content": ", each covering the entire scene with a spatial resolution of " + }, + { + "bbox": [ + 48, + 378, + 295, + 499 + ], + "type": "inline_equation", + "content": "D_l^3" + }, + { + "bbox": [ + 48, + 378, + 295, + 499 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 48, + 378, + 295, + 499 + ], + "type": "inline_equation", + "content": "G_{l}" + }, + { + "bbox": [ + 48, + 378, + 295, + 499 + ], + "type": "text", + "content": " denotes the " + }, + { + "bbox": [ + 48, + 378, + 295, + 499 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 48, + 378, + 295, + 499 + ], + "type": "text", + "content": "-th embedding grid. " + }, + { + "bbox": [ + 48, + 378, + 295, + 499 + ], + "type": "inline_equation", + "content": "D_{l}" + }, + { + "bbox": [ + 48, + 378, + 295, + 499 + ], + "type": "text", + "content": " grows exponentially, resulting in multiple resolutions of the embedding. 
We then assign a learnable embedding (a latent feature vector " + }, + { + "bbox": [ + 48, + 378, + 295, + 499 + ], + "type": "inline_equation", + "content": "\\boldsymbol{v} \\in \\mathbb{R}^{F}" + }, + { + "bbox": [ + 48, + 378, + 295, + 499 + ], + "type": "text", + "content": ") to each lattice point of " + }, + { + "bbox": [ + 48, + 378, + 295, + 499 + ], + "type": "inline_equation", + "content": "G_{l}" + }, + { + "bbox": [ + 48, + 378, + 295, + 499 + ], + "type": "text", + "content": ". To query the spatial embedding for " + }, + { + "bbox": [ + 48, + 378, + 295, + 499 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 48, + 378, + 295, + 499 + ], + "type": "text", + "content": ", we bilinearly interpolate the features nearby " + }, + { + "bbox": [ + 48, + 378, + 295, + 499 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 48, + 378, + 295, + 499 + ], + "type": "text", + "content": " for each resolution, and concatenate them to obtain the final embedding " + }, + { + "bbox": [ + 48, + 378, + 295, + 499 + ], + "type": "inline_equation", + "content": "G(\\mathbf{x})" + }, + { + "bbox": [ + 48, + 378, + 295, + 499 + ], + "type": "text", + "content": ". 
More formally:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 76, + 505, + 294, + 527 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 505, + 294, + 527 + ], + "spans": [ + { + "bbox": [ + 76, + 505, + 294, + 527 + ], + "type": "interline_equation", + "content": "G (\\mathbf {x} \\mid \\Phi_ {\\mathrm {E}}) = \\underset {l = 1} {\\overset {L} {\\oplus}} \\operatorname {b i l i n e a r} \\left(\\mathbf {x}, V _ {l} [ \\mathbf {x} ]\\right), G: \\mathbb {R} ^ {3} \\rightarrow \\mathbb {R} ^ {L \\times F}, \\tag {13}", + "image_path": "8e2773c3b9f41b1d1f0a888ea191830ca3880040a24fbbf6b2bba09bdf4a08d2.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 532, + 295, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 532, + 295, + 588 + ], + "spans": [ + { + "bbox": [ + 48, + 532, + 295, + 588 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 48, + 532, + 295, + 588 + ], + "type": "inline_equation", + "content": "V_{l}[\\mathbf{x}]" + }, + { + "bbox": [ + 48, + 532, + 295, + 588 + ], + "type": "text", + "content": " is the set of features at the eight corners of the cell enclosing " + }, + { + "bbox": [ + 48, + 532, + 295, + 588 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 48, + 532, + 295, + 588 + ], + "type": "text", + "content": " within " + }, + { + "bbox": [ + 48, + 532, + 295, + 588 + ], + "type": "inline_equation", + "content": "G_{l}" + }, + { + "bbox": [ + 48, + 532, + 295, + 588 + ], + "type": "text", + "content": ". 
The spatial embedding " + }, + { + "bbox": [ + 48, + 532, + 295, + 588 + ], + "type": "inline_equation", + "content": "G(\\mathbf{x})" + }, + { + "bbox": [ + 48, + 532, + 295, + 588 + ], + "type": "text", + "content": " is then concatenated with other inputs (e.g., " + }, + { + "bbox": [ + 48, + 532, + 295, + 588 + ], + "type": "inline_equation", + "content": "\\omega_0" + }, + { + "bbox": [ + 48, + 532, + 295, + 588 + ], + "type": "text", + "content": " and auxiliary features) to the MLP for decoding the parameters " + }, + { + "bbox": [ + 48, + 532, + 295, + 588 + ], + "type": "inline_equation", + "content": "\\Theta" + }, + { + "bbox": [ + 48, + 532, + 295, + 588 + ], + "type": "text", + "content": ". We thus formulate the desired mapping (taking Eq. 6 for example) as a two-step procedure:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 114, + 594, + 294, + 611 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 594, + 294, + 611 + ], + "spans": [ + { + "bbox": [ + 114, + 594, + 294, + 611 + ], + "type": "interline_equation", + "content": "\\mathbf {M L P} \\left(G (\\mathbf {x} \\mid \\Phi_ {\\mathrm {E}}) \\mid \\Phi_ {\\mathrm {M}}\\right) = \\hat {\\Theta} (\\mathbf {x}), \\tag {14}", + "image_path": "dd07904043260f0a009dc49ec665e10ee10b78a2c27644e203ca8e7682aa49a3.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 48, + 616, + 295, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 616, + 295, + 694 + ], + "spans": [ + { + "bbox": [ + 48, + 616, + 295, + 694 + ], + "type": "text", + "content": "where the parameters of the spatial embedding " + }, + { + "bbox": [ + 48, + 616, + 295, + 694 + ], + "type": "inline_equation", + "content": "(\\Phi_{\\mathrm{E}})" + }, + { + "bbox": [ + 48, + 616, + 295, + 694 + ], + "type": "text", + "content": " and the MLP " + }, + { + "bbox": [ + 48, + 616, + 295, + 694 + ], + "type": "inline_equation", + "content": "(\\Phi_{\\mathrm{M}})" + }, + 
{ + "bbox": [ + 48, + 616, + 295, + 694 + ], + "type": "text", + "content": " together constitute the trainable parameters " + }, + { + "bbox": [ + 48, + 616, + 295, + 694 + ], + "type": "inline_equation", + "content": "\\Phi" + }, + { + "bbox": [ + 48, + 616, + 295, + 694 + ], + "type": "text", + "content": " of our implicit representation for NPM. Intuitively, a spatial embedding implicitly encodes the target distribution within a specific spatial region, while the multi-resolution design efficiently accounts for different levels of detail (LOD). By smoothly interpolating between the spatial embedding around positions and decoding them using neural networks," + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 314, + 79, + 561, + 135 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 79, + 561, + 135 + ], + "spans": [ + { + "bbox": [ + 314, + 79, + 561, + 135 + ], + "type": "text", + "content": "we naturally account for the spatial variations of the target distribution. This also lessens the burden of using a single monolithic MLP as the implicit representation, leaving it mainly focusing on decoding it into parametric models " + }, + { + "bbox": [ + 314, + 79, + 561, + 135 + ], + "type": "inline_equation", + "content": "\\Theta" + }, + { + "bbox": [ + 314, + 79, + 561, + 135 + ], + "type": "text", + "content": ". This significantly accelerates training/inference with a larger memory footprint." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 315, + 144, + 440, + 156 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 144, + 440, + 156 + ], + "spans": [ + { + "bbox": [ + 315, + 144, + 440, + 156 + ], + "type": "text", + "content": "5.2 Online Training Scheme" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 314, + 158, + 561, + 290 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 158, + 561, + 290 + ], + "spans": [ + { + "bbox": [ + 314, + 158, + 561, + 290 + ], + "type": "text", + "content": "**Renderer Integration.** We implement our method on a custom GPU-accelerated renderer based on OptiX [Parker et al. 2010], where the training and inference procedures are integrated into a wavefront-style path tracer [Laine et al. 2013]. This design choice allows ray casting, importance sampling, and BSDF evaluation to be performed in coherent chunks over large sets of traced paths by splitting the traditional megakernel path tracer into multiple specialized kernels. This improves GPU thread utilization by reducing the control flow divergence. Most importantly, this allows us to efficiently sample and evaluate the guiding distributions at each vertex along the path in parallel, thus significantly accelerating network training/inference." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 314, + 290, + 561, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 290, + 561, + 400 + ], + "spans": [ + { + "bbox": [ + 314, + 290, + 561, + 400 + ], + "type": "text", + "content": "Specifically, we place the training/inference samples into queues, where the structure-of-arrays (SoA) memory layout is applied to improve memory locality. At each ray intersection of the chunk of traced paths, the queries for guiding distributions within the queue are processed via batched network inference. 
The sampling and evaluation procedures are then performed, also using specialized kernels, before entering the next ray-cast kernel. This provides our method with maximum parallelism through large-batch training and inference, minimizing the latency caused by waiting network queries, while avoiding inefficient single-sample inference." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 314, + 407, + 561, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 407, + 561, + 561 + ], + "spans": [ + { + "bbox": [ + 314, + 407, + 561, + 561 + ], + "type": "text", + "content": "Training Scheme. We use the same configuration to train each scene online during rendering, without any scene-specific fine-tuning or pre-computation. During training, we collect MC radiance estimates along each traced path, and split them into mini-batches for training. The optimization step is performed for each spp, which allows drawing samples to be drawn from the latest guiding distribution. The distribution of the samples (for both rendering and training) is thus gets refined as training proceeds. We stop the training process after a fixed fraction of the total rendering budget (either time or sample count). While we always set this to " + }, + { + "bbox": [ + 314, + 407, + 561, + 561 + ], + "type": "inline_equation", + "content": "25\\%" + }, + { + "bbox": [ + 314, + 407, + 561, + 561 + ], + "type": "text", + "content": " in our experiments, we find our NPM technique converges quickly during training, generally reaching a local minimum after about 150spp, which amounts to about 1000 training steps/batches and 15s (including the runtimes of both training and rendering) on GPU." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 315, + 570, + 411, + 582 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 570, + 411, + 582 + ], + "spans": [ + { + "bbox": [ + 315, + 570, + 411, + 582 + ], + "type": "text", + "content": "5.3 Guiding Network" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 314, + 584, + 561, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 584, + 561, + 694 + ], + "spans": [ + { + "bbox": [ + 314, + 584, + 561, + 694 + ], + "type": "text", + "content": "We implement our network on the tiny-cuda-nn framework [Müller 2021] and integrate it into our renderer. The MLP we used (for both NPM-radiance and NPM-product) contains 3 linear layers of width 64. Each layer with ReLU activation, except for the last layer with our custom mapping functions (Tab. 1). We let the network output " + }, + { + "bbox": [ + 314, + 584, + 561, + 694 + ], + "type": "inline_equation", + "content": "K = 8" + }, + { + "bbox": [ + 314, + 584, + 561, + 694 + ], + "type": "text", + "content": " vMF components, i.e., " + }, + { + "bbox": [ + 314, + 584, + 561, + 694 + ], + "type": "inline_equation", + "content": "\\Theta \\in \\mathbb{R}^{8 \\times 4}" + }, + { + "bbox": [ + 314, + 584, + 561, + 694 + ], + "type": "text", + "content": ". For the multi-resolution spatial embedding, we use " + }, + { + "bbox": [ + 314, + 584, + 561, + 694 + ], + "type": "inline_equation", + "content": "L = 8" + }, + { + "bbox": [ + 314, + 584, + 561, + 694 + ], + "type": "text", + "content": " grids with increasing resolutions for each level. 
The coarsest level has a resolution of " + }, + { + "bbox": [ + 314, + 584, + 561, + 694 + ], + "type": "inline_equation", + "content": "D_{1} = 8" + }, + { + "bbox": [ + 314, + 584, + 561, + 694 + ], + "type": "text", + "content": " while the finest level has " + }, + { + "bbox": [ + 314, + 584, + 561, + 694 + ], + "type": "inline_equation", + "content": "D_{8} = 86" + }, + { + "bbox": [ + 314, + 584, + 561, + 694 + ], + "type": "text", + "content": ". The feature of each level contains " + }, + { + "bbox": [ + 314, + 584, + 561, + 694 + ], + "type": "inline_equation", + "content": "F = 4" + }, + { + "bbox": [ + 314, + 584, + 561, + 694 + ], + "type": "text", + "content": " floats, resulting in the final spatial embedding " + }, + { + "bbox": [ + 314, + 584, + 561, + 694 + ], + "type": "inline_equation", + "content": "G(\\mathbf{x}) \\in \\mathbb{R}^{8 \\times 4}" + }, + { + "bbox": [ + 314, + 584, + 561, + 694 + ], + "type": "text", + "content": ". In practice," + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 408, + 54, + 541, + 63 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 408, + 54, + 541, + 63 + ], + "spans": [ + { + "bbox": [ + 408, + 54, + 541, + 63 + ], + "type": "text", + "content": "Neural Parametric Mixtures for Path Guiding" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 552, + 55, + 561, + 62 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 552, + 55, + 561, + 62 + ], + "spans": [ + { + "bbox": [ + 552, + 55, + 561, + 62 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 411, + 708, + 561, + 717 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 411, + 708, + 561, + 717 + ], + "spans": [ + { + "bbox": [ + 411, + 708, + 561, + 717 + ], + "type": "text", + "content": "Vol. 1, No. 1, Article. Publication date: April 2025." 
+ } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 75, + 563, + 273 + ], + "blocks": [ + { + "bbox": [ + 50, + 75, + 563, + 273 + ], + "lines": [ + { + "bbox": [ + 50, + 75, + 563, + 273 + ], + "spans": [ + { + "bbox": [ + 50, + 75, + 563, + 273 + ], + "type": "image", + "image_path": "7791fafefea912ff395714d5a429e4ef823d518892e47681632a7d70725cece0.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 48, + 281, + 560, + 304 + ], + "lines": [ + { + "bbox": [ + 48, + 281, + 560, + 304 + ], + "spans": [ + { + "bbox": [ + 48, + 281, + 560, + 304 + ], + "type": "text", + "content": "Fig. 3. Equal-sample-count (750spp) comparisons for two scenes. We show the error (for both the zoom-in areas and whole images) and time cost of different methods. The yellow plots (as well as the other figures) refer to the results obtained by unidirectional path tracing." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 319, + 294, + 352 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 319, + 294, + 352 + ], + "spans": [ + { + "bbox": [ + 48, + 319, + 294, + 352 + ], + "type": "text", + "content": "we find that the performance of the network could be improved by enlarging the capacity of the MLP or the spatial embedding, leaving this a trade-off between quality and speed." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 352, + 294, + 440 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 352, + 294, + 440 + ], + "spans": [ + { + "bbox": [ + 48, + 352, + 294, + 440 + ], + "type": "text", + "content": "For training, we use a fixed learning rate of 0.005 that is large enough to acquire a fast convergence speed. 
Adaptive momentum techniques like Adam [Kingma and Ba 2015] are used for more robust training and better convergence. For importance sampling the decoded mixtures, we use the numerically stable strategy for vMF [Jakob 2012]. When inference, we also apply exponential moving average (EMA) to the weights of previous training steps, which better reduces the noise of the MC estimated gradients (Eq. 9)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 453, + 187, + 464 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 453, + 187, + 464 + ], + "spans": [ + { + "bbox": [ + 48, + 453, + 187, + 464 + ], + "type": "text", + "content": "6 RESULTS AND DISCUSSION" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 468, + 294, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 468, + 294, + 522 + ], + "spans": [ + { + "bbox": [ + 48, + 468, + 294, + 522 + ], + "type": "text", + "content": "We run all the experiments on an Intel Core i9-11900 CPU and an NVIDIA RTX3070 GPU. Following the similar practices of previous works [Müller 2019; Rath et al. 2020], we disable NEE and Russian roulette for all methods and set the maximum path length to 10. All methods are implemented upon a GPU path tracing renderer." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 523, + 294, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 523, + 294, + 567 + ], + "spans": [ + { + "bbox": [ + 48, + 523, + 294, + 567 + ], + "type": "text", + "content": "We render all images at the resolution of " + }, + { + "bbox": [ + 48, + 523, + 294, + 567 + ], + "type": "inline_equation", + "content": "1280 \\times 720" + }, + { + "bbox": [ + 48, + 523, + 294, + 567 + ], + "type": "text", + "content": ", and evaluate image quality using mean relative squared error (relMSE). 
All the images, additional metrics (MAPE and MRSE), and the false-color maps can be interactively inspected with our supplementary viewer." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 581, + 128, + 593 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 581, + 128, + 593 + ], + "spans": [ + { + "bbox": [ + 48, + 581, + 128, + 593 + ], + "type": "text", + "content": "6.1 Comparisons" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 594, + 295, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 594, + 295, + 693 + ], + "spans": [ + { + "bbox": [ + 48, + 594, + 295, + 693 + ], + "type": "text", + "content": "Our method is compared against improved PPG [Müller 2019] (an enhanced version of Practical Path Guiding [Müller et al. 2017]), and Variance-aware Path Guiding [Rath et al. 2020]. For the experimental configuration of the compared methods, we use the same as [Rath et al. 2020], except for fixing the BSDF selection probability to " + }, + { + "bbox": [ + 48, + 594, + 295, + 693 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 48, + 594, + 295, + 693 + ], + "type": "text", + "content": " (for both ours and the compared methods). Both compared methods used an iteratively reconstructed subdivision structure (i.e., the spatio-directional trees) to account for spatial variations. A total of 10 different scenes were tested." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 319, + 561, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 319, + 561, + 483 + ], + "spans": [ + { + "bbox": [ + 313, + 319, + 561, + 483 + ], + "type": "text", + "content": "We first show equal-spp comparisons on two representative scenes. The VEACH Door scene features strong indirect illumination that is difficult to handle with BSDF importance sampling, while the BATHROOM scene contains many specular and glossy surfaces. As shown in Fig. 
3, our proposed method outperforms the other two methods even when only learning incident radiance " + }, + { + "bbox": [ + 313, + 319, + 561, + 483 + ], + "type": "inline_equation", + "content": "L_{\\mathrm{i}}" + }, + { + "bbox": [ + 313, + 319, + 561, + 483 + ], + "type": "text", + "content": " (NPM-radiance). The noise is alleviated further with our full integrand learning method (NPM-product), since both of the scenes contain glossy surfaces, where the contribution of samples is strongly influenced by the BSDF term. We also note that our method quickly becomes effective at the very beginning of the training process (see the convergence plots in Fig. 3). This indicates a better training efficiency over classical guiding methods, which will be discussed later. Additional results on more test scenes are shown in Fig. 4 and Tab. 2, as well as the convergence plots in Fig. 5." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 483, + 561, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 483, + 561, + 605 + ], + "spans": [ + { + "bbox": [ + 313, + 483, + 561, + 605 + ], + "type": "text", + "content": "We then show the results of equal-time comparisons between our method and [Rath et al. 2020] in Fig. 6. Since they do not explicitly learn the product sampling distribution (i.e., conditioned on 5D inputs " + }, + { + "bbox": [ + 313, + 483, + 561, + 605 + ], + "type": "inline_equation", + "content": "\\omega_0" + }, + { + "bbox": [ + 313, + 483, + 561, + 605 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 483, + 561, + 605 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 313, + 483, + 561, + 605 + ], + "type": "text", + "content": "), we only use our radiance-based method (NPM-radiance) for fair comparisons. 
Instead of simply learning the incident radiance distribution " + }, + { + "bbox": [ + 313, + 483, + 561, + 605 + ], + "type": "inline_equation", + "content": "(L_{\\mathrm{i}})" + }, + { + "bbox": [ + 313, + 483, + 561, + 605 + ], + "type": "text", + "content": ", they use an improved target distribution to account for the variance and BSDF (marginalized over " + }, + { + "bbox": [ + 313, + 483, + 561, + 605 + ], + "type": "inline_equation", + "content": "\\omega_0" + }, + { + "bbox": [ + 313, + 483, + 561, + 605 + ], + "type": "text", + "content": "). Our method, on the other hand, achieves better performance by learning " + }, + { + "bbox": [ + 313, + 483, + 561, + 605 + ], + "type": "inline_equation", + "content": "L_{\\mathrm{i}}" + }, + { + "bbox": [ + 313, + 483, + 561, + 605 + ], + "type": "text", + "content": " only. We attribute this superiority of our method to both the better capacity of capturing spatio-directional correlation and more parallelism." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 315, + 613, + 384, + 624 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 613, + 384, + 624 + ], + "spans": [ + { + "bbox": [ + 315, + 613, + 384, + 624 + ], + "type": "text", + "content": "6.2 Evaluation" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 628, + 561, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 628, + 561, + 694 + ], + "spans": [ + { + "bbox": [ + 313, + 628, + 561, + 694 + ], + "type": "text", + "content": "Trainable Spatial Embedding. We analyze the performance of different forms of spatial input encoding in terms of convergence and quality (Fig. 8). The spatial embedding (i.e. parametric encoding) uses trainable latent vector grids to model the spatially-varying target distributions, leaving the MLP to focus on decoding this implicit representation into valid vMF mixtures. 
The other two variants" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 50, + 55, + 56, + 62 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 55, + 56, + 62 + ], + "spans": [ + { + "bbox": [ + 50, + 55, + 56, + 62 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 54, + 203, + 64 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 54, + 203, + 64 + ], + "spans": [ + { + "bbox": [ + 69, + 54, + 203, + 64 + ], + "type": "text", + "content": "Honghao Dong, Guoping Wang, and Sheng Li" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 50, + 708, + 198, + 717 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 708, + 198, + 717 + ], + "spans": [ + { + "bbox": [ + 50, + 708, + 198, + 717 + ], + "type": "text", + "content": "Vol. 1, No. 1, Article. Publication date: April 2025." + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 57, + 108, + 553, + 256 + ], + "blocks": [ + { + "bbox": [ + 50, + 78, + 560, + 97 + ], + "lines": [ + { + "bbox": [ + 50, + 78, + 560, + 97 + ], + "spans": [ + { + "bbox": [ + 50, + 78, + 560, + 97 + ], + "type": "text", + "content": "Table 2. Practical Path Guiding (PPG) [Müller 2019], Variance-aware Path Guiding [Rath et al. 2020], unidirectional path tracing and our method on 10 test scenes. We report relMSE, render time, and speedup using PPG as the baseline. Our NPM technique consistently reduces the error in the test scenes." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 57, + 108, + 553, + 256 + ], + "lines": [ + { + "bbox": [ + 57, + 108, + 553, + 256 + ], + "spans": [ + { + "bbox": [ + 57, + 108, + 553, + 256 + ], + "type": "table", + "html": "
PT (BSDF)[Müller 2019][Rath et al. 2020]Ours
PPG (baseline)Variance. PGNPM (radiance)NPM (product)
BATHROOM0.090548s0.05301.0 ×106s0.04851.09 ×107s0.02512.11 ×101s0.02032.61 ×108s
BEDROOM0.038340s0.02011.0 ×105s0.01611.26 ×109s0.01501.35 ×84s0.01461.38 ×90s
BREAKFAST ROOM0.009448s0.00691.0 ×100s0.00471.46 ×103s0.00381.80 ×63s0.00351.96 ×71s
LIVING ROOM0.027332s0.01841.0 ×74s0.01461.26 ×80s0.01571.17 ×47s0.01321.39 ×54s
PINK ROOM0.004637s0.00821.0 ×74s0.00611.34 ×76s0.00332.42 ×53s0.00263.21 ×62s
SALLE DE BAIN0.081938s0.02231.0 ×116s0.03460.64 ×116s0.01961.14 ×79s0.01401.59 ×86s
STAIRCASE0.181234s0.02981.0 ×80s0.02611.14 ×86s0.01941.54 ×72s0.01721.74 ×76s
VEACH DOOR0.620833s0.21671.0 ×82s0.19451.11 ×91s0.07502.89 ×65s0.04614.69 ×77s
VEACH EGG8.291833s0.83791.0 ×82s0.78701.07 ×85s0.59841.40 ×62s0.53521.56 ×69s
WHITE ROOM0.030138s0.02781.0 ×107s0.02531.10 ×103s0.01242.25 ×76s0.01002.75 ×87s
", + "image_path": "040ea9ba4caf98b087626d13b7305651b3dff2f6bf994b937ecc1d79ac7b96c9.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 51, + 273, + 294, + 349 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 273, + 294, + 349 + ], + "spans": [ + { + "bbox": [ + 51, + 273, + 294, + 349 + ], + "type": "text", + "content": "do not explicitly separate these two tasks by using a monolithic MLP. The addition of spatial embedding significantly improves convergence, and the multi-resolution design further reduces error by better modeling finer-grained spatio-directional correlations. Furthermore, this does not introduce noticeable computational overhead, as only a small fraction of parameters are involved in each training/inference." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 51, + 356, + 294, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 356, + 294, + 455 + ], + "spans": [ + { + "bbox": [ + 51, + 356, + 294, + 455 + ], + "type": "text", + "content": "Training Efficiency. The effectiveness of guiding methods under small training budgets is important, especially for applications such as preview rendering or even interactive rendering. We analyze the training efficiency of different guiding methods by comparing their performance under different training budgets (31 spp, 63 spp, 127 spp, respectively) in Fig. 7. Our method quickly converges to a good sampling distribution with only a few training samples and less training time cost (e.g., 31 spp with about 3s), thus outperforming previous guiding methods even with much fewer training samples." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 51, + 465, + 116, + 475 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 465, + 116, + 475 + ], + "spans": [ + { + "bbox": [ + 51, + 465, + 116, + 475 + ], + "type": "text", + "content": "6.3 Discussion" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 51, + 479, + 294, + 577 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 479, + 294, + 577 + ], + "spans": [ + { + "bbox": [ + 51, + 479, + 294, + 577 + ], + "type": "text", + "content": "Path Guiding Extensions. Our method can be extended with many well-established extensions suggested by previous path guiding algorithms. They are straightforward to be integrated and are promising to further improve our performance. For example: (1) the BSDF selection probability could also be learned by our network or by some other caching strategies [Müller et al. 2020], thus better handling the near-specular surfaces; and (2) the improved variance-aware target distribution [Rath et al. 2020] could be learned to account for the variance within the noisy MC estimates." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 51, + 584, + 294, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 584, + 294, + 693 + ], + "spans": [ + { + "bbox": [ + 51, + 584, + 294, + 693 + ], + "type": "text", + "content": "Performance Analysis. Our method serves effective means for path guiding while remaining performance practical. Specifically, the measured time cost per NPM evaluation (including both network inference and importance sampling the decoded mixture models) at " + }, + { + "bbox": [ + 51, + 584, + 294, + 693 + ], + "type": "inline_equation", + "content": "1280 \\times 720" + }, + { + "bbox": [ + 51, + 584, + 294, + 693 + ], + "type": "text", + "content": " is about 3ms. 
Meanwhile, a training step (i.e., a batch of " + }, + { + "bbox": [ + 51, + 584, + 294, + 693 + ], + "type": "inline_equation", + "content": "2^{18}" + }, + { + "bbox": [ + 51, + 584, + 294, + 693 + ], + "type": "text", + "content": " samples) costs about 10ms, indicating that a typical training process (about 1000 training steps) takes about 10s to converge on a single GPU. NPM contains a total of about 2M learnable parameters, resulting in a memory consumption of " + }, + { + "bbox": [ + 51, + 584, + 294, + 693 + ], + "type": "inline_equation", + "content": "< 10\\mathrm{MB}" + }, + { + "bbox": [ + 51, + 584, + 294, + 693 + ], + "type": "text", + "content": ". The compact design of our implicit NPM representation results in less control" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 317, + 273, + 560, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 273, + 560, + 326 + ], + "spans": [ + { + "bbox": [ + 317, + 273, + 560, + 326 + ], + "type": "text", + "content": "flow divergence, better memory locality, and better caching performance. Together, this makes our method practical for modern GPU parallelization, which is often harder to achieve with the tree-like spatial subdivision schemes used by most of the previous guiding methods." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 317, + 338, + 560, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 338, + 560, + 501 + ], + "spans": [ + { + "bbox": [ + 317, + 338, + 560, + 501 + ], + "type": "text", + "content": "Alternative Solutions. Several studies also aim to tackle the parallel issue. Dodik et al. 
[2022] use spatio-directional mixtures (i.e., conditioned on " + }, + { + "bbox": [ + 317, + 338, + 560, + 501 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 317, + 338, + 560, + 501 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 317, + 338, + 560, + 501 + ], + "type": "inline_equation", + "content": "\\omega_0" + }, + { + "bbox": [ + 317, + 338, + 560, + 501 + ], + "type": "text", + "content": ") to correlate target distributions with spatial positions. Ruppert et al. [2020] design strategies to warp the guiding distributions in the spatial subdivisions to resemble the true distribution. However, these methods adopt sophisticated strategies that are difficult to parallelize efficiently on GPUs (e.g., batched expectation-maximization (EM) applied to a varying number of mixtures) while requiring extra efforts to fit scene BSDFs for product sampling. In contrast, our method exploits trainable spatial embedding to encode the target distributions while using a decoder MLP to model the non-linearity between spatial features and PMMs in a GPU-friendly manner. Nevertheless, incorporating ideas from these studies, such as adaptively controlling the granularity of learned distributions, may further enhance our method." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 317, + 515, + 551, + 525 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 515, + 551, + 525 + ], + "spans": [ + { + "bbox": [ + 317, + 515, + 551, + 525 + ], + "type": "text", + "content": "7 CONCLUSION, LIMITATIONS AND FUTURE WORK" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 317, + 529, + 560, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 529, + 560, + 638 + ], + "spans": [ + { + "bbox": [ + 317, + 529, + 560, + 638 + ], + "type": "text", + "content": "We present Neural Parametric Mixtures, a novel method for learning the target distributions for path guiding techniques. 
We use a compact implicit neural representation to encode the spatio-directional parametric distributions. Compared to previous non-neural methods that use explicit spatial subdivision structures to store directional distributions, our continuous implicit representation is simpler and more efficient while naturally avoiding the artifacts (e.g., parallax) caused by their discretized subdivision schemes. Our NPM technique could be efficiently trained with stochastic gradient descent to minimize the divergence from the target distribution." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 317, + 639, + 560, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 639, + 560, + 693 + ], + "spans": [ + { + "bbox": [ + 317, + 639, + 560, + 693 + ], + "type": "text", + "content": "Despite the simplicity and effectiveness of our method, the main limitation resides in the lack of flexibility of our directional distribution representation, i.e., a fixed number of vMF components. While a similar issue exists in classical methods using PMMs [Dodik et al. 2022; Herholz et al. 
2016], recent methods achieve more accurate" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 408, + 54, + 541, + 63 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 408, + 54, + 541, + 63 + ], + "spans": [ + { + "bbox": [ + 408, + 54, + 541, + 63 + ], + "type": "text", + "content": "Neural Parametric Mixtures for Path Guiding" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 552, + 55, + 561, + 62 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 552, + 55, + 561, + 62 + ], + "spans": [ + { + "bbox": [ + 552, + 55, + 561, + 62 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 413, + 708, + 560, + 716 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 413, + 708, + 560, + 716 + ], + "spans": [ + { + "bbox": [ + 413, + 708, + 560, + 716 + ], + "type": "text", + "content": "Vol. 1, No. 1, Article. Publication date: April 2025." + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 56, + 76, + 545, + 635 + ], + "blocks": [ + { + "bbox": [ + 56, + 76, + 545, + 635 + ], + "lines": [ + { + "bbox": [ + 56, + 76, + 545, + 635 + ], + "spans": [ + { + "bbox": [ + 56, + 76, + 545, + 635 + ], + "type": "image", + "image_path": "4de0ae7607601ea031f8b26980ba8e4d0c7d3cc17f48b8484fc9982a6dbb12a4.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 48, + 646, + 562, + 687 + ], + "lines": [ + { + "bbox": [ + 48, + 646, + 562, + 687 + ], + "spans": [ + { + "bbox": [ + 48, + 646, + 562, + 687 + ], + "type": "text", + "content": "Fig. 4. Visual comparisons using the same experimental setup with Fig. 
3, all are rendered with 750spp at " + }, + { + "bbox": [ + 48, + 646, + 562, + 687 + ], + "type": "inline_equation", + "content": "1280 \\times 720" + }, + { + "bbox": [ + 48, + 646, + 562, + 687 + ], + "type": "text", + "content": ". We use the online training setup for all the guiding methods, i.e., all the samples are included in the final rendering. Our method exhibits better performance than other guiding methods in most scenes by only learning the incident radiance term while further reducing the error by incorporating the BSDF term (i.e., product sampling). More results on other test scenes, additional error metrics and false-color visualizations are provided in our supplementary interactive viewer." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 50, + 55, + 56, + 62 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 55, + 56, + 62 + ], + "spans": [ + { + "bbox": [ + 50, + 55, + 56, + 62 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 54, + 203, + 64 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 54, + 203, + 64 + ], + "spans": [ + { + "bbox": [ + 69, + 54, + 203, + 64 + ], + "type": "text", + "content": "Honghao Dong, Guoping Wang, and Sheng Li" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 50, + 708, + 198, + 717 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 708, + 198, + 717 + ], + "spans": [ + { + "bbox": [ + 50, + 708, + 198, + 717 + ], + "type": "text", + "content": "Vol. 1, No. 1, Article. Publication date: April 2025." 
+ } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 53, + 75, + 160, + 152 + ], + "blocks": [ + { + "bbox": [ + 53, + 75, + 160, + 152 + ], + "lines": [ + { + "bbox": [ + 53, + 75, + 160, + 152 + ], + "spans": [ + { + "bbox": [ + 53, + 75, + 160, + 152 + ], + "type": "image", + "image_path": "181fb606997372bc36fe02a725b3aeead1f4cfe80019ae1ba38494763dbe1a09.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 94, + 154, + 135, + 161 + ], + "lines": [ + { + "bbox": [ + 94, + 154, + 135, + 161 + ], + "spans": [ + { + "bbox": [ + 94, + 154, + 135, + 161 + ], + "type": "text", + "content": "VEACH DOOR" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 161, + 75, + 257, + 152 + ], + "blocks": [ + { + "bbox": [ + 161, + 75, + 257, + 152 + ], + "lines": [ + { + "bbox": [ + 161, + 75, + 257, + 152 + ], + "spans": [ + { + "bbox": [ + 161, + 75, + 257, + 152 + ], + "type": "image", + "image_path": "e6fd5634de776096702072077954764b1f851b04c67b47deb142edfa71811598.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 187, + 154, + 231, + 161 + ], + "lines": [ + { + "bbox": [ + 187, + 154, + 231, + 161 + ], + "spans": [ + { + "bbox": [ + 187, + 154, + 231, + 161 + ], + "type": "text", + "content": "LIVING ROOM" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 258, + 75, + 354, + 152 + ], + "blocks": [ + { + "bbox": [ + 258, + 75, + 354, + 152 + ], + "lines": [ + { + "bbox": [ + 258, + 75, + 354, + 152 + ], + "spans": [ + { + "bbox": [ + 258, + 75, + 354, + 152 + ], + "type": "image", + "image_path": "0dab85787ebf57ece3d409ae13042efbe88173d0445016fac2fde3b91dfca16d.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": 
"image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 355, + 75, + 449, + 152 + ], + "blocks": [ + { + "bbox": [ + 355, + 75, + 449, + 152 + ], + "lines": [ + { + "bbox": [ + 355, + 75, + 449, + 152 + ], + "spans": [ + { + "bbox": [ + 355, + 75, + 449, + 152 + ], + "type": "image", + "image_path": "9eca55ab6a658160878a3506294517e5cce89b945cfdf85fec532d3cee6b242a.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 450, + 76, + 547, + 152 + ], + "blocks": [ + { + "bbox": [ + 450, + 76, + 547, + 152 + ], + "lines": [ + { + "bbox": [ + 450, + 76, + 547, + 152 + ], + "spans": [ + { + "bbox": [ + 450, + 76, + 547, + 152 + ], + "type": "image", + "image_path": "9deff805ba49e53de948b70b54dc5f1ad8b4f9c77c5599195f5046879dc11f7a.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 54, + 162, + 161, + 239 + ], + "blocks": [ + { + "bbox": [ + 54, + 162, + 161, + 239 + ], + "lines": [ + { + "bbox": [ + 54, + 162, + 161, + 239 + ], + "spans": [ + { + "bbox": [ + 54, + 162, + 161, + 239 + ], + "type": "image", + "image_path": "08c2c9d51bb87f869a84fb7ff37be2f1cb560bc73017e4d1f4e3a4637eecad26.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 95, + 239, + 132, + 247 + ], + "lines": [ + { + "bbox": [ + 95, + 239, + 132, + 247 + ], + "spans": [ + { + "bbox": [ + 95, + 239, + 132, + 247 + ], + "type": "text", + "content": "VEACH EGG" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 162, + 162, + 257, + 239 + ], + "blocks": [ + { + "bbox": [ + 162, + 162, + 257, + 239 + ], + "lines": [ + { + "bbox": [ + 162, + 162, + 257, + 239 + ], + "spans": [ + { + "bbox": [ + 162, + 162, + 257, + 239 + ], + "type": "image", + "image_path": 
"14fec7d9f02364f9e1b3cc469dbbe763bb1b753ae5ada4e828afb851b65ca47c.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 187, + 239, + 235, + 247 + ], + "lines": [ + { + "bbox": [ + 187, + 239, + 235, + 247 + ], + "spans": [ + { + "bbox": [ + 187, + 239, + 235, + 247 + ], + "type": "text", + "content": "SALLE DE BAIN" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 257, + 162, + 353, + 239 + ], + "blocks": [ + { + "bbox": [ + 288, + 154, + 325, + 161 + ], + "lines": [ + { + "bbox": [ + 288, + 154, + 325, + 161 + ], + "spans": [ + { + "bbox": [ + 288, + 154, + 325, + 161 + ], + "type": "text", + "content": "BATHROOM" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 257, + 162, + 353, + 239 + ], + "lines": [ + { + "bbox": [ + 257, + 162, + 353, + 239 + ], + "spans": [ + { + "bbox": [ + 257, + 162, + 353, + 239 + ], + "type": "image", + "image_path": "459f119a1e6f73caa5bd490548bb83413ea566cd62800bcd3a50b97f94ad6164.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 280, + 239, + 336, + 247 + ], + "lines": [ + { + "bbox": [ + 280, + 239, + 336, + 247 + ], + "spans": [ + { + "bbox": [ + 280, + 239, + 336, + 247 + ], + "type": "text", + "content": "BREAKFAST ROOM" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 354, + 162, + 449, + 239 + ], + "blocks": [ + { + "bbox": [ + 386, + 154, + 417, + 161 + ], + "lines": [ + { + "bbox": [ + 386, + 154, + 417, + 161 + ], + "spans": [ + { + "bbox": [ + 386, + 154, + 417, + 161 + ], + "type": "text", + "content": "BEDROOM" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 354, + 162, + 449, + 239 + ], + "lines": [ + { + "bbox": [ + 354, + 162, + 449, + 239 + ], + "spans": [ + { + 
"bbox": [ + 354, + 162, + 449, + 239 + ], + "type": "image", + "image_path": "374aa6caa361e40fa1e2fa0c59419a8d5806a2cf71fb18ce126bd34ae3352425.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 380, + 239, + 424, + 247 + ], + "lines": [ + { + "bbox": [ + 380, + 239, + 424, + 247 + ], + "spans": [ + { + "bbox": [ + 380, + 239, + 424, + 247 + ], + "type": "text", + "content": "WHITE ROOM" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 450, + 162, + 547, + 239 + ], + "blocks": [ + { + "bbox": [ + 482, + 154, + 517, + 161 + ], + "lines": [ + { + "bbox": [ + 482, + 154, + 517, + 161 + ], + "spans": [ + { + "bbox": [ + 482, + 154, + 517, + 161 + ], + "type": "text", + "content": "STAIRCASE" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 450, + 162, + 547, + 239 + ], + "lines": [ + { + "bbox": [ + 450, + 162, + 547, + 239 + ], + "spans": [ + { + "bbox": [ + 450, + 162, + 547, + 239 + ], + "type": "image", + "image_path": "ac207f2b0dc08e9ca00b1f7fd2da77c0830570cdc5793049f393be18285a629d.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 479, + 239, + 516, + 247 + ], + "lines": [ + { + "bbox": [ + 479, + 239, + 516, + 247 + ], + "spans": [ + { + "bbox": [ + 479, + 239, + 516, + 247 + ], + "type": "text", + "content": "PINK ROOM" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 140, + 251, + 478, + 264 + ], + "blocks": [ + { + "bbox": [ + 140, + 251, + 478, + 264 + ], + "lines": [ + { + "bbox": [ + 140, + 251, + 478, + 264 + ], + "spans": [ + { + "bbox": [ + 140, + 251, + 478, + 264 + ], + "type": "image", + "image_path": "34654e72da7e2e9ef43f854cd70959d1671f0b7ceb629f08d155c24032847c36.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + 
}, + { + "bbox": [ + 48, + 275, + 560, + 326 + ], + "lines": [ + { + "bbox": [ + 48, + 275, + 560, + 326 + ], + "spans": [ + { + "bbox": [ + 48, + 275, + 560, + 326 + ], + "type": "text", + "content": "Fig. 5. Convergence plots correspond to Fig. 3 and Fig. 4. Unidirectional path tracing with BSDF importance sampling (PT-BSDF), Practical Path Guiding [Muller 2019], Variance-aware Path Guiding [Rath et al. 2020] and our method with different target distributions (NPM-radiance and NPM-product). Our methods consistently outperform these classical guiding methods, and quickly become effective even with a few training samples and short training time (e.g., 30spp, amounting to about 3 seconds on GPU), indicating practicality for preview or even interactive rendering. We attribute this success to the compact implicit representation and better spatial resolution of our method. The image results and detailed statistics could be inspected in the supplemental materials." + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + } + ], + "index": 22 + }, + { + "bbox": [ + 48, + 342, + 294, + 375 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 342, + 294, + 375 + ], + "spans": [ + { + "bbox": [ + 48, + 342, + 294, + 375 + ], + "type": "text", + "content": "directional distributions by adaptively merging and splitting the vMF components [Ruppert et al. 2020]. This, however, is non-trivial to apply to our NPM technique." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 48, + 376, + 295, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 376, + 295, + 486 + ], + "spans": [ + { + "bbox": [ + 48, + 376, + 295, + 486 + ], + "type": "text", + "content": "In future work, we will investigate more accurate approaches to implicitly encode parametric distributions while remaining efficient. 
Finding better basis functions or adaptively controlling the number of output components are two possible but challenging directions. Meanwhile, we would like to improve the efficiency of our method by using either novel architectural designs for neural networks, optimized implementation, or adapting previous extensions to path guiding algorithms. We believe these are important steps to make our method more practical for interactive or even real-time rendering pipelines, as well as other related applications that require" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 314, + 342, + 561, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 342, + 561, + 387 + ], + "spans": [ + { + "bbox": [ + 314, + 342, + 561, + 387 + ], + "type": "text", + "content": "fitting distributions with high-frequency spatial variations. In addition, applying our method to bidirectional path tracing [Popov et al. 2015], especially subspace probabilistic connections [Su et al. 2022], will also be an interesting future avenue." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 315, + 399, + 418, + 410 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 399, + 418, + 410 + ], + "spans": [ + { + "bbox": [ + 315, + 399, + 418, + 410 + ], + "type": "text", + "content": "ACKNOWLEDGMENTS" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 314, + 414, + 563, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 414, + 563, + 491 + ], + "spans": [ + { + "bbox": [ + 314, + 414, + 563, + 491 + ], + "type": "text", + "content": "This project was supported by the National Key R&D Program of China (No.2022YFB3303400) and NSFC of China (No. 62172013). 
We also thank the test scenes providers: Mareck (BATHROOM), Slyk-Drako (BEDROOM), Wig42 (BREAKFAST ROOM, LIVING ROOM, PINK ROOM, STAIRCASE), nacinus (SALLE DE BAIN), Jaakko Lehtinen (VEACH DOOR), Jay-Artist (WHITE ROOM), as well as the efforts for converting scene formats by Benedikt Bitterli [2016]." + } + ] + } + ], + "index": 28 + }, + { + "type": "image", + "bbox": [ + 50, + 502, + 164, + 574 + ], + "blocks": [ + { + "bbox": [ + 50, + 502, + 164, + 574 + ], + "lines": [ + { + "bbox": [ + 50, + 502, + 164, + 574 + ], + "spans": [ + { + "bbox": [ + 50, + 502, + 164, + 574 + ], + "type": "image", + "image_path": "a9e3e0bca254a0b92485bb2f58af12c621eb6de5a42b0e3fcc3070347e0f8bf4.jpg" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 86, + 576, + 133, + 583 + ], + "lines": [ + { + "bbox": [ + 86, + 576, + 133, + 583 + ], + "spans": [ + { + "bbox": [ + 86, + 576, + 133, + 583 + ], + "type": "text", + "content": "SALLE DE BAIN" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_caption" + } + ], + "index": 29 + }, + { + "type": "image", + "bbox": [ + 165, + 510, + 206, + 542 + ], + "blocks": [ + { + "bbox": [ + 165, + 502, + 201, + 510 + ], + "lines": [ + { + "bbox": [ + 165, + 502, + 201, + 510 + ], + "spans": [ + { + "bbox": [ + 165, + 502, + 201, + 510 + ], + "type": "text", + "content": "Rath et al." 
+ } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 165, + 510, + 206, + 542 + ], + "lines": [ + { + "bbox": [ + 165, + 510, + 206, + 542 + ], + "spans": [ + { + "bbox": [ + 165, + 510, + 206, + 542 + ], + "type": "image", + "image_path": "888d6d0406b223320ab0974c9477a53a96ee8d8238c7d8fd9369753542c3a598.jpg" + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_body" + } + ], + "index": 32 + }, + { + "type": "image", + "bbox": [ + 165, + 543, + 206, + 574 + ], + "blocks": [ + { + "bbox": [ + 165, + 543, + 206, + 574 + ], + "lines": [ + { + "bbox": [ + 165, + 543, + 206, + 574 + ], + "spans": [ + { + "bbox": [ + 165, + 543, + 206, + 574 + ], + "type": "image", + "image_path": "7dae49ea469d37eb159d984887468aa069cec61a542793b609cae33e2b5f8746.jpg" + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 173, + 576, + 197, + 583 + ], + "lines": [ + { + "bbox": [ + 173, + 576, + 197, + 583 + ], + "spans": [ + { + "bbox": [ + 173, + 576, + 197, + 583 + ], + "type": "text", + "content": "0.05407" + } + ] + } + ], + "index": 34, + "angle": 0, + "type": "image_caption" + } + ], + "index": 33 + }, + { + "type": "image", + "bbox": [ + 208, + 510, + 249, + 542 + ], + "blocks": [ + { + "bbox": [ + 209, + 502, + 246, + 510 + ], + "lines": [ + { + "bbox": [ + 209, + 502, + 246, + 510 + ], + "spans": [ + { + "bbox": [ + 209, + 502, + 246, + 510 + ], + "type": "text", + "content": "NPM (rad.)" + } + ] + } + ], + "index": 35, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 208, + 510, + 249, + 542 + ], + "lines": [ + { + "bbox": [ + 208, + 510, + 249, + 542 + ], + "spans": [ + { + "bbox": [ + 208, + 510, + 249, + 542 + ], + "type": "image", + "image_path": "721cfcdc5d3a0ac27c38adce4f26b3428fb63c7de4fc576c2cfea3db865be04d.jpg" + } + ] + } + ], + "index": 36, + "angle": 0, + "type": "image_body" + } + ], + "index": 36 + }, + { + "type": "image", + "bbox": [ + 208, + 543, + 249, + 574 
+ ], + "blocks": [ + { + "bbox": [ + 208, + 543, + 249, + 574 + ], + "lines": [ + { + "bbox": [ + 208, + 543, + 249, + 574 + ], + "spans": [ + { + "bbox": [ + 208, + 543, + 249, + 574 + ], + "type": "image", + "image_path": "e21985f77ddb52baeef3fabd66b4f32fc6341b6a7d5b8db27786557bde91e4dc.jpg" + } + ] + } + ], + "index": 37, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 216, + 576, + 241, + 583 + ], + "lines": [ + { + "bbox": [ + 216, + 576, + 241, + 583 + ], + "spans": [ + { + "bbox": [ + 216, + 576, + 241, + 583 + ], + "type": "text", + "content": "0.04926" + } + ] + } + ], + "index": 38, + "angle": 0, + "type": "image_caption" + } + ], + "index": 37 + }, + { + "type": "image", + "bbox": [ + 250, + 510, + 291, + 542 + ], + "blocks": [ + { + "bbox": [ + 255, + 502, + 287, + 510 + ], + "lines": [ + { + "bbox": [ + 255, + 502, + 287, + 510 + ], + "spans": [ + { + "bbox": [ + 255, + 502, + 287, + 510 + ], + "type": "text", + "content": "Reference" + } + ] + } + ], + "index": 39, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 250, + 510, + 291, + 542 + ], + "lines": [ + { + "bbox": [ + 250, + 510, + 291, + 542 + ], + "spans": [ + { + "bbox": [ + 250, + 510, + 291, + 542 + ], + "type": "image", + "image_path": "5101dc19c079bc740e073f41696f91e4b45adb66d30b0904a6def2725004c356.jpg" + } + ] + } + ], + "index": 40, + "angle": 0, + "type": "image_body" + } + ], + "index": 40 + }, + { + "type": "image", + "bbox": [ + 250, + 543, + 291, + 574 + ], + "blocks": [ + { + "bbox": [ + 250, + 543, + 291, + 574 + ], + "lines": [ + { + "bbox": [ + 250, + 543, + 291, + 574 + ], + "spans": [ + { + "bbox": [ + 250, + 543, + 291, + 574 + ], + "type": "image", + "image_path": "909a175a2fcc3697baabec10f44413f5aa481dea166ea26fc95636d89070df0c.jpg" + } + ] + } + ], + "index": 41, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 259, + 576, + 283, + 583 + ], + "lines": [ + { + "bbox": [ + 259, + 576, + 283, + 583 + ], + "spans": [ + { + "bbox": [ + 259, 
+ 576, + 283, + 583 + ], + "type": "text", + "content": "relMSE" + } + ] + } + ], + "index": 42, + "angle": 0, + "type": "image_caption" + } + ], + "index": 41 + }, + { + "type": "image", + "bbox": [ + 50, + 585, + 164, + 650 + ], + "blocks": [ + { + "bbox": [ + 50, + 585, + 164, + 650 + ], + "lines": [ + { + "bbox": [ + 50, + 585, + 164, + 650 + ], + "spans": [ + { + "bbox": [ + 50, + 585, + 164, + 650 + ], + "type": "image", + "image_path": "3f1102130cd97c4c939dd9fa09ed8f97c36f686554ea57ef2748057833451595.jpg" + } + ] + } + ], + "index": 43, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 93, + 651, + 123, + 658 + ], + "lines": [ + { + "bbox": [ + 93, + 651, + 123, + 658 + ], + "spans": [ + { + "bbox": [ + 93, + 651, + 123, + 658 + ], + "type": "text", + "content": "BEDROOM" + } + ] + } + ], + "index": 44, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 48, + 670, + 294, + 690 + ], + "lines": [ + { + "bbox": [ + 48, + 670, + 294, + 690 + ], + "spans": [ + { + "bbox": [ + 48, + 670, + 294, + 690 + ], + "type": "text", + "content": "Fig. 6. Equal-time comparisons (80s) on two test scenes between NPM(radiance) and Variance-aware Path Guiding [Rath et al. 2020]." 
+ } + ] + } + ], + "index": 54, + "angle": 0, + "type": "image_caption" + } + ], + "index": 43 + }, + { + "type": "image", + "bbox": [ + 165, + 585, + 206, + 617 + ], + "blocks": [ + { + "bbox": [ + 165, + 585, + 206, + 617 + ], + "lines": [ + { + "bbox": [ + 165, + 585, + 206, + 617 + ], + "spans": [ + { + "bbox": [ + 165, + 585, + 206, + 617 + ], + "type": "image", + "image_path": "eb53553187bc0b187fd83239885ae0a52d57bdcadfd1a255b9daabdc747058c5.jpg" + } + ] + } + ], + "index": 45, + "angle": 0, + "type": "image_body" + } + ], + "index": 45 + }, + { + "type": "image", + "bbox": [ + 165, + 618, + 206, + 650 + ], + "blocks": [ + { + "bbox": [ + 165, + 618, + 206, + 650 + ], + "lines": [ + { + "bbox": [ + 165, + 618, + 206, + 650 + ], + "spans": [ + { + "bbox": [ + 165, + 618, + 206, + 650 + ], + "type": "image", + "image_path": "52f1ca1d2c54273a68d3507fd7757099974cc8b5df5f4e725bd2131d4425e717.jpg" + } + ] + } + ], + "index": 46, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 173, + 651, + 197, + 658 + ], + "lines": [ + { + "bbox": [ + 173, + 651, + 197, + 658 + ], + "spans": [ + { + "bbox": [ + 173, + 651, + 197, + 658 + ], + "type": "text", + "content": "0.02176" + } + ] + } + ], + "index": 47, + "angle": 0, + "type": "image_caption" + } + ], + "index": 46 + }, + { + "type": "image", + "bbox": [ + 208, + 585, + 250, + 617 + ], + "blocks": [ + { + "bbox": [ + 208, + 585, + 250, + 617 + ], + "lines": [ + { + "bbox": [ + 208, + 585, + 250, + 617 + ], + "spans": [ + { + "bbox": [ + 208, + 585, + 250, + 617 + ], + "type": "image", + "image_path": "d38b0cf0ea9559e51984689d857719730f7ec7c4438a69b2938c81644d2b5f63.jpg" + } + ] + } + ], + "index": 48, + "angle": 0, + "type": "image_body" + } + ], + "index": 48 + }, + { + "type": "image", + "bbox": [ + 208, + 618, + 250, + 650 + ], + "blocks": [ + { + "bbox": [ + 208, + 618, + 250, + 650 + ], + "lines": [ + { + "bbox": [ + 208, + 618, + 250, + 650 + ], + "spans": [ + { + "bbox": [ + 208, + 618, + 250, + 650 + 
], + "type": "image", + "image_path": "f62aaec39bdfee8ed9c26f61ed59b7134f4b5214059c55a13757e21a964bad39.jpg" + } + ] + } + ], + "index": 49, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 216, + 651, + 241, + 658 + ], + "lines": [ + { + "bbox": [ + 216, + 651, + 241, + 658 + ], + "spans": [ + { + "bbox": [ + 216, + 651, + 241, + 658 + ], + "type": "text", + "content": "0.01324" + } + ] + } + ], + "index": 50, + "angle": 0, + "type": "image_caption" + } + ], + "index": 49 + }, + { + "type": "image", + "bbox": [ + 250, + 585, + 292, + 617 + ], + "blocks": [ + { + "bbox": [ + 250, + 585, + 292, + 617 + ], + "lines": [ + { + "bbox": [ + 250, + 585, + 292, + 617 + ], + "spans": [ + { + "bbox": [ + 250, + 585, + 292, + 617 + ], + "type": "image", + "image_path": "b37e2d6558a7080380e4c1b42a42cc24b1dc2668cf0b1cc5ffb8a3a703e156f5.jpg" + } + ] + } + ], + "index": 51, + "angle": 0, + "type": "image_body" + } + ], + "index": 51 + }, + { + "type": "image", + "bbox": [ + 251, + 618, + 291, + 649 + ], + "blocks": [ + { + "bbox": [ + 251, + 618, + 291, + 649 + ], + "lines": [ + { + "bbox": [ + 251, + 618, + 291, + 649 + ], + "spans": [ + { + "bbox": [ + 251, + 618, + 291, + 649 + ], + "type": "image", + "image_path": "261222e88bdfcbe95254dde56f97fce4ee0aab79da762475cb7d753ac46716c4.jpg" + } + ] + } + ], + "index": 52, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 259, + 651, + 283, + 658 + ], + "lines": [ + { + "bbox": [ + 259, + 651, + 283, + 658 + ], + "spans": [ + { + "bbox": [ + 259, + 651, + 283, + 658 + ], + "type": "text", + "content": "relMSE" + } + ] + } + ], + "index": 53, + "angle": 0, + "type": "image_caption" + } + ], + "index": 52 + }, + { + "type": "image", + "bbox": [ + 317, + 521, + 560, + 650 + ], + "blocks": [ + { + "bbox": [ + 317, + 521, + 560, + 650 + ], + "lines": [ + { + "bbox": [ + 317, + 521, + 560, + 650 + ], + "spans": [ + { + "bbox": [ + 317, + 521, + 560, + 650 + ], + "type": "image", + "image_path": 
"7c235d11fcbe63ea0cd6d21d4a1760468d38923fc545f357e03dd55a228e84b9.jpg" + } + ] + } + ], + "index": 55, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 314, + 660, + 561, + 690 + ], + "lines": [ + { + "bbox": [ + 314, + 660, + 561, + 690 + ], + "spans": [ + { + "bbox": [ + 314, + 660, + 561, + 690 + ], + "type": "text", + "content": "Fig. 7. We train each guiding method with small training budgets (31 spp, 63 spp, 127 spp, respectively) and render the scene with 500 spp. Our method outperforms previous methods even with much fewer training samples." + } + ] + } + ], + "index": 56, + "angle": 0, + "type": "image_caption" + } + ], + "index": 55 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 408, + 54, + 540, + 63 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 408, + 54, + 540, + 63 + ], + "spans": [ + { + "bbox": [ + 408, + 54, + 540, + 63 + ], + "type": "text", + "content": "Neural Parametric Mixtures for Path Guiding" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 556, + 55, + 560, + 62 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 556, + 55, + 560, + 62 + ], + "spans": [ + { + "bbox": [ + 556, + 55, + 560, + 62 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 411, + 708, + 560, + 717 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 411, + 708, + 560, + 717 + ], + "spans": [ + { + "bbox": [ + 411, + 708, + 560, + 717 + ], + "type": "text", + "content": "Vol. 1, No. 1, Article. Publication date: April 2025." 
+ } + ] + } + ], + "index": 57 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 77, + 121, + 176 + ], + "blocks": [ + { + "bbox": [ + 50, + 77, + 121, + 176 + ], + "lines": [ + { + "bbox": [ + 50, + 77, + 121, + 176 + ], + "spans": [ + { + "bbox": [ + 50, + 77, + 121, + 176 + ], + "type": "image", + "image_path": "c72949cfaa907720f948811ab1f1c468fcd58c869731f256eaa07f814824b055.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 48, + 187, + 295, + 238 + ], + "lines": [ + { + "bbox": [ + 48, + 187, + 295, + 238 + ], + "spans": [ + { + "bbox": [ + 48, + 187, + 295, + 238 + ], + "type": "text", + "content": "Fig. 8. Equal-time comparison (50s) of different input encoding. We report the sample count and error (relMSE) of each method. The dashed line in the plot marks the end of the training phase. The multi-resolution spatial embedding outperforms other methods while remaining training-efficient. Yellow plot refers to path tracing with BSDF importance sampling." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 121, + 77, + 192, + 176 + ], + "blocks": [ + { + "bbox": [ + 121, + 77, + 192, + 176 + ], + "lines": [ + { + "bbox": [ + 121, + 77, + 192, + 176 + ], + "spans": [ + { + "bbox": [ + 121, + 77, + 192, + 176 + ], + "type": "image", + "image_path": "877c0d7fc45351dbafd802d190c15c900b000d30915cae9165790ac96d5238e7.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 193, + 76, + 296, + 176 + ], + "blocks": [ + { + "bbox": [ + 193, + 76, + 296, + 176 + ], + "lines": [ + { + "bbox": [ + 193, + 76, + 296, + 176 + ], + "spans": [ + { + "bbox": [ + 193, + 76, + 296, + 176 + ], + "type": "image", + "image_path": "f4055e7418c0dce471dcaa70b30e4d70e2af9a7c639208ff5065e16660bec2b8.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 257, + 109, + 266 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 257, + 109, + 266 + ], + "spans": [ + { + "bbox": [ + 50, + 257, + 109, + 266 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 269, + 294, + 693 + ], + "type": "list", + "angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 50, + 269, + 294, + 302 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 269, + 294, + 302 + ], + "spans": [ + { + "bbox": [ + 50, + 269, + 294, + 302 + ], + "type": "text", + "content": "Benedikt Bitterli. 2016. Rendering resources. https://benedikt-bitterli.me/resources/. Norbert Bus and Tamy Boubekeur. 2017. Double Hierarchies for Directional Importance Sampling in Monte Carlo Rendering. Journal of Computer Graphics Techniques (JCGT) 6, 3 (28 August 2017), 25-37. 
http://jcgt.org/published/0006/03/02" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 302, + 294, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 302, + 294, + 326 + ], + "spans": [ + { + "bbox": [ + 50, + 302, + 294, + 326 + ], + "type": "text", + "content": "R. R. Currius, D. Dolonius, U. Assarsson, and E. Sintorn. 2020. Spherical Gaussian Light-field Textures for Fast Precomputed Global Illumination. Computer Graphics Forum 39, 2 (2020), 133-146." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 50, + 326, + 294, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 326, + 294, + 342 + ], + "spans": [ + { + "bbox": [ + 50, + 326, + 294, + 342 + ], + "type": "text", + "content": "Laurent Dinh, Jascha Sohl-Dickstein, and Samy Bengio. 2017. Density estimation using Real NVP. In International Conference on Learning Representations." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 50, + 342, + 294, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 342, + 294, + 358 + ], + "spans": [ + { + "bbox": [ + 50, + 342, + 294, + 358 + ], + "type": "text", + "content": "Stavros Diolatzis, Julien Philip, and George Drettakis. 2022. Active Exploration for Neural Global Illumination of Variable Scenes. ACM Transactions on Graphics (2022)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 50, + 358, + 294, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 358, + 294, + 381 + ], + "spans": [ + { + "bbox": [ + 50, + 358, + 294, + 381 + ], + "type": "text", + "content": "Ana Dodik, Marios Papas, Cengiz Öztireli, and Thomas Müller. 2022. Path Guiding Using Spatio-Directional Mixture Models. In Computer Graphics Forum, Vol. 41. Wiley Online Library, 172-189." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 50, + 381, + 294, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 381, + 294, + 397 + ], + "spans": [ + { + "bbox": [ + 50, + 381, + 294, + 397 + ], + "type": "text", + "content": "Saeed Hadadan, Shuhong Chen, and Matthias Zwicker. 2021. Neural radiosity. ACM Transactions on Graphics (TOG) 40, 6 (2021), 1-11." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 50, + 397, + 294, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 397, + 294, + 422 + ], + "spans": [ + { + "bbox": [ + 50, + 397, + 294, + 422 + ], + "type": "text", + "content": "Sebastian Herholz, Oskar Elek, Jiří Vorba, Hendrik Lensch, and Jaroslav Krivánek. 2016. Product importance sampling for light transport path guiding. In Computer Graphics Forum, Vol. 35. Wiley Online Library, 67-77." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 50, + 422, + 294, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 422, + 294, + 445 + ], + "spans": [ + { + "bbox": [ + 50, + 422, + 294, + 445 + ], + "type": "text", + "content": "Yuchi Huo, Rui Wang, Ruzahng Zheng, Hualin Xu, Hujun Bao, and Sung-Eui Yoon. 2020. Adaptive incident radiance field sampling and reconstruction using deep reinforcement learning. ACM Transactions on Graphics (TOG) 39, 1 (2020), 1-17." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 50, + 445, + 294, + 461 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 445, + 294, + 461 + ], + "spans": [ + { + "bbox": [ + 50, + 445, + 294, + 461 + ], + "type": "text", + "content": "Wenzel Jakob. 2012. Numerically stable sampling of the von Mises-Fisher distribution on " + }, + { + "bbox": [ + 50, + 445, + 294, + 461 + ], + "type": "inline_equation", + "content": "S^{\\wedge}2" + }, + { + "bbox": [ + 50, + 445, + 294, + 461 + ], + "type": "text", + "content": " (and other tricks). 
Interactive Geometry Lab, ETH Zürich, Tech. Rep (2012), 6." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 50, + 461, + 294, + 477 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 461, + 294, + 477 + ], + "spans": [ + { + "bbox": [ + 50, + 461, + 294, + 477 + ], + "type": "text", + "content": "Henrik Wann Jensen. 1995. Importance driven path tracing using the photon map. In Eurographics Workshop on Rendering Techniques. Springer, 326-335." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 50, + 477, + 287, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 477, + 287, + 485 + ], + "spans": [ + { + "bbox": [ + 50, + 477, + 287, + 485 + ], + "type": "text", + "content": "James T. Kajiya. 1986. The Rendering Equation. SIGGRAPH Comput. Graph. (1986)." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 50, + 485, + 294, + 501 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 485, + 294, + 501 + ], + "spans": [ + { + "bbox": [ + 50, + 485, + 294, + 501 + ], + "type": "text", + "content": "Diederik P. Kingma and Jimmy Ba. 2015. Adam: A Method for Stochastic Optimization. *ICLR* (2015)." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 50, + 501, + 294, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 501, + 294, + 525 + ], + "spans": [ + { + "bbox": [ + 50, + 501, + 294, + 525 + ], + "type": "text", + "content": "Eric P Lafortune and Yves D Willems. 1995. A 5D tree to reduce the variance of Monte Carlo ray tracing. In Eurographics Workshop on Rendering Techniques. Springer, 11-20." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 50, + 525, + 294, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 525, + 294, + 548 + ], + "spans": [ + { + "bbox": [ + 50, + 525, + 294, + 548 + ], + "type": "text", + "content": "Samuli Laine, Tero Karras, and Timo Aila. 2013. 
Megakernels considered harmful: Wavefront path tracing on GPUs. In Proceedings of the 5th High-Performance Graphics Conference, 137-143." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 50, + 548, + 294, + 572 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 548, + 294, + 572 + ], + "spans": [ + { + "bbox": [ + 50, + 548, + 294, + 572 + ], + "type": "text", + "content": "Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. 2020. NeRF: Representing Scenes as Neural Radiance Fields for View Synthesis. in ECCV." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 50, + 572, + 294, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 572, + 294, + 589 + ], + "spans": [ + { + "bbox": [ + 50, + 572, + 294, + 589 + ], + "type": "text", + "content": "Thomas Müller. 2019. \"Practical Path Guiding\" in Production. In ACM SIGGRAPH 2019 Courses (SIGGRAPH '19). ACM, New York, NY, USA, Article 18, 77 pages." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 50, + 589, + 294, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 589, + 294, + 613 + ], + "spans": [ + { + "bbox": [ + 50, + 589, + 294, + 613 + ], + "type": "text", + "content": "Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. 2022. Instant Neural Graphics Primitives with a Multiresolution Hash Encoding. ACM Trans. Graph. 41, 4, Article 102 (July 2022), 15 pages." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 50, + 613, + 294, + 636 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 613, + 294, + 636 + ], + "spans": [ + { + "bbox": [ + 50, + 613, + 294, + 636 + ], + "type": "text", + "content": "Thomas Müller, Markus Gross, and Jan Novák. 2017. Practical path guiding for efficient light-transport simulation. In Computer Graphics Forum, Vol. 36. Wiley Online Library, 91-100." 
+ } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 50, + 636, + 294, + 660 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 636, + 294, + 660 + ], + "spans": [ + { + "bbox": [ + 50, + 636, + 294, + 660 + ], + "type": "text", + "content": "Thomas Müller, Brian McWilliams, Fabrice Rousselle, Markus Gross, and Jan Novák. 2019. Neural importance sampling. ACM Transactions on Graphics (TOG) 38, 5 (2019), 1-19." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 50, + 660, + 294, + 676 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 660, + 294, + 676 + ], + "spans": [ + { + "bbox": [ + 50, + 660, + 294, + 676 + ], + "type": "text", + "content": "Thomas Müller, Fabrice Rousselle, Alexander Keller, and Jan Novák. 2020. Neural control variates. ACM Transactions on Graphics (TOG) 39, 6 (2020), 1-19." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 50, + 676, + 294, + 693 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 676, + 294, + 693 + ], + "spans": [ + { + "bbox": [ + 50, + 676, + 294, + 693 + ], + "type": "text", + "content": "Thomas Müller, Fabrice Rousselle, Jan Novák, and Alexander Keller. 2021. Real-Time Neural Radiance Caching for Path Tracing. ACM Trans. Graph. 40, 4, Article 36 (jul" + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 315, + 82, + 561, + 369 + ], + "type": "list", + "angle": 0, + "index": 42, + "blocks": [ + { + "bbox": [ + 326, + 82, + 371, + 90 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 82, + 371, + 90 + ], + "spans": [ + { + "bbox": [ + 326, + 82, + 371, + 90 + ], + "type": "text", + "content": "2021), 16 pages." 
+ } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 315, + 90, + 561, + 114 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 90, + 561, + 114 + ], + "spans": [ + { + "bbox": [ + 315, + 90, + 561, + 114 + ], + "type": "text", + "content": "Jacob Munkberg, Jon Hasselgren, Tianchang Shen, Jun Gao, Wenzheng Chen, Alex Evans, Thomas Mueller, and Sanja Fidler. 2022. Extracting Triangular 3D Models, Materials, and Lighting From Images. CVPR (2022)." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 315, + 114, + 541, + 121 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 114, + 541, + 121 + ], + "spans": [ + { + "bbox": [ + 315, + 114, + 541, + 121 + ], + "type": "text", + "content": "Thomas Muller. 2021. tiny-cuda-nn. https://github.com/NVlabs/tiny-cuda-nn" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 315, + 121, + 561, + 153 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 121, + 561, + 153 + ], + "spans": [ + { + "bbox": [ + 315, + 121, + 561, + 153 + ], + "type": "text", + "content": "Steven G Parker, James Bigler, Andreas Dietrich, Heiko Friedrich, Jared Hoberock, David Luebke, David McAllister, Morgan McGuire, Keith Morley, Austin Robison, et al. 2010. Optix: a general purpose ray tracing engine. ACM Transactions on Graphics (TOG) 29, 4 (2010), 1-13." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 315, + 153, + 561, + 170 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 153, + 561, + 170 + ], + "spans": [ + { + "bbox": [ + 315, + 153, + 561, + 170 + ], + "type": "text", + "content": "S. Popov, R. Ramamoorthi, F. Durand, and G. Drettakis. 2015. Probabilistic Connections for Bidirectional Path Tracing. Computer Graphics Forum 34, 4 (07 2015), 75-86." 
+ } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 315, + 170, + 561, + 193 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 170, + 561, + 193 + ], + "spans": [ + { + "bbox": [ + 315, + 170, + 561, + 193 + ], + "type": "text", + "content": "Alexander Rath, Pascal Grittmann, Sebastian Herholz, Petr Vévoda, Philipp Slusallek, and Jaroslav Křivánek. 2020. Variance-aware path guiding. ACM Transactions on Graphics (TOG) 39, 4 (2020), 151-1." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 315, + 193, + 561, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 193, + 561, + 217 + ], + "spans": [ + { + "bbox": [ + 315, + 193, + 561, + 217 + ], + "type": "text", + "content": "Lukas Ruppert, Sebastian Herholz, and Hendrik PA Lensch. 2020. Robust fitting of parallax-aware mixtures for path guiding. ACM Transactions on Graphics (TOG) 39, 4 (2020), 147-1." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 315, + 217, + 561, + 241 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 217, + 561, + 241 + ], + "spans": [ + { + "bbox": [ + 315, + 217, + 561, + 241 + ], + "type": "text", + "content": "Fujia Su, Sheng Li, and Guoping Wang. 2022. SPCBPT: Subspace-Based Probabilistic Connections for Bidirectional Path Tracing. ACM Trans. Graph. 41, 4, Article 77 (jul 2022), 14 pages. https://doi.org/10.1145/3528223.3530183" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 315, + 241, + 561, + 265 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 241, + 561, + 265 + ], + "spans": [ + { + "bbox": [ + 315, + 241, + 561, + 265 + ], + "type": "text", + "content": "Dor Verbin, Peter Hedman, Ben Mildenhall, Todd Zickler, Jonathan T. Barron, and Pratul P. Srinivasan. 2022. Ref-NeRF: Structured View-Dependent Appearance for Neural Radiance Fields. CVPR (2022)." 
+ } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 315, + 265, + 561, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 265, + 561, + 297 + ], + "spans": [ + { + "bbox": [ + 315, + 265, + 561, + 297 + ], + "type": "text", + "content": "Jiri Vorba, Johannes Hanika, Sebastian Herholz, Thomas Müller, Jaroslav Krivánek, and Alexander Keller. 2019. Path Guiding in Production. In ACM SIGGRAPH 2019 Courses (Los Angeles, California) (SIGGRAPH '19). ACM, New York, NY, USA, Article 18, 77 pages." + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 315, + 297, + 561, + 320 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 297, + 561, + 320 + ], + "spans": [ + { + "bbox": [ + 315, + 297, + 561, + 320 + ], + "type": "text", + "content": "Jiri Vorba, Ondrej Karlik, Martin Sik, Tobias Ritschel, and Jaroslav Krivanek. 2014. On-line learning of parametric mixture models for light transport simulation. ACM Transactions on Graphics (TOG) 33, 4 (2014), 1-11." + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 315, + 320, + 561, + 337 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 320, + 561, + 337 + ], + "spans": [ + { + "bbox": [ + 315, + 320, + 561, + 337 + ], + "type": "text", + "content": "Alex Yu, Ruilong Li, Matthew Tancik, Hao Li, Ren Ng, and Angjoo Kanazawa. 2021. PlenOctrees for Real-time Rendering of Neural Radiance Fields. In ICCV." + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 315, + 337, + 561, + 369 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 337, + 561, + 369 + ], + "spans": [ + { + "bbox": [ + 315, + 337, + 561, + 369 + ], + "type": "text", + "content": "Shilin Zhu, Zexiang Xu, Tiancheng Sun, Alexandr Kuznetsov, Mark Meyer, Henrik Wann Jensen, Hao Su, and Ravi Ramamoorthi. 2021. Hierarchical neural reconstruction for path guiding using hybrid path and photon samples. ACM Transactions on Graphics (TOG) 40, 4 (2021), 1-16." 
+ } + ] + } + ], + "index": 41 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 51, + 55, + 58, + 62 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 55, + 58, + 62 + ], + "spans": [ + { + "bbox": [ + 51, + 55, + 58, + 62 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 61, + 54, + 206, + 64 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 54, + 206, + 64 + ], + "spans": [ + { + "bbox": [ + 61, + 54, + 206, + 64 + ], + "type": "text", + "content": "- Honghao Dong, Guoping Wang, and Sheng Li" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 50, + 708, + 198, + 717 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 708, + 198, + 717 + ], + "spans": [ + { + "bbox": [ + 50, + 708, + 198, + 717 + ], + "type": "text", + "content": "Vol. 1, No. 1, Article. Publication date: April 2025." + } + ] + } + ], + "index": 43 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_04xxx/2504.04377/3cb1148d-2625-44e8-a64a-225c0e814138_content_list.json b/data/2025/2504_04xxx/2504.04377/3cb1148d-2625-44e8-a64a-225c0e814138_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..93207cbdbe44f760830e76b5b6c8d3df003e7074 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04377/3cb1148d-2625-44e8-a64a-225c0e814138_content_list.json @@ -0,0 +1,3011 @@ +[ + { + "type": "image", + "img_path": "images/01334081f210a9c5d39bed84a79543b78c394d85d18689b9e3b6b4e720660630.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 223, + 87, + 313, + 157 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "PolyGuard: A Multilingual Safety Moderation Tool for 17 Languages", + "text_level": 1, + "bbox": [ + 354, + 99, + 727, + 143 + ], + "page_idx": 0 + }, + { + "type": "text", 
+ "text": "Priyanshu Kumar $^{\\text{♥1}}$ Devansh Jain $^{\\text{♥1}}$ Akhila Yerukola $^{\\text{♥}}$", + "bbox": [ + 274, + 179, + 720, + 198 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Liwei Jiang\\* Himanshu Beniwal△ $\\diamond$ Thomas Hartvigsen Maarten Sap", + "bbox": [ + 197, + 215, + 799, + 234 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Carnegie Mellon University $\\spadesuit$ University of Washington $\\triangle$ IIT Gandhinagar $\\diamond$ University of Virginia $\\clubsuit$ Allen Institute for AI", + "bbox": [ + 205, + 244, + 792, + 281 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 457, + 304, + 540, + 320 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Truly multilingual safety moderation efforts for Large Language Models (LLMs) have been hindered by a narrow focus on a small set of languages (e.g., English, Chinese) as well as a limited scope of safety definition, resulting in significant gaps in moderation capabilities. To bridge these gaps, we release POLYGUARD, a new state-of-the-art multilingual safety model for safeguarding LLM generations, and the corresponding training and evaluation datasets. POLYGUARD is trained on POLYGUARDMIX, the largest multilingual safety training corpus to date containing 1.91M samples across 17 languages (e.g., Chinese, Czech, English, Hindi). We also introduce POLYGUARDPROMPTS, a high quality multilingual benchmark with 29K samples for the evaluation of safety guardrails. Created by combining naturally occurring multilingual human-LLM interactions and human-verified machine translations of an English-only safety dataset (WildGuardMix; Han et al., 2024), our datasets contain prompt-output pairs with labels of prompt harmfulness, response harmfulness, and response refusal. 
Through extensive evaluations across multiple safety and toxicity benchmarks, we demonstrate that POLYGUARD outperforms existing state-of-the-art open-weight and commercial safety classifiers by $5.5\\%$ . Our contributions advance efforts toward safer multilingual LLMs for all global users.", + "bbox": [ + 228, + 335, + 769, + 601 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/07b1c8fbc3923a4e6aed477b493bd29125cec7df8131a0b52664d39ca90e3329.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 380, + 609, + 406, + 623 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "PolyGuard Collection", + "bbox": [ + 436, + 609, + 596, + 625 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/14d692a5dd9fb9f6ca098b1d92717b3f4009134c5c31cf1e964e4a115286a3f3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 380, + 625, + 405, + 640 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "kpriyanshu256/polyguard", + "bbox": [ + 418, + 625, + 614, + 641 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 171, + 664, + 318, + 680 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent advances in large language models (LLMs), especially their multilingual capabilities, have led to their deployment to a diverse global user base that spans multiple languages. Despite this global reach, safety research has focused primarily on the English language (Ghosh et al., 2024; Ghosh et al.; Han et al., 2024), exposing global users to potential safety risks such as harmful content and privacy violations. 
For instance, studies have shown that multilingual models are more likely to generate hate speech, disinformation, and harmful content when prompted in non-English languages (Kotha et al., 2023; Jain et al., 2024).", + "bbox": [ + 169, + 696, + 826, + 796 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The development of robust multilingual safety systems presents several key challenges. First, building multilingual systems is inherently difficult due to challenges such as the lack of comprehensive datasets, the \"curse of multilinguality\" (Aharoni et al., 2019; Conneau et al., 2020; Gurgurov et al., 2024), and the inherent biases embedded in training corpora (Xu et al., 2024). Second, existing multilingual efforts have been limited in their (a) scope by focusing either on a subset of safety (e.g., PerspectiveAPI covering only toxicity, ignoring other unsafe content) and/or on a narrow set of language coverage (e.g., Llama-Guard-1", + "bbox": [ + 169, + 800, + 826, + 901 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 171, + 31, + 517, + 47 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "1Equal contributors, correspondence at msap2@cs.cmu.edu.", + "bbox": [ + 189, + 909, + 573, + 924 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.04377v2 [cs.CL] 7 Aug 2025", + "bbox": [ + 22, + 281, + 60, + 715 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "only covering English safety, ignoring toxicity and DuoGuard being evaluated on 4 very high resource languages only; Inan et al., 2023; Jain et al., 2024; Deng et al., 2025), or (b) performance (e.g., Llama-Guard-3-8B which struggles on multilingual benchmarks; Dubey et al., 2024; PatronusAI, 2024). 
Finally, most existing safety frameworks address only the single task of classifying safety and often rely on simplistic binary settings (safe/unsafe), which fail to capture the complex spectrum of harmful content that can manifest differently across cultural and linguistic contexts (Sap et al., 2020; Zhou et al., 2023).", + "bbox": [ + 169, + 103, + 826, + 202 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address these gaps, we release POLYGUARD (PG), a new state-of-the-art fine-tuned language model for multi-task safety detection and moderation. As Figure 1 highlights, PG can classify a multilingual input of a user prompt and an LLM response on five dimensions.", + "bbox": [ + 169, + 207, + 826, + 252 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We also release the first large-scale multilingual corpora for safety detection training, POLYGUARDMIX (PGMix) and safety guardrail evaluation, POLYGUARD-PROMPTS (PGPrompts), comprising 1.91M and 29K user prompt - LLM output pairs, respectively, across 17 languages. Our datasets contain binary and categorical labels for prompt harmfulness and response harmfulness, and response refusal (if the LLM response complies with the user request). We use a systematic labeling process that leverages a panel of English safety classifiers and LLM-as-a-judge (proprietary and open-weight LLM) to obtain these labels.", + "bbox": [ + 169, + 257, + 486, + 467 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We create our PGMix dataset by combining both: (a) naturally occurring multilingual human-LLM interactions from In-The-Wild (ITW) datasets, and (b) machine translations of WildGuardMix (Han et al., 2024), to ensure data diversity which is crucial for improved model performance (Davani et al., 2024). 
We utilize multiple LLMs to ensure high-quality translations of WildGuardMix, verified by a high average translation score of 81.15 as rated by human annotators.", + "bbox": [ + 169, + 472, + 486, + 626 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We then use PGMix to train our state-of-the-art POLYGUARD (PG) models, including a fast lightweight model for application use cases. Our empirical results show that PG", + "bbox": [ + 169, + 631, + 486, + 686 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "outperforms existing open-source and proprietary safety detectors on English-only as well as multilingual safety and toxicity benchmarks. Furthermore, we find that the incorporation of ITW samples in the training datasets makes PG models more robust to various data distributions, including code-switched and translated data.", + "bbox": [ + 169, + 686, + 823, + 744 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Overall, our datasets and models2 serve as a starting point for building powerful and robust multilingual safety detectors and advance efforts towards multilingual safe AI systems.", + "bbox": [ + 169, + 751, + 823, + 781 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 Dataset", + "text_level": 1, + "bbox": [ + 171, + 804, + 274, + 819 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address the critical need for multilingual safety detection, we introduce POLYGUARDMIX (PGMix) and POLYGUARDPROMPTS (PGPrompts), multilingual datasets specifically designed to train and evaluate robust safety classifiers. 
PGMix comprises 1.91M human-LLM interactions, including 1.47M machine-translated samples from WildGuardMix and 0.43M", + "bbox": [ + 169, + 837, + 826, + 895 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 171, + 32, + 517, + 47 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "2Model, code, and data are available under the ODC-BY license.", + "bbox": [ + 189, + 907, + 609, + 922 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "naturally-occurring samples from In-The-Wild datasets, whereas PGPrompts comprises 29K translated samples.", + "bbox": [ + 169, + 103, + 823, + 133 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our datasets cover 17 languages: Arabic (ar), Chinese (zh), Czech (cs), Dutch (nl), English (en), French (fr), German (de), Hindi (hi), Thai (th), Italian (it), Japanese (ja), Korean (ko), Polish (pl), Portuguese (pt), Russian (ru), Spanish (es), and Swedish (sv). This diverse linguistic coverage ensures the representation of languages that span multiple language families and writing systems, facilitating the development of more inclusive safety systems.", + "bbox": [ + 169, + 138, + 826, + 210 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Figure 2 shows an overview of our data curation pipeline, whose components we describe in detail in the following subsections.", + "bbox": [ + 169, + 215, + 823, + 244 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.1 Data Sources", + "text_level": 1, + "bbox": [ + 171, + 262, + 310, + 276 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Both PGMix and PGPrompts are constructed from the train and test samples of WildGuardMix (Han et al., 2024), a dataset of synthetic and natural single-turn human-LLM interactions with fine-grained annotations, respectively. 
In addition, PGMix also contains samples from In-TheWild datasets: LMSys-Chat1M (Zheng et al., 2023) and WildChat (Zhao et al., 2024). We posit that the combination of natural and synthetic sam", + "bbox": [ + 169, + 287, + 395, + 498 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/64f1eb9148050067cf1c5ad96d1ffffa8146bc4b4e995ba80a783bd2ca1b0b99.jpg", + "image_caption": [ + "Figure 2: Data curation process for PGMix (safety detection training) and PGPrompts (safety guardrail evaluation). Takeaway: PGMix combines machine-translated and naturally occurring data to improve data diversity and, consequently, model performance." + ], + "image_footnote": [], + "bbox": [ + 418, + 294, + 807, + 407 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "plies improves the diversity of data and consequently improves model performance (Davani et al., 2024).", + "bbox": [ + 169, + 498, + 823, + 526 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2 Machine Translation Pipeline", + "text_level": 1, + "bbox": [ + 171, + 544, + 433, + 559 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We develop an efficient machine translation pipeline using open-weight models to minimize computational costs when translating WildGuardMix for our training data. We employ two state-of-the-art translation models: TowerInstruct-7B-v0.2 (Alves et al., 2024) and NLLB-3.3B (Team et al., 2022). For optimal performance, we utilize TowerInstruct-7B-v0.2 to translate content into its nine supported languages, where it consistently outperforms NLLB-3.3B. We then leverage NLLB-3.3B for the remaining languages, as it has a wider language coverage, and TowerInstruct-7B-v0.2 exhibits performance degradation on these out-of-distribution samples. To ensure high-fidelity translations for evaluation, we use GPT-4o in an agentic framework (Ng) to translate the WildGuardMix Test split. 
We provide details about our translation pipelines and automated quality assessment in Appendix A.", + "bbox": [ + 169, + 570, + 826, + 710 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.3 Safety Annotation", + "text_level": 1, + "bbox": [ + 171, + 728, + 349, + 742 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We leverage a panel of English safety classifiers and LLM-as-judges to annotate safety violation categories automatically. We follow Llama-Guard-3-8B (Dubey et al., 2024) and define our safety violation taxonomy according to the MLCommons Safety Taxonomy4. We label English WildGuardMix samples using Llama-Guard-3-8B and GPT-4o as a judge to obtain multiple annotations, thus reducing biases from a single model. Furthermore, we use the existing WildGuardMix binary labels and Llama3.1-405B-Instruct (Dubey et al., 2024) as a judge to resolve conflicts and obtain the final annotations5. Finally, since PGMix and PGPrompts contain translations of WildGuardMix, we propagate safety labels from the", + "bbox": [ + 169, + 753, + 826, + 871 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 171, + 32, + 517, + 47 + ], + "page_idx": 2 + }, + { + "type": "page_footnote", + "text": "$^{3}$ WildChat-1M is available for modifications under the ODC-BY license.", + "bbox": [ + 189, + 881, + 658, + 895 + ], + "page_idx": 2 + }, + { + "type": "page_footnote", + "text": "4https://mlcommons.org/2024/04/mlc-aisafety-v0-5-poc/", + "bbox": [ + 192, + 896, + 586, + 909 + ], + "page_idx": 2 + }, + { + "type": "page_footnote", + "text": "5We use the same prompt as Llama-Guard-3-8B for all LLM-as-judges.", + "bbox": [ + 192, + 909, + 645, + 924 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/da2dedc8d353ba756792c421a28eb3d58a44bc6e99849e62e8c2dfddfcef3244.jpg", 
+ "image_caption": [ + "Figure 3: Safety category distribution for user prompts and model responses for WildGuard-Mix train samples. The model name (GPT-4o and Llama-Guard-3-8B) represents the LLM used as a judge to automatically annotate the safety category. These annotations are then ensembled together, using Llama3.1-405B-Instruct to break ties (Combined). Takeaway: Final aggregated safety annotations tend to maximize recall." + ], + "image_footnote": [], + "bbox": [ + 174, + 103, + 480, + 313 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/f9f0c1c9bd1f15fc16731e2aa7d66b6c071d5ab63757f5412e1f1c2f8fd487e7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 509, + 103, + 816, + 313 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "annotated English samples to other languages. ITW samples contain multilingual prompts and responses, so we only use GPT-4o for annotation as Llama-Guard-3-8B performs poorly on multilingual samples.", + "bbox": [ + 169, + 426, + 823, + 470 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Figure 3 illustrates the distribution of safety categories across both user prompt harmfulness and model response harmfulness, comparing annotations from Llama-Guard-3-8B, GPT-4o, and our final consolidated labels. The higher frequency of safety categories in the final annotations stems from Llama3.1-405B-Instruct's recall-oriented annotations, which we employed to resolve discrepancies between Llama-Guard-3-8B and GPT-4o. 
Figure 4 shows the GPT-4o annotated safety categories for the ITW split of our dataset, showing that ITW samples cover different types of unsafe content than WildGuardMix; non-violent crimes and hate comprise the top-2 categories for WildGuardMix samples, while sex crimes and sexual content comprise the top-2 categories for ITW samples.", + "bbox": [ + 169, + 474, + 826, + 603 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.4 Human Validation", + "text_level": 1, + "bbox": [ + 171, + 622, + 352, + 636 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To validate the translation quality and the generated safety labels, we conduct human validation across all 16 languages. Due to budget constraints, we randomly sample 50 data points per language, ensuring a balanced distribution across PGMix (train) and PGPrompts (test), harmful and harmless labels, as well as user prompts and model responses. We recruit workers from Prolific, filtering them based on their proficiency in each language. Each data point is evaluated by three annotators.", + "bbox": [ + 169, + 648, + 823, + 736 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "For each data point, we ask the annotators to assess the following.", + "bbox": [ + 171, + 742, + 650, + 758 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Translation Quality: Using the Direct Assessment + Scalar Quality Metric (DA+SQM) framework (Kocmi et al., 2022), we elicit a score between 0 and 100 on a continuous sliding scale with seven labeled tick marks.", + "2. Safety Label for the Source Sentence: Annotators assign a label of either 'harmful' or 'safe' for the source sentence in English.", + "3. Safety Label for the Translated Sentence: Annotators assign a 'harmful' or 'safe' label for the corresponding translation." 
+ ], + "bbox": [ + 207, + 770, + 826, + 886 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 173, + 32, + 517, + 47 + ], + "page_idx": 3 + }, + { + "type": "footer", + "text": "$^{6}$ https://www.prolific.com", + "bbox": [ + 191, + 909, + 380, + 922 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Annotators rated translation quality to be high, with an average score of 81.15 across all 16 languages. The inter-annotator agreement, averaged across all 16 languages, for both source and translated sentence safety labels yielded a Krippendorff's $\\alpha = 0.46$ . Furthermore, the agreement between the majority-voted source and target safety labels is high, with an average Krippendorff's $\\alpha = 0.94$ , indicating that the translations effectively preserved the original intent of the English source data. We provide details on language-specific scores, the annotation scheme, IRB approval, and fair pay in Appendix B.", + "bbox": [ + 169, + 103, + 573, + 270 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/86d7b5bd1f6dc6ae24ed987b871645b581f681659f8d1f3bb052cbaa5de2d04c.jpg", + "image_caption": [ + "Figure 4: Safety category distributions for PGMix ITW samples." + ], + "image_footnote": [], + "bbox": [ + 591, + 75, + 815, + 237 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3 POLYGUARD: A 17-Language Safety Moderation Tool", + "text_level": 1, + "bbox": [ + 169, + 290, + 683, + 309 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To build POLYGUARD, we fine-tune Qwen2.5-7B-Instruct (Yang et al., 2024a) and Ministral-8B-Instruct-2410, both of which have been shown to have state-of-the-art performance in multilingual knowledge and commonsense, code, and math settings (Qwen; Mistral). 
We refer to these models as PG Qwen2.5 and PG Ministral In addition, we also fine-tune Qwen2.5-0.5B-Instruct to build PG Smol.", + "bbox": [ + 169, + 321, + 826, + 391 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The models are fine-tuned on PGMix using Low-Rank Adapters (Hu et al., 2022). We follow Han et al. (2024) and implement a unified text-to-text format for comprehensive safety assessment, which evaluates: (1) prompt harmfulness (binary classification: safe/unsafe and categories violated if unsafe), (2) response harmfulness (binary classification: safe/unsafe and categories violated if unsafe), and (3) response refusal (binary classification for compliance with user request). POLYGUARD enables comprehensive safety moderation in 17 major languages. We provide detailed training specifications in Appendix C.", + "bbox": [ + 169, + 397, + 826, + 498 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4 Results & Research Questions", + "text_level": 1, + "bbox": [ + 169, + 515, + 475, + 532 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "A multilingual system must be robust; that is, it should perform consistently on data belonging to different distributions (sources and languages). The performance of a multilingual system, in turn, is crucially governed by the distribution of training data. Hence, we study the performance of POLYGUARD on POLYGUARDPROMPTS and multiple out-of-distribution evaluation benchmarks, and the influence of ITW samples and low-quality translations on model performance. 
We perform one run per evaluation due to computational constraints.", + "bbox": [ + 169, + 546, + 826, + 632 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Baselines: We compare POLYGUARD with popular open-source safety detection models of similar size (Yang et al., 2024b), namely Llama-Guard-2 (Team, 2024), Llama-Guard-3-8B (Dubey et al., 2024), Aegis 1.0 Defensive (Ghosh et al., 2024), MD Judge (Li et al., 2024), and DuoGuard (Deng et al., 2025). We also benchmark proprietary models, namely Perspective API7, OpenAI Omni Moderation8, and Google Moderation9.", + "bbox": [ + 169, + 638, + 825, + 712 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1 How do PG models perform on the in-distribution PGPrompts benchmark?", + "text_level": 1, + "bbox": [ + 169, + 727, + 779, + 744 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We first evaluate PG and open-source baselines on POLYGUARDPROMPTs benchmark, comprising 29K samples, using the following metrics: (1) for binary tasks of prompt harmfulness, response harmfulness, and response refusal, we use F1 score for the positive label (unsafe for harmfulness and yes for response refusal), and (2) for the tasks of prompt violations and response violations, we compare the list of ground truth and predicted categories using Exact Match and Jaccard Similarity.", + "bbox": [ + 169, + 753, + 826, + 838 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "PG models based on Qwen2.5 and Ministral achieve state-of-the-art performance on PGPrompts with Qwen2.5 performing marginally better. 
PG Smol outperforms DuoGuard,", + "text_level": 1, + "bbox": [ + 169, + 844, + 826, + 875 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 171, + 31, + 517, + 47 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "7https://perspectiveapi.com/", + "bbox": [ + 189, + 881, + 401, + 895 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "8https://platform.openai.com/docs/models/omni-moderation-latest", + "bbox": [ + 192, + 896, + 658, + 909 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "9https://cloud.google.com/natural-language/docs/moderating-text", + "bbox": [ + 192, + 909, + 658, + 922 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 946, + 504, + 959 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/f2520c3d4db2dc1f554f4fef2eb312adfe3cdc4cd29305c0f48bc3f31bda42b9.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelHarmful Request F1 ScoreResponse Refusal F1 ScoreHarmful Response F1 ScorePrompt Safety ViolationsResponse Safety Violations
Exact MatchJaccardExact MatchJaccard
Aegis-Defensive66.45------
MD Judge43.54-49.12----
Llama Guard 260.87-63.62----
Llama Guard 367.98-65.7471.9874.5987.2488.37
DuoGuard62.59-37.99----
PG Qwen2.5 7B (Ours)87.1283.5974.0880.8785.4486.6788.79
PG Ministral (Ours)86.0284.4573.7579.9284.3086.8588.78
PG Smol (Ours)83.7681.3666.8277.0281.5184.0585.92
", + "bbox": [ + 176, + 99, + 823, + 260 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "its similar size counterpart (Table 1). Aegis Defensive supports only a single text as input and is hence evaluated for Harmful Request only. Since the remaining baselines do not explicitly support Harmful Response, we approximate the prediction by executing them on prompt + response. None of the baselines support the Response Refusal task. Out of all baselines, the safety category taxonomy is the same for Llama-Guard-3 and PG. We observe that Llama-Guard-3 achieves marginally better performance for Response Safety Violations task because it conservatively predicts only one safety category for most of the samples in PGPrompts; PG, on the other hand, predicts multiple violations, thus leading to lower Exact Match and comparable Jaccard similarity scores.", + "bbox": [ + 169, + 325, + 826, + 454 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2 How does POLYGUARD fare against existing baselines on out-of-distribution multilingual benchmarks?", + "text_level": 1, + "bbox": [ + 169, + 469, + 784, + 500 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/c55222dbf21f2abff079f48af27aa803b24e7526403aa98dc764f985f5738d93.jpg", + "table_caption": [ + "Table 1: Evaluation of POLYGUARD models and baselines on POLYGUARDPROMPTS. Take-away: PG models outperform baselines on in-distribution data." + ], + "table_footnote": [], + "table_body": "
TypeModelRTP-LX En.RTP-LX Mul.Mod. En.Mod. Mul.XS En. (LG)XS Mul. (LG)XS En. (Aegis)XS Mul. (Aegis)MJ En. (LG)MJ Mul. (LG)MJ En. (Aegis)MJ Mul. (Aegis)Avg
Open -WeightAegis-Defensive84.2383.2171.1359.2266.5935.4769.4636.7590.9179.5290.6179.3770.54
MD Judge85.2838.6079.8661.4669.0017.2269.5617.7191.2138.4790.9137.9758.10
Llama Guard 239.4734.9975.8372.5553.7022.3250.5722.5677.5262.3876.8661.5654.19
Llama Guard 348.5144.8778.7373.9860.8425.7057.5026.9879.9278.1479.6777.5261.03
Duo Guard91.8350.4670.8549.4461.1626.0364.8327.3189.1841.8489.2641.4458.64
Closed -SourcePerspective API97.0981.9769.4064.1927.646.6433.926.8553.7945.3753.2344.7348.73
OpenAI Omni87.5274.1074.4368.0858.0222.4860.1123.5282.5966.9482.7366.9463.95
Google Mod.90.4483.2159.6453.8950.4441.8455.7144.7983.1480.8583.6681.0067.38
OursPG Qwen2.591.3483.2174.3969.5172.0735.3374.9337.1393.9386.4493.9786.3374.88
PG Ministrial87.2579.5874.9070.5171.3034.9374.0736.6895.7183.1195.3983.0273.87
PG Smol92.371.5669.363.0070.2833.2274.3835.1994.3973.5993.7273.3470.36
", + "bbox": [ + 176, + 517, + 823, + 674 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 2: F1 scores of safety detectors on Multilingual Guardrail Test Suite; metrics are in bold and underlined for the best second-best performing models respectively. Mod.=Moderation, XS=XSafety, MJ=MultiJail, En.=English, Mul.=Multilingual, LG=Llama Guard. Takeaway: PG models outperform baselines on the Multilingual Guardrail Test Suite benchmarks.", + "bbox": [ + 169, + 684, + 828, + 742 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Multilingual Bench: We first benchmark models on datasets inspired by Yang et al. (2024b). This comprises multilingual toxicity and safety datasets, namely RTP-LX (de Wynter et al., 2024), OpenAI Moderation (Markov et al., 2023), $^{10}$ XSafety (Wang et al., 2023), and MultiJail (Deng et al., 2024). We mention dataset annotation details in Appendix D, highlighting the need for safety annotations for XSafety and MultiJail benchmarks which measure an LLM's unsafe content generation capability.", + "bbox": [ + 169, + 765, + 826, + 853 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Patronus AI Bench: We also evaluate models using the recall score on the benchmarks reported by PatronusAI (2024), consisting of toxic/unsafe samples from English and multi-", + "bbox": [ + 169, + 854, + 828, + 886 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 173, + 32, + 517, + 47 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "10The OpenAI Moderation dataset comprises only English samples and is extended to a multilingual setting using Google Translate.", + "bbox": [ + 169, + 896, + 823, + 925 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 504, + 960 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "lingual toxicity and safety datasets. 
We perform our evaluations on all samples instead of a small subset. Appendix E contains details about the benchmark.", + "bbox": [ + 169, + 103, + 823, + 132 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Results show that our PG models outperform the baselines on most datasets, achieving higher scores for the unsafe class (Table 2). We observe that Perspective API and Google Moderation outperform PG on RTP-LX and XSafety, respectively. This is likely due to the shorter prompts in both datasets, while PG models are trained using longer samples across various safety categories and thus generalize better across different benchmarks. PG models also outperform existing detectors on safety datasets in the Patronus AI benchmark and also achieve the best average performance (Table 3).", + "bbox": [ + 169, + 138, + 826, + 238 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/65c3265b462c4ea8c93985a5838b37d9d9f2ecd49b1593fa2b9e5cf51fe3081e.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
TypeModeltoxic-text-enjigsawukr-toxicitythai-toxicity-tweettoxic-text-pttoxic-chatBeaver TailsSalad-DataAvg
Open-WeightAegis-Defensive80.3279.2762.8067.2986.54--91.6477.98
MD Judge68.4573.405.800.8056.8663.5481.4196.6855.87
Llama Guard 223.7320.676.324.8353.5123.1759.2016.1425.95
Llama Guard 340.0327.209.6011.5053.7827.3052.6829.4231.43
Duo Guard93.6593.180.729.2774.2254.1787.5470.7060.43
Closed-SourcePerspective API77.2086.20--93.0015.8923.001.8037.14
OpenAI Omni54.2086.8041.6034.0099.8046.3567.8045.8059.54
Google Mod.95.2098.0086.6041.8097.6069.2777.6027.2074.16
OursPG Qwen2.585.3283.4765.2446.4784.2697.6590.6597.0881.27
PG Ministrial82.6079.1155.5235.7680.5197.3990.5396.8877.29
PG Smol89.5785.7259.1637.2081.8496.1084.6096.4278.83
", + "bbox": [ + 173, + 252, + 823, + 452 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3 Are PG models robust?", + "text_level": 1, + "bbox": [ + 171, + 532, + 387, + 547 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We study the average performance of the PG models trained using 3 datasets: only translated data, only ITW data, and translated + ITW data. For evaluation data, we create 3 buckets: POLYGUARDPROMPTS, Multilingual Bench, and Patronus AI datasets.", + "bbox": [ + 169, + 560, + 826, + 603 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "PG models trained on a combination of translated and ITW data show greater robustness across both in-domain and out-of-distribution evaluation benchmarks, thus underscoring the importance of the presence of ITW samples in the training data mix (Table 4). Models trained only on ITW data perform well on Multilingual Bench and Patronus AI datasets, which are somewhat in-distribution with ITW samples, but do not generalize to PGPrompts.", + "bbox": [ + 169, + 609, + 828, + 681 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/038615ea481e4df5198d2af6b4e813cc8c85be3aa566f2c629cf2b62c4e725fa.jpg", + "table_caption": [ + "Table 3: Recall scores on unsafe samples from Patronus' benchmarking; metrics for the best performing model are in bold, whereas those for the second-best performing model are underlined. Takeaway: PG models outperform baselines on Patronus AI's benchmarks." + ], + "table_footnote": [], + "table_body": "
POLYGUARDTraining DataPGPromptsMultilingual BenchPatronus AI
Qwen2.5Translated84.9574.5679.79
ITW64.6974.6382.26
Translated + ITW83.7974.8881.27
MinistralTranslated84.3273.8677.07
ITW63.1175.3585.76
Translated + ITW83.4473.8777.29
SmolTranslated82.2269.9974.84
ITW59.465.0872.21
Translated + ITW80.0670.3578.82
", + "bbox": [ + 191, + 695, + 805, + 849 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 4: Average F1 score on POLYGUARDPROMPTS and Multilingual Bench, and Recall on PatronusAI, when models are trained with different training dataset settings. Underlined values represent in-distribution evaluations. Takeaway: Models trained with translated + ITW samples are robust on different distributions of evaluation data", + "bbox": [ + 169, + 863, + 823, + 921 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 173, + 32, + 517, + 47 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 491, + 946, + 504, + 959 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Furthermore, we investigate in detail the influence of the presence of ITW data in our training data mix for each benchmark dataset (Figure 5). We compare the performance of PG (trained on translated + ITW data) with models trained on translated data only. We observe that the performance of Qwen2.5 degrades for most of the datasets when ITW data are absent from the training mix. The performance differences for Ministrial are more balanced compared to Qwen2.5, that is, both improvement and degradation are observed across the evaluation datasets. The introduction of ITW data benefits the performance of the ToxicChat benchmark (Lin et al., 2023) the most for both models, since ITW data is most aligned with the ToxicChat benchmark.", + "bbox": [ + 169, + 103, + 826, + 229 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.4 How does performance vary on English vs Translated vs Code-Switched data?", + "text_level": 1, + "bbox": [ + 169, + 246, + 784, + 263 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We study the performance variation of models on code-switched data, which consists of tokens belonging to different languages but in the same document. 
Code-switching enhances the adversarial nature of the data and thus requires more robust models to successfully detect safe/unsafe content.", + "bbox": [ + 169, + 272, + 823, + 329 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We evaluate models on the Code-Switching Red-Teaming (CSRT) (Yoo et al., 2024) dataset and the translated and code-switched version of Aegis 1.0 (Ghosh et al., 2024) as provided by Yang et al. (2024b). Since CSRT also evaluates LLMs' tendency to generate unsafe content, we use the same automatic annotation pipeline as described in Appendix D.", + "bbox": [ + 169, + 335, + 826, + 393 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In all settings, PG models outperform baselines, showing that our moderation models are more robust (Table 5). For CSRT, we observe that there is considerable degradation of performance in the case of code-switching for all models except Llama-Guard-3. For Aegis 1.0, there is a performance drop from English to the translated version. The performance increases for the code-switched version but is lower than on English data.", + "bbox": [ + 169, + 398, + 823, + 470 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/7c06a04f5905654a8188566a61f60456844d509c5f416f57e789d1529a3ba559.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
TypeModelCSRT English (LG)CSRT English (Aegis)CSRT Code-switch (LG)CSRT Code-switch (Aegis)Aegis English*Aegis Translated*Aegis Code-switch*Avg
Open -WeightAegis-Defensive90.9190.6181.3881.5383.8975.1580.3583.40
MD Judge91.2190.9150.0050.0082.9842.5474.0668.81
Llama Guard 277.5276.8665.8864.7960.8251.6959.1665.25
Llama Guard 379.6679.4279.8379.1667.3962.1566.8673.50
Duo Guard89.1852.8289.2652.2883.3759.1073.4971.36
Closed -SourcePerspective API53.7953.2332.5231.7531.1526.1127.2636.54
OpenAI Omni82.8382.9774.2474.0373.3063.8268.1474.19
Google Mod.83.1483.6682.1981.9474.5473.6072.8978.85
OursPG Qwen2.594.1093.7888.5587.8887.8583.0085.1388.61
PG Ministrial95.1995.2290.0289.3586.9681.1883.8188.82
PG Smol94.3993.7284.1383.8684.7172.8980.3284.86
", + "bbox": [ + 173, + 482, + 823, + 686 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 5: F1 scores comparison on English only, translated, and code-switched data; metrics for the best performing model are in bold, whereas those for the second-best performing model are underlined. * represent results averaged across 3 annotations, LG=Llama Guard Takeaway: All models suffer performance degradation for code-switched data, with PG models outperforming baselines.", + "bbox": [ + 169, + 696, + 823, + 768 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.5 How is performance affected by removing low-quality translated data?", + "text_level": 1, + "bbox": [ + 169, + 792, + 740, + 809 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Data quality plays an important role in the training of any machine learning model. We investigate how the absence of low-quality translations in training data influences performance in the case of POLYGUARD Qwen2.5 and Ministral. Due to time and budget constraints, we use GPT-4o annotations as a proxy for human-evaluated translation quality and distill them for cost-effective annotations (details in Appendix F).", + "bbox": [ + 169, + 818, + 826, + 891 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Empirical evaluations show that the elimination of low-quality translations does not necessarily improve model performance (Figure 9, Appendix F) since contrastive trends", + "bbox": [ + 169, + 895, + 823, + 926 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 171, + 32, + 517, + 47 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/d5b00f408637d7cf518f8a12ec5653f7af824060843b2097368f54d9d0e1a04b.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelAverageStd Dev
POLYGONQwen2.587.018.27
POLYGONMinistral84.0412.25
POLYGONSmol65.2525.02
", + "bbox": [ + 323, + 99, + 674, + 172 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Table 6: Recall scores for POLYGUARD models on human-written samples from the Aya RedTeam benchmark. Takeaway: POLYGUARD models generalize on data from different distributions despite being trained only on machine-translated data.", + "bbox": [ + 169, + 181, + 826, + 226 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "are observed for Qwen2.5 and Ministral. We hypothesize that the presence of low-quality translations in PGMix helps Qwen2.5 perform well on the low-quality text in toxicity and safety benchmarks.", + "bbox": [ + 169, + 258, + 823, + 301 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.6 Does POLYGUARD superficially align with artifacts of machine-translated text only?", + "text_level": 1, + "bbox": [ + 169, + 327, + 825, + 345 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "The use of machine-translated data for training POLYGUARD models can lead to the hypothesis that models learn only to rely on machine-translation artifacts in the data to evaluate safety. To investigate if this behavior exists, we evaluate our models on the Aya Red-teaming dataset (Ahmadian et al., 2024), which consists of manually created 7,419 samples in 8 languages, thus lacking the noise patterns present in machine-translated texts. We do not observe empirical evidence supporting the hypothesis (Table 6).", + "bbox": [ + 169, + 357, + 464, + 555 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5 POLYGUARD Runtime Comparison", + "text_level": 1, + "bbox": [ + 171, + 584, + 370, + 619 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We have trained and open-sourced models of three sizes (0.5B, 7B, and 8B). While all three can run on consumer hardware, the 0.5B can benefit on-device or latency-critical applications. 
We also test the latency of our models on 7419 samples from the Aya RedTeaming dataset (Ahmadian et al., 2024) on an NVIDIA L40S GPU using VLLM (Table 7), and find that our 0.5B model has a high throughput. However, our 7B and", + "bbox": [ + 169, + 638, + 462, + 792 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "8B models run comparatively slower than their similarly sized Llama Guard counterparts. Compared to Llama Guard, POLYGUARD models solve more tasks, and thus require longer prompts and generate more output tokens, which leads to increased runtime.", + "bbox": [ + 169, + 791, + 826, + 835 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/7aa7eb18842bdeb0a90f34bf39ebc26933d8984b21590840eab966e47578f07f.jpg", + "image_caption": [ + "Figure 5: Performance difference on removing ITW data Takeaway: Removal of ITW data generally degrades model performance by reducing training data diversity." + ], + "image_footnote": [], + "bbox": [ + 480, + 357, + 821, + 710 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 171, + 31, + 517, + 47 + ], + "page_idx": 8 + }, + { + "type": "page_footnote", + "text": "11We also use the Aya Red-teaming dataset to assess the need for multilingual safety classifiers by translating it to English via TowerInstruct-7B-v0.2 and then evaluating an English-only classifier (Llama-Guard-3-8B). PG Qwen2.5 significantly outperforms this setup - achieving a higher recall in French (0.916 vs. 0.706), Russian (0.926 vs. 0.669) and Spanish (0.952 vs. 
0.681) - highlighting the limitations of relying solely on translation for multilingual safety moderation.", + "bbox": [ + 169, + 858, + 823, + 925 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 946, + 504, + 959 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/de2a11dde32c0308543546988d5a694ba9a7d72072127e3414ddf4f43b0c5fca.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelSizeInput TokensOutput TokensTime (m:ss)
Llama Guard 28B1575800275362:13
Llama Guard 38B1657409363642:14
POLYGUARD Smol0.5B18702062393370:31
POLYGUARD Qwen2.57B18702062430433:27
POLYGUARD Ministral8B18810522424263:58
", + "bbox": [ + 200, + 101, + 795, + 207 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Table 7: Latency comparison of POLYGUARD models on Aya RedTeaming Takeaway: Smol is highly efficient, whereas Qwen and Ministral are slower than LlamaGuards as POLYGUARD models solve multiple tasks.", + "bbox": [ + 169, + 215, + 823, + 258 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "6 Background & Related Work", + "text_level": 1, + "bbox": [ + 171, + 284, + 464, + 303 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Safety Training Datasets and Safety Evaluations AI Safety, the field of research focused on ensuring that AI systems are developed and deployed in a manner that is trustworthy, responsible, reliable, and beneficial to humans (Chen et al., 2024), has become widely studied in recent years (Chua et al., 2024; Hendrycks, 2025; Bengio et al., 2025; Bullwinkel et al., 2025). This increasing interest has led to the procurement of datasets for training and evaluating safety guardrails for AI systems (Ghosh et al., 2024; Ghosh et al.; Han et al., 2024; Lin et al., 2023; Ji et al., 2023; Li et al., 2024). Similarly, safety benchmarks have been curated to evaluate the safety risks exhibited by AI systems (Xie et al., 2024; Mazeika et al., 2024; Jain et al., 2024; Kumar et al., 2024; Yoo et al., 2024; Zeng et al., 2024b; Zhang et al., 2024a;b; Tan et al., 2024). However, almost all of the aforementioned datasets are limited to the English or Chinese language only or focus on specific subsets of AI safety Jain et al. (2024).", + "bbox": [ + 169, + 315, + 826, + 470 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Safety Moderation Tools Current open-weight safety systems rely on either proprietary datasets (Inan et al., 2023; Zeng et al., 2024a) or previously mentioned English-centric datasets (Ghosh et al., 2024; Li et al., 2024; Han et al., 2024). 
Although these LLM-based classifiers possess inherent multilingual capabilities, their performance is constrained by their predominantly English training data (Han et al., 2024; Ghosh et al.). Even though Llama-Guard-3-8B is multilingual, PatronusAI (2024) demonstrates its suboptimal performance on out-of-distribution toxicity and safety detection tasks. Additionally, existing models face structural limitations; most are restricted to binary safety classification (with WildGuardMix (Han et al., 2024) being a notable exception), or ignore the structure of user-LLM interactions by processing only a single text at a time (Aegis 1.0 Ghosh et al. (2024) and DuoGuard Deng et al. (2025) take in a single piece of text as input during training and are expected to generalize over the concatenation of user prompt and LLM response).", + "bbox": [ + 169, + 484, + 828, + 654 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "7 Conclusion", + "text_level": 1, + "bbox": [ + 171, + 672, + 308, + 688 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "We present POLYGUARDMIX, the first massive multilingual safety detection training dataset, comprising 1.91M user-LLM interactions across 17 languages. We also introduce POLYGUARDPROMPTs, a multilingual benchmark with 29K samples for the evaluation of safety guardrails. 
Further, we train robust multilingual LLM-based safety detectors, POLYGUARD, which perform better or comparably to existing open-weight and proprietary safety detectors across numerous evaluation benchmarks belonging to different data distributions.", + "bbox": [ + 169, + 704, + 828, + 789 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 173, + 32, + 517, + 47 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Ethics Statement", + "text_level": 1, + "bbox": [ + 171, + 101, + 328, + 118 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Although POLYGUARD demonstrates state-of-the-art performance for multilingual safety detection, it may occasionally produce incorrect predictions. Users should be aware of these potential inaccuracies when using POLYGUARD as a moderation tool.", + "bbox": [ + 169, + 133, + 826, + 176 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "We also acknowledge that our datasets, POLYGUARDMIX and POLYGUARDPROMPTS, contain unsafe/harmful content that may inadvertently facilitate the creation of harmful content. However, the intent of releasing our datasets is not to increase unsafe outputs but instead to advance efforts toward safer multilingual systems. 
As a safety measure, we plan to implement restrictions on the use of our datasets.", + "bbox": [ + 169, + 181, + 828, + 253 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Acknowledgments", + "text_level": 1, + "bbox": [ + 171, + 273, + 346, + 291 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "This research was supported in part by Google Jigsaw, DSO National Laboratories and Microsoft's Accelerating Foundation Models Research program.", + "bbox": [ + 169, + 303, + 823, + 335 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Data We express our gratitude to the authors whose meticulous efforts were instrumental in the creation of our data set: WildGuardMix (Han et al., 2024), LMSys-Chat-1M (Zheng et al., 2023) and WildChat (Zhao et al., 2024).", + "bbox": [ + 169, + 339, + 825, + 383 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Software and Models We would like to thank the authors of TowerInstruct-7B-v0.2 (Alves et al., 2024) and NLLB-3.3B (Team et al., 2022) which we use for automatic translations, contributors and maintainers of vLLM (Kwon et al., 2023) and LiteLLM $^{12}$ which we leverage to generate continuations from models, and OpenRLHF (Hu et al., 2024) which we use to fine-tune models. Finally, we thank Jigsaw for providing access to Perspective API.", + "bbox": [ + 169, + 396, + 826, + 470 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 171, + 488, + 274, + 503 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Roee Aharoni, Melvin Johnson, and Orhan First. Massively multilingual neural machine translation. In Jill Burstein, Christy Doran, and Thamar Solorio (eds.), Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pp. 3874-3884, Minneapolis, Minnesota, June 2019. 
Association for Computational Linguistics. doi: 10.18653/v1/N19-1388. URL https://aclanthology.org/N19-1388/.", + "Arash Ahmadian, Beyza Ermis, Seraphina Goldfarb-Tarrant, Julia Kreutzer, Marzieh Fadaee, Sara Hooker, et al. The multilingual alignment prism: Aligning global and local preferences to reduce harm. arXiv preprint arXiv:2406.18682, 2024.", + "Duarte M Alves, José Pombal, Nuno M Guerreiro, Pedro H Martins, João Alves, Amin Farajian, Ben Peters, Ricardo Rei, Patrick Fernandes, Sweta Agrawal, et al. Tower: An open multilingual large language model for translation-related tasks. arXiv preprint arXiv:2402.17733, 2024.", + "Yoshua Bengio, Soren Mindermann, Daniel Privitera, Tamay Besiroglu, Rishi Bommasani, Stephen Casper, Yejin Choi, Philip Fox, Ben Garfinkel, Danielle Goldfarb, et al. International ai safety report. arXiv preprint arXiv:2501.17805, 2025.", + "Blake Bullwinkel, Amanda Minnich, Shiven Chawla, Gary Lopez, Martin Pouliot, Whitney Maxwell, Joris de Gruyter, Katherine Pratt, Saphir Qi, Nina Chikanov, et al. Lessons from red teaming 100 generative ai products. arXiv preprint arXiv:2501.07238, 2025.", + "Chen Chen, Ziyao Liu, Weifeng Jiang, Si Qi Goh, and KwoK-Yan Lam. Trustworthy, responsible, and safe ai: A comprehensive architectural framework for ai safety with challenges and mitigations. arXiv preprint arXiv:2408.12935, 2024.", + "Jaymari Chua, Yun Li, Shiyi Yang, Chen Wang, and Lina Yao. Ai safety in generative ai large language models: A survey. arXiv preprint arXiv:2407.18369, 2024." 
+ ], + "bbox": [ + 173, + 511, + 828, + 902 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 171, + 32, + 517, + 47 + ], + "page_idx": 10 + }, + { + "type": "page_footnote", + "text": "$^{12}$ https://github.com/BerriAI/litellm", + "bbox": [ + 186, + 907, + 452, + 924 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer, and Veselin Stoyanov. Unsupervised cross-lingual representation learning at scale. In Dan Jurafsky, Joyce Chai, Natalie Schluter, and Joel Tetreault (eds.), Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pp. 8440-8451, Online, July 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.acl-main.747. URL https://aclanthology.org/2020.acl-main.747/.", + "Marta Costa-jussà, Eric Smith, Christophe Ropers, Daniel Licht, Jean Maillard, Javier Ferrando, and Carlos Escolano. Toxicity in multilingual machine translation at scale. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Findings of the Association for Computational Linguistics: EMNLP 2023, pp. 9570-9586, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023-findings-emnlp.642. URL https://aclanthology.org/2023-findings-emnlp.642.", + "Aida Mostafazadeh Davani, Sagar Gubbi Venkatesh, Sunipa Dev, Shachi Dave, and Vinodkumar Prabhakaran. Genil: A multilingual dataset on generalizing language. In First Conference on Language Modeling, 2024. 
URL https://openreview.net/forum?id=kLH4ccaL21.", + "Adrian de Wynter, Ishaan Watts, Nektar Ege Altintoprak, Tua Wongsangaroonsri, Minghui Zhang, Noura Farra, Lena Baur, Samantha Claudet, Pavel Gajdusek, Can Gören, et al. Rtplx: Can llms evaluate toxicity in multilingual scenarios? arXiv preprint arXiv:2404.14397, 2024.", + "Yihe Deng, Yu Yang, Junkai Zhang, Wei Wang, and Bo Li. Duoguard: A two-player rl-driven framework for multilingual llm guardrails. arXiv preprint arXiv:2502.05163, 2025.", + "Yue Deng, Wenxuan Zhang, Sinno Jialin Pan, and Lidong Bing. Multilingual jailbreak challenges in large language models. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=vESNKdEMGp.", + "Jesse Dodge, Maarten Sap, Ana Marasovic, William Agnew, Gabriel Ilharco, Dirk Groeneweld, Margaret Mitchell, and Matt Gardner. Documenting large webtext corpora: A case study on the colossal clean crawled corpus. arXiv preprint arXiv:2104.08758, 2021.", + "Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024.", + "Shaona Ghosh, Prasoon Varshney, Makes Narsimhan Sreedhar, Aishwarya Padmakumar, Traian Rebedea, Jibin Rajan Varghese, and Christopher Parisien. Aegis2.0: A diverse ai safety dataset and risks taxonomy for alignment of llm guardrails. In Neurips Safe Generative AI Workshop 2024.", + "Shaona Ghosh, Prasoon Varshney, Erick Galinkin, and Christopher Parisien. Aegis: Online adaptive ai content safety moderation with ensemble of llm experts. arXiv preprint arXiv:2404.05993, 2024.", + "Daniil Gurgurov, Tanja Bäumel, and Tatiana Anikina. Multilingual large language models and curse of multilinguality. 2024. doi: 10.48550/ARXIV.2406.10602. 
URL https://arxiv.org/abs/2406.10602.", + "Seungju Han, Kavel Rao, Allyson Ettinger, Liwei Jiang, Bill Yuchen Lin, Nathan Lambert, Yejin Choi, and Nouha Dziri. Wildguard: Open one-stop moderation tools for safety risks, jailbreaks, and refusals of llms. arXiv preprint arXiv:2406.18495, 2024.", + "Dan Hendrycks. Introduction to ai safety, ethics, and society, 2025.", + "Edward J Hu, yelong shen, Phillip Wallis, Zeyuan Allen-Zhu, Yanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. LoRA: Low-rank adaptation of large language models. In International Conference on Learning Representations, 2022. URL https://openreview.net/forum?id=nZeVKeeFYf9." + ], + "bbox": [ + 171, + 102, + 826, + 922 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 171, + 32, + 517, + 47 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Jian Hu, Xibin Wu, Zilin Zhu, Xianyu, Weixun Wang, Dehao Zhang, and Yu Cao. Openrlhf: An easy-to-use, scalable and high-performance rlhf framework. arXiv preprint arXiv:2405.11143, 2024.", + "Hakan Inan, Kartikeya Upasani, Jianfeng Chi, Rashi Rungta, Krithika Iyer, Yuning Mao, Michael Tontchev, Qing Hu, Brian Fuller, Davide Testuggine, et al. Llama guard: Llm-based input-output safeguard for human-ai conversations. arXiv preprint arXiv:2312.06674, 2023.", + "Devansh Jain, Priyanshu Kumar, Samuel Gehman, Xuhui Zhou, Thomas Hartvigsen, and Maarten Sap. Polyglotoxicityprompts: Multilingual evaluation of neural toxic degeneration in large language models. arXiv preprint arXiv:2405.09373, 2024.", + "Jiaming Ji, Mickel Liu, Juntao Dai, Xuehai Pan, Chi Zhang, Ce Bian, Boyuan Chen, Ruiyang Sun, Yizhou Wang, and Yaodong Yang. Beavertails: Towards improved safety alignment of LLM via a human-preference dataset. 
In Thirty-seventh Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2023. URL https://openreview.net/forum?id=g0QovXbFw3.", + "Tom Kocmi, Rachel Bawden, Ondrej Bojar, Anton Dvorkovich, Christian Federmann, Mark Fishel, Thamme Gowda, Yvette Graham, Roman Grundkiewicz, Barry Haddow, Rebecca Knowles, Philipp Koehn, Christof Monz, Makoto Morishita, Masaaki Nagata, Toshiaki Nakazawa, Michal Novák, Martin Popel, and Maja Popovic. Findings of the 2022 conference on machine translation (wmt22). In Conference on Machine Translation, 2022. URL https://apisemantic scholar.org/CorpusID:256461033.", + "Suhas Kotha, Jacob M. Springer, and Aditi Raghunathan. Understanding catastrophic forgetting in language models via implicit inference. ArXiv, abs/2309.10105, 2023. URL https://api_semanticscholar.org/CorpusID:262054014.", + "Priyanshu Kumar, Elaine Lau, Saranya Vijayakumar, Tu Trinh, Scale Red Team, Elaine Chang, Vaughn Robinson, Sean Hendryx, Shuyan Zhou, Matt Fredrikson, et al. Refusal-trained llms are easily jailbroken as browser agents. arXiv preprint arXiv:2410.13886, 2024.", + "Woosuk Kwon, Zhuohan Li, Siyuan Zhuang, Ying Sheng, Lianmin Zheng, Cody Hao Yu, Joseph E. Gonzalez, Hao Zhang, and Ion Stoica. Efficient memory management for large language model serving with pagedattention. In Proceedings of the ACM SIGOPS 29th Symposium on Operating Systems Principles, 2023.", + "Lijun Li, Bowen Dong, Ruohui Wang, Xuhao Hu, Wangmeng Zuo, Dahua Lin, Yu Qiao, and Jing Shao. SALAD-bench: A hierarchical and comprehensive safety benchmark for large language models. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar (eds.), Findings of the Association for Computational Linguistics: ACL 2024, pp. 3923-3954, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024-findings-acl.235. 
URL https://aclanthology.org/2024-findings-acl.235.", + "Zi Lin, Zihan Wang, Yongqi Tong, Yangkun Wang, Yuxin Guo, Yujia Wang, and Jingbo Shang. ToxicChat: Unveiling hidden challenges of toxicity detection in real-world user-AI conversation. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Findings of the Association for Computational Linguistics: EMNLP 2023, pp. 4694-4702, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.findings-emnlp.311. URL https://aclanthology.org/2023.findings-emnlp.311.", + "AI @ Meta Llama Team. The llama 3 herd of models, 2024. URL https://arxiv.org/abs/2407.21783.", + "Todor Markov, Chong Zhang, Sandhini Agarwal, Florentine Eloundou Nekoul, Theodore Lee, Steven Adler, Angela Jiang, and Lilian Weng. A holistic approach to undesired content detection in the real world. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 37, pp. 15009-15018, 2023." + ], + "bbox": [ + 171, + 102, + 828, + 924 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 171, + 32, + 517, + 47 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Mantas Mazeika, Long Phan, Xuwang Yin, Andy Zou, Zifan Wang, Norman Mu, Elham Sakaehie, Nathaniel Li, Steven Basart, Bo Li, David Forsyth, and Dan Hendrycks. Harmbench: a standardized evaluation framework for automated red teaming and robust refusal. In Proceedings of the 41st International Conference on Machine Learning, ICML'24. JMLR.org, 2024.", + "Mistral. Un ministral, des ministraux. URL https://mistral.ai/en/news/ministraux.", + "Andrew Ng. Agentic translation. URL https://github.com/andrewyng/translation-agent.", + "PatronusAI. Llama guard is off duty. https://www.patronus.ai/blog/llama-guard-is-off-duty, 2024.", + "Qwen. 
Qwen2.5: A party of foundation models! URL https://qwenlm.github.io/blog/qwen2.5/.", + "Maarten Sap, Saadia Gabriel, Lianhui Qin, Dan Jurafsky, Noah A. Smith, and Yejin Choi. Social bias frames: Reasoning about social and power implications of language. In Dan Jurafsky, Joyce Chai, Natalie Schluter, and Joel Tetreault (eds.), Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pp. 5477-5490, Online, July 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.acl-main.486. URL https://aclanthology.org/2020.acl-main.486/.", + "Khetam Al Sharou and Lucia Specia. A taxonomy and study of critical errors in machine translation. In Helena Moniz, Lieve Macken, Andrew Rufener, Loici Barrault, Marta R. Costa-jussa, Christophe Declercq, Maarit Koponen, Ellie Kemp, Spyridon Pilos, Mikel L. Forcada, Carolina Scarton, Joachim Van den Bogaert, Joke Daems, Arda Tezcan, Bram Vanroy, and Margot Fonteyne (eds.), Proceedings of the 23rd Annual Conference of the European Association for Machine Translation, pp. 171-180, Ghent, Belgium, June 2022. European Association for Machine Translation. URL https://aclanthology.org/2022.eamt-1.20.", + "Lucia Specia, Frédéric Blain, Marina Fomicheva, Chrysoula Zerva, Zhenhao Li, Vishrav Chaudhary, and André F. T. Martins. Findings of the WMT 2021 shared task on quality estimation. In Loic Barrault, Ondrej Bojar, Fethi Bougares, Rajen Chatterjee, Marta R. Costa-jussa, Christian Federmann, Mark Fishel, Alexander Fraser, Markus Freitag, Yvette Graham, Roman Grundkiewicz, Paco Guzman, Barry Haddow, Matthias Huck, Antonio Jimeno Yepes, Philipp Koehn, Tom Kocmi, Andre Martins, Makoto Morishita, and Christof Monz (eds.), Proceedings of the Sixth Conference on Machine Translation, pp. 684-725, Online, November 2021. Association for Computational Linguistics. 
URL https://aclanthology.org/2021.wmt-1.71.", + "Yingshui Tan, Boren Zheng, Baihui Zheng, Kerui Cao, Huiyun Jing, Jincheng Wei, Jiaheng Liu, Yancheng He, Wenbo Su, Xiangyong Zhu, et al. Chinese safetyqa: A safety short-form factuality benchmark for large language models. arXiv preprint arXiv:2412.15265, 2024.", + "Llama Team. Meta llama guard 2. https://github.com/meta-llama/PurpleLlama/blob/main/Llama-Guard2/MODEL_CARD.md, 2024.", + "NLLB Team, Marta R. Costa-jussà, James Cross, Onur Çelebi, Maha Elbayad, Kenneth Heafield, Kevin Heffernan, Elahe Kalbassi, Janice Lam, Daniel Licht, Jean Maillard, Anna Sun, Skyler Wang, Guillaume Wenzek, Al Youngblood, Bapi Akula, Loic Barrault, Gabriel Mejia Gonzalez, Prangthip Hansanti, John Hoffman, Semarley Jarrett, Kaushik Ram Sadagopan, Dirk Rowe, Shannon Spruit, Chau Tran, Pierre Andrews, Necip Fazil Ayan, Shruti Bhosale, Sergey Edunov, Angela Fan, Cynthia Gao, Vedanuj Goswami, Francisco Guzmán, Philipp Koehn, Alexandre Mourachko, Christophe Ropers, Safiyyah Saleem, Holger Schwenk, and Jeff Wang. No language left behind: Scaling human-centered machine translation, 2022.", + "Wenxuan Wang, Zhaopeng Tu, Chang Chen, Youliang Yuan, Jen-tse Huang, Wenxiang Jiao, and Michael R Lyu. All languages matter: On the multilingual safety of large language models. arXiv preprint arXiv:2310.00905, 2023." + ], + "bbox": [ + 174, + 102, + 826, + 924 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 171, + 32, + 517, + 47 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Tinghao Xie, Xiangyu Qi, Yi Zeng, Yangsibo Huang, Udari Madhushani Sehwag, Kaixuan Huang, Luxi He, Boyi Wei, Dacheng Li, Ying Sheng, et al. Sorry-bench: Systematically evaluating large language model safety refusal behaviors. 
arXiv preprint arXiv:2406.14598, 2024.", + "Yuemei Xu, Ling Hu, Jiayi Zhao, Zihan Qiu, Yuqi Ye, and Hanwen Gu. A survey on multilingual large language models: Corpora, alignment, and bias. ArXiv, abs/2404.00929, 2024. URL https://api_semanticscholar.org/CorpusID:268819377.", + "An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024a.", + "Yahan Yang, Soham Dan, Dan Roth, and Insup Lee. Benchmarking llm guardrails in handling multilingual toxicity. arXiv preprint arXiv:2410.22153, 2024b.", + "Haneul Yoo, Yongjin Yang, and Hwaran Lee. Code-switching red-teaming: Lm evaluation for safety and multilingual understanding. arXiv preprint arXiv:2406.15481, 2024.", + "Wenjun Zeng, Yuchi Liu, Ryan Mullins, Ludovic Peran, Joe Fernandez, Hamza Harkous, Karthik Narasimhan, Drew Proud, Piyush Kumar, Bhaktipriya Radharapu, et al. Shieldgemma: Generative ai content moderation based on gemma. arXiv preprint arXiv:2407.21772, 2024a.", + "Yi Zeng, Yu Yang, Andy Zhou, Jeffrey Ziwei Tan, Yuheng Tu, Yifan Mai, Kevin Klyman, Minzhou Pan, Ruoxi Jia, Dawn Song, et al. Air-bench 2024: A safety benchmark based on risk categories from regulations and policies. arXiv preprint arXiv:2407.17436, 2024b.", + "Hengxiang Zhang, Hongfu Gao, Qiang Hu, Guanhua Chen, Lili Yang, Bingyi Jing, Hongxin Wei, Bing Wang, Haifeng Bai, and Lei Yang. Chinesesa: A chinese benchmark for evaluating safety in large language models. arXiv preprint arXiv:2410.18491, 2024a.", + "Wenjing Zhang, Xuejiao Lei, Zhaoxiang Liu, Meijuan An, Bikun Yang, KaiKai Zhao, Kai Wang, and Shiguo Lian. Chisafetybench: A chinese hierarchical safety benchmark for large language models. arXiv preprint arXiv:2406.10311, 2024b.", + "Wenting Zhao, Xiang Ren, Jack Hessel, Claire Cardie, Yejin Choi, and Yuntian Deng. Wildchat: 1m chatgpt interaction logs in the wild. 
arXiv preprint arXiv:2405.01470, 2024.", + "Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Tianle Li, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zhuohan Li, Zi Lin, Eric P Xing, et al. Lmsys-chat-1m: A large-scale real-world llm conversation dataset. arXiv preprint arXiv:2309.11998, 2023.", + "Xuhui Zhou, Hao Zhu, Akhila Yerukola, Thomas Davidson, Jena D. Hwang, Swabha Swayamdipta, and Maarten Sap. COBRA frames: Contextual reasoning about effects and harms of offensive statements. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Findings of the Association for Computational Linguistics: ACL 2023, pp. 6294–6315, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023-findings-acl.392. URL https://aclanthology.org/2023-findings-acl.392/." + ], + "bbox": [ + 171, + 102, + 826, + 742 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 171, + 32, + 517, + 47 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "A Translation Pipeline and Quality Evaluation", + "text_level": 1, + "bbox": [ + 171, + 101, + 606, + 119 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Our training data translation pipeline segments the source text into chunks using *blingfire*13, translates each chunk independently, and reconstructs the target text by concatenating the translated segments. 
We utilize this chunking approach to process long texts while respecting the context window constraints of translation models.", + "bbox": [ + 169, + 140, + 826, + 199 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "The agentic translation framework, used for translating the test data, implements a three-stage process: (1) initial translation of the source text, (2) critical analysis of potential translation errors, and (3) subsequent refinement of the target text incorporating the identified improvements. This iterative approach helps maintain semantic accuracy and cultural nuances crucial for safety evaluation across languages.", + "bbox": [ + 169, + 204, + 826, + 276 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Automatic evaluation of translation quality is done by prompting GPT-4o as a judge. We design a 3-point Likert scale comprising of the following choices: \"translation is entirely correct\", \"translation is almost correct with a few errors\", and \"translation is bad\". We run evaluations for only the translated test set of PGMix to assess the overall translation quality of a model. The evaluation prompts are shown below:", + "bbox": [ + 169, + 281, + 826, + 353 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "System Prompt", + "text_level": 1, + "bbox": [ + 196, + 369, + 313, + 386 + ], + "page_idx": 15 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "You are a linguistic expert. 
Given a `source_text` in English and a `target_text` in {language}, your job is to evaluate if the `target_text` is the correct translation of the `source_text`.", + "guess_lang": "txt", + "bbox": [ + 196, + 397, + 496, + 483 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "User Prompt", + "text_level": 1, + "bbox": [ + 197, + 508, + 295, + 525 + ], + "page_idx": 15 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "`source_text`: {src} \n`target_text`: {tgt}", + "guess_lang": "verilog", + "bbox": [ + 197, + 537, + 366, + 566 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Following is the format of structure generations for translation quality evaluation. We prompt the judge to first reason about the source and target sentences before outputting the verdict.", + "bbox": [ + 169, + 589, + 826, + 631 + ], + "page_idx": 15 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "class QualityEnum(str, Enum): incorrect $=$ 'translation is bad' almost.correct $=$ 'translation is almost correct with a few errors' entirely.correct $=$ 'translation is entirely correct' \nclass Result(BaseModel): reason: str $=$ Field(description $\\equiv$ \"brief pointers on why the translation is correct or wrong\") verdict: QualityEnum $=$ Field(description $\\equiv$ \"the verdict about the translation quality\")", + "guess_lang": "python", + "bbox": [ + 173, + 647, + 792, + 801 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Tables 8 and 9 show the verdicts of the GPT-4o judge for the human prompt and model response respectively. We observe that TowerInstruct generates higher-quality translations when compared to NLLB for the languages it supports. 
However, in the case of Hindi (which is not supported by Tower), the quality is poor.", + "bbox": [ + 169, + 825, + 826, + 885 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 171, + 32, + 517, + 47 + ], + "page_idx": 15 + }, + { + "type": "page_footnote", + "text": "13https://pypi.org/project/blingfire", + "bbox": [ + 184, + 907, + 452, + 924 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/d98006c005474071287e277efe122673eb39e0c87bc1b6a8e8b92e50ace4d38d.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
LanguageModelEntirely CorrectPartially CorrectBadInvalid Judge Verdict
ZHNLLB636688401-
Tower12023601621
ESNLLB1437218682
Tower1374303471
FRNLLB1406245722
Tower1499177472
DENLLB12753481011
Tower1335323661
KONLLB10754901582
Tower12783361092
ITNLLB1384260801
Tower144222756-
PTNLLB146320260-
Tower153214251-
NLNLLB1339306773
Tower139926462-
RUNLLB1379240106-
Tower1406233851
HINLLB147018669-
Tower72516912
", + "bbox": [ + 173, + 133, + 848, + 431 + ], + "page_idx": 16 + }, + { + "type": "table", + "img_path": "images/8b8df921c7157f55eb7b46dc8c5f5a2e939ccc7fd21b159503d855dc7d135b5f.jpg", + "table_caption": [ + "Table 8: GPT-4o Judge verdicts for human prompts translation. Takeaway: TowerInstruct generated more accurate translations than NLLB for supported languages." + ], + "table_footnote": [], + "table_body": "
LanguageModelEntirely CorrectPartially CorrectBadInvalid Judge Verdict
ZHNLLB15311474241
Tower822729174-
ESNLLB858426441-
Tower583105785-
FRNLLB883741101-
Tower481116381-
DENLLB811790124-
Tower625102872-
KONLLB72192084-
Tower7079161011
ITNLLB809566350-
Tower5291103921
PTNLLB8846232162
Tower4891131105-
NLNLLB8287721241
Tower5931049821
RUNLLB906663156-
Tower512112390-
HINLLB128641128
Tower611718
", + "bbox": [ + 173, + 549, + 846, + 845 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Table 9: GPT-40 Judge verdicts for model generation translation. Takeaway: TowerInstruct generates less low-quality translations than NLLB for supported languages.", + "bbox": [ + 169, + 854, + 823, + 886 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 173, + 32, + 517, + 47 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "B Human Validation", + "text_level": 1, + "bbox": [ + 171, + 101, + 375, + 118 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "We use Prolific14 to collect annotations. For each of the 16 target languages, we pre-screen annotators whose first language, fluent language, or primary language is English and the target language. Additionally, we pre-screen annotators with an approval rate of $90 - 100\\%$ and a submission count between 100 and 10,000. Annotators were compensated at the rate of $\\$12/$ hr. Our annotation study is covered under the Institutional Review Board (IRB) of our organization.", + "bbox": [ + 169, + 138, + 826, + 223 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "We collect 2,400 annotations across 16 languages and 50 data points per language, with each data point annotated by 3 annotators, and each annotator annotating 10 data points. We recruited 191 unique annotators15 via Prolific, spanning across 24 countries. They self-identified as 110 male and 81 female. In terms of ethnicity, they described themselves as 84 White, 79 Black, 12 Mixed, 10 Asian, and 5 Other.", + "bbox": [ + 169, + 228, + 826, + 303 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Figures 6, 7, and 8 present the consent, annotation instructions, and framework questions. 
The human validation results for each language are shown in Table 10. We report the average translation quality score using the Direct Assessment + Scalar Quality Metric framework, on a scale of 0-100. Inter-annotator agreement is computed using Krippendorff's $\\alpha$ for both source and target language safety labels.", + "bbox": [ + 169, + 306, + 826, + 380 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/05148bca329f0174cfa52c7f72c148d82cffe6df95917784369a9249a0046357.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
LanguageAvg. Trans- lation ScoreSource Safety αTarget Safety αSource - Target α
Arabic80.990.410.400.96
Chinese78.550.430.420.91
Czech81.110.470.480.96
Dutch77.150.370.330.96
French82.120.480.471.0
German82.670.440.450.92
Hindi84.720.340.370.96
Italian83.210.380.370.91
Japanese76.390.390.360.76
Korean81.550.430.460.96
Polish80.330.390.400.96
Portuguese81.090.460.450.92
Russian80.440.420.430.96
Spanish84.110.450.441.0
Swedish79.660.360.351.0
Thai78.890.410.420.92
", + "bbox": [ + 184, + 397, + 813, + 652 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Table 10: Human validation results for translation quality and safety labels. Translation scores are on a 0-100 scale, using the DA+SQM framework. Inter-annotator agreement (Krippendorff's $\\alpha$ ) for source and target safety labels is reported, along with agreement between majority-voted source and target labels.", + "bbox": [ + 169, + 661, + 823, + 719 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "C POLYGUARD Training Details", + "text_level": 1, + "bbox": [ + 169, + 760, + 478, + 780 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "We train our models using OPENRLHF $^{16}$ on 8 NVIDIA A6000 GPUs. We set LoRA rank to 8 and alpha to 16. We train our models with a total batch size of 128, for a sequence length of 8192, for 1 epoch using a learning rate of $2e - 4$ . The system and user prompts (adapted from WildGuard and Llama Guard v3) used by PG are as follows:", + "bbox": [ + 169, + 800, + 826, + 858 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 171, + 32, + 517, + 47 + ], + "page_idx": 17 + }, + { + "type": "page_footnote", + "text": "$^{14}$ https://www.prolific.com/", + "bbox": [ + 184, + 878, + 387, + 895 + ], + "page_idx": 17 + }, + { + "type": "page_footnote", + "text": "15some participated in multiple languages, resulting in a lower unique count", + "bbox": [ + 186, + 895, + 687, + 910 + ], + "page_idx": 17 + }, + { + "type": "page_footnote", + "text": "16https://github.com/OpenRLHF/OpenRLHF/tree/main", + "bbox": [ + 186, + 909, + 540, + 922 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Instruction and Consent", + "bbox": [ + 452, + 154, + 545, + 162 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Background:", + 
"text_level": 1, + "bbox": [ + 179, + 176, + 223, + 181 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Hi! We are a team of researchers who are passionate about making AI systems safer to use across multiple languages. We are trying to test out a few systems for automatically translating sentences across languages to see how well they preserve the original meaning and if they translate unsafe content as well. We appreciate your help in making AI systems safer across multiple cultures and languages!", + "bbox": [ + 179, + 184, + 813, + 199 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Task Overview:", + "text_level": 1, + "bbox": [ + 179, + 207, + 232, + 213 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "In this study, we are interested in making AI systems safer to use in multiple languages. You will be presented with a sentence in English and its machine-generated translation in another language. Your task is to judge the quality of the translation, and if either the original sentence/translation contains any unsafe language.", + "bbox": [ + 179, + 215, + 813, + 229 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Data collection & sharing:", + "text_level": 1, + "bbox": [ + 179, + 238, + 266, + 244 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "We will not ask you for your name, and the data collected in this study will be made identifiable to the best of our extent. We will securely store the data on our servers and only share with qualified researchers (e.g., who want to further the study of hate speech detection). 
If you later decide that you do not want your responses included in this study, please email so we can exclude your work from the analysis.", + "bbox": [ + 179, + 247, + 784, + 262 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Contact:", + "text_level": 1, + "bbox": [ + 179, + 270, + 210, + 276 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "If you have any questions about this study, you should feel free to ask them by contacting us (via the MTurk interface or via email at: info@mturk.com). If you have questions later, desire additional information, or wish to withdraw your participation please contact e-mail in accordance with the contact information listed above. If you have questions pertaining to your rights as a research participant; or to report concerns to this study, you should contact", + "bbox": [ + 179, + 277, + 808, + 303 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Please do not use AI systems when answering the tasks.", + "bbox": [ + 179, + 316, + 366, + 323 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "We will sample some of the responses and manually review them, if we find evidence to show that annotators are not giving proper efforts to this task, we will exclude them from future tasks.", + "bbox": [ + 179, + 325, + 759, + 332 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "I have read and understood the information above, are 18 years or older, and agree to participate in our study.", + "text_level": 1, + "bbox": [ + 187, + 340, + 571, + 351 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Yes", + "bbox": [ + 187, + 358, + 210, + 363 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "No", + "bbox": [ + 189, + 368, + 210, + 375 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Move forward", + "bbox": [ + 475, + 422, + 521, + 430 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Figure 6: Annotator Consent. 
Blacked out for anonymity.", + "bbox": [ + 290, + 446, + 704, + 463 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Annotation Task", + "text_level": 1, + "bbox": [ + 187, + 566, + 269, + 575 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "You will be presented with 2 sentences: an source original sentence in English, and its automatic translation in Thai. Your task is to assess the quality of the translation and if the source/translated sentences contain unsafe language.", + "bbox": [ + 187, + 587, + 805, + 604 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Quality of Translation", + "text_level": 1, + "bbox": [ + 187, + 609, + 303, + 619 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Evaluate the quality of the translation in Thai. Consider both:", + "bbox": [ + 187, + 623, + 380, + 630 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Meaning: Does the translation convey and reflect the same meaning as the source sentence in English?", + "- Fluency: Is the translation easy to read and understand?" + ], + "bbox": [ + 197, + 636, + 529, + 652 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Rate the translation quality on a continuous scale from 0 to 6.", + "bbox": [ + 187, + 657, + 382, + 665 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Rating Scale:", + "text_level": 1, + "bbox": [ + 187, + 670, + 251, + 679 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "0 - No Meaning Preserved: The translation is confusing, makes no sense, or is completely different from the English sentence. Most of the meaning and information is lost.", + "- 2 - Some Meaning Preserved: The translation captures only part of the meaning but misses key details. 
It may be hard to understand because of major mistakes like completely incorrect or irrelevant words or very poor grammar.", + "- 4 - Most Meaning Preserved: The translation retains most of the meaning of the English sentence -- the main idea is clear with just a few small mistakes, like minor grammar issues, awkward wording, or slight changes in meaning.", + "- 6 - Perfect Translation: The translation is clear, natural, and grammatically correct -- same meaning like the source sentence, with no mistakes." + ], + "bbox": [ + 197, + 681, + 803, + 729 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Key Tip:", + "text_level": 1, + "bbox": [ + 189, + 734, + 232, + 743 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "- You can move the slider to any point on the scale, not just whole numbers (e.g., 5.3).", + "bbox": [ + 197, + 746, + 470, + 753 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Identifying Unsafe Language", + "text_level": 1, + "bbox": [ + 187, + 760, + 344, + 771 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Please check if the original sentence in English or the translation in Thai contain any unsafe or harmful language.", + "bbox": [ + 187, + 773, + 540, + 781 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "What Counts as Unsafe or Harmful Language?", + "text_level": 1, + "bbox": [ + 187, + 786, + 380, + 795 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "A sentence is considered unsafe or harmful if it supports, encourages, or facilitates harmful, illegal, or unethical actions. 
This includes BOTH:", + "bbox": [ + 187, + 800, + 642, + 806 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Content that is itself harmful: (e.g., promoting violence, supporting crimes, providing inappropriate sexual content, expressing hate speech towards Jews and Muslim communities, revealing confidential information or health records, encouraging misuse of technology).", + "Sentences that request harmful content: (e.g., making unauthorized requests to edit medical records and prescription, asking for instructions on illegal activities, ways to create or use dangerous weapons, promoting self-harm, ways to exploit children, how to spread false election information, even if the language seems neutral)." + ], + "bbox": [ + 197, + 813, + 808, + 844 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Figure 7: Annotator instructions", + "bbox": [ + 380, + 859, + 616, + 875 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 173, + 32, + 516, + 47 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/8c1ec5dc3465a48fd1833e0f2501ef6f8b1d046c8f4ff78cbdf04101d4148ee2.jpg", + "image_caption": [ + "Figure 8: Annotator Framework with Example in Thai" + ], + "image_footnote": [], + "bbox": [ + 173, + 101, + 823, + 373 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/23369c7221938247f171a9f65b265388e08de765f08367b5bf2d80d1302b1041.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 173, + 463, + 823, + 921 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 173, + 32, + 516, + 47 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 946, + 509, + 959 + ], + "page_idx": 19 + }, + { + 
"type": "text", + "text": "User Prompt", + "text_level": 1, + "bbox": [ + 197, + 104, + 295, + 119 + ], + "page_idx": 20 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "Human user: {prompt} AI assistant: {response}", + "guess_lang": "txt", + "bbox": [ + 197, + 133, + 305, + 202 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "We train our models $^{17}$ to generate formatted responses containing the user input prompt safety label, policies violated, model response refusal, model response safety label, and policies violated (shown below). Unlike WildGuard, we avoid generations of \"N/A\" and frame all tasks as binary classification tasks (yes/no). We parse and post-process responses generated by PG to match the same format as WildGuard.", + "bbox": [ + 169, + 224, + 826, + 297 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "POLYGON Generation", + "text_level": 1, + "bbox": [ + 199, + 309, + 380, + 323 + ], + "page_idx": 20 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "Harmful request: yes/no \nRequest safety violations: None / List of violated categories \nResponse refusal: yes/no \nHarmful response: yes/no \nResponse safety violations: None / List of violated categories", + "guess_lang": "txt", + "bbox": [ + 197, + 334, + 511, + 431 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "D Out-of-Distribution Benchmarking Dataset Annotations", + "text_level": 1, + "bbox": [ + 171, + 465, + 714, + 484 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "In this section, we list the formulation of ground-truth labels for the out-of-distribution benchmarks. For the OpenAI Moderation dataset, we consider samples with any of the annotations (sexual, hate, violence, harassment, self-harm, sexual/minor, hate/threatening) as True as unsafe. For RTP-LX, we consider samples with a Toxicity score above 1 unsafe. 
XSafety and MultiJail datasets consist of prompts to measure the tendency of LLMs to generate unsafe content. Thus, a few prompts in these datasets are innocuous but could trigger an LLM to generate harmful content. Therefore, we use GPT-4o to determine the safety label of the samples. Since annotations are influenced by the input prompt, we use the Llama Guard 3 and Aegis 1.0 prompts to create two sets of ground-truth labels.", + "bbox": [ + 169, + 500, + 826, + 628 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "E Patronus AI Safety Study", + "text_level": 1, + "bbox": [ + 171, + 650, + 434, + 669 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Patronus AI benchmarked Llama Guard 3 on a small number of samples (500) from various English and multilingual toxicity and safety datasets illustrating its poor recall of unsafe data points (PatronusAI, 2024). Their evaluation benchmark consists of the following datasets available on HuggingfaceHub:", + "bbox": [ + 169, + 684, + 826, + 743 + ], + "page_idx": 20 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. nicholasKluge/toxic-text-en", + "2. Arsive/toxicity_classification_jigsaw", + "3. ukr-detect/ukr-toxicity-dataset", + "4. tmu-nlp/thai_toxicity tweet", + "5. nicholasKluge/toxic-text-pt", + "6. lmsys/toxic-chat", + "7. PKU-Alignment/BeaverTails", + "8. 
OpenSafetyLab/Salad-Data" + ], + "bbox": [ + 210, + 757, + 495, + 883 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 173, + 32, + 517, + 47 + ], + "page_idx": 20 + }, + { + "type": "page_footnote", + "text": "$^{17}$ Qwen2.5-7B-Instruct and Ministrial-8B-Instruct-2410 are available for modifications under the Apache 2.0 license and Mistral Research License respectively.", + "bbox": [ + 169, + 896, + 823, + 925 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "F Influence of low-quality translated data", + "text_level": 1, + "bbox": [ + 169, + 101, + 562, + 119 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "We distill GPT-4o's knowledge of translation quality into a Qwen2.5 7B classifier to filter out samples with low translation quality. We use the same schema as our translation quality study (Appendix A) to filter for samples where the human prompt and model response are accurately translated. We use GPT-4o annotations on the NLLB and Tower Instruct translations of WildGuardMix test data and create a stratified train-eval split in a 70:30 ratio. Similar to PG, we train a Qwen2.5-based SFT classifier to predict the quality of the translated source document, using the following prompts:", + "bbox": [ + 169, + 133, + 826, + 233 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "System Prompt", + "text_level": 1, + "bbox": [ + 197, + 243, + 313, + 258 + ], + "page_idx": 21 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "You are a linguistic expert. 
Given a `source_text` in English and a `target_text` in {language}, your job is to evaluate if the `target_text` is the correct translation of the `source_text`", + "guess_lang": "txt", + "bbox": [ + 197, + 272, + 503, + 354 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "User Prompt", + "text_level": 1, + "bbox": [ + 197, + 377, + 295, + 393 + ], + "page_idx": 21 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "`source_text': {source}\n`target_text': {target}", + "guess_lang": "python", + "bbox": [ + 197, + 405, + 390, + 434 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "The model is trained on 60,346 training samples and achieves an overall accuracy of $82\\%$ on the validation set of 25,863 samples. A complete evaluation report is shown below in Table 11.", + "bbox": [ + 169, + 450, + 823, + 493 + ], + "page_idx": 21 + }, + { + "type": "table", + "img_path": "images/c8c5b104c08252fb9df60205d9c2185c775a21889177f63de0f3b67525d663a0.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
LabelPrecisionRecallF1Support
Bad7073712066
Partially Correct7663697704
Entirely Correct87939016093
", + "bbox": [ + 290, + 503, + 707, + 565 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Table 11: Translation Quality Classifier performance metrics", + "bbox": [ + 279, + 573, + 718, + 590 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Removal of low-quality training data does not necessarily improve model performance. Intuitively, the presence of poor-quality translated data should harm model performance. However, PG models show contrastive trends when low-quality samples are removed from the training data mix (Figure 9). The performance of Qwen2.5 degrades for most datasets, whereas the performance of Ministrial improves. The performance degradation in the case of Qwen2.5 can be attributed to noisy samples in safety and toxicity evaluation datasets. Harmful text is considered to belong to low-quality data; web-crawls implement word blocklist filters to enhance data quality (Dodge et al., 2021). Thus, we hypothesize that the noise induced by poor translations bridges the gap between training and evaluation data, thus leading to performance improvement.", + "bbox": [ + 169, + 618, + 826, + 758 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "G Limitations", + "text_level": 1, + "bbox": [ + 171, + 779, + 316, + 795 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "We describe several limitations of our work. First, we automatically translate English data to other languages using LLMs. However, automatic translations can introduce deviations in toxicity and safety risks due to incorrect translations and hallucinations (Specia et al., 2021; Sharou & Specia, 2022; Team et al., 2022; Costa-jussa et al., 2023). Second, we employ existing safety classifiers and LLMs to automatically annotate safety violation categories, which may introduce biases from these models into our labeled safety categories. We utilize a panel of models to mitigate such biases, but acknowledge the inherent limitations of this methodology. 
Third, we follow Llama-Guard-3-8B (Dubey et al., 2024) and define", + "bbox": [ + 169, + 811, + 826, + 925 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 171, + 32, + 517, + 47 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 946, + 509, + 959 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/e8d291c17038f0e90e663c6a3add7960cc90021d61231e732d1932d341e6d655.jpg", + "image_caption": [ + "Figure 9: Performance difference on removing low-quality data. Takeaway: Removal of low-quality training data does not necessarily improve model performance." + ], + "image_footnote": [], + "bbox": [ + 323, + 106, + 674, + 439 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "our safety violation taxonomy according to the MLCommons Safety Taxonomy18. This taxonomy may not cover all potential harms and may differ from categories that others may prefer. Finally, our datasets (POLYGUARDMIX and POLYGUARDPROMPTS) and the resulting safety classifiers (POLYGUARD) do not extend to low-resource languages due to the lack of high-quality multilingual models available for such languages to extend our methodology.", + "bbox": [ + 169, + 508, + 826, + 583 + ], + "page_idx": 22 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 173, + 32, + 517, + 47 + ], + "page_idx": 22 + }, + { + "type": "page_footnote", + "text": "18https://mlcommons.org/2024/04/mlc-aisafety-v0-5-poc/", + "bbox": [ + 186, + 907, + 588, + 924 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 22 + } +] \ No newline at end of file diff --git a/data/2025/2504_04xxx/2504.04377/3cb1148d-2625-44e8-a64a-225c0e814138_model.json b/data/2025/2504_04xxx/2504.04377/3cb1148d-2625-44e8-a64a-225c0e814138_model.json new file mode 100644 index 
0000000000000000000000000000000000000000..dbf27bc7cd2d5f1ad5c9f62dc8013b5dd20d8fff --- /dev/null +++ b/data/2025/2504_04xxx/2504.04377/3cb1148d-2625-44e8-a64a-225c0e814138_model.json @@ -0,0 +1,3788 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.032, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "image", + "bbox": [ + 0.225, + 0.088, + 0.315, + 0.159 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.356, + 0.1, + 0.728, + 0.144 + ], + "angle": 0, + "content": "PolyGuard: A Multilingual Safety Moderation Tool for 17 Languages" + }, + { + "type": "text", + "bbox": [ + 0.276, + 0.18, + 0.721, + 0.199 + ], + "angle": 0, + "content": "Priyanshu Kumar\\(^{\\text{♥1}}\\) Devansh Jain\\(^{\\text{♥1}}\\) Akhila Yerukola\\(^{\\text{♥}}\\)" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.216, + 0.8, + 0.236 + ], + "angle": 0, + "content": "Liwei Jiang\\* Himanshu Beniwal△ \\(\\diamond\\) Thomas Hartvigsen Maarten Sap" + }, + { + "type": "text", + "bbox": [ + 0.207, + 0.246, + 0.793, + 0.282 + ], + "angle": 0, + "content": "Carnegie Mellon University \\(\\spadesuit\\)University of Washington \\(\\triangle\\)IIT Gandhinagar \\(\\diamond\\)University of Virginia \\(\\clubsuit\\)Allen Institute for AI" + }, + { + "type": "title", + "bbox": [ + 0.459, + 0.305, + 0.542, + 0.321 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.336, + 0.77, + 0.602 + ], + "angle": 0, + "content": "Truly multilingual safety moderation efforts for Large Language Models (LLMs) have been hindered by a narrow focus on a small set of languages (e.g., English, Chinese) as well as a limited scope of safety definition, resulting in significant gaps in moderation capabilities. 
To bridge these gaps, we release POLYGUARD, a new state-of-the-art multilingual safety model for safeguarding LLM generations, and the corresponding training and evaluation datasets. POLYGUARD is trained on POLYGUARDMIX, the largest multilingual safety training corpus to date containing 1.91M samples across 17 languages (e.g., Chinese, Czech, English, Hindi). We also introduce POLYGUARDPROMPTS, a high quality multilingual benchmark with 29K samples for the evaluation of safety guardrails. Created by combining naturally occurring multilingual human-LLM interactions and human-verified machine translations of an English-only safety dataset (WildGuardMix; Han et al., 2024), our datasets contain prompt-output pairs with labels of prompt harmfulness, response harmfulness, and response refusal. Through extensive evaluations across multiple safety and toxicity benchmarks, we demonstrate that POLYGUARD outperforms existing state-of-the-art open-weight and commercial safety classifiers by \\(5.5\\%\\). Our contributions advance efforts toward safer multilingual LLMs for all global users." + }, + { + "type": "image", + "bbox": [ + 0.381, + 0.61, + 0.408, + 0.624 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.437, + 0.61, + 0.598, + 0.625 + ], + "angle": 0, + "content": "PolyGuard Collection" + }, + { + "type": "image", + "bbox": [ + 0.382, + 0.625, + 0.406, + 0.641 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.419, + 0.626, + 0.616, + 0.642 + ], + "angle": 0, + "content": "kpriyanshu256/polyguard" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.665, + 0.32, + 0.681 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.697, + 0.827, + 0.797 + ], + "angle": 0, + "content": "Recent advances in large language models (LLMs), especially their multilingual capabilities, have led to their deployment to a diverse global user base that spans multiple languages. 
Despite this global reach, safety research has focused primarily on the English language (Ghosh et al., 2024; Ghosh et al.; Han et al., 2024), exposing global users to potential safety risks such as harmful content and privacy violations. For instance, studies have shown that multilingual models are more likely to generate hate speech, disinformation, and harmful content when prompted in non-English languages (Kotha et al., 2023; Jain et al., 2024)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.801, + 0.828, + 0.902 + ], + "angle": 0, + "content": "The development of robust multilingual safety systems presents several key challenges. First, building multilingual systems is inherently difficult due to challenges such as the lack of comprehensive datasets, the \"curse of multilinguality\" (Aharoni et al., 2019; Conneau et al., 2020; Gurgurov et al., 2024), and the inherent biases embedded in training corpora (Xu et al., 2024). Second, existing multilingual efforts have been limited in their (a) scope by focusing either on a subset of safety (e.g., PerspectiveAPI covering only toxicity, ignoring other unsafe content) and/or on a narrow set of language coverage (e.g., Llama-Guard-1" + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.91, + 0.574, + 0.925 + ], + "angle": 0, + "content": "1Equal contributors, correspondence at msap2@cs.cmu.edu." 
+ }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.282, + 0.061, + 0.716 + ], + "angle": 270, + "content": "arXiv:2504.04377v2 [cs.CL] 7 Aug 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.203 + ], + "angle": 0, + "content": "only covering English safety, ignoring toxicity and DuoGuard being evaluated on 4 very high resource languages only; Inan et al., 2023; Jain et al., 2024; Deng et al., 2025), or (b) performance (e.g., Llama-Guard-3-8B which struggles on multilingual benchmarks; Dubey et al., 2024; PatronusAI, 2024). Finally, most existing safety frameworks address only the single task of classifying safety and often rely on simplistic binary settings (safe/unsafe), which fail to capture the complex spectrum of harmful content that can manifest differently across cultural and linguistic contexts (Sap et al., 2020; Zhou et al., 2023)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.208, + 0.828, + 0.253 + ], + "angle": 0, + "content": "To address these gaps, we release POLYGUARD (PG), a new state-of-the-art fine-tuned language model for multi-task safety detection and moderation. As Figure 1 highlights, PG can classify a multilingual input of a user prompt and an LLM response on five dimensions." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.258, + 0.487, + 0.468 + ], + "angle": 0, + "content": "We also release the first large-scale multilingual corpora for safety detection training, POLYGUARDMIX (PGMix) and safety guardrail evaluation, POLYGUARD-PROMPTS (PGPrompts), comprising 1.91M and 29K user prompt - LLM output pairs, respectively, across 17 languages. 
Our datasets contain binary and categorical labels for prompt harmfulness and response harmfulness, and response refusal (if the LLM response complies with the user request). We use a systematic labeling process that leverages a panel of English safety classifiers and LLM-as-a-judge (proprietary and open-weight LLM) to obtain these labels." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.473, + 0.487, + 0.627 + ], + "angle": 0, + "content": "We create our PGMix dataset by combining both: (a) naturally occurring multilingual human-LLM interactions from In-The-Wild (ITW) datasets, and (b) machine translations of WildGuardMix (Han et al., 2024), to ensure data diversity which is crucial for improved model performance (Davani et al., 2024). We utilize multiple LLMs to ensure high-quality translations of WildGuardMix, verified by a high average translation score of 81.15 as rated by human annotators." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.632, + 0.487, + 0.688 + ], + "angle": 0, + "content": "We then use PGMix to train our state-of-the-art POLYGUARD (PG) models, including a fast lightweight model for application use cases. Our empirical results show that PG" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.688, + 0.825, + 0.745 + ], + "angle": 0, + "content": "outperforms existing open-source and proprietary safety detectors on English-only as well as multilingual safety and toxicity benchmarks. Furthermore, we find that the incorporation of ITW samples in the training datasets makes PG models more robust to various data distributions, including code-switched and translated data." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.752, + 0.825, + 0.782 + ], + "angle": 0, + "content": "Overall, our datasets and models2 serve as a starting point for building powerful and robust multilingual safety detectors and advance efforts towards multilingual safe AI systems." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.805, + 0.275, + 0.82 + ], + "angle": 0, + "content": "2 Dataset" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.838, + 0.827, + 0.896 + ], + "angle": 0, + "content": "To address the critical need for multilingual safety detection, we introduce POLYGUARDMIX (PGMix) and POLYGUARDPROMPTS (PGPrompts), multilingual datasets specifically designed to train and evaluate robust safety classifiers. PGMix comprises 1.91M human-LLM interactions, including 1.47M machine-translated samples from WildGuardMix and 0.43M" + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.909, + 0.611, + 0.924 + ], + "angle": 0, + "content": "2Model, code, and data are available under the ODC-BY license." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.135 + ], + "angle": 0, + "content": "naturally-occurring samples from In-The-Wild datasets, whereas PGPrompts comprises 29K translated samples." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.139, + 0.827, + 0.211 + ], + "angle": 0, + "content": "Our datasets cover 17 languages: Arabic (ar), Chinese (zh), Czech (cs), Dutch (nl), English (en), French (fr), German (de), Hindi (hi), Thai (th), Italian (it), Japanese (ja), Korean (ko), Polish (pl), Portuguese (pt), Russian (ru), Spanish (es), and Swedish (sv). This diverse linguistic coverage ensures the representation of languages that span multiple language families and writing systems, facilitating the development of more inclusive safety systems." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.216, + 0.825, + 0.246 + ], + "angle": 0, + "content": "Figure 2 shows an overview of our data curation pipeline, whose components we describe in detail in the following subsections." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.263, + 0.312, + 0.277 + ], + "angle": 0, + "content": "2.1 Data Sources" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.288, + 0.396, + 0.499 + ], + "angle": 0, + "content": "Both PGMix and PGPrompts are constructed from the train and test samples of WildGuardMix (Han et al., 2024), a dataset of synthetic and natural single-turn human-LLM interactions with fine-grained annotations, respectively. In addition, PGMix also contains samples from In-TheWild datasets: LMSys-Chat1M (Zheng et al., 2023) and WildChat (Zhao et al., 2024). We posit that the combination of natural and synthetic sam" + }, + { + "type": "image", + "bbox": [ + 0.419, + 0.295, + 0.808, + 0.409 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.402, + 0.42, + 0.828, + 0.492 + ], + "angle": 0, + "content": "Figure 2: Data curation process for PGMix (safety detection training) and PGPrompts (safety guardrail evaluation). Takeaway: PGMix combines machine-translated and naturally occurring data to improve data diversity and, consequently, model performance." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.499, + 0.825, + 0.527 + ], + "angle": 0, + "content": "plies improves the diversity of data and consequently improves model performance (Davani et al., 2024)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.545, + 0.434, + 0.56 + ], + "angle": 0, + "content": "2.2 Machine Translation Pipeline" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.571, + 0.827, + 0.712 + ], + "angle": 0, + "content": "We develop an efficient machine translation pipeline using open-weight models to minimize computational costs when translating WildGuardMix for our training data. 
We employ two state-of-the-art translation models: TowerInstruct-7B-v0.2 (Alves et al., 2024) and NLLB-3.3B (Team et al., 2022). For optimal performance, we utilize TowerInstruct-7B-v0.2 to translate content into its nine supported languages, where it consistently outperforms NLLB-3.3B. We then leverage NLLB-3.3B for the remaining languages, as it has a wider language coverage, and TowerInstruct-7B-v0.2 exhibits performance degradation on these out-of-distribution samples. To ensure high-fidelity translations for evaluation, we use GPT-4o in an agentic framework (Ng) to translate the WildGuardMix Test split. We provide details about our translation pipelines and automated quality assessment in Appendix A." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.729, + 0.35, + 0.743 + ], + "angle": 0, + "content": "2.3 Safety Annotation" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.754, + 0.828, + 0.872 + ], + "angle": 0, + "content": "We leverage a panel of English safety classifiers and LLM-as-judges to annotate safety violation categories automatically. We follow Llama-Guard-3-8B (Dubey et al., 2024) and define our safety violation taxonomy according to the MLCommons Safety Taxonomy4. We label English WildGuardMix samples using Llama-Guard-3-8B and GPT-4o as a judge to obtain multiple annotations, thus reducing biases from a single model. Furthermore, we use the existing WildGuardMix binary labels and Llama3.1-405B-Instruct (Dubey et al., 2024) as a judge to resolve conflicts and obtain the final annotations5. Finally, since PGMix and PGPrompts contain translations of WildGuardMix, we propagate safety labels from the" + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.882, + 0.66, + 0.896 + ], + "angle": 0, + "content": "\\(^{3}\\)WildChat-1M is available for modifications under the ODC-BY license." 
+ }, + { + "type": "page_footnote", + "bbox": [ + 0.193, + 0.897, + 0.588, + 0.91 + ], + "angle": 0, + "content": "4https://mlcommons.org/2024/04/mlc-aisafety-v0-5-poc/" + }, + { + "type": "page_footnote", + "bbox": [ + 0.193, + 0.91, + 0.647, + 0.925 + ], + "angle": 0, + "content": "5We use the same prompt as Llama-Guard-3-8B for all LLM-as-judges." + }, + { + "type": "list", + "bbox": [ + 0.191, + 0.882, + 0.66, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "image", + "bbox": [ + 0.175, + 0.104, + 0.482, + 0.314 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.51, + 0.104, + 0.818, + 0.314 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.326, + 0.828, + 0.399 + ], + "angle": 0, + "content": "Figure 3: Safety category distribution for user prompts and model responses for WildGuard-Mix train samples. The model name (GPT-4o and Llama-Guard-3-8B) represents the LLM used as a judge to automatically annotate the safety category. These annotations are then ensembled together, using Llama3.1-405B-Instruct to break ties (Combined). Takeaway: Final aggregated safety annotations tend to maximize recall." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.427, + 0.825, + 0.471 + ], + "angle": 0, + "content": "annotated English samples to other languages. ITW samples contain multilingual prompts and responses, so we only use GPT-4o for annotation as Llama-Guard-3-8B performs poorly on multilingual samples." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.476, + 0.827, + 0.604 + ], + "angle": 0, + "content": "Figure 3 illustrates the distribution of safety categories across both user prompt harmfulness and model response harmfulness, comparing annotations from Llama-Guard-3-8B, GPT-4o, and our final consolidated labels. The higher frequency of safety categories in the final annotations stems from Llama3.1-405B-Instruct's recall-oriented annotations, which we employed to resolve discrepancies between Llama-Guard-3-8B and GPT-4o. Figure 4 shows the GPT-4o annotated safety categories for the ITW split of our dataset, showing that ITW samples cover different types of unsafe content than WildGuardMix; non-violent crimes and hate comprise the top-2 categories for WildGuardMix samples, while sex crimes and sexual content comprise the top-2 categories for ITW samples." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.623, + 0.353, + 0.637 + ], + "angle": 0, + "content": "2.4 Human Validation" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.65, + 0.825, + 0.737 + ], + "angle": 0, + "content": "To validate the translation quality and the generated safety labels, we conduct human validation across all 16 languages. Due to budget constraints, we randomly sample 50 data points per language, ensuring a balanced distribution across PGMix (train) and PGPrompts (test), harmful and harmless labels, as well as user prompts and model responses. We recruit workers from Prolific, filtering them based on their proficiency in each language. Each data point is evaluated by three annotators." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.743, + 0.651, + 0.759 + ], + "angle": 0, + "content": "For each data point, we ask the annotators to assess the following." + }, + { + "type": "text", + "bbox": [ + 0.211, + 0.771, + 0.825, + 0.814 + ], + "angle": 0, + "content": "1. 
Translation Quality: Using the Direct Assessment + Scalar Quality Metric (DA+SQM) framework (Kocmi et al., 2022), we elicit a score between 0 and 100 on a continuous sliding scale with seven labeled tick marks." + }, + { + "type": "text", + "bbox": [ + 0.209, + 0.821, + 0.826, + 0.85 + ], + "angle": 0, + "content": "2. Safety Label for the Source Sentence: Annotators assign a label of either 'harmful' or 'safe' for the source sentence in English." + }, + { + "type": "text", + "bbox": [ + 0.209, + 0.857, + 0.827, + 0.887 + ], + "angle": 0, + "content": "3. Safety Label for the Translated Sentence: Annotators assign a 'harmful' or 'safe' label for the corresponding translation." + }, + { + "type": "list", + "bbox": [ + 0.209, + 0.771, + 0.827, + 0.887 + ], + "angle": 0, + "content": null + }, + { + "type": "footer", + "bbox": [ + 0.192, + 0.91, + 0.381, + 0.924 + ], + "angle": 0, + "content": "\\(^{6}\\)https://www.prolific.com" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.032, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.574, + 0.271 + ], + "angle": 0, + "content": "Annotators rated translation quality to be high, with an average score of 81.15 across all 16 languages. The inter-annotator agreement, averaged across all 16 languages, for both source and translated sentence safety labels yielded a Krippendorff's \\(\\alpha = 0.46\\). Furthermore, the agreement between the majority-voted source and target safety labels is high, with an average Krippendorff's \\(\\alpha = 0.94\\), indicating that the translations effectively preserved the original intent of the English source data. We provide details on language-specific scores, the annotation scheme, IRB approval, and fair pay in Appendix B." 
+ }, + { + "type": "image", + "bbox": [ + 0.593, + 0.077, + 0.816, + 0.238 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.578, + 0.249, + 0.828, + 0.278 + ], + "angle": 0, + "content": "Figure 4: Safety category distributions for PGMix ITW samples." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.291, + 0.684, + 0.31 + ], + "angle": 0, + "content": "3 POLYGUARD: A 17-Language Safety Moderation Tool" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.322, + 0.828, + 0.392 + ], + "angle": 0, + "content": "To build POLYGUARD, we fine-tune Qwen2.5-7B-Instruct (Yang et al., 2024a) and Ministral-8B-Instruct-2410, both of which have been shown to have state-of-the-art performance in multilingual knowledge and commonsense, code, and math settings (Qwen; Mistral). We refer to these models as PG Qwen2.5 and PG Ministral In addition, we also fine-tune Qwen2.5-0.5B-Instruct to build PG Smol." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.398, + 0.827, + 0.499 + ], + "angle": 0, + "content": "The models are fine-tuned on PGMix using Low-Rank Adapters (Hu et al., 2022). We follow Han et al. (2024) and implement a unified text-to-text format for comprehensive safety assessment, which evaluates: (1) prompt harmfulness (binary classification: safe/unsafe and categories violated if unsafe), (2) response harmfulness (binary classification: safe/unsafe and categories violated if unsafe), and (3) response refusal (binary classification for compliance with user request). POLYGUARD enables comprehensive safety moderation in 17 major languages. We provide detailed training specifications in Appendix C." 
+ }, + { + "type": "title", + "bbox": [ + 0.171, + 0.516, + 0.476, + 0.533 + ], + "angle": 0, + "content": "4 Results & Research Questions" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.547, + 0.828, + 0.633 + ], + "angle": 0, + "content": "A multilingual system must be robust; that is, it should perform consistently on data belonging to different distributions (sources and languages). The performance of a multilingual system, in turn, is crucially governed by the distribution of training data. Hence, we study the performance of POLYGUARD on POLYGUARDPROMPTS and multiple out-of-distribution evaluation benchmarks, and the influence of ITW samples and low-quality translations on model performance. We perform one run per evaluation due to computational constraints." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.64, + 0.826, + 0.713 + ], + "angle": 0, + "content": "Baselines: We compare POLYGUARD with popular open-source safety detection models of similar size (Yang et al., 2024b), namely Llama-Guard-2 (Team, 2024), Llama-Guard-3-8B (Dubey et al., 2024), Aegis 1.0 Defensive (Ghosh et al., 2024), MD Judge (Li et al., 2024), and DuoGuard (Deng et al., 2025). We also benchmark proprietary models, namely Perspective API7, OpenAI Omni Moderation8, and Google Moderation9." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.728, + 0.78, + 0.745 + ], + "angle": 0, + "content": "4.1 How do PG models perform on the in-distribution PGPrompts benchmark?" 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.754, + 0.828, + 0.839 + ], + "angle": 0, + "content": "We first evaluate PG and open-source baselines on POLYGUARDPROMPTs benchmark, comprising 29K samples, using the following metrics: (1) for binary tasks of prompt harmfulness, response harmfulness, and response refusal, we use F1 score for the positive label (unsafe for harmfulness and yes for response refusal), and (2) for the tasks of prompt violations and response violations, we compare the list of ground truth and predicted categories using Exact Match and Jaccard Similarity." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.845, + 0.828, + 0.875 + ], + "angle": 0, + "content": "PG models based on Qwen2.5 and Ministral achieve state-of-the-art performance on PGPrompts with Qwen2.5 performing marginally better. PG Smol outperforms DuoGuard," + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.882, + 0.403, + 0.896 + ], + "angle": 0, + "content": "7https://perspectiveapi.com/" + }, + { + "type": "page_footnote", + "bbox": [ + 0.193, + 0.897, + 0.66, + 0.91 + ], + "angle": 0, + "content": "8https://platform.openai.com/docs/models/omni-moderation-latest" + }, + { + "type": "page_footnote", + "bbox": [ + 0.193, + 0.91, + 0.66, + 0.924 + ], + "angle": 0, + "content": "9https://cloud.google.com/natural-language/docs/moderating-text" + }, + { + "type": "list", + "bbox": [ + 0.191, + 0.882, + 0.66, + 0.924 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.948, + 0.505, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "table", + "bbox": [ + 0.177, + 0.101, + 0.825, + 0.261 + ], + "angle": 0, + "content": "
ModelHarmful Request F1 ScoreResponse Refusal F1 ScoreHarmful Response F1 ScorePrompt Safety ViolationsResponse Safety Violations
Exact MatchJaccardExact MatchJaccard
Aegis-Defensive66.45------
MD Judge43.54-49.12----
Llama Guard 260.87-63.62----
Llama Guard 367.98-65.7471.9874.5987.2488.37
DuoGuard62.59-37.99----
PG Qwen2.5 7B (Ours)87.1283.5974.0880.8785.4486.6788.79
PG Ministral (Ours)86.0284.4573.7579.9284.3086.8588.78
PG Smol (Ours)83.7681.3666.8277.0281.5184.0585.92
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.27, + 0.828, + 0.3 + ], + "angle": 0, + "content": "Table 1: Evaluation of POLYGUARD models and baselines on POLYGUARDPROMPTS. Take-away: PG models outperform baselines on in-distribution data." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.327, + 0.827, + 0.455 + ], + "angle": 0, + "content": "its similar size counterpart (Table 1). Aegis Defensive supports only a single text as input and is hence evaluated for Harmful Request only. Since the remaining baselines do not explicitly support Harmful Response, we approximate the prediction by executing them on prompt + response. None of the baselines support the Response Refusal task. Out of all baselines, the safety category taxonomy is the same for Llama-Guard-3 and PG. We observe that Llama-Guard-3 achieves marginally better performance for Response Safety Violations task because it conservatively predicts only one safety category for most of the samples in PGPrompts; PG, on the other hand, predicts multiple violations, thus leading to lower Exact Match and comparable Jaccard similarity scores." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.47, + 0.785, + 0.501 + ], + "angle": 0, + "content": "4.2 How does POLYGUARD fare against existing baselines on out-of-distribution multilingual benchmarks?" + }, + { + "type": "table", + "bbox": [ + 0.177, + 0.518, + 0.825, + 0.675 + ], + "angle": 0, + "content": "
TypeModelRTP-LX En.RTP-LX Mul.Mod. En.Mod. Mul.XS En. (LG)XS Mul. (LG)XS En. (Aegis)XS Mul. (Aegis)MJ En. (LG)MJ Mul. (LG)MJ En. (Aegis)MJ Mul. (Aegis)Avg
Open -WeightAegis-Defensive84.2383.2171.1359.2266.5935.4769.4636.7590.9179.5290.6179.3770.54
MD Judge85.2838.6079.8661.4669.0017.2269.5617.7191.2138.4790.9137.9758.10
Llama Guard 239.4734.9975.8372.5553.7022.3250.5722.5677.5262.3876.8661.5654.19
Llama Guard 348.5144.8778.7373.9860.8425.7057.5026.9879.9278.1479.6777.5261.03
Duo Guard91.8350.4670.8549.4461.1626.0364.8327.3189.1841.8489.2641.4458.64
Closed -SourcePerspective API97.0981.9769.4064.1927.646.6433.926.8553.7945.3753.2344.7348.73
OpenAI Omni87.5274.1074.4368.0858.0222.4860.1123.5282.5966.9482.7366.9463.95
Google Mod.90.4483.2159.6453.8950.4441.8455.7144.7983.1480.8583.6681.0067.38
OursPG Qwen2.591.3483.2174.3969.5172.0735.3374.9337.1393.9386.4493.9786.3374.88
PG Ministrial87.2579.5874.9070.5171.3034.9374.0736.6895.7183.1195.3983.0273.87
PG Smol92.371.5669.363.0070.2833.2274.3835.1994.3973.5993.7273.3470.36
" + }, + { + "type": "table_caption", + "bbox": [ + 0.17, + 0.685, + 0.829, + 0.743 + ], + "angle": 0, + "content": "Table 2: F1 scores of safety detectors on Multilingual Guardrail Test Suite; metrics are in bold and underlined for the best second-best performing models respectively. Mod.=Moderation, XS=XSafety, MJ=MultiJail, En.=English, Mul.=Multilingual, LG=Llama Guard. Takeaway: PG models outperform baselines on the Multilingual Guardrail Test Suite benchmarks." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.766, + 0.828, + 0.854 + ], + "angle": 0, + "content": "Multilingual Bench: We first benchmark models on datasets inspired by Yang et al. (2024b). This comprises multilingual toxicity and safety datasets, namely RTP-LX (de Wynter et al., 2024), OpenAI Moderation (Markov et al., 2023),\\(^{10}\\) XSafety (Wang et al., 2023), and MultiJail (Deng et al., 2024). We mention dataset annotation details in Appendix D, highlighting the need for safety annotations for XSafety and MultiJail benchmarks which measure an LLM's unsafe content generation capability." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.856, + 0.829, + 0.887 + ], + "angle": 0, + "content": "Patronus AI Bench: We also evaluate models using the recall score on the benchmarks reported by PatronusAI (2024), consisting of toxic/unsafe samples from English and multi-" + }, + { + "type": "page_footnote", + "bbox": [ + 0.171, + 0.897, + 0.825, + 0.926 + ], + "angle": 0, + "content": "10The OpenAI Moderation dataset comprises only English samples and is extended to a multilingual setting using Google Translate." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.961 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.133 + ], + "angle": 0, + "content": "lingual toxicity and safety datasets. We perform our evaluations on all samples instead of a small subset. Appendix E contains details about the benchmark." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.139, + 0.827, + 0.239 + ], + "angle": 0, + "content": "Results show that our PG models outperform the baselines on most datasets, achieving higher scores for the unsafe class (Table 2). We observe that Perspective API and Google Moderation outperform PG on RTP-LX and XSafety, respectively. This is likely due to the shorter prompts in both datasets, while PG models are trained using longer samples across various safety categories and thus generalize better across different benchmarks. PG models also outperform existing detectors on safety datasets in the Patronus AI benchmark and also achieve the best average performance (Table 3)." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.253, + 0.825, + 0.453 + ], + "angle": 0, + "content": "
TypeModeltoxic-text-enjigsawukr-toxicitythai-toxicity-tweettoxic-text-pttoxic-chatBeaver TailsSalad-DataAvg
Open-WeightAegis-Defensive80.3279.2762.8067.2986.54--91.6477.98
MD Judge68.4573.405.800.8056.8663.5481.4196.6855.87
Llama Guard 223.7320.676.324.8353.5123.1759.2016.1425.95
Llama Guard 340.0327.209.6011.5053.7827.3052.6829.4231.43
Duo Guard93.6593.180.729.2774.2254.1787.5470.7060.43
Closed-SourcePerspective API77.2086.20--93.0015.8923.001.8037.14
OpenAI Omni54.2086.8041.6034.0099.8046.3567.8045.8059.54
Google Mod.95.2098.0086.6041.8097.6069.2777.6027.2074.16
OursPG Qwen2.585.3283.4765.2446.4784.2697.6590.6597.0881.27
PG Ministrial82.6079.1155.5235.7680.5197.3990.5396.8877.29
PG Smol89.5785.7259.1637.2081.8496.1084.6096.4278.83
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.461, + 0.825, + 0.506 + ], + "angle": 0, + "content": "Table 3: Recall scores on unsafe samples from Patronus' benchmarking; metrics for the best performing model are in bold, whereas those for the second-best performing model are underlined. Takeaway: PG models outperform baselines on Patronus AI's benchmarks." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.534, + 0.388, + 0.548 + ], + "angle": 0, + "content": "4.3 Are PG models robust?" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.561, + 0.827, + 0.604 + ], + "angle": 0, + "content": "We study the average performance of the PG models trained using 3 datasets: only translated data, only ITW data, and translated + ITW data. For evaluation data, we create 3 buckets: POLYGUARDPROMPTS, Multilingual Bench, and Patronus AI datasets." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.61, + 0.829, + 0.682 + ], + "angle": 0, + "content": "PG models trained on a combination of translated and ITW data show greater robustness across both in-domain and out-of-distribution evaluation benchmarks, thus underscoring the importance of the presence of ITW samples in the training data mix (Table 4). Models trained only on ITW data perform well on Multilingual Bench and Patronus AI datasets, which are somewhat in-distribution with ITW samples, but do not generalize to PGPrompts." + }, + { + "type": "table", + "bbox": [ + 0.192, + 0.696, + 0.807, + 0.851 + ], + "angle": 0, + "content": "
POLYGUARDTraining DataPGPromptsMultilingual BenchPatronus AI
Qwen2.5Translated84.9574.5679.79
ITW64.6974.6382.26
Translated + ITW83.7974.8881.27
MinistralTranslated84.3273.8677.07
ITW63.1175.3585.76
Translated + ITW83.4473.8777.29
SmolTranslated82.2269.9974.84
ITW59.465.0872.21
Translated + ITW80.0670.3578.82
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.864, + 0.825, + 0.922 + ], + "angle": 0, + "content": "Table 4: Average F1 score on POLYGUARDPROMPTS and Multilingual Bench, and Recall on PatronusAI, when models are trained with different training dataset settings. Underlined values represent in-distribution evaluations. Takeaway: Models trained with translated + ITW samples are robust on different distributions of evaluation data" + }, + { + "type": "page_number", + "bbox": [ + 0.493, + 0.948, + 0.506, + 0.96 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.231 + ], + "angle": 0, + "content": "Furthermore, we investigate in detail the influence of the presence of ITW data in our training data mix for each benchmark dataset (Figure 5). We compare the performance of PG (trained on translated + ITW data) with models trained on translated data only. We observe that the performance of Qwen2.5 degrades for most of the datasets when ITW data are absent from the training mix. The performance differences for Ministrial are more balanced compared to Qwen2.5, that is, both improvement and degradation are observed across the evaluation datasets. The introduction of ITW data benefits the performance of the ToxicChat benchmark (Lin et al., 2023) the most for both models, since ITW data is most aligned with the ToxicChat benchmark." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.247, + 0.785, + 0.264 + ], + "angle": 0, + "content": "4.4 How does performance vary on English vs Translated vs Code-Switched data?" 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.273, + 0.825, + 0.33 + ], + "angle": 0, + "content": "We study the performance variation of models on code-switched data, which consists of tokens belonging to different languages but in the same document. Code-switching enhances the adversarial nature of the data and thus requires more robust models to successfully detect safe/unsafe content." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.336, + 0.827, + 0.395 + ], + "angle": 0, + "content": "We evaluate models on the Code-Switching Red-Teaming (CSRT) (Yoo et al., 2024) dataset and the translated and code-switched version of Aegis 1.0 (Ghosh et al., 2024) as provided by Yang et al. (2024b). Since CSRT also evaluates LLMs' tendency to generate unsafe content, we use the same automatic annotation pipeline as described in Appendix D." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.399, + 0.825, + 0.471 + ], + "angle": 0, + "content": "In all settings, PG models outperform baselines, showing that our moderation models are more robust (Table 5). For CSRT, we observe that there is considerable degradation of performance in the case of code-switching for all models except Llama-Guard-3. For Aegis 1.0, there is a performance drop from English to the translated version. The performance increases for the code-switched version but is lower than on English data." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.483, + 0.825, + 0.688 + ], + "angle": 0, + "content": "
TypeModelCSRT English (LG)CSRT English (Aegis)CSRT Code-switch (LG)CSRT Code-switch (Aegis)Aegis English*Aegis Translated*Aegis Code-switch*Avg
Open -WeightAegis-Defensive90.9190.6181.3881.5383.8975.1580.3583.40
MD Judge91.2190.9150.0050.0082.9842.5474.0668.81
Llama Guard 277.5276.8665.8864.7960.8251.6959.1665.25
Llama Guard 379.6679.4279.8379.1667.3962.1566.8673.50
Duo Guard89.1852.8289.2652.2883.3759.1073.4971.36
Closed -SourcePerspective API53.7953.2332.5231.7531.1526.1127.2636.54
OpenAI Omni82.8382.9774.2474.0373.3063.8268.1474.19
Google Mod.83.1483.6682.1981.9474.5473.6072.8978.85
OursPG Qwen2.594.1093.7888.5587.8887.8583.0085.1388.61
PG Ministrial95.1995.2290.0289.3586.9681.1883.8188.82
PG Smol94.3993.7284.1383.8684.7172.8980.3284.86
" + }, + { + "type": "table_caption", + "bbox": [ + 0.17, + 0.697, + 0.825, + 0.77 + ], + "angle": 0, + "content": "Table 5: F1 scores comparison on English only, translated, and code-switched data; metrics for the best performing model are in bold, whereas those for the second-best performing model are underlined. * represent results averaged across 3 annotations, LG=Llama Guard Takeaway: All models suffer performance degradation for code-switched data, with PG models outperforming baselines." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.793, + 0.741, + 0.81 + ], + "angle": 0, + "content": "4.5 How is performance affected by removing low-quality translated data?" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.819, + 0.827, + 0.892 + ], + "angle": 0, + "content": "Data quality plays an important role in the training of any machine learning model. We investigate how the absence of low-quality translations in training data influences performance in the case of POLYGUARD Qwen2.5 and Ministral. Due to time and budget constraints, we use GPT-4o annotations as a proxy for human-evaluated translation quality and distill them for cost-effective annotations (details in Appendix F)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.927 + ], + "angle": 0, + "content": "Empirical evaluations show that the elimination of low-quality translations does not necessarily improve model performance (Figure 9, Appendix F) since contrastive trends" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.032, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "table", + "bbox": [ + 0.325, + 0.101, + 0.675, + 0.174 + ], + "angle": 0, + "content": "
ModelAverageStd Dev
POLYGONQwen2.587.018.27
POLYGONMinistral84.0412.25
POLYGONSmol65.2525.02
" + }, + { + "type": "table_caption", + "bbox": [ + 0.17, + 0.183, + 0.828, + 0.227 + ], + "angle": 0, + "content": "Table 6: Recall scores for POLYGUARD models on human-written samples from the Aya RedTeam benchmark. Takeaway: POLYGUARD models generalize on data from different distributions despite being trained only on machine-translated data." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.26, + 0.825, + 0.303 + ], + "angle": 0, + "content": "are observed for Qwen2.5 and Ministral. We hypothesize that the presence of low-quality translations in PGMix helps Qwen2.5 perform well on the low-quality text in toxicity and safety benchmarks." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.328, + 0.826, + 0.346 + ], + "angle": 0, + "content": "4.6 Does POLYGUARD superficially align with artifacts of machine-translated text only?" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.358, + 0.465, + 0.556 + ], + "angle": 0, + "content": "The use of machine-translated data for training POLYGUARD models can lead to the hypothesis that models learn only to rely on machine-translation artifacts in the data to evaluate safety. To investigate if this behavior exists, we evaluate our models on the Aya Red-teaming dataset (Ahmadian et al., 2024), which consists of manually created 7,419 samples in 8 languages, thus lacking the noise patterns present in machine-translated texts. We do not observe empirical evidence supporting the hypothesis (Table 6)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.585, + 0.371, + 0.621 + ], + "angle": 0, + "content": "5 POLYGUARD Runtime Comparison" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.639, + 0.464, + 0.793 + ], + "angle": 0, + "content": "We have trained and open-sourced models of three sizes (0.5B, 7B, and 8B). While all three can run on consumer hardware, the 0.5B can benefit on-device or latency-critical applications. 
We also test the latency of our models on 7419 samples from the Aya RedTeaming dataset (Ahmadian et al., 2024) on an NVIDIA L40S GPU using VLLM (Table 7), and find that our 0.5B model has a high throughput. However, our 7B and" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.792, + 0.827, + 0.836 + ], + "angle": 0, + "content": "8B models run comparatively slower than their similarly sized Llama Guard counterparts. Compared to Llama Guard, POLYGUARD models solve more tasks, and thus require longer prompts and generate more output tokens, which leads to increased runtime." + }, + { + "type": "image", + "bbox": [ + 0.482, + 0.358, + 0.822, + 0.712 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.47, + 0.726, + 0.825, + 0.784 + ], + "angle": 0, + "content": "Figure 5: Performance difference on removing ITW data Takeaway: Removal of ITW data generally degrades model performance by reducing training data diversity." + }, + { + "type": "page_footnote", + "bbox": [ + 0.17, + 0.859, + 0.825, + 0.926 + ], + "angle": 0, + "content": "11We also use the Aya Red-teaming dataset to assess the need for multilingual safety classifiers by translating it to English via TowerInstruct-7B-v0.2 and then evaluating an English-only classifier (Llama-Guard-3-8B). PG Qwen2.5 significantly outperforms this setup - achieving a higher recall in French (0.916 vs. 0.706), Russian (0.926 vs. 0.669) and Spanish (0.952 vs. 0.681) - highlighting the limitations of relying solely on translation for multilingual safety moderation." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.948, + 0.505, + 0.96 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "table", + "bbox": [ + 0.202, + 0.102, + 0.797, + 0.208 + ], + "angle": 0, + "content": "
ModelSizeInput TokensOutput TokensTime (m:ss)
Llama Guard 28B1575800275362:13
Llama Guard 38B1657409363642:14
POLYGON Smol0.5B18702062393370:31
POLYGON Qwen2.57B18702062430433:27
POLYGON Ministral8B18810522424263:58
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.217, + 0.825, + 0.26 + ], + "angle": 0, + "content": "Table 7: Latency comparison of POLYGUARD models on Aya RedTeaming Takeaway: Smol is highly efficient, whereas Qwen and Ministral are slower than LlamaGuards as POLYGUARD models solve multiple tasks." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.285, + 0.465, + 0.304 + ], + "angle": 0, + "content": "6 Background & Related Work" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.316, + 0.828, + 0.471 + ], + "angle": 0, + "content": "Safety Training Datasets and Safety Evaluations AI Safety, the field of research focused on ensuring that AI systems are developed and deployed in a manner that is trustworthy, responsible, reliable, and beneficial to humans (Chen et al., 2024), has become widely studied in recent years (Chua et al., 2024; Hendrycks, 2025; Bengio et al., 2025; Bullwinkel et al., 2025). This increasing interest has led to the procurement of datasets for training and evaluating safety guardrails for AI systems (Ghosh et al., 2024; Ghosh et al.; Han et al., 2024; Lin et al., 2023; Ji et al., 2023; Li et al., 2024). Similarly, safety benchmarks have been curated to evaluate the safety risks exhibited by AI systems (Xie et al., 2024; Mazeika et al., 2024; Jain et al., 2024; Kumar et al., 2024; Yoo et al., 2024; Zeng et al., 2024b; Zhang et al., 2024a;b; Tan et al., 2024). However, almost all of the aforementioned datasets are limited to the English or Chinese language only or focus on specific subsets of AI safety Jain et al. (2024)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.485, + 0.829, + 0.655 + ], + "angle": 0, + "content": "Safety Moderation Tools Current open-weight safety systems rely on either proprietary datasets (Inan et al., 2023; Zeng et al., 2024a) or previously mentioned English-centric datasets (Ghosh et al., 2024; Li et al., 2024; Han et al., 2024). 
Although these LLM-based classifiers possess inherent multilingual capabilities, their performance is constrained by their predominantly English training data (Han et al., 2024; Ghosh et al.). Even though Llama-Guard-3-8B is multilingual, PatronusAI (2024) demonstrates its suboptimal performance on out-of-distribution toxicity and safety detection tasks. Additionally, existing models face structural limitations; most are restricted to binary safety classification (with WildGuardMix (Han et al., 2024) being a notable exception), or ignore the structure of user-LLM interactions by processing only a single text at a time (Aegis 1.0 Ghosh et al. (2024) and DuoGuard Deng et al. (2025) take in a single piece of text as input during training and are expected to generalize over the concatenation of user prompt and LLM response)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.673, + 0.31, + 0.689 + ], + "angle": 0, + "content": "7 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.705, + 0.829, + 0.79 + ], + "angle": 0, + "content": "We present POLYGUARDMIX, the first massive multilingual safety detection training dataset, comprising 1.91M user-LLM interactions across 17 languages. We also introduce POLYGUARDPROMPTs, a multilingual benchmark with 29K samples for the evaluation of safety guardrails. Further, we train robust multilingual LLM-based safety detectors, POLYGUARD, which perform better or comparably to existing open-weight and proprietary safety detectors across numerous evaluation benchmarks belonging to different data distributions." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.102, + 0.33, + 0.119 + ], + "angle": 0, + "content": "Ethics Statement" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.134, + 0.827, + 0.178 + ], + "angle": 0, + "content": "Although POLYGUARD demonstrates state-of-the-art performance for multilingual safety detection, it may occasionally produce incorrect predictions. Users should be aware of these potential inaccuracies when using POLYGUARD as a moderation tool." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.183, + 0.829, + 0.254 + ], + "angle": 0, + "content": "We also acknowledge that our datasets, POLYGUARDMIX and POLYGUARDPROMPTS, contain unsafe/harmful content that may inadvertently facilitate the creation of harmful content. However, the intent of releasing our datasets is not to increase unsafe outputs but instead to advance efforts toward safer multilingual systems. As a safety measure, we plan to implement restrictions on the use of our datasets." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.274, + 0.347, + 0.292 + ], + "angle": 0, + "content": "Acknowledgments" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.304, + 0.825, + 0.336 + ], + "angle": 0, + "content": "This research was supported in part by Google Jigsaw, DSO National Laboratories and Microsoft's Accelerating Foundation Models Research program." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.34, + 0.826, + 0.385 + ], + "angle": 0, + "content": "Data We express our gratitude to the authors whose meticulous efforts were instrumental in the creation of our data set: WildGuardMix (Han et al., 2024), LMSys-Chat-1M (Zheng et al., 2023) and WildChat (Zhao et al., 2024)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.397, + 0.828, + 0.472 + ], + "angle": 0, + "content": "Software and Models We would like to thank the authors of TowerInstruct-7B-v0.2 (Alves et al., 2024) and NLLB-3.3B (Team et al., 2022) which we use for automatic translations, contributors and maintainers of vLLM (Kwon et al., 2023) and LiteLLM \\(^{12}\\) which we leverage to generate continuations from models, and OpenRLHF (Hu et al., 2024) which we use to fine-tune models. Finally, we thank Jigsaw for providing access to Perspective API." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.489, + 0.275, + 0.505 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.512, + 0.829, + 0.598 + ], + "angle": 0, + "content": "Roee Aharoni, Melvin Johnson, and Orhan First. Massively multilingual neural machine translation. In Jill Burstein, Christy Doran, and Thamar Solorio (eds.), Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pp. 3874-3884, Minneapolis, Minnesota, June 2019. Association for Computational Linguistics. doi: 10.18653/v1/N19-1388. URL https://aclanthology.org/N19-1388/." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.605, + 0.829, + 0.648 + ], + "angle": 0, + "content": "Arash Ahmadian, Beyza Ermis, Seraphina Goldfarb-Tarrant, Julia Kreutzer, Marzieh Fadaee, Sara Hooker, et al. The multilingual alignment prism: Aligning global and local preferences to reduce harm. arXiv preprint arXiv:2406.18682, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.655, + 0.826, + 0.713 + ], + "angle": 0, + "content": "Duarte M Alves, José Pombal, Nuno M Guerreiro, Pedro H Martins, João Alves, Amin Farajian, Ben Peters, Ricardo Rei, Patrick Fernandes, Sweta Agrawal, et al. Tower: An open multilingual large language model for translation-related tasks. 
arXiv preprint arXiv:2402.17733, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.72, + 0.829, + 0.763 + ], + "angle": 0, + "content": "Yoshua Bengio, Soren Mindermann, Daniel Privitera, Tamay Besiroglu, Rishi Bommasani, Stephen Casper, Yejin Choi, Philip Fox, Ben Garfinkel, Danielle Goldfarb, et al. International ai safety report. arXiv preprint arXiv:2501.17805, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.77, + 0.826, + 0.814 + ], + "angle": 0, + "content": "Blake Bullwinkel, Amanda Minnich, Shiven Chawla, Gary Lopez, Martin Pouliot, Whitney Maxwell, Joris de Gruyter, Katherine Pratt, Saphir Qi, Nina Chikanov, et al. Lessons from red teaming 100 generative ai products. arXiv preprint arXiv:2501.07238, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.821, + 0.829, + 0.864 + ], + "angle": 0, + "content": "Chen Chen, Ziyao Liu, Weifeng Jiang, Si Qi Goh, and KwoK-Yan Lam. Trustworthy, responsible, and safe ai: A comprehensive architectural framework for ai safety with challenges and mitigations. arXiv preprint arXiv:2408.12935, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.872, + 0.826, + 0.903 + ], + "angle": 0, + "content": "Jaymari Chua, Yun Li, Shiyi Yang, Chen Wang, and Lina Yao. Ai safety in generative ai large language models: A survey. arXiv preprint arXiv:2407.18369, 2024." 
+ }, + { + "type": "list", + "bbox": [ + 0.174, + 0.512, + 0.829, + 0.903 + ], + "angle": 0, + "content": null + }, + { + "type": "page_footnote", + "bbox": [ + 0.187, + 0.909, + 0.454, + 0.925 + ], + "angle": 0, + "content": "\\(^{12}\\)https://github.com/BerriAI/litellm" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.828, + 0.203 + ], + "angle": 0, + "content": "Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer, and Veselin Stoyanov. Unsupervised cross-lingual representation learning at scale. In Dan Jurafsky, Joyce Chai, Natalie Schluter, and Joel Tetreault (eds.), Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pp. 8440-8451, Online, July 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.acl-main.747. URL https://aclanthology.org/2020.acl-main.747/." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.211, + 0.827, + 0.297 + ], + "angle": 0, + "content": "Marta Costa-jussà, Eric Smith, Christophe Ropers, Daniel Licht, Jean Maillard, Javier Ferrando, and Carlos Escolano. Toxicity in multilingual machine translation at scale. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Findings of the Association for Computational Linguistics: EMNLP 2023, pp. 9570-9586, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023-findings-emnlp.642. URL https://aclanthology.org/2023-findings-emnlp.642." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.305, + 0.827, + 0.351 + ], + "angle": 0, + "content": "Aida Mostafazadeh Davani, Sagar Gubbi Venkatesh, Sunipa Dev, Shachi Dave, and Vinodkumar Prabhakaran. Genil: A multilingual dataset on generalizing language. In First Conference on Language Modeling, 2024. URL https://openreview.net/forum?id=kLH4ccaL21." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.358, + 0.827, + 0.414 + ], + "angle": 0, + "content": "Adrian de Wynter, Ishaan Watts, Nektar Ege Altintoprak, Tua Wongsangaroonsri, Minghui Zhang, Noura Farra, Lena Baur, Samantha Claudet, Pavel Gajdusek, Can Gören, et al. Rtplx: Can llms evaluate toxicity in multilingual scenarios? arXiv preprint arXiv:2404.14397, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.424, + 0.824, + 0.455 + ], + "angle": 0, + "content": "Yihe Deng, Yu Yang, Junkai Zhang, Wei Wang, and Bo Li. Duoguard: A two-player rl-driven framework for multilingual llm guardrails. arXiv preprint arXiv:2502.05163, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.462, + 0.824, + 0.507 + ], + "angle": 0, + "content": "Yue Deng, Wenxuan Zhang, Sinno Jialin Pan, and Lidong Bing. Multilingual jailbreak challenges in large language models. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=vESNKdEMGp." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.515, + 0.827, + 0.56 + ], + "angle": 0, + "content": "Jesse Dodge, Maarten Sap, Ana Marasovic, William Agnew, Gabriel Ilharco, Dirk Groeneweld, Margaret Mitchell, and Matt Gardner. Documenting large webtext corpora: A case study on the colossal clean crawled corpus. arXiv preprint arXiv:2104.08758, 2021." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.567, + 0.827, + 0.611 + ], + "angle": 0, + "content": "Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.619, + 0.827, + 0.677 + ], + "angle": 0, + "content": "Shaona Ghosh, Prasoon Varshney, Makes Narsimhan Sreedhar, Aishwarya Padmakumar, Traian Rebedea, Jibin Rajan Varghese, and Christopher Parisien. Aegis2.0: A diverse ai safety dataset and risks taxonomy for alignment of llm guardrails. In Neurips Safe Generative AI Workshop 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.686, + 0.825, + 0.73 + ], + "angle": 0, + "content": "Shaona Ghosh, Prasoon Varshney, Erick Galinkin, and Christopher Parisien. Aegis: Online adaptive ai content safety moderation with ensemble of llm experts. arXiv preprint arXiv:2404.05993, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.738, + 0.827, + 0.782 + ], + "angle": 0, + "content": "Daniil Gurgurov, Tanja Bäumel, and Tatiana Anikina. Multilingual large language models and curse of multilinguality. 2024. doi: 10.48550/ARXIV.2406.10602. URL https://arxiv.org/abs/2406.10602." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.79, + 0.827, + 0.835 + ], + "angle": 0, + "content": "Seungju Han, Kavel Rao, Allyson Ettinger, Liwei Jiang, Bill Yuchen Lin, Nathan Lambert, Yejin Choi, and Nouha Dziri. Wildguard: Open one-stop moderation tools for safety risks, jailbreaks, and refusals of llms. arXiv preprint arXiv:2406.18495, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.843, + 0.654, + 0.86 + ], + "angle": 0, + "content": "Dan Hendrycks. Introduction to ai safety, ethics, and society, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.867, + 0.827, + 0.924 + ], + "angle": 0, + "content": "Edward J Hu, yelong shen, Phillip Wallis, Zeyuan Allen-Zhu, Yanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. LoRA: Low-rank adaptation of large language models. In International Conference on Learning Representations, 2022. URL https://openreview.net/forum?id=nZeVKeeFYf9." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.828, + 0.924 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.829, + 0.147 + ], + "angle": 0, + "content": "Jian Hu, Xibin Wu, Zilin Zhu, Xianyu, Weixun Wang, Dehao Zhang, and Yu Cao. Openrlhf: An easy-to-use, scalable and high-performance rlhf framework. arXiv preprint arXiv:2405.11143, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.157, + 0.83, + 0.213 + ], + "angle": 0, + "content": "Hakan Inan, Kartikeya Upasani, Jianfeng Chi, Rashi Rungta, Krithika Iyer, Yuning Mao, Michael Tontchev, Qing Hu, Brian Fuller, Davide Testuggine, et al. Llama guard: Llm-based input-output safeguard for human-ai conversations. arXiv preprint arXiv:2312.06674, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.224, + 0.829, + 0.268 + ], + "angle": 0, + "content": "Devansh Jain, Priyanshu Kumar, Samuel Gehman, Xuhui Zhou, Thomas Hartvigsen, and Maarten Sap. Polyglotoxicityprompts: Multilingual evaluation of neural toxic degeneration in large language models. arXiv preprint arXiv:2405.09373, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.277, + 0.829, + 0.348 + ], + "angle": 0, + "content": "Jiaming Ji, Mickel Liu, Juntao Dai, Xuehai Pan, Chi Zhang, Ce Bian, Boyuan Chen, Ruiyang Sun, Yizhou Wang, and Yaodong Yang. Beavertails: Towards improved safety alignment of LLM via a human-preference dataset. In Thirty-seventh Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2023. URL https://openreview.net/forum?id=g0QovXbFw3." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.358, + 0.829, + 0.442 + ], + "angle": 0, + "content": "Tom Kocmi, Rachel Bawden, Ondrej Bojar, Anton Dvorkovich, Christian Federmann, Mark Fishel, Thamme Gowda, Yvette Graham, Roman Grundkiewicz, Barry Haddow, Rebecca Knowles, Philipp Koehn, Christof Monz, Makoto Morishita, Masaaki Nagata, Toshiaki Nakazawa, Michal Novák, Martin Popel, and Maja Popovic. Findings of the 2022 conference on machine translation (wmt22). In Conference on Machine Translation, 2022. URL https://apisemantic scholar.org/CorpusID:256461033." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.452, + 0.829, + 0.496 + ], + "angle": 0, + "content": "Suhas Kotha, Jacob M. Springer, and Aditi Raghunathan. Understanding catastrophic forgetting in language models via implicit inference. ArXiv, abs/2309.10105, 2023. URL https://api_semanticscholar.org/CorpusID:262054014." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.505, + 0.829, + 0.561 + ], + "angle": 0, + "content": "Priyanshu Kumar, Elaine Lau, Saranya Vijayakumar, Tu Trinh, Scale Red Team, Elaine Chang, Vaughn Robinson, Sean Hendryx, Shuyan Zhou, Matt Fredrikson, et al. Refusal-trained llms are easily jailbroken as browser agents. arXiv preprint arXiv:2410.13886, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.572, + 0.829, + 0.63 + ], + "angle": 0, + "content": "Woosuk Kwon, Zhuohan Li, Siyuan Zhuang, Ying Sheng, Lianmin Zheng, Cody Hao Yu, Joseph E. Gonzalez, Hao Zhang, and Ion Stoica. 
Efficient memory management for large language model serving with pagedattention. In Proceedings of the ACM SIGOPS 29th Symposium on Operating Systems Principles, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.639, + 0.829, + 0.724 + ], + "angle": 0, + "content": "Lijun Li, Bowen Dong, Ruohui Wang, Xuhao Hu, Wangmeng Zuo, Dahua Lin, Yu Qiao, and Jing Shao. SALAD-bench: A hierarchical and comprehensive safety benchmark for large language models. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar (eds.), Findings of the Association for Computational Linguistics: ACL 2024, pp. 3923-3954, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024-findings-acl.235. URL https://aclanthology.org/2024-findings-acl.235." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.734, + 0.829, + 0.82 + ], + "angle": 0, + "content": "Zi Lin, Zihan Wang, Yongqi Tong, Yangkun Wang, Yuxin Guo, Yujia Wang, and Jingbo Shang. ToxicChat: Unveiling hidden challenges of toxicity detection in real-world user-AI conversation. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Findings of the Association for Computational Linguistics: EMNLP 2023, pp. 4694-4702, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.findings-emnlp.311. URL https://aclanthology.org/2023.findings-emnlp.311." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.829, + 0.829, + 0.856 + ], + "angle": 0, + "content": "AI @ Meta Llama Team. The llama 3 herd of models, 2024. URL https://arxiv.org/abs/2407.21783." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.867, + 0.829, + 0.925 + ], + "angle": 0, + "content": "Todor Markov, Chong Zhang, Sandhini Agarwal, Florentine Eloundou Nekoul, Theodore Lee, Steven Adler, Angela Jiang, and Lilian Weng. A holistic approach to undesired content detection in the real world. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 37, pp. 15009-15018, 2023." 
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.83, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.828, + 0.176 + ], + "angle": 0, + "content": "Mantas Mazeika, Long Phan, Xuwang Yin, Andy Zou, Zifan Wang, Norman Mu, Elham Sakaehie, Nathaniel Li, Steven Basart, Bo Li, David Forsyth, and Dan Hendrycks. Harmbench: a standardized evaluation framework for automated red teaming and robust refusal. In Proceedings of the 41st International Conference on Machine Learning, ICML'24. JMLR.org, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.183, + 0.798, + 0.199 + ], + "angle": 0, + "content": "Mistral. Un ministral, des ministraux. URL https://mistral.ai/en/news/ministraux." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.208, + 0.825, + 0.236 + ], + "angle": 0, + "content": "Andrew Ng. Agentic translation. URL https://github.com/andrewyng/translation-agent." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.245, + 0.826, + 0.275 + ], + "angle": 0, + "content": "PatronusAI. Llama guard is off duty. https://www.patronus.ai/blog/llama-guard-is-off-duty, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.283, + 0.826, + 0.312 + ], + "angle": 0, + "content": "Qwen. Qwen2.5: A party of foundation models! URL https://qwenlm.github.io/blog/qwen2.5/." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.321, + 0.828, + 0.406 + ], + "angle": 0, + "content": "Maarten Sap, Saadia Gabriel, Lianhui Qin, Dan Jurafsky, Noah A. Smith, and Yejin Choi. Social bias frames: Reasoning about social and power implications of language. 
In Dan Jurafsky, Joyce Chai, Natalie Schluter, and Joel Tetreault (eds.), Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pp. 5477-5490, Online, July 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.acl-main.486. URL https://aclanthology.org/2020.acl-main.486/." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.415, + 0.828, + 0.513 + ], + "angle": 0, + "content": "Khetam Al Sharou and Lucia Specia. A taxonomy and study of critical errors in machine translation. In Helena Moniz, Lieve Macken, Andrew Rufener, Loici Barrault, Marta R. Costa-jussa, Christophe Declercq, Maarit Koponen, Ellie Kemp, Spyridon Pilos, Mikel L. Forcada, Carolina Scarton, Joachim Van den Bogaert, Joke Daems, Arda Tezcan, Bram Vanroy, and Margot Fonteyne (eds.), Proceedings of the 23rd Annual Conference of the European Association for Machine Translation, pp. 171-180, Ghent, Belgium, June 2022. European Association for Machine Translation. URL https://aclanthology.org/2022.eamt-1.20." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.522, + 0.828, + 0.648 + ], + "angle": 0, + "content": "Lucia Specia, Frédéric Blain, Marina Fomicheva, Chrysoula Zerva, Zhenhao Li, Vishrav Chaudhary, and André F. T. Martins. Findings of the WMT 2021 shared task on quality estimation. In Loic Barrault, Ondrej Bojar, Fethi Bougares, Rajen Chatterjee, Marta R. Costa-jussa, Christian Federmann, Mark Fishel, Alexander Fraser, Markus Freitag, Yvette Graham, Roman Grundkiewicz, Paco Guzman, Barry Haddow, Matthias Huck, Antonio Jimeno Yepes, Philipp Koehn, Tom Kocmi, Andre Martins, Makoto Morishita, and Christof Monz (eds.), Proceedings of the Sixth Conference on Machine Translation, pp. 684-725, Online, November 2021. Association for Computational Linguistics. URL https://aclanthology.org/2021.wmt-1.71." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.657, + 0.826, + 0.701 + ], + "angle": 0, + "content": "Yingshui Tan, Boren Zheng, Baihui Zheng, Kerui Cao, Huiyun Jing, Jincheng Wei, Jiaheng Liu, Yancheng He, Wenbo Su, Xiangyong Zhu, et al. Chinese safetyqa: A safety short-form factuality benchmark for large language models. arXiv preprint arXiv:2412.15265, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.709, + 0.826, + 0.738 + ], + "angle": 0, + "content": "Llama Team. Meta llama guard 2. https://github.com/meta-llama/PurpleLlama/blob/main/Llama-Guard2/MODEL_CARD.md, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.747, + 0.828, + 0.873 + ], + "angle": 0, + "content": "NLLB Team, Marta R. Costa-jussà, James Cross, Onur Çelebi, Maha Elbayad, Kenneth Heafield, Kevin Heffernan, Elahe Kalbassi, Janice Lam, Daniel Licht, Jean Maillard, Anna Sun, Skyler Wang, Guillaume Wenzek, Al Youngblood, Bapi Akula, Loic Barrault, Gabriel Mejia Gonzalez, Prangthip Hansanti, John Hoffman, Semarley Jarrett, Kaushik Ram Sadagopan, Dirk Rowe, Shannon Spruit, Chau Tran, Pierre Andrews, Necip Fazil Ayan, Shruti Bhosale, Sergey Edunov, Angela Fan, Cynthia Gao, Vedanuj Goswami, Francisco Guzmán, Philipp Koehn, Alexandre Mourachko, Christophe Ropers, Safiyyah Saleem, Holger Schwenk, and Jeff Wang. No language left behind: Scaling human-centered machine translation, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.882, + 0.826, + 0.925 + ], + "angle": 0, + "content": "Wenxuan Wang, Zhaopeng Tu, Chang Chen, Youliang Yuan, Jen-tse Huang, Wenxiang Jiao, and Michael R Lyu. All languages matter: On the multilingual safety of large language models. arXiv preprint arXiv:2310.00905, 2023." 
+ }, + { + "type": "list", + "bbox": [ + 0.175, + 0.103, + 0.828, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.828, + 0.161 + ], + "angle": 0, + "content": "Tinghao Xie, Xiangyu Qi, Yi Zeng, Yangsibo Huang, Udari Madhushani Sehwag, Kaixuan Huang, Luxi He, Boyi Wei, Dacheng Li, Ying Sheng, et al. Sorry-bench: Systematically evaluating large language model safety refusal behaviors. arXiv preprint arXiv:2406.14598, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.169, + 0.827, + 0.213 + ], + "angle": 0, + "content": "Yuemei Xu, Ling Hu, Jiayi Zhao, Zihan Qiu, Yuqi Ye, and Hanwen Gu. A survey on multilingual large language models: Corpora, alignment, and bias. ArXiv, abs/2404.00929, 2024. URL https://api_semanticscholar.org/CorpusID:268819377." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.221, + 0.826, + 0.263 + ], + "angle": 0, + "content": "An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.272, + 0.824, + 0.303 + ], + "angle": 0, + "content": "Yahan Yang, Soham Dan, Dan Roth, and Insup Lee. Benchmarking llm guardrails in handling multilingual toxicity. arXiv preprint arXiv:2410.22153, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.31, + 0.824, + 0.34 + ], + "angle": 0, + "content": "Haneul Yoo, Yongjin Yang, and Hwaran Lee. Code-switching red-teaming: Lm evaluation for safety and multilingual understanding. arXiv preprint arXiv:2406.15481, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.347, + 0.827, + 0.404 + ], + "angle": 0, + "content": "Wenjun Zeng, Yuchi Liu, Ryan Mullins, Ludovic Peran, Joe Fernandez, Hamza Harkous, Karthik Narasimhan, Drew Proud, Piyush Kumar, Bhaktipriya Radharapu, et al. Shieldgemma: Generative ai content moderation based on gemma. arXiv preprint arXiv:2407.21772, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.413, + 0.826, + 0.457 + ], + "angle": 0, + "content": "Yi Zeng, Yu Yang, Andy Zhou, Jeffrey Ziwei Tan, Yuheng Tu, Yifan Mai, Kevin Klyman, Minzhou Pan, Ruoxi Jia, Dawn Song, et al. Air-bench 2024: A safety benchmark based on risk categories from regulations and policies. arXiv preprint arXiv:2407.17436, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.465, + 0.824, + 0.509 + ], + "angle": 0, + "content": "Hengxiang Zhang, Hongfu Gao, Qiang Hu, Guanhua Chen, Lili Yang, Bingyi Jing, Hongxin Wei, Bing Wang, Haifeng Bai, and Lei Yang. Chinesesa: A chinese benchmark for evaluating safety in large language models. arXiv preprint arXiv:2410.18491, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.516, + 0.824, + 0.56 + ], + "angle": 0, + "content": "Wenjing Zhang, Xuejiao Lei, Zhaoxiang Liu, Meijuan An, Bikun Yang, KaiKai Zhao, Kai Wang, and Shiguo Lian. Chisafetybench: A chinese hierarchical safety benchmark for large language models. arXiv preprint arXiv:2406.10311, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.568, + 0.827, + 0.598 + ], + "angle": 0, + "content": "Wenting Zhao, Xiang Ren, Jack Hessel, Claire Cardie, Yejin Choi, and Yuntian Deng. Wildchat: 1m chatgpt interaction logs in the wild. arXiv preprint arXiv:2405.01470, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.605, + 0.827, + 0.649 + ], + "angle": 0, + "content": "Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Tianle Li, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zhuohan Li, Zi Lin, Eric P Xing, et al. 
Lmsys-chat-1m: A large-scale real-world llm conversation dataset. arXiv preprint arXiv:2309.11998, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.657, + 0.827, + 0.743 + ], + "angle": 0, + "content": "Xuhui Zhou, Hao Zhu, Akhila Yerukola, Thomas Davidson, Jena D. Hwang, Swabha Swayamdipta, and Maarten Sap. COBRA frames: Contextual reasoning about effects and harms of offensive statements. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Findings of the Association for Computational Linguistics: ACL 2023, pp. 6294–6315, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023-findings-acl.392. URL https://aclanthology.org/2023-findings-acl.392/." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.828, + 0.743 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.102, + 0.607, + 0.121 + ], + "angle": 0, + "content": "A Translation Pipeline and Quality Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.141, + 0.828, + 0.2 + ], + "angle": 0, + "content": "Our training data translation pipeline segments the source text into chunks using *blingfire*13, translates each chunk independently, and reconstructs the target text by concatenating the translated segments. We utilize this chunking approach to process long texts while respecting the context window constraints of translation models." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.205, + 0.828, + 0.277 + ], + "angle": 0, + "content": "The agentic translation framework, used for translating the test data, implements a three-stage process: (1) initial translation of the source text, (2) critical analysis of potential translation errors, and (3) subsequent refinement of the target text incorporating the identified improvements. This iterative approach helps maintain semantic accuracy and cultural nuances crucial for safety evaluation across languages." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.282, + 0.828, + 0.354 + ], + "angle": 0, + "content": "Automatic evaluation of translation quality is done by prompting GPT-4o as a judge. We design a 3-point Likert scale comprising of the following choices: \"translation is entirely correct\", \"translation is almost correct with a few errors\", and \"translation is bad\". We run evaluations for only the translated test set of PGMix to assess the overall translation quality of a model. The evaluation prompts are shown below:" + }, + { + "type": "title", + "bbox": [ + 0.197, + 0.371, + 0.315, + 0.387 + ], + "angle": 0, + "content": "System Prompt" + }, + { + "type": "code", + "bbox": [ + 0.197, + 0.398, + 0.498, + 0.484 + ], + "angle": 0, + "content": "You are a linguistic expert. Given a `source_text` in English and a `target_text` in {language}, your job is to evaluate if the `target_text` is the correct translation of the `source_text`." + }, + { + "type": "title", + "bbox": [ + 0.199, + 0.51, + 0.296, + 0.526 + ], + "angle": 0, + "content": "User Prompt" + }, + { + "type": "code", + "bbox": [ + 0.199, + 0.538, + 0.367, + 0.567 + ], + "angle": 0, + "content": "`source_text`: {src} \n`target_text`: {tgt}" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.59, + 0.827, + 0.632 + ], + "angle": 0, + "content": "Following is the format of structure generations for translation quality evaluation. 
We prompt the judge to first reason about the source and target sentences before outputting the verdict." + }, + { + "type": "code", + "bbox": [ + 0.174, + 0.648, + 0.793, + 0.803 + ], + "angle": 0, + "content": "class QualityEnum(str, Enum): incorrect \\(=\\) 'translation is bad' almost.correct \\(=\\) 'translation is almost correct with a few errors' entirely.correct \\(=\\) 'translation is entirely correct' \nclass Result(BaseModel): reason: str \\(=\\) Field(description \\(\\equiv\\) \"brief pointers on why the translation is correct or wrong\") verdict: QualityEnum \\(=\\) Field(description \\(\\equiv\\) \"the verdict about the translation quality\")" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.827, + 0.828, + 0.886 + ], + "angle": 0, + "content": "Tables 8 and 9 show the verdicts of the GPT-4o judge for the human prompt and model response respectively. We observe that TowerInstruct generates higher-quality translations when compared to NLLB for the languages it supports. However, in the case of Hindi (which is not supported by Tower), the quality is poor." + }, + { + "type": "page_footnote", + "bbox": [ + 0.186, + 0.909, + 0.454, + 0.925 + ], + "angle": 0, + "content": "13https://pypi.org/project/blingfire" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.135, + 0.849, + 0.433 + ], + "angle": 0, + "content": "
LanguageModelEntirely CorrectPartially CorrectBadInvalid Judge Verdict
ZHNLLB636688401-
Tower12023601621
ESNLLB1437218682
Tower1374303471
FRNLLB1406245722
Tower1499177472
DENLLB12753481011
Tower1335323661
KONLLB10754901582
Tower12783361092
ITNLLB1384260801
Tower144222756-
PTNLLB146320260-
Tower153214251-
NLNLLB1339306773
Tower139926462-
RUNLLB1379240106-
Tower1406233851
HINLLB147018669-
Tower72516912
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.441, + 0.825, + 0.473 + ], + "angle": 0, + "content": "Table 8: GPT-4o Judge verdicts for human prompts translation. Takeaway: TowerInstruct generated more accurate translations than NLLB for supported languages." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.55, + 0.848, + 0.847 + ], + "angle": 0, + "content": "
LanguageModelEntirely CorrectPartially CorrectBadInvalid Judge Verdict
ZHNLLB15311474241
Tower822729174-
ESNLLB858426441-
Tower583105785-
FRNLLB883741101-
Tower481116381-
DENLLB811790124-
Tower625102872-
KONLLB72192084-
Tower7079161011
ITNLLB809566350-
Tower5291103921
PTNLLB8846232162
Tower4891131105-
NLNLLB8287721241
Tower5931049821
RUNLLB906663156-
Tower512112390-
HINLLB128641128
Tower611718
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.855, + 0.825, + 0.887 + ], + "angle": 0, + "content": "Table 9: GPT-40 Judge verdicts for model generation translation. Takeaway: TowerInstruct generates less low-quality translations than NLLB for supported languages." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.102, + 0.377, + 0.119 + ], + "angle": 0, + "content": "B Human Validation" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.139, + 0.827, + 0.224 + ], + "angle": 0, + "content": "We use Prolific14 to collect annotations. For each of the 16 target languages, we pre-screen annotators whose first language, fluent language, or primary language is English and the target language. Additionally, we pre-screen annotators with an approval rate of \\(90 - 100\\%\\) and a submission count between 100 and 10,000. Annotators were compensated at the rate of \\(\\$12/\\)hr. Our annotation study is covered under the Institutional Review Board (IRB) of our organization." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.229, + 0.828, + 0.304 + ], + "angle": 0, + "content": "We collect 2,400 annotations across 16 languages and 50 data points per language, with each data point annotated by 3 annotators, and each annotator annotating 10 data points. We recruited 191 unique annotators15 via Prolific, spanning across 24 countries. They self-identified as 110 male and 81 female. In terms of ethnicity, they described themselves as 84 White, 79 Black, 12 Mixed, 10 Asian, and 5 Other." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.308, + 0.828, + 0.381 + ], + "angle": 0, + "content": "Figures 6, 7, and 8 present the consent, annotation instructions, and framework questions. 
The human validation results for each language are shown in Table 10. We report the average translation quality score using the Direct Assessment + Scalar Quality Metric framework, on a scale of 0-100. Inter-annotator agreement is computed using Krippendorff's \\(\\alpha\\) for both source and target language safety labels." + }, + { + "type": "table", + "bbox": [ + 0.185, + 0.398, + 0.814, + 0.653 + ], + "angle": 0, + "content": "
LanguageAvg. Trans- lation ScoreSource Safety αTarget Safety αSource - Target α
Arabic80.990.410.400.96
Chinese78.550.430.420.91
Czech81.110.470.480.96
Dutch77.150.370.330.96
French82.120.480.471.0
German82.670.440.450.92
Hindi84.720.340.370.96
Italian83.210.380.370.91
Japanese76.390.390.360.76
Korean81.550.430.460.96
Polish80.330.390.400.96
Portuguese81.090.460.450.92
Russian80.440.420.430.96
Spanish84.110.450.441.0
Swedish79.660.360.351.0
Thai78.890.410.420.92
" + }, + { + "type": "table_caption", + "bbox": [ + 0.17, + 0.662, + 0.825, + 0.72 + ], + "angle": 0, + "content": "Table 10: Human validation results for translation quality and safety labels. Translation scores are on a 0-100 scale, using the DA+SQM framework. Inter-annotator agreement (Krippendorff's \\(\\alpha\\)) for source and target safety labels is reported, along with agreement between majority-voted source and target labels." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.761, + 0.479, + 0.781 + ], + "angle": 0, + "content": "C POLYGUARD Training Details" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.801, + 0.827, + 0.859 + ], + "angle": 0, + "content": "We train our models using OPENRLHF\\(^{16}\\) on 8 NVIDIA A6000 GPUs. We set LoRA rank to 8 and alpha to 16. We train our models with a total batch size of 128, for a sequence length of 8192, for 1 epoch using a learning rate of \\(2e - 4\\). The system and user prompts (adapted from WildGuard and Llama Guard v3) used by PG are as follows:" + }, + { + "type": "page_footnote", + "bbox": [ + 0.186, + 0.88, + 0.388, + 0.896 + ], + "angle": 0, + "content": "\\(^{14}\\)https://www.prolific.com/" + }, + { + "type": "page_footnote", + "bbox": [ + 0.187, + 0.896, + 0.689, + 0.911 + ], + "angle": 0, + "content": "15some participated in multiple languages, resulting in a lower unique count" + }, + { + "type": "page_footnote", + "bbox": [ + 0.187, + 0.91, + 0.541, + 0.924 + ], + "angle": 0, + "content": "16https://github.com/OpenRLHF/OpenRLHF/tree/main" + }, + { + "type": "list", + "bbox": [ + 0.186, + 0.88, + 0.689, + 0.924 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.517, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "text", + "bbox": [ + 0.454, + 0.155, + 0.546, + 
0.164 + ], + "angle": 0, + "content": "Instruction and Consent" + }, + { + "type": "title", + "bbox": [ + 0.18, + 0.177, + 0.225, + 0.183 + ], + "angle": 0, + "content": "Background:" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.185, + 0.815, + 0.2 + ], + "angle": 0, + "content": "Hi! We are a team of researchers who are passionate about making AI systems safer to use across multiple languages. We are trying to test out a few systems for automatically translating sentences across languages to see how well they preserve the original meaning and if they translate unsafe content as well. We appreciate your help in making AI systems safer across multiple cultures and languages!" + }, + { + "type": "title", + "bbox": [ + 0.18, + 0.208, + 0.233, + 0.214 + ], + "angle": 0, + "content": "Task Overview:" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.216, + 0.815, + 0.231 + ], + "angle": 0, + "content": "In this study, we are interested in making AI systems safer to use in multiple languages. You will be presented with a sentence in English and its machine-generated translation in another language. Your task is to judge the quality of the translation, and if either the original sentence/translation contains any unsafe language." + }, + { + "type": "title", + "bbox": [ + 0.18, + 0.239, + 0.267, + 0.246 + ], + "angle": 0, + "content": "Data collection & sharing:" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.248, + 0.785, + 0.263 + ], + "angle": 0, + "content": "We will not ask you for your name, and the data collected in this study will be made identifiable to the best of our extent. We will securely store the data on our servers and only share with qualified researchers (e.g., who want to further the study of hate speech detection). If you later decide that you do not want your responses included in this study, please email so we can exclude your work from the analysis." 
+ }, + { + "type": "title", + "bbox": [ + 0.18, + 0.271, + 0.212, + 0.277 + ], + "angle": 0, + "content": "Contact:" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.279, + 0.81, + 0.304 + ], + "angle": 0, + "content": "If you have any questions about this study, you should feel free to ask them by contacting us (via the MTurk interface or via email at: info@mturk.com). If you have questions later, desire additional information, or wish to withdraw your participation please contact e-mail in accordance with the contact information listed above. If you have questions pertaining to your rights as a research participant; or to report concerns to this study, you should contact" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.318, + 0.367, + 0.324 + ], + "angle": 0, + "content": "Please do not use AI systems when answering the tasks." + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.326, + 0.761, + 0.333 + ], + "angle": 0, + "content": "We will sample some of the responses and manually review them, if we find evidence to show that annotators are not giving proper efforts to this task, we will exclude them from future tasks." + }, + { + "type": "title", + "bbox": [ + 0.189, + 0.342, + 0.572, + 0.352 + ], + "angle": 0, + "content": "I have read and understood the information above, are 18 years or older, and agree to participate in our study." + }, + { + "type": "text", + "bbox": [ + 0.189, + 0.359, + 0.212, + 0.364 + ], + "angle": 0, + "content": "Yes" + }, + { + "type": "text", + "bbox": [ + 0.19, + 0.369, + 0.211, + 0.375 + ], + "angle": 0, + "content": "No" + }, + { + "type": "text", + "bbox": [ + 0.476, + 0.423, + 0.522, + 0.431 + ], + "angle": 0, + "content": "Move forward" + }, + { + "type": "image_caption", + "bbox": [ + 0.292, + 0.448, + 0.705, + 0.464 + ], + "angle": 0, + "content": "Figure 6: Annotator Consent. Blacked out for anonymity." 
+ }, + { + "type": "title", + "bbox": [ + 0.189, + 0.568, + 0.27, + 0.577 + ], + "angle": 0, + "content": "Annotation Task" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.588, + 0.807, + 0.605 + ], + "angle": 0, + "content": "You will be presented with 2 sentences: an source original sentence in English, and its automatic translation in Thai. Your task is to assess the quality of the translation and if the source/translated sentences contain unsafe language." + }, + { + "type": "title", + "bbox": [ + 0.188, + 0.61, + 0.304, + 0.621 + ], + "angle": 0, + "content": "Quality of Translation" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.624, + 0.382, + 0.631 + ], + "angle": 0, + "content": "Evaluate the quality of the translation in Thai. Consider both:" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.637, + 0.531, + 0.644 + ], + "angle": 0, + "content": "- Meaning: Does the translation convey and reflect the same meaning as the source sentence in English?" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.645, + 0.382, + 0.653 + ], + "angle": 0, + "content": "- Fluency: Is the translation easy to read and understand?" + }, + { + "type": "list", + "bbox": [ + 0.199, + 0.637, + 0.531, + 0.653 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.658, + 0.383, + 0.666 + ], + "angle": 0, + "content": "Rate the translation quality on a continuous scale from 0 to 6." + }, + { + "type": "title", + "bbox": [ + 0.189, + 0.671, + 0.252, + 0.68 + ], + "angle": 0, + "content": "Rating Scale:" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.683, + 0.746, + 0.69 + ], + "angle": 0, + "content": "0 - No Meaning Preserved: The translation is confusing, makes no sense, or is completely different from the English sentence. Most of the meaning and information is lost." 
+ }, + { + "type": "text", + "bbox": [ + 0.199, + 0.691, + 0.801, + 0.706 + ], + "angle": 0, + "content": "- 2 - Some Meaning Preserved: The translation captures only part of the meaning but misses key details. It may be hard to understand because of major mistakes like completely incorrect or irrelevant words or very poor grammar." + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.708, + 0.805, + 0.722 + ], + "angle": 0, + "content": "- 4 - Most Meaning Preserved: The translation retains most of the meaning of the English sentence -- the main idea is clear with just a few small mistakes, like minor grammar issues, awkward wording, or slight changes in meaning." + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.723, + 0.661, + 0.73 + ], + "angle": 0, + "content": "- 6 - Perfect Translation: The translation is clear, natural, and grammatically correct -- same meaning like the source sentence, with no mistakes." + }, + { + "type": "list", + "bbox": [ + 0.199, + 0.683, + 0.805, + 0.73 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.19, + 0.736, + 0.233, + 0.744 + ], + "angle": 0, + "content": "Key Tip:" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.747, + 0.471, + 0.754 + ], + "angle": 0, + "content": "- You can move the slider to any point on the scale, not just whole numbers (e.g., 5.3)." + }, + { + "type": "title", + "bbox": [ + 0.188, + 0.761, + 0.345, + 0.772 + ], + "angle": 0, + "content": "Identifying Unsafe Language" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.774, + 0.541, + 0.782 + ], + "angle": 0, + "content": "Please check if the original sentence in English or the translation in Thai contain any unsafe or harmful language." + }, + { + "type": "title", + "bbox": [ + 0.189, + 0.787, + 0.381, + 0.796 + ], + "angle": 0, + "content": "What Counts as Unsafe or Harmful Language?" 
+ }, + { + "type": "text", + "bbox": [ + 0.188, + 0.801, + 0.643, + 0.808 + ], + "angle": 0, + "content": "A sentence is considered unsafe or harmful if it supports, encourages, or facilitates harmful, illegal, or unethical actions. This includes BOTH:" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.814, + 0.787, + 0.829 + ], + "angle": 0, + "content": "Content that is itself harmful: (e.g., promoting violence, supporting crimes, providing inappropriate sexual content, expressing hate speech towards Jews and Muslim communities, revealing confidential information or health records, encouraging misuse of technology)." + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.83, + 0.81, + 0.845 + ], + "angle": 0, + "content": "Sentences that request harmful content: (e.g., making unauthorized requests to edit medical records and prescription, asking for instructions on illegal activities, ways to create or use dangerous weapons, promoting self-harm, ways to exploit children, how to spread false election information, even if the language seems neutral)." 
+ }, + { + "type": "list", + "bbox": [ + 0.199, + 0.814, + 0.81, + 0.845 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.381, + 0.861, + 0.617, + 0.876 + ], + "angle": 0, + "content": "Figure 7: Annotator instructions" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.517, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.102, + 0.824, + 0.374 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.302, + 0.384, + 0.698, + 0.402 + ], + "angle": 0, + "content": "Figure 8: Annotator Framework with Example in Thai" + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.464, + 0.824, + 0.922 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "title", + "bbox": [ + 0.199, + 0.106, + 0.296, + 0.121 + ], + "angle": 0, + "content": "User Prompt" + }, + { + "type": "code", + "bbox": [ + 0.199, + 0.134, + 0.307, + 0.203 + ], + "angle": 0, + "content": "Human user: {prompt} AI assistant: {response}" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.225, + 0.827, + 0.298 + ], + "angle": 0, + "content": "We train our models \\(^{17}\\) to generate formatted responses containing the user input prompt safety label, policies violated, model response refusal, model response safety label, and policies violated (shown below). Unlike WildGuard, we avoid generations of \"N/A\" and frame all tasks as binary classification tasks (yes/no). 
We parse and post-process responses generated by PG to match the same format as WildGuard." + }, + { + "type": "title", + "bbox": [ + 0.2, + 0.31, + 0.382, + 0.324 + ], + "angle": 0, + "content": "POLYGON Generation" + }, + { + "type": "code", + "bbox": [ + 0.199, + 0.335, + 0.512, + 0.433 + ], + "angle": 0, + "content": "Harmful request: yes/no \nRequest safety violations: None / List of violated categories \nResponse refusal: yes/no \nHarmful response: yes/no \nResponse safety violations: None / List of violated categories" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.467, + 0.715, + 0.485 + ], + "angle": 0, + "content": "D Out-of-Distribution Benchmarking Dataset Annotations" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.501, + 0.828, + 0.629 + ], + "angle": 0, + "content": "In this section, we list the formulation of ground-truth labels for the out-of-distribution benchmarks. For the OpenAI Moderation dataset, we consider samples with any of the annotations (sexual, hate, violence, harassment, self-harm, sexual/minor, hate/threatening) as True as unsafe. For RTP-LX, we consider samples with a Toxicity score above 1 unsafe. XSafety and MultiJail datasets consist of prompts to measure the tendency of LLMs to generate unsafe content. Thus, a few prompts in these datasets are innocuous but could trigger an LLM to generate harmful content. Therefore, we use GPT-4o to determine the safety label of the samples. Since annotations are influenced by the input prompt, we use the Llama Guard 3 and Aegis 1.0 prompts to create two sets of ground-truth labels." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.651, + 0.436, + 0.67 + ], + "angle": 0, + "content": "E Patronus AI Safety Study" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.685, + 0.827, + 0.744 + ], + "angle": 0, + "content": "Patronus AI benchmarked Llama Guard 3 on a small number of samples (500) from various English and multilingual toxicity and safety datasets illustrating its poor recall of unsafe data points (PatronusAI, 2024). Their evaluation benchmark consists of the following datasets available on HuggingfaceHub:" + }, + { + "type": "text", + "bbox": [ + 0.211, + 0.758, + 0.44, + 0.773 + ], + "angle": 0, + "content": "1. nicholasKluge/toxic-text-en" + }, + { + "type": "text", + "bbox": [ + 0.211, + 0.774, + 0.496, + 0.789 + ], + "angle": 0, + "content": "2. Arsive/toxicity_classification_jigsaw" + }, + { + "type": "text", + "bbox": [ + 0.211, + 0.79, + 0.461, + 0.805 + ], + "angle": 0, + "content": "3. ukr-detect/ukr-toxicity-dataset" + }, + { + "type": "text", + "bbox": [ + 0.211, + 0.806, + 0.437, + 0.821 + ], + "angle": 0, + "content": "4. tmu-nlp/thai_toxicity tweet" + }, + { + "type": "text", + "bbox": [ + 0.211, + 0.822, + 0.436, + 0.837 + ], + "angle": 0, + "content": "5. nicholasKluge/toxic-text-pt" + }, + { + "type": "text", + "bbox": [ + 0.211, + 0.838, + 0.358, + 0.852 + ], + "angle": 0, + "content": "6. lmsys/toxic-chat" + }, + { + "type": "text", + "bbox": [ + 0.211, + 0.853, + 0.445, + 0.869 + ], + "angle": 0, + "content": "7. PKU-Alignment/BeaverTails" + }, + { + "type": "text", + "bbox": [ + 0.211, + 0.87, + 0.436, + 0.884 + ], + "angle": 0, + "content": "8. 
OpenSafetyLab/Salad-Data" + }, + { + "type": "list", + "bbox": [ + 0.211, + 0.758, + 0.496, + 0.884 + ], + "angle": 0, + "content": null + }, + { + "type": "page_footnote", + "bbox": [ + 0.171, + 0.897, + 0.825, + 0.926 + ], + "angle": 0, + "content": "\\(^{17}\\)Qwen2.5-7B-Instruct and Ministrial-8B-Instruct-2410 are available for modifications under the Apache 2.0 license and Mistral Research License respectively." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.102, + 0.563, + 0.121 + ], + "angle": 0, + "content": "F Influence of low-quality translated data" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.135, + 0.828, + 0.234 + ], + "angle": 0, + "content": "We distill GPT-4o's knowledge of translation quality into a Qwen2.5 7B classifier to filter out samples with low translation quality. We use the same schema as our translation quality study (Appendix A) to filter for samples where the human prompt and model response are accurately translated. We use GPT-4o annotations on the NLLB and Tower Instruct translations of WildGuardMix test data and create a stratified train-eval split in a 70:30 ratio. Similar to PG, we train a Qwen2.5-based SFT classifier to predict the quality of the translated source document, using the following prompts:" + }, + { + "type": "title", + "bbox": [ + 0.199, + 0.244, + 0.314, + 0.26 + ], + "angle": 0, + "content": "System Prompt" + }, + { + "type": "code", + "bbox": [ + 0.199, + 0.273, + 0.504, + 0.356 + ], + "angle": 0, + "content": "You are a linguistic expert. 
Given a `source_text` in English and a `target_text` in {language}, your job is to evaluate if the `target_text` is the correct translation of the `source_text`" + }, + { + "type": "title", + "bbox": [ + 0.199, + 0.378, + 0.296, + 0.394 + ], + "angle": 0, + "content": "User Prompt" + }, + { + "type": "code", + "bbox": [ + 0.199, + 0.406, + 0.391, + 0.435 + ], + "angle": 0, + "content": "```\n`source_text': {source}\n`target_text': {target}" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.451, + 0.825, + 0.494 + ], + "angle": 0, + "content": "The model is trained on 60,346 training samples and achieves an overall accuracy of \\(82\\%\\) on the validation set of 25,863 samples. A complete evaluation report is shown below in Table 11." + }, + { + "type": "table", + "bbox": [ + 0.292, + 0.504, + 0.709, + 0.566 + ], + "angle": 0, + "content": "
LabelPrecisionRecallF1Support
Bad7073712066
Partially Correct7663697704
Entirely Correct87939016093
" + }, + { + "type": "table_caption", + "bbox": [ + 0.28, + 0.574, + 0.719, + 0.591 + ], + "angle": 0, + "content": "Table 11: Translation Quality Classifier performance metrics" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.619, + 0.828, + 0.76 + ], + "angle": 0, + "content": "Removal of low-quality training data does not necessarily improve model performance. Intuitively, the presence of poor-quality translated data should harm model performance. However, PG models show contrastive trends when low-quality samples are removed from the training data mix (Figure 9). The performance of Qwen2.5 degrades for most datasets, whereas the performance of Ministrial improves. The performance degradation in the case of Qwen2.5 can be attributed to noisy samples in safety and toxicity evaluation datasets. Harmful text is considered to belong to low-quality data; web-crawls implement word blocklist filters to enhance data quality (Dodge et al., 2021). Thus, we hypothesize that the noise induced by poor translations bridges the gap between training and evaluation data, thus leading to performance improvement." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.78, + 0.318, + 0.796 + ], + "angle": 0, + "content": "G Limitations" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.812, + 0.827, + 0.926 + ], + "angle": 0, + "content": "We describe several limitations of our work. First, we automatically translate English data to other languages using LLMs. However, automatic translations can introduce deviations in toxicity and safety risks due to incorrect translations and hallucinations (Specia et al., 2021; Sharou & Specia, 2022; Team et al., 2022; Costa-jussa et al., 2023). Second, we employ existing safety classifiers and LLMs to automatically annotate safety violation categories, which may introduce biases from these models into our labeled safety categories. 
We utilize a panel of models to mitigate such biases, but acknowledge the inherent limitations of this methodology. Third, we follow Llama-Guard-3-8B (Dubey et al., 2024) and define" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "image", + "bbox": [ + 0.325, + 0.107, + 0.675, + 0.44 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.454, + 0.828, + 0.486 + ], + "angle": 0, + "content": "Figure 9: Performance difference on removing low-quality data. Takeaway: Removal of low-quality training data does not necessarily improve model performance." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.51, + 0.828, + 0.584 + ], + "angle": 0, + "content": "our safety violation taxonomy according to the MLCommons Safety Taxonomy18. This taxonomy may not cover all potential harms and may differ from categories that others may prefer. Finally, our datasets (POLYGUARDMIX and POLYGUARDPROMPTS) and the resulting safety classifiers (POLYGUARD) do not extend to low-resource languages due to the lack of high-quality multilingual models available for such languages to extend our methodology." 
+ }, + { + "type": "page_footnote", + "bbox": [ + 0.187, + 0.909, + 0.589, + 0.925 + ], + "angle": 0, + "content": "18https://mlcommons.org/2024/04/mlc-aisafety-v0-5-poc/" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "23" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_04xxx/2504.04377/3cb1148d-2625-44e8-a64a-225c0e814138_origin.pdf b/data/2025/2504_04xxx/2504.04377/3cb1148d-2625-44e8-a64a-225c0e814138_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..61b8d2ec74c0f9d86ae0a6601d1ea78f84677ff1 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04377/3cb1148d-2625-44e8-a64a-225c0e814138_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e282f2379504690aec1ce98ba0ff70d5bb19649855275740157e768739d677a9 +size 1433636 diff --git a/data/2025/2504_04xxx/2504.04377/full.md b/data/2025/2504_04xxx/2504.04377/full.md new file mode 100644 index 0000000000000000000000000000000000000000..482ff8882d79ed93b05e97f6343175eda3e64be1 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04377/full.md @@ -0,0 +1,472 @@ +![](images/01334081f210a9c5d39bed84a79543b78c394d85d18689b9e3b6b4e720660630.jpg) + +# PolyGuard: A Multilingual Safety Moderation Tool for 17 Languages + +Priyanshu Kumar $^{\text{♥1}}$ Devansh Jain $^{\text{♥1}}$ Akhila Yerukola $^{\text{♥}}$ + +Liwei Jiang\* Himanshu Beniwal△ $\diamond$ Thomas Hartvigsen Maarten Sap + +Carnegie Mellon University $\spadesuit$ University of Washington $\triangle$ IIT Gandhinagar $\diamond$ University of Virginia $\clubsuit$ Allen Institute for AI + +# Abstract + +Truly multilingual safety moderation efforts for Large Language Models (LLMs) have been hindered by a narrow focus on a small set of languages (e.g., English, Chinese) as well as a limited scope of safety definition, resulting in significant gaps in moderation capabilities. 
To bridge these gaps, we release POLYGUARD, a new state-of-the-art multilingual safety model for safeguarding LLM generations, and the corresponding training and evaluation datasets. POLYGUARD is trained on POLYGUARDMIX, the largest multilingual safety training corpus to date containing 1.91M samples across 17 languages (e.g., Chinese, Czech, English, Hindi). We also introduce POLYGUARDPROMPTS, a high quality multilingual benchmark with 29K samples for the evaluation of safety guardrails. Created by combining naturally occurring multilingual human-LLM interactions and human-verified machine translations of an English-only safety dataset (WildGuardMix; Han et al., 2024), our datasets contain prompt-output pairs with labels of prompt harmfulness, response harmfulness, and response refusal. Through extensive evaluations across multiple safety and toxicity benchmarks, we demonstrate that POLYGUARD outperforms existing state-of-the-art open-weight and commercial safety classifiers by $5.5\%$ . Our contributions advance efforts toward safer multilingual LLMs for all global users. + +![](images/07b1c8fbc3923a4e6aed477b493bd29125cec7df8131a0b52664d39ca90e3329.jpg) + +PolyGuard Collection + +![](images/14d692a5dd9fb9f6ca098b1d92717b3f4009134c5c31cf1e964e4a115286a3f3.jpg) + +kpriyanshu256/polyguard + +# 1 Introduction + +Recent advances in large language models (LLMs), especially their multilingual capabilities, have led to their deployment to a diverse global user base that spans multiple languages. Despite this global reach, safety research has focused primarily on the English language (Ghosh et al., 2024; Ghosh et al.; Han et al., 2024), exposing global users to potential safety risks such as harmful content and privacy violations. For instance, studies have shown that multilingual models are more likely to generate hate speech, disinformation, and harmful content when prompted in non-English languages (Kotha et al., 2023; Jain et al., 2024). 
+ +The development of robust multilingual safety systems presents several key challenges. First, building multilingual systems is inherently difficult due to challenges such as the lack of comprehensive datasets, the "curse of multilinguality" (Aharoni et al., 2019; Conneau et al., 2020; Gurgurov et al., 2024), and the inherent biases embedded in training corpora (Xu et al., 2024). Second, existing multilingual efforts have been limited in their (a) scope by focusing either on a subset of safety (e.g., PerspectiveAPI covering only toxicity, ignoring other unsafe content) and/or on a narrow set of language coverage (e.g., Llama-Guard-1 + +only covering English safety, ignoring toxicity and DuoGuard being evaluated on 4 very high resource languages only; Inan et al., 2023; Jain et al., 2024; Deng et al., 2025), or (b) performance (e.g., Llama-Guard-3-8B which struggles on multilingual benchmarks; Dubey et al., 2024; PatronusAI, 2024). Finally, most existing safety frameworks address only the single task of classifying safety and often rely on simplistic binary settings (safe/unsafe), which fail to capture the complex spectrum of harmful content that can manifest differently across cultural and linguistic contexts (Sap et al., 2020; Zhou et al., 2023). + +To address these gaps, we release POLYGUARD (PG), a new state-of-the-art fine-tuned language model for multi-task safety detection and moderation. As Figure 1 highlights, PG can classify a multilingual input of a user prompt and an LLM response on five dimensions. + +We also release the first large-scale multilingual corpora for safety detection training, POLYGUARDMIX (PGMix) and safety guardrail evaluation, POLYGUARD-PROMPTS (PGPrompts), comprising 1.91M and 29K user prompt - LLM output pairs, respectively, across 17 languages. Our datasets contain binary and categorical labels for prompt harmfulness and response harmfulness, and response refusal (if the LLM response complies with the user request). 
We use a systematic labeling process that leverages a panel of English safety classifiers and LLM-as-a-judge (proprietary and open-weight LLM) to obtain these labels. + +We create our PGMix dataset by combining both: (a) naturally occurring multilingual human-LLM interactions from In-The-Wild (ITW) datasets, and (b) machine translations of WildGuardMix (Han et al., 2024), to ensure data diversity which is crucial for improved model performance (Davani et al., 2024). We utilize multiple LLMs to ensure high-quality translations of WildGuardMix, verified by a high average translation score of 81.15 as rated by human annotators. + +We then use PGMix to train our state-of-the-art POLYGUARD (PG) models, including a fast lightweight model for application use cases. Our empirical results show that PG + +outperforms existing open-source and proprietary safety detectors on English-only as well as multilingual safety and toxicity benchmarks. Furthermore, we find that the incorporation of ITW samples in the training datasets makes PG models more robust to various data distributions, including code-switched and translated data. + +Overall, our datasets and models2 serve as a starting point for building powerful and robust multilingual safety detectors and advance efforts towards multilingual safe AI systems. + +# 2 Dataset + +To address the critical need for multilingual safety detection, we introduce POLYGUARDMIX (PGMix) and POLYGUARDPROMPTS (PGPrompts), multilingual datasets specifically designed to train and evaluate robust safety classifiers. PGMix comprises 1.91M human-LLM interactions, including 1.47M machine-translated samples from WildGuardMix and 0.43M + +naturally-occurring samples from In-The-Wild datasets, whereas PGPrompts comprises 29K translated samples. 
+ +Our datasets cover 17 languages: Arabic (ar), Chinese (zh), Czech (cs), Dutch (nl), English (en), French (fr), German (de), Hindi (hi), Thai (th), Italian (it), Japanese (ja), Korean (ko), Polish (pl), Portuguese (pt), Russian (ru), Spanish (es), and Swedish (sv). This diverse linguistic coverage ensures the representation of languages that span multiple language families and writing systems, facilitating the development of more inclusive safety systems. + +Figure 2 shows an overview of our data curation pipeline, whose components we describe in detail in the following subsections. + +# 2.1 Data Sources + +Both PGMix and PGPrompts are constructed from the train and test samples of WildGuardMix (Han et al., 2024), a dataset of synthetic and natural single-turn human-LLM interactions with fine-grained annotations, respectively. In addition, PGMix also contains samples from In-TheWild datasets: LMSys-Chat1M (Zheng et al., 2023) and WildChat (Zhao et al., 2024). We posit that the combination of natural and synthetic sam + +![](images/64f1eb9148050067cf1c5ad96d1ffffa8146bc4b4e995ba80a783bd2ca1b0b99.jpg) +Figure 2: Data curation process for PGMix (safety detection training) and PGPrompts (safety guardrail evaluation). Takeaway: PGMix combines machine-translated and naturally occurring data to improve data diversity and, consequently, model performance. + +plies improves the diversity of data and consequently improves model performance (Davani et al., 2024). + +# 2.2 Machine Translation Pipeline + +We develop an efficient machine translation pipeline using open-weight models to minimize computational costs when translating WildGuardMix for our training data. We employ two state-of-the-art translation models: TowerInstruct-7B-v0.2 (Alves et al., 2024) and NLLB-3.3B (Team et al., 2022). For optimal performance, we utilize TowerInstruct-7B-v0.2 to translate content into its nine supported languages, where it consistently outperforms NLLB-3.3B. 
We then leverage NLLB-3.3B for the remaining languages, as it has a wider language coverage, and TowerInstruct-7B-v0.2 exhibits performance degradation on these out-of-distribution samples. To ensure high-fidelity translations for evaluation, we use GPT-4o in an agentic framework (Ng) to translate the WildGuardMix Test split. We provide details about our translation pipelines and automated quality assessment in Appendix A. + +# 2.3 Safety Annotation + +We leverage a panel of English safety classifiers and LLM-as-judges to annotate safety violation categories automatically. We follow Llama-Guard-3-8B (Dubey et al., 2024) and define our safety violation taxonomy according to the MLCommons Safety Taxonomy4. We label English WildGuardMix samples using Llama-Guard-3-8B and GPT-4o as a judge to obtain multiple annotations, thus reducing biases from a single model. Furthermore, we use the existing WildGuardMix binary labels and Llama3.1-405B-Instruct (Dubey et al., 2024) as a judge to resolve conflicts and obtain the final annotations5. Finally, since PGMix and PGPrompts contain translations of WildGuardMix, we propagate safety labels from the + +![](images/da2dedc8d353ba756792c421a28eb3d58a44bc6e99849e62e8c2dfddfcef3244.jpg) +Figure 3: Safety category distribution for user prompts and model responses for WildGuard-Mix train samples. The model name (GPT-4o and Llama-Guard-3-8B) represents the LLM used as a judge to automatically annotate the safety category. These annotations are then ensembled together, using Llama3.1-405B-Instruct to break ties (Combined). Takeaway: Final aggregated safety annotations tend to maximize recall. + +![](images/f9f0c1c9bd1f15fc16731e2aa7d66b6c071d5ab63757f5412e1f1c2f8fd487e7.jpg) + +annotated English samples to other languages. ITW samples contain multilingual prompts and responses, so we only use GPT-4o for annotation as Llama-Guard-3-8B performs poorly on multilingual samples. 
+ +Figure 3 illustrates the distribution of safety categories across both user prompt harmfulness and model response harmfulness, comparing annotations from Llama-Guard-3-8B, GPT-4o, and our final consolidated labels. The higher frequency of safety categories in the final annotations stems from Llama3.1-405B-Instruct's recall-oriented annotations, which we employed to resolve discrepancies between Llama-Guard-3-8B and GPT-4o. Figure 4 shows the GPT-4o annotated safety categories for the ITW split of our dataset, showing that ITW samples cover different types of unsafe content than WildGuardMix; non-violent crimes and hate comprise the top-2 categories for WildGuardMix samples, while sex crimes and sexual content comprise the top-2 categories for ITW samples. + +# 2.4 Human Validation + +To validate the translation quality and the generated safety labels, we conduct human validation across all 16 languages. Due to budget constraints, we randomly sample 50 data points per language, ensuring a balanced distribution across PGMix (train) and PGPrompts (test), harmful and harmless labels, as well as user prompts and model responses. We recruit workers from Prolific, filtering them based on their proficiency in each language. Each data point is evaluated by three annotators. + +For each data point, we ask the annotators to assess the following. + +1. Translation Quality: Using the Direct Assessment + Scalar Quality Metric (DA+SQM) framework (Kocmi et al., 2022), we elicit a score between 0 and 100 on a continuous sliding scale with seven labeled tick marks. +2. Safety Label for the Source Sentence: Annotators assign a label of either 'harmful' or 'safe' for the source sentence in English. +3. Safety Label for the Translated Sentence: Annotators assign a 'harmful' or 'safe' label for the corresponding translation. + +Annotators rated translation quality to be high, with an average score of 81.15 across all 16 languages. 
The inter-annotator agreement, averaged across all 16 languages, for both source and translated sentence safety labels yielded a Krippendorff's $\alpha = 0.46$ . Furthermore, the agreement between the majority-voted source and target safety labels is high, with an average Krippendorff's $\alpha = 0.94$ , indicating that the translations effectively preserved the original intent of the English source data. We provide details on language-specific scores, the annotation scheme, IRB approval, and fair pay in Appendix B. + +![](images/86d7b5bd1f6dc6ae24ed987b871645b581f681659f8d1f3bb052cbaa5de2d04c.jpg) +Figure 4: Safety category distributions for PGMix ITW samples. + +# 3 POLYGUARD: A 17-Language Safety Moderation Tool + +To build POLYGUARD, we fine-tune Qwen2.5-7B-Instruct (Yang et al., 2024a) and Ministral-8B-Instruct-2410, both of which have been shown to have state-of-the-art performance in multilingual knowledge and commonsense, code, and math settings (Qwen; Mistral). We refer to these models as PG Qwen2.5 and PG Ministral In addition, we also fine-tune Qwen2.5-0.5B-Instruct to build PG Smol. + +The models are fine-tuned on PGMix using Low-Rank Adapters (Hu et al., 2022). We follow Han et al. (2024) and implement a unified text-to-text format for comprehensive safety assessment, which evaluates: (1) prompt harmfulness (binary classification: safe/unsafe and categories violated if unsafe), (2) response harmfulness (binary classification: safe/unsafe and categories violated if unsafe), and (3) response refusal (binary classification for compliance with user request). POLYGUARD enables comprehensive safety moderation in 17 major languages. We provide detailed training specifications in Appendix C. + +# 4 Results & Research Questions + +A multilingual system must be robust; that is, it should perform consistently on data belonging to different distributions (sources and languages). 
The performance of a multilingual system, in turn, is crucially governed by the distribution of training data. Hence, we study the performance of POLYGUARD on POLYGUARDPROMPTS and multiple out-of-distribution evaluation benchmarks, and the influence of ITW samples and low-quality translations on model performance. We perform one run per evaluation due to computational constraints. + +Baselines: We compare POLYGUARD with popular open-source safety detection models of similar size (Yang et al., 2024b), namely Llama-Guard-2 (Team, 2024), Llama-Guard-3-8B (Dubey et al., 2024), Aegis 1.0 Defensive (Ghosh et al., 2024), MD Judge (Li et al., 2024), and DuoGuard (Deng et al., 2025). We also benchmark proprietary models, namely Perspective API7, OpenAI Omni Moderation8, and Google Moderation9. + +# 4.1 How do PG models perform on the in-distribution PGPrompts benchmark? + +We first evaluate PG and open-source baselines on POLYGUARDPROMPTs benchmark, comprising 29K samples, using the following metrics: (1) for binary tasks of prompt harmfulness, response harmfulness, and response refusal, we use F1 score for the positive label (unsafe for harmfulness and yes for response refusal), and (2) for the tasks of prompt violations and response violations, we compare the list of ground truth and predicted categories using Exact Match and Jaccard Similarity. + +# PG models based on Qwen2.5 and Ministral achieve state-of-the-art performance on PGPrompts with Qwen2.5 performing marginally better. PG Smol outperforms DuoGuard, + +
ModelHarmful Request F1 ScoreResponse Refusal F1 ScoreHarmful Response F1 ScorePrompt Safety ViolationsResponse Safety Violations
Exact MatchJaccardExact MatchJaccard
Aegis-Defensive66.45------
MD Judge43.54-49.12----
Llama Guard 260.87-63.62----
Llama Guard 367.98-65.7471.9874.5987.2488.37
DuoGuard62.59-37.99----
PG Qwen2.5 7B (Ours)87.1283.5974.0880.8785.4486.6788.79
PG Ministral (Ours)86.0284.4573.7579.9284.3086.8588.78
PG Smol (Ours)83.7681.3666.8277.0281.5184.0585.92
+ +its similar size counterpart (Table 1). Aegis Defensive supports only a single text as input and is hence evaluated for Harmful Request only. Since the remaining baselines do not explicitly support Harmful Response, we approximate the prediction by executing them on prompt + response. None of the baselines support the Response Refusal task. Out of all baselines, the safety category taxonomy is the same for Llama-Guard-3 and PG. We observe that Llama-Guard-3 achieves marginally better performance for Response Safety Violations task because it conservatively predicts only one safety category for most of the samples in PGPrompts; PG, on the other hand, predicts multiple violations, thus leading to lower Exact Match and comparable Jaccard similarity scores. + +# 4.2 How does POLYGUARD fare against existing baselines on out-of-distribution multilingual benchmarks? + +Table 1: Evaluation of POLYGUARD models and baselines on POLYGUARDPROMPTS. Take-away: PG models outperform baselines on in-distribution data. + +
TypeModelRTP-LX En.RTP-LX Mul.Mod. En.Mod. Mul.XS En. (LG)XS Mul. (LG)XS En. (Aegis)XS Mul. (Aegis)MJ En. (LG)MJ Mul. (LG)MJ En. (Aegis)MJ Mul. (Aegis)Avg
Open -WeightAegis-Defensive84.2383.2171.1359.2266.5935.4769.4636.7590.9179.5290.6179.3770.54
MD Judge85.2838.6079.8661.4669.0017.2269.5617.7191.2138.4790.9137.9758.10
Llama Guard 239.4734.9975.8372.5553.7022.3250.5722.5677.5262.3876.8661.5654.19
Llama Guard 348.5144.8778.7373.9860.8425.7057.5026.9879.9278.1479.6777.5261.03
Duo Guard91.8350.4670.8549.4461.1626.0364.8327.3189.1841.8489.2641.4458.64
Closed -SourcePerspective API97.0981.9769.4064.1927.646.6433.926.8553.7945.3753.2344.7348.73
OpenAI Omni87.5274.1074.4368.0858.0222.4860.1123.5282.5966.9482.7366.9463.95
Google Mod.90.4483.2159.6453.8950.4441.8455.7144.7983.1480.8583.6681.0067.38
OursPG Qwen2.591.3483.2174.3969.5172.0735.3374.9337.1393.9386.4493.9786.3374.88
PG Ministral87.2579.5874.9070.5171.3034.9374.0736.6895.7183.1195.3983.0273.87
PG Smol92.371.5669.363.0070.2833.2274.3835.1994.3973.5993.7273.3470.36
+ +Table 2: F1 scores of safety detectors on Multilingual Guardrail Test Suite; metrics are in bold and underlined for the best second-best performing models respectively. Mod.=Moderation, XS=XSafety, MJ=MultiJail, En.=English, Mul.=Multilingual, LG=Llama Guard. Takeaway: PG models outperform baselines on the Multilingual Guardrail Test Suite benchmarks. + +Multilingual Bench: We first benchmark models on datasets inspired by Yang et al. (2024b). This comprises multilingual toxicity and safety datasets, namely RTP-LX (de Wynter et al., 2024), OpenAI Moderation (Markov et al., 2023), $^{10}$ XSafety (Wang et al., 2023), and MultiJail (Deng et al., 2024). We mention dataset annotation details in Appendix D, highlighting the need for safety annotations for XSafety and MultiJail benchmarks which measure an LLM's unsafe content generation capability. + +Patronus AI Bench: We also evaluate models using the recall score on the benchmarks reported by PatronusAI (2024), consisting of toxic/unsafe samples from English and multi- + +lingual toxicity and safety datasets. We perform our evaluations on all samples instead of a small subset. Appendix E contains details about the benchmark. + +Results show that our PG models outperform the baselines on most datasets, achieving higher scores for the unsafe class (Table 2). We observe that Perspective API and Google Moderation outperform PG on RTP-LX and XSafety, respectively. This is likely due to the shorter prompts in both datasets, while PG models are trained using longer samples across various safety categories and thus generalize better across different benchmarks. PG models also outperform existing detectors on safety datasets in the Patronus AI benchmark and also achieve the best average performance (Table 3). + +
TypeModeltoxic-text-enjigsawukr-toxicitythai-toxicity-tweettoxic-text-pttoxic-chatBeaver TailsSalad-DataAvg
Open-WeightAegis-Defensive80.3279.2762.8067.2986.54--91.6477.98
MD Judge68.4573.405.800.8056.8663.5481.4196.6855.87
Llama Guard 223.7320.676.324.8353.5123.1759.2016.1425.95
Llama Guard 340.0327.209.6011.5053.7827.3052.6829.4231.43
Duo Guard93.6593.180.729.2774.2254.1787.5470.7060.43
Closed-SourcePerspective API77.2086.20--93.0015.8923.001.8037.14
OpenAI Omni54.2086.8041.6034.0099.8046.3567.8045.8059.54
Google Mod.95.2098.0086.6041.8097.6069.2777.6027.2074.16
OursPG Qwen2.585.3283.4765.2446.4784.2697.6590.6597.0881.27
PG Ministral82.6079.1155.5235.7680.5197.3990.5396.8877.29
PG Smol89.5785.7259.1637.2081.8496.1084.6096.4278.83
+ +# 4.3 Are PG models robust? + +We study the average performance of the PG models trained using 3 datasets: only translated data, only ITW data, and translated + ITW data. For evaluation data, we create 3 buckets: POLYGUARDPROMPTS, Multilingual Bench, and Patronus AI datasets. + +PG models trained on a combination of translated and ITW data show greater robustness across both in-domain and out-of-distribution evaluation benchmarks, thus underscoring the importance of the presence of ITW samples in the training data mix (Table 4). Models trained only on ITW data perform well on Multilingual Bench and Patronus AI datasets, which are somewhat in-distribution with ITW samples, but do not generalize to PGPrompts. + +Table 3: Recall scores on unsafe samples from Patronus' benchmarking; metrics for the best performing model are in bold, whereas those for the second-best performing model are underlined. Takeaway: PG models outperform baselines on Patronus AI's benchmarks. + +
POLYGUARDTraining DataPGPromptsMultilingual BenchPatronus AI
Qwen2.5Translated84.9574.5679.79
ITW64.6974.6382.26
Translated + ITW83.7974.8881.27
MinistralTranslated84.3273.8677.07
ITW63.1175.3585.76
Translated + ITW83.4473.8777.29
SmolTranslated82.2269.9974.84
ITW59.465.0872.21
Translated + ITW80.0670.3578.82

Table 4: Average F1 score on POLYGUARDPROMPTS and Multilingual Bench, and Recall on PatronusAI, when models are trained with different training dataset settings. Underlined values represent in-distribution evaluations. Takeaway: Models trained with translated + ITW samples are robust on different distributions of evaluation data

Furthermore, we investigate in detail the influence of the presence of ITW data in our training data mix for each benchmark dataset (Figure 5). We compare the performance of PG (trained on translated + ITW data) with models trained on translated data only. We observe that the performance of Qwen2.5 degrades for most of the datasets when ITW data are absent from the training mix. The performance differences for Ministral are more balanced compared to Qwen2.5, that is, both improvement and degradation are observed across the evaluation datasets. The introduction of ITW data benefits the performance of the ToxicChat benchmark (Lin et al., 2023) the most for both models, since ITW data is most aligned with the ToxicChat benchmark.

# 4.4 How does performance vary on English vs Translated vs Code-Switched data?

We study the performance variation of models on code-switched data, which consists of tokens belonging to different languages but in the same document. Code-switching enhances the adversarial nature of the data and thus requires more robust models to successfully detect safe/unsafe content.

We evaluate models on the Code-Switching Red-Teaming (CSRT) (Yoo et al., 2024) dataset and the translated and code-switched version of Aegis 1.0 (Ghosh et al., 2024) as provided by Yang et al. (2024b). Since CSRT also evaluates LLMs' tendency to generate unsafe content, we use the same automatic annotation pipeline as described in Appendix D.

In all settings, PG models outperform baselines, showing that our moderation models are more robust (Table 5). 
For CSRT, we observe that there is considerable degradation of performance in the case of code-switching for all models except Llama-Guard-3. For Aegis 1.0, there is a performance drop from English to the translated version. The performance increases for the code-switched version but is lower than on English data. + +
TypeModelCSRT English (LG)CSRT English (Aegis)CSRT Code-switch (LG)CSRT Code-switch (Aegis)Aegis English*Aegis Translated*Aegis Code-switch*Avg
Open -WeightAegis-Defensive90.9190.6181.3881.5383.8975.1580.3583.40
MD Judge91.2190.9150.0050.0082.9842.5474.0668.81
Llama Guard 277.5276.8665.8864.7960.8251.6959.1665.25
Llama Guard 379.6679.4279.8379.1667.3962.1566.8673.50
Duo Guard89.1852.8289.2652.2883.3759.1073.4971.36
Closed -SourcePerspective API53.7953.2332.5231.7531.1526.1127.2636.54
OpenAI Omni82.8382.9774.2474.0373.3063.8268.1474.19
Google Mod.83.1483.6682.1981.9474.5473.6072.8978.85
OursPG Qwen2.594.1093.7888.5587.8887.8583.0085.1388.61
PG Ministral95.1995.2290.0289.3586.9681.1883.8188.82
PG Smol94.3993.7284.1383.8684.7172.8980.3284.86
+ +Table 5: F1 scores comparison on English only, translated, and code-switched data; metrics for the best performing model are in bold, whereas those for the second-best performing model are underlined. * represent results averaged across 3 annotations, LG=Llama Guard Takeaway: All models suffer performance degradation for code-switched data, with PG models outperforming baselines. + +# 4.5 How is performance affected by removing low-quality translated data? + +Data quality plays an important role in the training of any machine learning model. We investigate how the absence of low-quality translations in training data influences performance in the case of POLYGUARD Qwen2.5 and Ministral. Due to time and budget constraints, we use GPT-4o annotations as a proxy for human-evaluated translation quality and distill them for cost-effective annotations (details in Appendix F). + +Empirical evaluations show that the elimination of low-quality translations does not necessarily improve model performance (Figure 9, Appendix F) since contrastive trends + +
ModelAverageStd Dev
POLYGUARDQwen2.587.018.27
POLYGUARDMinistral84.0412.25
POLYGUARDSmol65.2525.02
+ +Table 6: Recall scores for POLYGUARD models on human-written samples from the Aya RedTeam benchmark. Takeaway: POLYGUARD models generalize on data from different distributions despite being trained only on machine-translated data. + +are observed for Qwen2.5 and Ministral. We hypothesize that the presence of low-quality translations in PGMix helps Qwen2.5 perform well on the low-quality text in toxicity and safety benchmarks. + +# 4.6 Does POLYGUARD superficially align with artifacts of machine-translated text only? + +The use of machine-translated data for training POLYGUARD models can lead to the hypothesis that models learn only to rely on machine-translation artifacts in the data to evaluate safety. To investigate if this behavior exists, we evaluate our models on the Aya Red-teaming dataset (Ahmadian et al., 2024), which consists of manually created 7,419 samples in 8 languages, thus lacking the noise patterns present in machine-translated texts. We do not observe empirical evidence supporting the hypothesis (Table 6). + +# 5 POLYGUARD Runtime Comparison + +We have trained and open-sourced models of three sizes (0.5B, 7B, and 8B). While all three can run on consumer hardware, the 0.5B can benefit on-device or latency-critical applications. We also test the latency of our models on 7419 samples from the Aya RedTeaming dataset (Ahmadian et al., 2024) on an NVIDIA L40S GPU using VLLM (Table 7), and find that our 0.5B model has a high throughput. However, our 7B and + +8B models run comparatively slower than their similarly sized Llama Guard counterparts. Compared to Llama Guard, POLYGUARD models solve more tasks, and thus require longer prompts and generate more output tokens, which leads to increased runtime. + +![](images/7aa7eb18842bdeb0a90f34bf39ebc26933d8984b21590840eab966e47578f07f.jpg) +Figure 5: Performance difference on removing ITW data Takeaway: Removal of ITW data generally degrades model performance by reducing training data diversity. + +
ModelSizeInput TokensOutput TokensTime (m:ss)
Llama Guard 28B1575800275362:13
Llama Guard 38B1657409363642:14
POLYGUARD Smol0.5B18702062393370:31
POLYGUARD Qwen2.57B18702062430433:27
POLYGUARD Ministral8B18810522424263:58
+ +Table 7: Latency comparison of POLYGUARD models on Aya RedTeaming Takeaway: Smol is highly efficient, whereas Qwen and Ministral are slower than LlamaGuards as POLYGUARD models solve multiple tasks. + +# 6 Background & Related Work + +Safety Training Datasets and Safety Evaluations AI Safety, the field of research focused on ensuring that AI systems are developed and deployed in a manner that is trustworthy, responsible, reliable, and beneficial to humans (Chen et al., 2024), has become widely studied in recent years (Chua et al., 2024; Hendrycks, 2025; Bengio et al., 2025; Bullwinkel et al., 2025). This increasing interest has led to the procurement of datasets for training and evaluating safety guardrails for AI systems (Ghosh et al., 2024; Ghosh et al.; Han et al., 2024; Lin et al., 2023; Ji et al., 2023; Li et al., 2024). Similarly, safety benchmarks have been curated to evaluate the safety risks exhibited by AI systems (Xie et al., 2024; Mazeika et al., 2024; Jain et al., 2024; Kumar et al., 2024; Yoo et al., 2024; Zeng et al., 2024b; Zhang et al., 2024a;b; Tan et al., 2024). However, almost all of the aforementioned datasets are limited to the English or Chinese language only or focus on specific subsets of AI safety Jain et al. (2024). + +Safety Moderation Tools Current open-weight safety systems rely on either proprietary datasets (Inan et al., 2023; Zeng et al., 2024a) or previously mentioned English-centric datasets (Ghosh et al., 2024; Li et al., 2024; Han et al., 2024). Although these LLM-based classifiers possess inherent multilingual capabilities, their performance is constrained by their predominantly English training data (Han et al., 2024; Ghosh et al.). Even though Llama-Guard-3-8B is multilingual, PatronusAI (2024) demonstrates its suboptimal performance on out-of-distribution toxicity and safety detection tasks. 
Additionally, existing models face structural limitations; most are restricted to binary safety classification (with WildGuardMix (Han et al., 2024) being a notable exception), or ignore the structure of user-LLM interactions by processing only a single text at a time (Aegis 1.0 Ghosh et al. (2024) and DuoGuard Deng et al. (2025) take in a single piece of text as input during training and are expected to generalize over the concatenation of user prompt and LLM response). + +# 7 Conclusion + +We present POLYGUARDMIX, the first massive multilingual safety detection training dataset, comprising 1.91M user-LLM interactions across 17 languages. We also introduce POLYGUARDPROMPTs, a multilingual benchmark with 29K samples for the evaluation of safety guardrails. Further, we train robust multilingual LLM-based safety detectors, POLYGUARD, which perform better or comparably to existing open-weight and proprietary safety detectors across numerous evaluation benchmarks belonging to different data distributions. + +# Ethics Statement + +Although POLYGUARD demonstrates state-of-the-art performance for multilingual safety detection, it may occasionally produce incorrect predictions. Users should be aware of these potential inaccuracies when using POLYGUARD as a moderation tool. + +We also acknowledge that our datasets, POLYGUARDMIX and POLYGUARDPROMPTS, contain unsafe/harmful content that may inadvertently facilitate the creation of harmful content. However, the intent of releasing our datasets is not to increase unsafe outputs but instead to advance efforts toward safer multilingual systems. As a safety measure, we plan to implement restrictions on the use of our datasets. + +# Acknowledgments + +This research was supported in part by Google Jigsaw, DSO National Laboratories and Microsoft's Accelerating Foundation Models Research program. 

Data We express our gratitude to the authors whose meticulous efforts were instrumental in the creation of our data set: WildGuardMix (Han et al., 2024), LMSys-Chat-1M (Zheng et al., 2023) and WildChat (Zhao et al., 2024). 

Software and Models We would like to thank the authors of TowerInstruct-7B-v0.2 (Alves et al., 2024) and NLLB-3.3B (Team et al., 2022) which we use for automatic translations, contributors and maintainers of vLLM (Kwon et al., 2023) and LiteLLM $^{12}$ which we leverage to generate continuations from models, and OpenRLHF (Hu et al., 2024) which we use to fine-tune models. Finally, we thank Jigsaw for providing access to Perspective API. 

# References 

Roee Aharoni, Melvin Johnson, and Orhan Firat. Massively multilingual neural machine translation. In Jill Burstein, Christy Doran, and Thamar Solorio (eds.), Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pp. 3874-3884, Minneapolis, Minnesota, June 2019. Association for Computational Linguistics. doi: 10.18653/v1/N19-1388. URL https://aclanthology.org/N19-1388/.
Arash Ahmadian, Beyza Ermis, Seraphina Goldfarb-Tarrant, Julia Kreutzer, Marzieh Fadaee, Sara Hooker, et al. The multilingual alignment prism: Aligning global and local preferences to reduce harm. arXiv preprint arXiv:2406.18682, 2024.
Duarte M Alves, José Pombal, Nuno M Guerreiro, Pedro H Martins, João Alves, Amin Farajian, Ben Peters, Ricardo Rei, Patrick Fernandes, Sweta Agrawal, et al. Tower: An open multilingual large language model for translation-related tasks. arXiv preprint arXiv:2402.17733, 2024.
Yoshua Bengio, Soren Mindermann, Daniel Privitera, Tamay Besiroglu, Rishi Bommasani, Stephen Casper, Yejin Choi, Philip Fox, Ben Garfinkel, Danielle Goldfarb, et al. International ai safety report. arXiv preprint arXiv:2501.17805, 2025. 
+Blake Bullwinkel, Amanda Minnich, Shiven Chawla, Gary Lopez, Martin Pouliot, Whitney Maxwell, Joris de Gruyter, Katherine Pratt, Saphir Qi, Nina Chikanov, et al. Lessons from red teaming 100 generative ai products. arXiv preprint arXiv:2501.07238, 2025. +Chen Chen, Ziyao Liu, Weifeng Jiang, Si Qi Goh, and KwoK-Yan Lam. Trustworthy, responsible, and safe ai: A comprehensive architectural framework for ai safety with challenges and mitigations. arXiv preprint arXiv:2408.12935, 2024. +Jaymari Chua, Yun Li, Shiyi Yang, Chen Wang, and Lina Yao. Ai safety in generative ai large language models: A survey. arXiv preprint arXiv:2407.18369, 2024. + +Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer, and Veselin Stoyanov. Unsupervised cross-lingual representation learning at scale. In Dan Jurafsky, Joyce Chai, Natalie Schluter, and Joel Tetreault (eds.), Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pp. 8440-8451, Online, July 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.acl-main.747. URL https://aclanthology.org/2020.acl-main.747/. +Marta Costa-jussà, Eric Smith, Christophe Ropers, Daniel Licht, Jean Maillard, Javier Ferrando, and Carlos Escolano. Toxicity in multilingual machine translation at scale. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Findings of the Association for Computational Linguistics: EMNLP 2023, pp. 9570-9586, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023-findings-emnlp.642. URL https://aclanthology.org/2023-findings-emnlp.642. +Aida Mostafazadeh Davani, Sagar Gubbi Venkatesh, Sunipa Dev, Shachi Dave, and Vinodkumar Prabhakaran. Genil: A multilingual dataset on generalizing language. In First Conference on Language Modeling, 2024. URL https://openreview.net/forum?id=kLH4ccaL21. 
+Adrian de Wynter, Ishaan Watts, Nektar Ege Altintoprak, Tua Wongsangaroonsri, Minghui Zhang, Noura Farra, Lena Baur, Samantha Claudet, Pavel Gajdusek, Can Gören, et al. Rtplx: Can llms evaluate toxicity in multilingual scenarios? arXiv preprint arXiv:2404.14397, 2024. +Yihe Deng, Yu Yang, Junkai Zhang, Wei Wang, and Bo Li. Duoguard: A two-player rl-driven framework for multilingual llm guardrails. arXiv preprint arXiv:2502.05163, 2025. +Yue Deng, Wenxuan Zhang, Sinno Jialin Pan, and Lidong Bing. Multilingual jailbreak challenges in large language models. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=vESNKdEMGp. +Jesse Dodge, Maarten Sap, Ana Marasovic, William Agnew, Gabriel Ilharco, Dirk Groeneweld, Margaret Mitchell, and Matt Gardner. Documenting large webtext corpora: A case study on the colossal clean crawled corpus. arXiv preprint arXiv:2104.08758, 2021. +Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024. +Shaona Ghosh, Prasoon Varshney, Makes Narsimhan Sreedhar, Aishwarya Padmakumar, Traian Rebedea, Jibin Rajan Varghese, and Christopher Parisien. Aegis2.0: A diverse ai safety dataset and risks taxonomy for alignment of llm guardrails. In Neurips Safe Generative AI Workshop 2024. +Shaona Ghosh, Prasoon Varshney, Erick Galinkin, and Christopher Parisien. Aegis: Online adaptive ai content safety moderation with ensemble of llm experts. arXiv preprint arXiv:2404.05993, 2024. +Daniil Gurgurov, Tanja Bäumel, and Tatiana Anikina. Multilingual large language models and curse of multilinguality. 2024. doi: 10.48550/ARXIV.2406.10602. URL https://arxiv.org/abs/2406.10602. +Seungju Han, Kavel Rao, Allyson Ettinger, Liwei Jiang, Bill Yuchen Lin, Nathan Lambert, Yejin Choi, and Nouha Dziri. 
Wildguard: Open one-stop moderation tools for safety risks, jailbreaks, and refusals of llms. arXiv preprint arXiv:2406.18495, 2024. +Dan Hendrycks. Introduction to ai safety, ethics, and society, 2025. +Edward J Hu, yelong shen, Phillip Wallis, Zeyuan Allen-Zhu, Yanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. LoRA: Low-rank adaptation of large language models. In International Conference on Learning Representations, 2022. URL https://openreview.net/forum?id=nZeVKeeFYf9. + +Jian Hu, Xibin Wu, Zilin Zhu, Xianyu, Weixun Wang, Dehao Zhang, and Yu Cao. Openrlhf: An easy-to-use, scalable and high-performance rlhf framework. arXiv preprint arXiv:2405.11143, 2024. +Hakan Inan, Kartikeya Upasani, Jianfeng Chi, Rashi Rungta, Krithika Iyer, Yuning Mao, Michael Tontchev, Qing Hu, Brian Fuller, Davide Testuggine, et al. Llama guard: Llm-based input-output safeguard for human-ai conversations. arXiv preprint arXiv:2312.06674, 2023. +Devansh Jain, Priyanshu Kumar, Samuel Gehman, Xuhui Zhou, Thomas Hartvigsen, and Maarten Sap. Polyglotoxicityprompts: Multilingual evaluation of neural toxic degeneration in large language models. arXiv preprint arXiv:2405.09373, 2024. +Jiaming Ji, Mickel Liu, Juntao Dai, Xuehai Pan, Chi Zhang, Ce Bian, Boyuan Chen, Ruiyang Sun, Yizhou Wang, and Yaodong Yang. Beavertails: Towards improved safety alignment of LLM via a human-preference dataset. In Thirty-seventh Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2023. URL https://openreview.net/forum?id=g0QovXbFw3. +Tom Kocmi, Rachel Bawden, Ondrej Bojar, Anton Dvorkovich, Christian Federmann, Mark Fishel, Thamme Gowda, Yvette Graham, Roman Grundkiewicz, Barry Haddow, Rebecca Knowles, Philipp Koehn, Christof Monz, Makoto Morishita, Masaaki Nagata, Toshiaki Nakazawa, Michal Novák, Martin Popel, and Maja Popovic. Findings of the 2022 conference on machine translation (wmt22). In Conference on Machine Translation, 2022. 
URL https://api.semanticscholar.org/CorpusID:256461033. +Suhas Kotha, Jacob M. Springer, and Aditi Raghunathan. Understanding catastrophic forgetting in language models via implicit inference. ArXiv, abs/2309.10105, 2023. URL https://api.semanticscholar.org/CorpusID:262054014. +Priyanshu Kumar, Elaine Lau, Saranya Vijayakumar, Tu Trinh, Scale Red Team, Elaine Chang, Vaughn Robinson, Sean Hendryx, Shuyan Zhou, Matt Fredrikson, et al. Refusal-trained llms are easily jailbroken as browser agents. arXiv preprint arXiv:2410.13886, 2024. +Woosuk Kwon, Zhuohan Li, Siyuan Zhuang, Ying Sheng, Lianmin Zheng, Cody Hao Yu, Joseph E. Gonzalez, Hao Zhang, and Ion Stoica. Efficient memory management for large language model serving with pagedattention. In Proceedings of the ACM SIGOPS 29th Symposium on Operating Systems Principles, 2023. +Lijun Li, Bowen Dong, Ruohui Wang, Xuhao Hu, Wangmeng Zuo, Dahua Lin, Yu Qiao, and Jing Shao. SALAD-bench: A hierarchical and comprehensive safety benchmark for large language models. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar (eds.), Findings of the Association for Computational Linguistics: ACL 2024, pp. 3923-3954, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.findings-acl.235. URL https://aclanthology.org/2024.findings-acl.235. +Zi Lin, Zihan Wang, Yongqi Tong, Yangkun Wang, Yuxin Guo, Yujia Wang, and Jingbo Shang. ToxicChat: Unveiling hidden challenges of toxicity detection in real-world user-AI conversation. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Findings of the Association for Computational Linguistics: EMNLP 2023, pp. 4694-4702, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.findings-emnlp.311. URL https://aclanthology.org/2023.findings-emnlp.311. +AI @ Meta Llama Team. The llama 3 herd of models, 2024. URL https://arxiv.org/abs/2407.21783. 
+Todor Markov, Chong Zhang, Sandhini Agarwal, Florentine Eloundou Nekoul, Theodore Lee, Steven Adler, Angela Jiang, and Lilian Weng. A holistic approach to undesired content detection in the real world. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 37, pp. 15009-15018, 2023. + +Mantas Mazeika, Long Phan, Xuwang Yin, Andy Zou, Zifan Wang, Norman Mu, Elham Sakaehie, Nathaniel Li, Steven Basart, Bo Li, David Forsyth, and Dan Hendrycks. Harmbench: a standardized evaluation framework for automated red teaming and robust refusal. In Proceedings of the 41st International Conference on Machine Learning, ICML'24. JMLR.org, 2024. +Mistral. Un ministral, des ministraux. URL https://mistral.ai/en/news/ministraux. +Andrew Ng. Agentic translation. URL https://github.com/andrewyng/translation-agent. +PatronusAI. Llama guard is off duty. https://www.patronus.ai/blog/llama-guard-is-off-duty, 2024. +Qwen. Qwen2.5: A party of foundation models! URL https://qwenlm.github.io/blog/qwen2.5/. +Maarten Sap, Saadia Gabriel, Lianhui Qin, Dan Jurafsky, Noah A. Smith, and Yejin Choi. Social bias frames: Reasoning about social and power implications of language. In Dan Jurafsky, Joyce Chai, Natalie Schluter, and Joel Tetreault (eds.), Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pp. 5477-5490, Online, July 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.acl-main.486. URL https://aclanthology.org/2020.acl-main.486/. +Khetam Al Sharou and Lucia Specia. A taxonomy and study of critical errors in machine translation. In Helena Moniz, Lieve Macken, Andrew Rufener, Loici Barrault, Marta R. Costa-jussa, Christophe Declercq, Maarit Koponen, Ellie Kemp, Spyridon Pilos, Mikel L. Forcada, Carolina Scarton, Joachim Van den Bogaert, Joke Daems, Arda Tezcan, Bram Vanroy, and Margot Fonteyne (eds.), Proceedings of the 23rd Annual Conference of the European Association for Machine Translation, pp. 
171-180, Ghent, Belgium, June 2022. European Association for Machine Translation. URL https://aclanthology.org/2022.eamt-1.20. +Lucia Specia, Frédéric Blain, Marina Fomicheva, Chrysoula Zerva, Zhenhao Li, Vishrav Chaudhary, and André F. T. Martins. Findings of the WMT 2021 shared task on quality estimation. In Loic Barrault, Ondrej Bojar, Fethi Bougares, Rajen Chatterjee, Marta R. Costa-jussa, Christian Federmann, Mark Fishel, Alexander Fraser, Markus Freitag, Yvette Graham, Roman Grundkiewicz, Paco Guzman, Barry Haddow, Matthias Huck, Antonio Jimeno Yepes, Philipp Koehn, Tom Kocmi, Andre Martins, Makoto Morishita, and Christof Monz (eds.), Proceedings of the Sixth Conference on Machine Translation, pp. 684-725, Online, November 2021. Association for Computational Linguistics. URL https://aclanthology.org/2021.wmt-1.71. +Yingshui Tan, Boren Zheng, Baihui Zheng, Kerui Cao, Huiyun Jing, Jincheng Wei, Jiaheng Liu, Yancheng He, Wenbo Su, Xiangyong Zhu, et al. Chinese safetyqa: A safety short-form factuality benchmark for large language models. arXiv preprint arXiv:2412.15265, 2024. +Llama Team. Meta llama guard 2. https://github.com/meta-llama/PurpleLlama/blob/main/Llama-Guard2/MODEL_CARD.md, 2024. +NLLB Team, Marta R. Costa-jussà, James Cross, Onur Çelebi, Maha Elbayad, Kenneth Heafield, Kevin Heffernan, Elahe Kalbassi, Janice Lam, Daniel Licht, Jean Maillard, Anna Sun, Skyler Wang, Guillaume Wenzek, Al Youngblood, Bapi Akula, Loic Barrault, Gabriel Mejia Gonzalez, Prangthip Hansanti, John Hoffman, Semarley Jarrett, Kaushik Ram Sadagopan, Dirk Rowe, Shannon Spruit, Chau Tran, Pierre Andrews, Necip Fazil Ayan, Shruti Bhosale, Sergey Edunov, Angela Fan, Cynthia Gao, Vedanuj Goswami, Francisco Guzmán, Philipp Koehn, Alexandre Mourachko, Christophe Ropers, Safiyyah Saleem, Holger Schwenk, and Jeff Wang. No language left behind: Scaling human-centered machine translation, 2022. 
+Wenxuan Wang, Zhaopeng Tu, Chang Chen, Youliang Yuan, Jen-tse Huang, Wenxiang Jiao, and Michael R Lyu. All languages matter: On the multilingual safety of large language models. arXiv preprint arXiv:2310.00905, 2023. + +Tinghao Xie, Xiangyu Qi, Yi Zeng, Yangsibo Huang, Udari Madhushani Sehwag, Kaixuan Huang, Luxi He, Boyi Wei, Dacheng Li, Ying Sheng, et al. Sorry-bench: Systematically evaluating large language model safety refusal behaviors. arXiv preprint arXiv:2406.14598, 2024. +Yuemei Xu, Ling Hu, Jiayi Zhao, Zihan Qiu, Yuqi Ye, and Hanwen Gu. A survey on multilingual large language models: Corpora, alignment, and bias. ArXiv, abs/2404.00929, 2024. URL https://api.semanticscholar.org/CorpusID:268819377. +An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024a. +Yahan Yang, Soham Dan, Dan Roth, and Insup Lee. Benchmarking llm guardrails in handling multilingual toxicity. arXiv preprint arXiv:2410.22153, 2024b. +Haneul Yoo, Yongjin Yang, and Hwaran Lee. Code-switching red-teaming: Lm evaluation for safety and multilingual understanding. arXiv preprint arXiv:2406.15481, 2024. +Wenjun Zeng, Yuchi Liu, Ryan Mullins, Ludovic Peran, Joe Fernandez, Hamza Harkous, Karthik Narasimhan, Drew Proud, Piyush Kumar, Bhaktipriya Radharapu, et al. Shieldgemma: Generative ai content moderation based on gemma. arXiv preprint arXiv:2407.21772, 2024a. +Yi Zeng, Yu Yang, Andy Zhou, Jeffrey Ziwei Tan, Yuheng Tu, Yifan Mai, Kevin Klyman, Minzhou Pan, Ruoxi Jia, Dawn Song, et al. Air-bench 2024: A safety benchmark based on risk categories from regulations and policies. arXiv preprint arXiv:2407.17436, 2024b. +Hengxiang Zhang, Hongfu Gao, Qiang Hu, Guanhua Chen, Lili Yang, Bingyi Jing, Hongxin Wei, Bing Wang, Haifeng Bai, and Lei Yang. Chinesesafe: A chinese benchmark for evaluating safety in large language models. arXiv preprint arXiv:2410.18491, 2024a. 
+Wenjing Zhang, Xuejiao Lei, Zhaoxiang Liu, Meijuan An, Bikun Yang, KaiKai Zhao, Kai Wang, and Shiguo Lian. Chisafetybench: A chinese hierarchical safety benchmark for large language models. arXiv preprint arXiv:2406.10311, 2024b. +Wenting Zhao, Xiang Ren, Jack Hessel, Claire Cardie, Yejin Choi, and Yuntian Deng. Wildchat: 1m chatgpt interaction logs in the wild. arXiv preprint arXiv:2405.01470, 2024. +Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Tianle Li, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zhuohan Li, Zi Lin, Eric P Xing, et al. Lmsys-chat-1m: A large-scale real-world llm conversation dataset. arXiv preprint arXiv:2309.11998, 2023. +Xuhui Zhou, Hao Zhu, Akhila Yerukola, Thomas Davidson, Jena D. Hwang, Swabha Swayamdipta, and Maarten Sap. COBRA frames: Contextual reasoning about effects and harms of offensive statements. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Findings of the Association for Computational Linguistics: ACL 2023, pp. 6294–6315, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023-findings-acl.392. URL https://aclanthology.org/2023-findings-acl.392/. + +# A Translation Pipeline and Quality Evaluation + +Our training data translation pipeline segments the source text into chunks using *blingfire*13, translates each chunk independently, and reconstructs the target text by concatenating the translated segments. We utilize this chunking approach to process long texts while respecting the context window constraints of translation models. + +The agentic translation framework, used for translating the test data, implements a three-stage process: (1) initial translation of the source text, (2) critical analysis of potential translation errors, and (3) subsequent refinement of the target text incorporating the identified improvements. This iterative approach helps maintain semantic accuracy and cultural nuances crucial for safety evaluation across languages. 
+ +Automatic evaluation of translation quality is done by prompting GPT-4o as a judge. We design a 3-point Likert scale comprising the following choices: "translation is entirely correct", "translation is almost correct with a few errors", and "translation is bad". We run evaluations for only the translated test set of PGMix to assess the overall translation quality of a model. The evaluation prompts are shown below: + +# System Prompt + +```txt +You are a linguistic expert. Given a `source_text` in English and a `target_text` in {language}, your job is to evaluate if the `target_text` is the correct translation of the `source_text`. +``` + +# User Prompt + +```txt +`source_text`: {src} +`target_text`: {tgt} +``` + +Following is the format of structured generations for translation quality evaluation. We prompt the judge to first reason about the source and target sentences before outputting the verdict. + +```python +class QualityEnum(str, Enum): incorrect = 'translation is bad'; almost_correct = 'translation is almost correct with a few errors'; entirely_correct = 'translation is entirely correct' +class Result(BaseModel): reason: str = Field(description="brief pointers on why the translation is correct or wrong"); verdict: QualityEnum = Field(description="the verdict about the translation quality") +``` + +Tables 8 and 9 show the verdicts of the GPT-4o judge for the human prompt and model response respectively. We observe that TowerInstruct generates higher-quality translations when compared to NLLB for the languages it supports. However, in the case of Hindi (which is not supported by Tower), the quality is poor. + +
LanguageModelEntirely CorrectPartially CorrectBadInvalid Judge Verdict
ZHNLLB636688401-
Tower12023601621
ESNLLB1437218682
Tower1374303471
FRNLLB1406245722
Tower1499177472
DENLLB12753481011
Tower1335323661
KONLLB10754901582
Tower12783361092
ITNLLB1384260801
Tower144222756-
PTNLLB146320260-
Tower153214251-
NLNLLB1339306773
Tower139926462-
RUNLLB1379240106-
Tower1406233851
HINLLB147018669-
Tower72516912
+ +Table 8: GPT-4o Judge verdicts for human prompts translation. Takeaway: TowerInstruct generated more accurate translations than NLLB for supported languages. + +
LanguageModelEntirely CorrectPartially CorrectBadInvalid Judge Verdict
ZHNLLB15311474241
Tower822729174-
ESNLLB858426441-
Tower583105785-
FRNLLB883741101-
Tower481116381-
DENLLB811790124-
Tower625102872-
KONLLB72192084-
Tower7079161011
ITNLLB809566350-
Tower5291103921
PTNLLB8846232162
Tower4891131105-
NLNLLB8287721241
Tower5931049821
RUNLLB906663156-
Tower512112390-
HINLLB128641128
Tower611718
+ +Table 9: GPT-4o Judge verdicts for model generation translation. Takeaway: TowerInstruct generates fewer low-quality translations than NLLB for supported languages. + +# B Human Validation + +We use Prolific14 to collect annotations. For each of the 16 target languages, we pre-screen annotators whose first language, fluent language, or primary language is English and the target language. Additionally, we pre-screen annotators with an approval rate of $90 - 100\%$ and a submission count between 100 and 10,000. Annotators were compensated at the rate of $\$12/$ hr. Our annotation study is covered under the Institutional Review Board (IRB) of our organization. + +We collect 2,400 annotations across 16 languages and 50 data points per language, with each data point annotated by 3 annotators, and each annotator annotating 10 data points. We recruited 191 unique annotators15 via Prolific, spanning across 24 countries. They self-identified as 110 male and 81 female. In terms of ethnicity, they described themselves as 84 White, 79 Black, 12 Mixed, 10 Asian, and 5 Other. + +Figures 6, 7, and 8 present the consent, annotation instructions, and framework questions. The human validation results for each language are shown in Table 10. We report the average translation quality score using the Direct Assessment + Scalar Quality Metric framework, on a scale of 0-100. Inter-annotator agreement is computed using Krippendorff's $\alpha$ for both source and target language safety labels. + +
LanguageAvg. Trans- lation ScoreSource Safety αTarget Safety αSource - Target α
Arabic80.990.410.400.96
Chinese78.550.430.420.91
Czech81.110.470.480.96
Dutch77.150.370.330.96
French82.120.480.471.0
German82.670.440.450.92
Hindi84.720.340.370.96
Italian83.210.380.370.91
Japanese76.390.390.360.76
Korean81.550.430.460.96
Polish80.330.390.400.96
Portuguese81.090.460.450.92
Russian80.440.420.430.96
Spanish84.110.450.441.0
Swedish79.660.360.351.0
Thai78.890.410.420.92
+ +Table 10: Human validation results for translation quality and safety labels. Translation scores are on a 0-100 scale, using the DA+SQM framework. Inter-annotator agreement (Krippendorff's $\alpha$ ) for source and target safety labels is reported, along with agreement between majority-voted source and target labels. + +# C POLYGUARD Training Details + +We train our models using OPENRLHF $^{16}$ on 8 NVIDIA A6000 GPUs. We set LoRA rank to 8 and alpha to 16. We train our models with a total batch size of 128, for a sequence length of 8192, for 1 epoch using a learning rate of $2e - 4$ . The system and user prompts (adapted from WildGuard and Llama Guard v3) used by PG are as follows: + +Instruction and Consent + +# Background: + +Hi! We are a team of researchers who are passionate about making AI systems safer to use across multiple languages. We are trying to test out a few systems for automatically translating sentences across languages to see how well they preserve the original meaning and if they translate unsafe content as well. We appreciate your help in making AI systems safer across multiple cultures and languages! + +# Task Overview: + +In this study, we are interested in making AI systems safer to use in multiple languages. You will be presented with a sentence in English and its machine-generated translation in another language. Your task is to judge the quality of the translation, and if either the original sentence/translation contains any unsafe language. + +# Data collection & sharing: + +We will not ask you for your name, and the data collected in this study will be made identifiable to the best of our extent. We will securely store the data on our servers and only share with qualified researchers (e.g., who want to further the study of hate speech detection). If you later decide that you do not want your responses included in this study, please email so we can exclude your work from the analysis. 
+ +# Contact: + +If you have any questions about this study, you should feel free to ask them by contacting us (via the MTurk interface or via email at: info@mturk.com). If you have questions later, desire additional information, or wish to withdraw your participation please contact e-mail in accordance with the contact information listed above. If you have questions pertaining to your rights as a research participant; or to report concerns to this study, you should contact + +Please do not use AI systems when answering the tasks. + +We will sample some of the responses and manually review them, if we find evidence to show that annotators are not giving proper efforts to this task, we will exclude them from future tasks. + +# I have read and understood the information above, are 18 years or older, and agree to participate in our study. + +Yes + +No + +Move forward + +Figure 6: Annotator Consent. Blacked out for anonymity. + +# Annotation Task + +You will be presented with 2 sentences: an source original sentence in English, and its automatic translation in Thai. Your task is to assess the quality of the translation and if the source/translated sentences contain unsafe language. + +# Quality of Translation + +Evaluate the quality of the translation in Thai. Consider both: + +- Meaning: Does the translation convey and reflect the same meaning as the source sentence in English? +- Fluency: Is the translation easy to read and understand? + +Rate the translation quality on a continuous scale from 0 to 6. + +# Rating Scale: + +0 - No Meaning Preserved: The translation is confusing, makes no sense, or is completely different from the English sentence. Most of the meaning and information is lost. +- 2 - Some Meaning Preserved: The translation captures only part of the meaning but misses key details. It may be hard to understand because of major mistakes like completely incorrect or irrelevant words or very poor grammar. 
+- 4 - Most Meaning Preserved: The translation retains most of the meaning of the English sentence -- the main idea is clear with just a few small mistakes, like minor grammar issues, awkward wording, or slight changes in meaning. +- 6 - Perfect Translation: The translation is clear, natural, and grammatically correct -- same meaning like the source sentence, with no mistakes. + +# Key Tip: + +- You can move the slider to any point on the scale, not just whole numbers (e.g., 5.3). + +# Identifying Unsafe Language + +Please check if the original sentence in English or the translation in Thai contain any unsafe or harmful language. + +# What Counts as Unsafe or Harmful Language? + +A sentence is considered unsafe or harmful if it supports, encourages, or facilitates harmful, illegal, or unethical actions. This includes BOTH: + +Content that is itself harmful: (e.g., promoting violence, supporting crimes, providing inappropriate sexual content, expressing hate speech towards Jews and Muslim communities, revealing confidential information or health records, encouraging misuse of technology). +Sentences that request harmful content: (e.g., making unauthorized requests to edit medical records and prescription, asking for instructions on illegal activities, ways to create or use dangerous weapons, promoting self-harm, ways to exploit children, how to spread false election information, even if the language seems neutral). + +Figure 7: Annotator instructions + +![](images/8c1ec5dc3465a48fd1833e0f2501ef6f8b1d046c8f4ff78cbdf04101d4148ee2.jpg) +Figure 8: Annotator Framework with Example in Thai + +![](images/23369c7221938247f171a9f65b265388e08de765f08367b5bf2d80d1302b1041.jpg) + +# User Prompt + +```txt +Human user: {prompt} AI assistant: {response} +``` + +We train our models $^{17}$ to generate formatted responses containing the user input prompt safety label, policies violated, model response refusal, model response safety label, and policies violated (shown below). 
Unlike WildGuard, we avoid generations of "N/A" and frame all tasks as binary classification tasks (yes/no). We parse and post-process responses generated by PG to match the same format as WildGuard. + +# POLYGUARD Generation + +```txt +Harmful request: yes/no +Request safety violations: None / List of violated categories +Response refusal: yes/no +Harmful response: yes/no +Response safety violations: None / List of violated categories +``` + +# D Out-of-Distribution Benchmarking Dataset Annotations + +In this section, we list the formulation of ground-truth labels for the out-of-distribution benchmarks. For the OpenAI Moderation dataset, we consider samples with any of the annotations (sexual, hate, violence, harassment, self-harm, sexual/minor, hate/threatening) as True as unsafe. For RTP-LX, we consider samples with a Toxicity score above 1 unsafe. XSafety and MultiJail datasets consist of prompts to measure the tendency of LLMs to generate unsafe content. Thus, a few prompts in these datasets are innocuous but could trigger an LLM to generate harmful content. Therefore, we use GPT-4o to determine the safety label of the samples. Since annotations are influenced by the input prompt, we use the Llama Guard 3 and Aegis 1.0 prompts to create two sets of ground-truth labels. + +# E Patronus AI Safety Study + +Patronus AI benchmarked Llama Guard 3 on a small number of samples (500) from various English and multilingual toxicity and safety datasets illustrating its poor recall of unsafe data points (PatronusAI, 2024). Their evaluation benchmark consists of the following datasets available on HuggingfaceHub: + +1. nicholasKluge/toxic-text-en +2. Arsive/toxicity_classification_jigsaw +3. ukr-detect/ukr-toxicity-dataset +4. tmu-nlp/thai_toxicity_tweet +5. nicholasKluge/toxic-text-pt +6. lmsys/toxic-chat +7. PKU-Alignment/BeaverTails +8. 
OpenSafetyLab/Salad-Data + +# F Influence of low-quality translated data + +We distill GPT-4o's knowledge of translation quality into a Qwen2.5 7B classifier to filter out samples with low translation quality. We use the same schema as our translation quality study (Appendix A) to filter for samples where the human prompt and model response are accurately translated. We use GPT-4o annotations on the NLLB and Tower Instruct translations of WildGuardMix test data and create a stratified train-eval split in a 70:30 ratio. Similar to PG, we train a Qwen2.5-based SFT classifier to predict the quality of the translated source document, using the following prompts: + +# System Prompt + +```txt +You are a linguistic expert. Given a `source_text` in English and a `target_text` in {language}, your job is to evaluate if the `target_text` is the correct translation of the `source_text` +``` + +# User Prompt + +```python +`source_text': {source} +`target_text': {target} +``` + +The model is trained on 60,346 training samples and achieves an overall accuracy of $82\%$ on the validation set of 25,863 samples. A complete evaluation report is shown below in Table 11. + +
LabelPrecisionRecallF1Support
Bad7073712066
Partially Correct7663697704
Entirely Correct87939016093
+ +Table 11: Translation Quality Classifier performance metrics + +Removal of low-quality training data does not necessarily improve model performance. Intuitively, the presence of poor-quality translated data should harm model performance. However, PG models show contrastive trends when low-quality samples are removed from the training data mix (Figure 9). The performance of Qwen2.5 degrades for most datasets, whereas the performance of Ministral improves. The performance degradation in the case of Qwen2.5 can be attributed to noisy samples in safety and toxicity evaluation datasets. Harmful text is considered to belong to low-quality data; web-crawls implement word blocklist filters to enhance data quality (Dodge et al., 2021). Thus, we hypothesize that the noise induced by poor translations bridges the gap between training and evaluation data, thus leading to performance improvement. + +# G Limitations + +We describe several limitations of our work. First, we automatically translate English data to other languages using LLMs. However, automatic translations can introduce deviations in toxicity and safety risks due to incorrect translations and hallucinations (Specia et al., 2021; Sharou & Specia, 2022; Team et al., 2022; Costa-jussa et al., 2023). Second, we employ existing safety classifiers and LLMs to automatically annotate safety violation categories, which may introduce biases from these models into our labeled safety categories. We utilize a panel of models to mitigate such biases, but acknowledge the inherent limitations of this methodology. Third, we follow Llama-Guard-3-8B (Dubey et al., 2024) and define + +![](images/e8d291c17038f0e90e663c6a3add7960cc90021d61231e732d1932d341e6d655.jpg) +Figure 9: Performance difference on removing low-quality data. Takeaway: Removal of low-quality training data does not necessarily improve model performance. + +our safety violation taxonomy according to the MLCommons Safety Taxonomy18. 
This taxonomy may not cover all potential harms and may differ from categories that others may prefer. Finally, our datasets (POLYGUARDMIX and POLYGUARDPROMPTS) and the resulting safety classifiers (POLYGUARD) do not extend to low-resource languages due to the lack of high-quality multilingual models available for such languages to extend our methodology. \ No newline at end of file diff --git a/data/2025/2504_04xxx/2504.04377/images/01334081f210a9c5d39bed84a79543b78c394d85d18689b9e3b6b4e720660630.jpg b/data/2025/2504_04xxx/2504.04377/images/01334081f210a9c5d39bed84a79543b78c394d85d18689b9e3b6b4e720660630.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1bb769fcd982375fe68e553fefd67c1f46f873c6 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04377/images/01334081f210a9c5d39bed84a79543b78c394d85d18689b9e3b6b4e720660630.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a824ea4330354dc555119b4bc08a201356d098106371da688d68fd9a6a72037c +size 5352 diff --git a/data/2025/2504_04xxx/2504.04377/images/038615ea481e4df5198d2af6b4e813cc8c85be3aa566f2c629cf2b62c4e725fa.jpg b/data/2025/2504_04xxx/2504.04377/images/038615ea481e4df5198d2af6b4e813cc8c85be3aa566f2c629cf2b62c4e725fa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d9de77e5514045e952086fc68c237c7c4c988022 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04377/images/038615ea481e4df5198d2af6b4e813cc8c85be3aa566f2c629cf2b62c4e725fa.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:88231393685434bcdbce0d5e2df251f5dfcd19353f92e57c4abc74e771e5107c +size 54495 diff --git a/data/2025/2504_04xxx/2504.04377/images/05148bca329f0174cfa52c7f72c148d82cffe6df95917784369a9249a0046357.jpg b/data/2025/2504_04xxx/2504.04377/images/05148bca329f0174cfa52c7f72c148d82cffe6df95917784369a9249a0046357.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cec34b51cee55887bc3154bb4359f065d7603020 --- /dev/null +++ 
b/data/2025/2504_04xxx/2504.04377/images/05148bca329f0174cfa52c7f72c148d82cffe6df95917784369a9249a0046357.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:65ab2d8079426f714fa44b3a2ad4db081c0fa07be9a013369e3c53b4d05a7e00 +size 81570 diff --git a/data/2025/2504_04xxx/2504.04377/images/07b1c8fbc3923a4e6aed477b493bd29125cec7df8131a0b52664d39ca90e3329.jpg b/data/2025/2504_04xxx/2504.04377/images/07b1c8fbc3923a4e6aed477b493bd29125cec7df8131a0b52664d39ca90e3329.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4a18ea0daf3de77db0d96f40c73d2bae23aae936 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04377/images/07b1c8fbc3923a4e6aed477b493bd29125cec7df8131a0b52664d39ca90e3329.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2692a987759cfed437a1b6fcffae9e07404fdb327a21cce6095257b3de8f183b +size 1065 diff --git a/data/2025/2504_04xxx/2504.04377/images/14d692a5dd9fb9f6ca098b1d92717b3f4009134c5c31cf1e964e4a115286a3f3.jpg b/data/2025/2504_04xxx/2504.04377/images/14d692a5dd9fb9f6ca098b1d92717b3f4009134c5c31cf1e964e4a115286a3f3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6bd2f306b410f37e954dc14f46b58739f8ccede7 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04377/images/14d692a5dd9fb9f6ca098b1d92717b3f4009134c5c31cf1e964e4a115286a3f3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:45da226d3dee6d09afcd770937b4784c05345236e942ca2e7ac55a3e3b176996 +size 1100 diff --git a/data/2025/2504_04xxx/2504.04377/images/23369c7221938247f171a9f65b265388e08de765f08367b5bf2d80d1302b1041.jpg b/data/2025/2504_04xxx/2504.04377/images/23369c7221938247f171a9f65b265388e08de765f08367b5bf2d80d1302b1041.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5c80ced05ffb7fbf6973f63d81f2f51b1084f5c0 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04377/images/23369c7221938247f171a9f65b265388e08de765f08367b5bf2d80d1302b1041.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:d3abf62adc48bdce5fde68eeaf9480c6fac66501d86a7a18be83c0d175b3db4c +size 118046 diff --git a/data/2025/2504_04xxx/2504.04377/images/64f1eb9148050067cf1c5ad96d1ffffa8146bc4b4e995ba80a783bd2ca1b0b99.jpg b/data/2025/2504_04xxx/2504.04377/images/64f1eb9148050067cf1c5ad96d1ffffa8146bc4b4e995ba80a783bd2ca1b0b99.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4db4cf002ac0fe06c49b7f03bab8a15adbefb113 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04377/images/64f1eb9148050067cf1c5ad96d1ffffa8146bc4b4e995ba80a783bd2ca1b0b99.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7bc6065c0ff7f038a4b7f016db45ce64493fb8871a925a37cd4698ac14277eea +size 24225 diff --git a/data/2025/2504_04xxx/2504.04377/images/65c3265b462c4ea8c93985a5838b37d9d9f2ecd49b1593fa2b9e5cf51fe3081e.jpg b/data/2025/2504_04xxx/2504.04377/images/65c3265b462c4ea8c93985a5838b37d9d9f2ecd49b1593fa2b9e5cf51fe3081e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9dfd414a5a9c3a469afeff4f0df85b148b0e2270 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04377/images/65c3265b462c4ea8c93985a5838b37d9d9f2ecd49b1593fa2b9e5cf51fe3081e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d8115d7f7a47b18b7cb02e5e094e1b49c4a135a7d40be93360ff47ca4d8a1fba +size 109364 diff --git a/data/2025/2504_04xxx/2504.04377/images/7aa7eb18842bdeb0a90f34bf39ebc26933d8984b21590840eab966e47578f07f.jpg b/data/2025/2504_04xxx/2504.04377/images/7aa7eb18842bdeb0a90f34bf39ebc26933d8984b21590840eab966e47578f07f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..78ea2215fd13abbdc1d2fb15977a489f15864e95 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04377/images/7aa7eb18842bdeb0a90f34bf39ebc26933d8984b21590840eab966e47578f07f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1ede19208184b3724874a3ae8334f1cca8cee35345cba0bf3c925b97a0313d77 +size 54247 diff --git 
a/data/2025/2504_04xxx/2504.04377/images/7c06a04f5905654a8188566a61f60456844d509c5f416f57e789d1529a3ba559.jpg b/data/2025/2504_04xxx/2504.04377/images/7c06a04f5905654a8188566a61f60456844d509c5f416f57e789d1529a3ba559.jpg new file mode 100644 index 0000000000000000000000000000000000000000..18dd500a18283e4b2dd3eab7bc2d1a302d15b8b4 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04377/images/7c06a04f5905654a8188566a61f60456844d509c5f416f57e789d1529a3ba559.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:36d5cc36097eb26ce69ec42ebe6ef56f3c5222d4628640ee8f9545d7205ba869 +size 113642 diff --git a/data/2025/2504_04xxx/2504.04377/images/86d7b5bd1f6dc6ae24ed987b871645b581f681659f8d1f3bb052cbaa5de2d04c.jpg b/data/2025/2504_04xxx/2504.04377/images/86d7b5bd1f6dc6ae24ed987b871645b581f681659f8d1f3bb052cbaa5de2d04c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..682c8655cb0c18e755f69a8c3a00c47b690fb62a --- /dev/null +++ b/data/2025/2504_04xxx/2504.04377/images/86d7b5bd1f6dc6ae24ed987b871645b581f681659f8d1f3bb052cbaa5de2d04c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:96a6357e2fa11f91afacf84424dc6841078f1f9864b713b18067401c180df0a7 +size 18393 diff --git a/data/2025/2504_04xxx/2504.04377/images/8b8df921c7157f55eb7b46dc8c5f5a2e939ccc7fd21b159503d855dc7d135b5f.jpg b/data/2025/2504_04xxx/2504.04377/images/8b8df921c7157f55eb7b46dc8c5f5a2e939ccc7fd21b159503d855dc7d135b5f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f368f6faac0ca2ad8cc73d561cc8ca38f9d8e69f --- /dev/null +++ b/data/2025/2504_04xxx/2504.04377/images/8b8df921c7157f55eb7b46dc8c5f5a2e939ccc7fd21b159503d855dc7d135b5f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9d5efb3f4d65988a06b9db89026776aade141e0b5d0c974f25500c50b28e2a52 +size 79814 diff --git a/data/2025/2504_04xxx/2504.04377/images/8c1ec5dc3465a48fd1833e0f2501ef6f8b1d046c8f4ff78cbdf04101d4148ee2.jpg 
b/data/2025/2504_04xxx/2504.04377/images/8c1ec5dc3465a48fd1833e0f2501ef6f8b1d046c8f4ff78cbdf04101d4148ee2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..41fb1dbaf990d6da601a25695c91ed8716f57712 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04377/images/8c1ec5dc3465a48fd1833e0f2501ef6f8b1d046c8f4ff78cbdf04101d4148ee2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:036bea9790d19d4c1b98f550a6c19b3f4d46c3cbebf16a63a287b9d353a25185 +size 49564 diff --git a/data/2025/2504_04xxx/2504.04377/images/c55222dbf21f2abff079f48af27aa803b24e7526403aa98dc764f985f5738d93.jpg b/data/2025/2504_04xxx/2504.04377/images/c55222dbf21f2abff079f48af27aa803b24e7526403aa98dc764f985f5738d93.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d11cb2285d704a0fda930cc30a08678c4bf01631 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04377/images/c55222dbf21f2abff079f48af27aa803b24e7526403aa98dc764f985f5738d93.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fc5bdabfb76b3ecdf382f7f36face0e702afe32f362b13a2bed4f05adb3105f0 +size 109514 diff --git a/data/2025/2504_04xxx/2504.04377/images/c8c5b104c08252fb9df60205d9c2185c775a21889177f63de0f3b67525d663a0.jpg b/data/2025/2504_04xxx/2504.04377/images/c8c5b104c08252fb9df60205d9c2185c775a21889177f63de0f3b67525d663a0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c5881dfabd3aecb9f1e157d89b91c239b9729f5d --- /dev/null +++ b/data/2025/2504_04xxx/2504.04377/images/c8c5b104c08252fb9df60205d9c2185c775a21889177f63de0f3b67525d663a0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b0b93365579f872c9271d1728b3b16a3536d32927e48479e345419e1177d35b2 +size 19756 diff --git a/data/2025/2504_04xxx/2504.04377/images/d5b00f408637d7cf518f8a12ec5653f7af824060843b2097368f54d9d0e1a04b.jpg b/data/2025/2504_04xxx/2504.04377/images/d5b00f408637d7cf518f8a12ec5653f7af824060843b2097368f54d9d0e1a04b.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..494db8df096e1f78052e4465b07a33cab44dd0e2 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04377/images/d5b00f408637d7cf518f8a12ec5653f7af824060843b2097368f54d9d0e1a04b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:978cfccfa717d6ede3dd5a96ebef41e08eea0f81b743d7eaf42d6e302cf3e47c +size 21327 diff --git a/data/2025/2504_04xxx/2504.04377/images/d98006c005474071287e277efe122673eb39e0c87bc1b6a8e8b92e50ace4d38d.jpg b/data/2025/2504_04xxx/2504.04377/images/d98006c005474071287e277efe122673eb39e0c87bc1b6a8e8b92e50ace4d38d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..71bea3bf2187f0f37fe63f0e9fb1c0835c423714 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04377/images/d98006c005474071287e277efe122673eb39e0c87bc1b6a8e8b92e50ace4d38d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0652a98368892d75148fc85db9f549a4e5cf9c2a0a773e3944121165478bdb17 +size 82820 diff --git a/data/2025/2504_04xxx/2504.04377/images/da2dedc8d353ba756792c421a28eb3d58a44bc6e99849e62e8c2dfddfcef3244.jpg b/data/2025/2504_04xxx/2504.04377/images/da2dedc8d353ba756792c421a28eb3d58a44bc6e99849e62e8c2dfddfcef3244.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d83fff4cb9fd72e1f7e2c8acf8051d44a4a51be3 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04377/images/da2dedc8d353ba756792c421a28eb3d58a44bc6e99849e62e8c2dfddfcef3244.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f846febe7fb1359f916c92ed0ac770447eab97dfb825ce0c8cfea6a3c162f191 +size 32208 diff --git a/data/2025/2504_04xxx/2504.04377/images/de2a11dde32c0308543546988d5a694ba9a7d72072127e3414ddf4f43b0c5fca.jpg b/data/2025/2504_04xxx/2504.04377/images/de2a11dde32c0308543546988d5a694ba9a7d72072127e3414ddf4f43b0c5fca.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3d6977703c181c1270b43a760981901634a10fd0 --- /dev/null +++ 
b/data/2025/2504_04xxx/2504.04377/images/de2a11dde32c0308543546988d5a694ba9a7d72072127e3414ddf4f43b0c5fca.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0b89f3e98669efdf06bf9068ee4a6aa9fdf6d5d3999d7234c3a0ecef5556a21e +size 46625 diff --git a/data/2025/2504_04xxx/2504.04377/images/e8d291c17038f0e90e663c6a3add7960cc90021d61231e732d1932d341e6d655.jpg b/data/2025/2504_04xxx/2504.04377/images/e8d291c17038f0e90e663c6a3add7960cc90021d61231e732d1932d341e6d655.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a6de17b639d5e4bf2c778c64d3a4ee41c3128133 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04377/images/e8d291c17038f0e90e663c6a3add7960cc90021d61231e732d1932d341e6d655.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1138060bde059a1a86a887a3cb6cba282a13f678397d3217a9044d61f12b97db +size 51120 diff --git a/data/2025/2504_04xxx/2504.04377/images/f2520c3d4db2dc1f554f4fef2eb312adfe3cdc4cd29305c0f48bc3f31bda42b9.jpg b/data/2025/2504_04xxx/2504.04377/images/f2520c3d4db2dc1f554f4fef2eb312adfe3cdc4cd29305c0f48bc3f31bda42b9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f4336be70bd0a44caa7c6331e6bfdf74ef1c0d96 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04377/images/f2520c3d4db2dc1f554f4fef2eb312adfe3cdc4cd29305c0f48bc3f31bda42b9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:40372cb8327ddcc3d14d5c990067fa6e2b9012eba250293df44b78a93b1a100b +size 71839 diff --git a/data/2025/2504_04xxx/2504.04377/images/f9f0c1c9bd1f15fc16731e2aa7d66b6c071d5ab63757f5412e1f1c2f8fd487e7.jpg b/data/2025/2504_04xxx/2504.04377/images/f9f0c1c9bd1f15fc16731e2aa7d66b6c071d5ab63757f5412e1f1c2f8fd487e7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3d0487e1f6311a27d0d5c104c3955c87e0e7b730 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04377/images/f9f0c1c9bd1f15fc16731e2aa7d66b6c071d5ab63757f5412e1f1c2f8fd487e7.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:650f5bebaa8e4755a25dcbe2c06a919716a288a65b72e283657eb93152fd32f8 +size 30467 diff --git a/data/2025/2504_04xxx/2504.04377/layout.json b/data/2025/2504_04xxx/2504.04377/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..50a982da2142ebcf370da67704e31a83c6a1cb51 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04377/layout.json @@ -0,0 +1,12163 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 137, + 69, + 192, + 125 + ], + "blocks": [ + { + "bbox": [ + 137, + 69, + 192, + 125 + ], + "lines": [ + { + "bbox": [ + 137, + 69, + 192, + 125 + ], + "spans": [ + { + "bbox": [ + 137, + 69, + 192, + 125 + ], + "type": "image", + "image_path": "01334081f210a9c5d39bed84a79543b78c394d85d18689b9e3b6b4e720660630.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 217, + 79, + 445, + 114 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 217, + 79, + 445, + 114 + ], + "spans": [ + { + "bbox": [ + 217, + 79, + 445, + 114 + ], + "type": "text", + "content": "PolyGuard: A Multilingual Safety Moderation Tool for 17 Languages" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 168, + 142, + 441, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 168, + 142, + 441, + 157 + ], + "spans": [ + { + "bbox": [ + 168, + 142, + 441, + 157 + ], + "type": "text", + "content": "Priyanshu Kumar" + }, + { + "bbox": [ + 168, + 142, + 441, + 157 + ], + "type": "inline_equation", + "content": "^{\\text{♥1}}" + }, + { + "bbox": [ + 168, + 142, + 441, + 157 + ], + "type": "text", + "content": " Devansh Jain" + }, + { + "bbox": [ + 168, + 142, + 441, + 157 + ], + "type": "inline_equation", + "content": "^{\\text{♥1}}" + }, + { + "bbox": [ + 168, + 142, + 441, + 157 + ], + "type": "text", + "content": " Akhila Yerukola" + }, + { + "bbox": [ + 168, + 142, + 441, + 157 + ], + "type": 
"inline_equation", + "content": "^{\\text{♥}}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 121, + 171, + 489, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 171, + 489, + 186 + ], + "spans": [ + { + "bbox": [ + 121, + 171, + 489, + 186 + ], + "type": "text", + "content": "Liwei Jiang\\* Himanshu Beniwal△ " + }, + { + "bbox": [ + 121, + 171, + 489, + 186 + ], + "type": "inline_equation", + "content": "\\diamond" + }, + { + "bbox": [ + 121, + 171, + 489, + 186 + ], + "type": "text", + "content": " Thomas Hartvigsen Maarten Sap" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 126, + 194, + 485, + 223 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 126, + 194, + 485, + 223 + ], + "spans": [ + { + "bbox": [ + 126, + 194, + 485, + 223 + ], + "type": "text", + "content": "Carnegie Mellon University " + }, + { + "bbox": [ + 126, + 194, + 485, + 223 + ], + "type": "inline_equation", + "content": "\\spadesuit" + }, + { + "bbox": [ + 126, + 194, + 485, + 223 + ], + "type": "text", + "content": "University of Washington " + }, + { + "bbox": [ + 126, + 194, + 485, + 223 + ], + "type": "inline_equation", + "content": "\\triangle" + }, + { + "bbox": [ + 126, + 194, + 485, + 223 + ], + "type": "text", + "content": "IIT Gandhinagar " + }, + { + "bbox": [ + 126, + 194, + 485, + 223 + ], + "type": "inline_equation", + "content": "\\diamond" + }, + { + "bbox": [ + 126, + 194, + 485, + 223 + ], + "type": "text", + "content": "University of Virginia " + }, + { + "bbox": [ + 126, + 194, + 485, + 223 + ], + "type": "inline_equation", + "content": "\\clubsuit" + }, + { + "bbox": [ + 126, + 194, + 485, + 223 + ], + "type": "text", + "content": "Allen Institute for AI" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 280, + 241, + 331, + 254 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 280, + 241, + 331, + 254 + ], + "spans": [ + { + "bbox": [ + 280, + 241, + 331, + 254 + ], + "type": "text", + 
"content": "Abstract" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 140, + 266, + 471, + 476 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 266, + 471, + 476 + ], + "spans": [ + { + "bbox": [ + 140, + 266, + 471, + 476 + ], + "type": "text", + "content": "Truly multilingual safety moderation efforts for Large Language Models (LLMs) have been hindered by a narrow focus on a small set of languages (e.g., English, Chinese) as well as a limited scope of safety definition, resulting in significant gaps in moderation capabilities. To bridge these gaps, we release POLYGUARD, a new state-of-the-art multilingual safety model for safeguarding LLM generations, and the corresponding training and evaluation datasets. POLYGUARD is trained on POLYGUARDMIX, the largest multilingual safety training corpus to date containing 1.91M samples across 17 languages (e.g., Chinese, Czech, English, Hindi). We also introduce POLYGUARDPROMPTS, a high quality multilingual benchmark with 29K samples for the evaluation of safety guardrails. Created by combining naturally occurring multilingual human-LLM interactions and human-verified machine translations of an English-only safety dataset (WildGuardMix; Han et al., 2024), our datasets contain prompt-output pairs with labels of prompt harmfulness, response harmfulness, and response refusal. Through extensive evaluations across multiple safety and toxicity benchmarks, we demonstrate that POLYGUARD outperforms existing state-of-the-art open-weight and commercial safety classifiers by " + }, + { + "bbox": [ + 140, + 266, + 471, + 476 + ], + "type": "inline_equation", + "content": "5.5\\%" + }, + { + "bbox": [ + 140, + 266, + 471, + 476 + ], + "type": "text", + "content": ". Our contributions advance efforts toward safer multilingual LLMs for all global users." 
+ } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 233, + 483, + 249, + 494 + ], + "blocks": [ + { + "bbox": [ + 233, + 483, + 249, + 494 + ], + "lines": [ + { + "bbox": [ + 233, + 483, + 249, + 494 + ], + "spans": [ + { + "bbox": [ + 233, + 483, + 249, + 494 + ], + "type": "image", + "image_path": "07b1c8fbc3923a4e6aed477b493bd29125cec7df8131a0b52664d39ca90e3329.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 267, + 483, + 365, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 267, + 483, + 365, + 495 + ], + "spans": [ + { + "bbox": [ + 267, + 483, + 365, + 495 + ], + "type": "text", + "content": "PolyGuard Collection" + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 233, + 495, + 248, + 507 + ], + "blocks": [ + { + "bbox": [ + 233, + 495, + 248, + 507 + ], + "lines": [ + { + "bbox": [ + 233, + 495, + 248, + 507 + ], + "spans": [ + { + "bbox": [ + 233, + 495, + 248, + 507 + ], + "type": "image", + "image_path": "14d692a5dd9fb9f6ca098b1d92717b3f4009134c5c31cf1e964e4a115286a3f3.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 256, + 495, + 376, + 508 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 256, + 495, + 376, + 508 + ], + "spans": [ + { + "bbox": [ + 256, + 495, + 376, + 508 + ], + "type": "text", + "content": "kpriyanshu256/polyguard" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 526, + 195, + 539 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 526, + 195, + 539 + ], + "spans": [ + { + "bbox": [ + 105, + 526, + 195, + 539 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 552, + 506, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 552, + 506, + 631 + ], + "spans": [ + { + "bbox": [ + 104, + 552, + 
506, + 631 + ], + "type": "text", + "content": "Recent advances in large language models (LLMs), especially their multilingual capabilities, have led to their deployment to a diverse global user base that spans multiple languages. Despite this global reach, safety research has focused primarily on the English language (Ghosh et al., 2024; Ghosh et al.; Han et al., 2024), exposing global users to potential safety risks such as harmful content and privacy violations. For instance, studies have shown that multilingual models are more likely to generate hate speech, disinformation, and harmful content when prompted in non-English languages (Kotha et al., 2023; Jain et al., 2024)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 634, + 506, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 634, + 506, + 714 + ], + "spans": [ + { + "bbox": [ + 104, + 634, + 506, + 714 + ], + "type": "text", + "content": "The development of robust multilingual safety systems presents several key challenges. First, building multilingual systems is inherently difficult due to challenges such as the lack of comprehensive datasets, the \"curse of multilinguality\" (Aharoni et al., 2019; Conneau et al., 2020; Gurgurov et al., 2024), and the inherent biases embedded in training corpora (Xu et al., 2024). 
Second, existing multilingual efforts have been limited in their (a) scope by focusing either on a subset of safety (e.g., PerspectiveAPI covering only toxicity, ignoring other unsafe content) and/or on a narrow set of language coverage (e.g., Llama-Guard-1" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 25, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 25, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 25, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 720, + 351, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 720, + 351, + 732 + ], + "spans": [ + { + "bbox": [ + 116, + 720, + 351, + 732 + ], + "type": "text", + "content": "1Equal contributors, correspondence at msap2@cs.cmu.edu." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 14, + 223, + 37, + 567 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 223, + 37, + 567 + ], + "spans": [ + { + "bbox": [ + 14, + 223, + 37, + 567 + ], + "type": "text", + "content": "arXiv:2504.04377v2 [cs.CL] 7 Aug 2025" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "text", + "content": "only covering English safety, ignoring toxicity and DuoGuard being evaluated on 4 very high resource languages only; Inan 
et al., 2023; Jain et al., 2024; Deng et al., 2025), or (b) performance (e.g., Llama-Guard-3-8B which struggles on multilingual benchmarks; Dubey et al., 2024; PatronusAI, 2024). Finally, most existing safety frameworks address only the single task of classifying safety and often rely on simplistic binary settings (safe/unsafe), which fail to capture the complex spectrum of harmful content that can manifest differently across cultural and linguistic contexts (Sap et al., 2020; Zhou et al., 2023)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 164, + 506, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 164, + 506, + 200 + ], + "spans": [ + { + "bbox": [ + 104, + 164, + 506, + 200 + ], + "type": "text", + "content": "To address these gaps, we release POLYGUARD (PG), a new state-of-the-art fine-tuned language model for multi-task safety detection and moderation. As Figure 1 highlights, PG can classify a multilingual input of a user prompt and an LLM response on five dimensions." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 204, + 298, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 204, + 298, + 370 + ], + "spans": [ + { + "bbox": [ + 104, + 204, + 298, + 370 + ], + "type": "text", + "content": "We also release the first large-scale multilingual corpora for safety detection training, POLYGUARDMIX (PGMix) and safety guardrail evaluation, POLYGUARD-PROMPTS (PGPrompts), comprising 1.91M and 29K user prompt - LLM output pairs, respectively, across 17 languages. Our datasets contain binary and categorical labels for prompt harmfulness and response harmfulness, and response refusal (if the LLM response complies with the user request). We use a systematic labeling process that leverages a panel of English safety classifiers and LLM-as-a-judge (proprietary and open-weight LLM) to obtain these labels." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 374, + 298, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 374, + 298, + 496 + ], + "spans": [ + { + "bbox": [ + 104, + 374, + 298, + 496 + ], + "type": "text", + "content": "We create our PGMix dataset by combining both: (a) naturally occurring multilingual human-LLM interactions from In-The-Wild (ITW) datasets, and (b) machine translations of WildGuardMix (Han et al., 2024), to ensure data diversity which is crucial for improved model performance (Davani et al., 2024). We utilize multiple LLMs to ensure high-quality translations of WildGuardMix, verified by a high average translation score of 81.15 as rated by human annotators." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 500, + 298, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 500, + 298, + 544 + ], + "spans": [ + { + "bbox": [ + 104, + 500, + 298, + 544 + ], + "type": "text", + "content": "We then use PGMix to train our state-of-the-art POLYGUARD (PG) models, including a fast lightweight model for application use cases. Our empirical results show that PG" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 544, + 504, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 544, + 504, + 590 + ], + "spans": [ + { + "bbox": [ + 104, + 544, + 504, + 590 + ], + "type": "text", + "content": "outperforms existing open-source and proprietary safety detectors on English-only as well as multilingual safety and toxicity benchmarks. Furthermore, we find that the incorporation of ITW samples in the training datasets makes PG models more robust to various data distributions, including code-switched and translated data." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 595, + 504, + 619 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 595, + 504, + 619 + ], + "spans": [ + { + "bbox": [ + 104, + 595, + 504, + 619 + ], + "type": "text", + "content": "Overall, our datasets and models2 serve as a starting point for building powerful and robust multilingual safety detectors and advance efforts towards multilingual safe AI systems." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 637, + 168, + 649 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 637, + 168, + 649 + ], + "spans": [ + { + "bbox": [ + 105, + 637, + 168, + 649 + ], + "type": "text", + "content": "2 Dataset" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 663, + 506, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 663, + 506, + 709 + ], + "spans": [ + { + "bbox": [ + 104, + 663, + 506, + 709 + ], + "type": "text", + "content": "To address the critical need for multilingual safety detection, we introduce POLYGUARDMIX (PGMix) and POLYGUARDPROMPTS (PGPrompts), multilingual datasets specifically designed to train and evaluate robust safety classifiers. 
PGMix comprises 1.91M human-LLM interactions, including 1.47M machine-translated samples from WildGuardMix and 0.43M" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 719, + 373, + 731 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 719, + 373, + 731 + ], + "spans": [ + { + "bbox": [ + 116, + 719, + 373, + 731 + ], + "type": "text", + "content": "2Model, code, and data are available under the ODC-BY license." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": "naturally-occurring samples from In-The-Wild datasets, whereas PGPrompts comprises 29K translated samples." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 110, + 506, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 110, + 506, + 167 + ], + "spans": [ + { + "bbox": [ + 104, + 110, + 506, + 167 + ], + "type": "text", + "content": "Our datasets cover 17 languages: Arabic (ar), Chinese (zh), Czech (cs), Dutch (nl), English (en), French (fr), German (de), Hindi (hi), Thai (th), Italian (it), Japanese (ja), Korean (ko), Polish (pl), Portuguese (pt), Russian (ru), Spanish (es), and Swedish (sv). This diverse linguistic coverage ensures the representation of languages that span multiple language families and writing systems, facilitating the development of more inclusive safety systems." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 171, + 504, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 171, + 504, + 194 + ], + "spans": [ + { + "bbox": [ + 104, + 171, + 504, + 194 + ], + "type": "text", + "content": "Figure 2 shows an overview of our data curation pipeline, whose components we describe in detail in the following subsections." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 208, + 190, + 219 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 208, + 190, + 219 + ], + "spans": [ + { + "bbox": [ + 105, + 208, + 190, + 219 + ], + "type": "text", + "content": "2.1 Data Sources" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 228, + 242, + 395 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 228, + 242, + 395 + ], + "spans": [ + { + "bbox": [ + 104, + 228, + 242, + 395 + ], + "type": "text", + "content": "Both PGMix and PGPrompts are constructed from the train and test samples of WildGuardMix (Han et al., 2024), a dataset of synthetic and natural single-turn human-LLM interactions with fine-grained annotations, respectively. 
In addition, PGMix also contains samples from In-TheWild datasets: LMSys-Chat1M (Zheng et al., 2023) and WildChat (Zhao et al., 2024). We posit that the combination of natural and synthetic sam" + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 256, + 233, + 494, + 323 + ], + "blocks": [ + { + "bbox": [ + 256, + 233, + 494, + 323 + ], + "lines": [ + { + "bbox": [ + 256, + 233, + 494, + 323 + ], + "spans": [ + { + "bbox": [ + 256, + 233, + 494, + 323 + ], + "type": "image", + "image_path": "64f1eb9148050067cf1c5ad96d1ffffa8146bc4b4e995ba80a783bd2ca1b0b99.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 246, + 332, + 506, + 389 + ], + "lines": [ + { + "bbox": [ + 246, + 332, + 506, + 389 + ], + "spans": [ + { + "bbox": [ + 246, + 332, + 506, + 389 + ], + "type": "text", + "content": "Figure 2: Data curation process for PGMix (safety detection training) and PGPrompts (safety guardrail evaluation). Takeaway: PGMix combines machine-translated and naturally occurring data to improve data diversity and, consequently, model performance." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 395, + 504, + 417 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 395, + 504, + 417 + ], + "spans": [ + { + "bbox": [ + 104, + 395, + 504, + 417 + ], + "type": "text", + "content": "plies improves the diversity of data and consequently improves model performance (Davani et al., 2024)." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 431, + 265, + 443 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 431, + 265, + 443 + ], + "spans": [ + { + "bbox": [ + 105, + 431, + 265, + 443 + ], + "type": "text", + "content": "2.2 Machine Translation Pipeline" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 452, + 506, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 452, + 506, + 563 + ], + "spans": [ + { + "bbox": [ + 104, + 452, + 506, + 563 + ], + "type": "text", + "content": "We develop an efficient machine translation pipeline using open-weight models to minimize computational costs when translating WildGuardMix for our training data. We employ two state-of-the-art translation models: TowerInstruct-7B-v0.2 (Alves et al., 2024) and NLLB-3.3B (Team et al., 2022). For optimal performance, we utilize TowerInstruct-7B-v0.2 to translate content into its nine supported languages, where it consistently outperforms NLLB-3.3B. We then leverage NLLB-3.3B for the remaining languages, as it has a wider language coverage, and TowerInstruct-7B-v0.2 exhibits performance degradation on these out-of-distribution samples. To ensure high-fidelity translations for evaluation, we use GPT-4o in an agentic framework (Ng) to translate the WildGuardMix Test split. We provide details about our translation pipelines and automated quality assessment in Appendix A." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 577, + 214, + 588 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 577, + 214, + 588 + ], + "spans": [ + { + "bbox": [ + 105, + 577, + 214, + 588 + ], + "type": "text", + "content": "2.3 Safety Annotation" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 597, + 506, + 690 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 597, + 506, + 690 + ], + "spans": [ + { + "bbox": [ + 104, + 597, + 506, + 690 + ], + "type": "text", + "content": "We leverage a panel of English safety classifiers and LLM-as-judges to annotate safety violation categories automatically. We follow Llama-Guard-3-8B (Dubey et al., 2024) and define our safety violation taxonomy according to the MLCommons Safety Taxonomy4. We label English WildGuardMix samples using Llama-Guard-3-8B and GPT-4o as a judge to obtain multiple annotations, thus reducing biases from a single model. Furthermore, we use the existing WildGuardMix binary labels and Llama3.1-405B-Instruct (Dubey et al., 2024) as a judge to resolve conflicts and obtain the final annotations5. 
Finally, since PGMix and PGPrompts contain translations of WildGuardMix, we propagate safety labels from the" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 698, + 403, + 709 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 698, + 403, + 709 + ], + "spans": [ + { + "bbox": [ + 116, + 698, + 403, + 709 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 116, + 698, + 403, + 709 + ], + "type": "text", + "content": "WildChat-1M is available for modifications under the ODC-BY license." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 118, + 710, + 359, + 720 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 710, + 359, + 720 + ], + "spans": [ + { + "bbox": [ + 118, + 710, + 359, + 720 + ], + "type": "text", + "content": "4https://mlcommons.org/2024/04/mlc-aisafety-v0-5-poc/" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 118, + 720, + 395, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 720, + 395, + 732 + ], + "spans": [ + { + "bbox": [ + 118, + 720, + 395, + 732 + ], + "type": "text", + "content": "5We use the same prompt as Llama-Guard-3-8B for all LLM-as-judges." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 82, + 294, + 248 + ], + "blocks": [ + { + "bbox": [ + 107, + 82, + 294, + 248 + ], + "lines": [ + { + "bbox": [ + 107, + 82, + 294, + 248 + ], + "spans": [ + { + "bbox": [ + 107, + 82, + 294, + 248 + ], + "type": "image", + "image_path": "da2dedc8d353ba756792c421a28eb3d58a44bc6e99849e62e8c2dfddfcef3244.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 258, + 506, + 316 + ], + "lines": [ + { + "bbox": [ + 104, + 258, + 506, + 316 + ], + "spans": [ + { + "bbox": [ + 104, + 258, + 506, + 316 + ], + "type": "text", + "content": "Figure 3: Safety category distribution for user prompts and model responses for WildGuard-Mix train samples. The model name (GPT-4o and Llama-Guard-3-8B) represents the LLM used as a judge to automatically annotate the safety category. These annotations are then ensembled together, using Llama3.1-405B-Instruct to break ties (Combined). Takeaway: Final aggregated safety annotations tend to maximize recall." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 312, + 82, + 500, + 248 + ], + "blocks": [ + { + "bbox": [ + 312, + 82, + 500, + 248 + ], + "lines": [ + { + "bbox": [ + 312, + 82, + 500, + 248 + ], + "spans": [ + { + "bbox": [ + 312, + 82, + 500, + 248 + ], + "type": "image", + "image_path": "f9f0c1c9bd1f15fc16731e2aa7d66b6c071d5ab63757f5412e1f1c2f8fd487e7.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 338, + 504, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 338, + 504, + 373 + ], + "spans": [ + { + "bbox": [ + 104, + 338, + 504, + 373 + ], + "type": "text", + "content": "annotated English samples to other languages. ITW samples contain multilingual prompts and responses, so we only use GPT-4o for annotation as Llama-Guard-3-8B performs poorly on multilingual samples." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 376, + 506, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 376, + 506, + 478 + ], + "spans": [ + { + "bbox": [ + 104, + 376, + 506, + 478 + ], + "type": "text", + "content": "Figure 3 illustrates the distribution of safety categories across both user prompt harmfulness and model response harmfulness, comparing annotations from Llama-Guard-3-8B, GPT-4o, and our final consolidated labels. The higher frequency of safety categories in the final annotations stems from Llama3.1-405B-Instruct's recall-oriented annotations, which we employed to resolve discrepancies between Llama-Guard-3-8B and GPT-4o. 
Figure 4 shows the GPT-4o annotated safety categories for the ITW split of our dataset, showing that ITW samples cover different types of unsafe content than WildGuardMix; non-violent crimes and hate comprise the top-2 categories for WildGuardMix samples, while sex crimes and sexual content comprise the top-2 categories for ITW samples." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 493, + 216, + 504 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 493, + 216, + 504 + ], + "spans": [ + { + "bbox": [ + 105, + 493, + 216, + 504 + ], + "type": "text", + "content": "2.4 Human Validation" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 514, + 504, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 514, + 504, + 583 + ], + "spans": [ + { + "bbox": [ + 104, + 514, + 504, + 583 + ], + "type": "text", + "content": "To validate the translation quality and the generated safety labels, we conduct human validation across all 16 languages. Due to budget constraints, we randomly sample 50 data points per language, ensuring a balanced distribution across PGMix (train) and PGPrompts (test), harmful and harmless labels, as well as user prompts and model responses. We recruit workers from Prolific, filtering them based on their proficiency in each language. Each data point is evaluated by three annotators." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 588, + 398, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 588, + 398, + 601 + ], + "spans": [ + { + "bbox": [ + 105, + 588, + 398, + 601 + ], + "type": "text", + "content": "For each data point, we ask the annotators to assess the following." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 127, + 610, + 506, + 702 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 129, + 610, + 504, + 644 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 610, + 504, + 644 + ], + "spans": [ + { + "bbox": [ + 129, + 610, + 504, + 644 + ], + "type": "text", + "content": "1. Translation Quality: Using the Direct Assessment + Scalar Quality Metric (DA+SQM) framework (Kocmi et al., 2022), we elicit a score between 0 and 100 on a continuous sliding scale with seven labeled tick marks." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 127, + 650, + 505, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 650, + 505, + 673 + ], + "spans": [ + { + "bbox": [ + 127, + 650, + 505, + 673 + ], + "type": "text", + "content": "2. Safety Label for the Source Sentence: Annotators assign a label of either 'harmful' or 'safe' for the source sentence in English." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 127, + 678, + 506, + 702 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 678, + 506, + 702 + ], + "spans": [ + { + "bbox": [ + 127, + 678, + 506, + 702 + ], + "type": "text", + "content": "3. Safety Label for the Translated Sentence: Annotators assign a 'harmful' or 'safe' label for the corresponding translation." 
+ } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 117, + 720, + 233, + 731 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 720, + 233, + 731 + ], + "spans": [ + { + "bbox": [ + 117, + 720, + 233, + 731 + ], + "type": "inline_equation", + "content": "^{6}" + }, + { + "bbox": [ + 117, + 720, + 233, + 731 + ], + "type": "text", + "content": "https://www.prolific.com" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 351, + 214 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 351, + 214 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 351, + 214 + ], + "type": "text", + "content": "Annotators rated translation quality to be high, with an average score of 81.15 across all 16 languages. The inter-annotator agreement, averaged across all 16 languages, for both source and translated sentence safety labels yielded a Krippendorff's " + }, + { + "bbox": [ + 104, + 82, + 351, + 214 + ], + "type": "inline_equation", + "content": "\\alpha = 0.46" + }, + { + "bbox": [ + 104, + 82, + 351, + 214 + ], + "type": "text", + "content": ". 
Furthermore, the agreement between the majority-voted source and target safety labels is high, with an average Krippendorff's " + }, + { + "bbox": [ + 104, + 82, + 351, + 214 + ], + "type": "inline_equation", + "content": "\\alpha = 0.94" + }, + { + "bbox": [ + 104, + 82, + 351, + 214 + ], + "type": "text", + "content": ", indicating that the translations effectively preserved the original intent of the English source data. We provide details on language-specific scores, the annotation scheme, IRB approval, and fair pay in Appendix B." + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 362, + 60, + 499, + 188 + ], + "blocks": [ + { + "bbox": [ + 362, + 60, + 499, + 188 + ], + "lines": [ + { + "bbox": [ + 362, + 60, + 499, + 188 + ], + "spans": [ + { + "bbox": [ + 362, + 60, + 499, + 188 + ], + "type": "image", + "image_path": "86d7b5bd1f6dc6ae24ed987b871645b581f681659f8d1f3bb052cbaa5de2d04c.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 353, + 197, + 506, + 220 + ], + "lines": [ + { + "bbox": [ + 353, + 197, + 506, + 220 + ], + "spans": [ + { + "bbox": [ + 353, + 197, + 506, + 220 + ], + "type": "text", + "content": "Figure 4: Safety category distributions for PGMix ITW samples." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 230, + 418, + 245 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 230, + 418, + 245 + ], + "spans": [ + { + "bbox": [ + 104, + 230, + 418, + 245 + ], + "type": "text", + "content": "3 POLYGUARD: A 17-Language Safety Moderation Tool" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 255, + 506, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 255, + 506, + 310 + ], + "spans": [ + { + "bbox": [ + 104, + 255, + 506, + 310 + ], + "type": "text", + "content": "To build POLYGUARD, we fine-tune Qwen2.5-7B-Instruct (Yang et al., 2024a) and Ministral-8B-Instruct-2410, both of which have been shown to have state-of-the-art performance in multilingual knowledge and commonsense, code, and math settings (Qwen; Mistral). We refer to these models as PG Qwen2.5 and PG Ministral In addition, we also fine-tune Qwen2.5-0.5B-Instruct to build PG Smol." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 315, + 506, + 395 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 315, + 506, + 395 + ], + "spans": [ + { + "bbox": [ + 104, + 315, + 506, + 395 + ], + "type": "text", + "content": "The models are fine-tuned on PGMix using Low-Rank Adapters (Hu et al., 2022). We follow Han et al. (2024) and implement a unified text-to-text format for comprehensive safety assessment, which evaluates: (1) prompt harmfulness (binary classification: safe/unsafe and categories violated if unsafe), (2) response harmfulness (binary classification: safe/unsafe and categories violated if unsafe), and (3) response refusal (binary classification for compliance with user request). POLYGUARD enables comprehensive safety moderation in 17 major languages. We provide detailed training specifications in Appendix C." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 408, + 291, + 422 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 408, + 291, + 422 + ], + "spans": [ + { + "bbox": [ + 104, + 408, + 291, + 422 + ], + "type": "text", + "content": "4 Results & Research Questions" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 433, + 506, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 433, + 506, + 501 + ], + "spans": [ + { + "bbox": [ + 104, + 433, + 506, + 501 + ], + "type": "text", + "content": "A multilingual system must be robust; that is, it should perform consistently on data belonging to different distributions (sources and languages). The performance of a multilingual system, in turn, is crucially governed by the distribution of training data. Hence, we study the performance of POLYGUARD on POLYGUARDPROMPTS and multiple out-of-distribution evaluation benchmarks, and the influence of ITW samples and low-quality translations on model performance. We perform one run per evaluation due to computational constraints." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 506, + 505, + 564 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 506, + 505, + 564 + ], + "spans": [ + { + "bbox": [ + 104, + 506, + 505, + 564 + ], + "type": "text", + "content": "Baselines: We compare POLYGUARD with popular open-source safety detection models of similar size (Yang et al., 2024b), namely Llama-Guard-2 (Team, 2024), Llama-Guard-3-8B (Dubey et al., 2024), Aegis 1.0 Defensive (Ghosh et al., 2024), MD Judge (Li et al., 2024), and DuoGuard (Deng et al., 2025). We also benchmark proprietary models, namely Perspective API7, OpenAI Omni Moderation8, and Google Moderation9." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 576, + 477, + 590 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 576, + 477, + 590 + ], + "spans": [ + { + "bbox": [ + 104, + 576, + 477, + 590 + ], + "type": "text", + "content": "4.1 How do PG models perform on the in-distribution PGPrompts benchmark?" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 597, + 506, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 597, + 506, + 664 + ], + "spans": [ + { + "bbox": [ + 104, + 597, + 506, + 664 + ], + "type": "text", + "content": "We first evaluate PG and open-source baselines on POLYGUARDPROMPTs benchmark, comprising 29K samples, using the following metrics: (1) for binary tasks of prompt harmfulness, response harmfulness, and response refusal, we use F1 score for the positive label (unsafe for harmfulness and yes for response refusal), and (2) for the tasks of prompt violations and response violations, we compare the list of ground truth and predicted categories using Exact Match and Jaccard Similarity." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 669, + 506, + 693 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 669, + 506, + 693 + ], + "spans": [ + { + "bbox": [ + 104, + 669, + 506, + 693 + ], + "type": "text", + "content": "PG models based on Qwen2.5 and Ministral achieve state-of-the-art performance on PGPrompts with Qwen2.5 performing marginally better. 
PG Smol outperforms DuoGuard," + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 25, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 25, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 25, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 698, + 246, + 709 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 698, + 246, + 709 + ], + "spans": [ + { + "bbox": [ + 116, + 698, + 246, + 709 + ], + "type": "text", + "content": "7https://perspectiveapi.com/" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 118, + 710, + 403, + 720 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 710, + 403, + 720 + ], + "spans": [ + { + "bbox": [ + 118, + 710, + 403, + 720 + ], + "type": "text", + "content": "8https://platform.openai.com/docs/models/omni-moderation-latest" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 118, + 720, + 403, + 731 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 720, + 403, + 731 + ], + "spans": [ + { + "bbox": [ + 118, + 720, + 403, + 731 + ], + "type": "text", + "content": "9https://cloud.google.com/natural-language/docs/moderating-text" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 108, + 79, + 504, + 206 + ], + "blocks": [ + { + "bbox": [ + 108, + 79, + 504, + 206 + ], + "lines": [ + { + "bbox": [ + 108, + 79, + 504, + 206 + ], + "spans": [ + { + "bbox": [ + 108, + 79, + 
504, + 206 + ], + "type": "table", + "html": "
ModelHarmful Request F1 ScoreResponse Refusal F1 ScoreHarmful Response F1 ScorePrompt Safety ViolationsResponse Safety Violations
Exact MatchJaccardExact MatchJaccard
Aegis-Defensive66.45------
MD Judge43.54-49.12----
Llama Guard 260.87-63.62----
Llama Guard 367.98-65.7471.9874.5987.2488.37
DuoGuard62.59-37.99----
PG Qwen2.5 7B (Ours)87.1283.5974.0880.8785.4486.6788.79
PG Ministral (Ours)86.0284.4573.7579.9284.3086.8588.78
PG Smol (Ours)83.7681.3666.8277.0281.5184.0585.92
", + "image_path": "f2520c3d4db2dc1f554f4fef2eb312adfe3cdc4cd29305c0f48bc3f31bda42b9.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 258, + 506, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 258, + 506, + 360 + ], + "spans": [ + { + "bbox": [ + 104, + 258, + 506, + 360 + ], + "type": "text", + "content": "its similar size counterpart (Table 1). Aegis Defensive supports only a single text as input and is hence evaluated for Harmful Request only. Since the remaining baselines do not explicitly support Harmful Response, we approximate the prediction by executing them on prompt + response. None of the baselines support the Response Refusal task. Out of all baselines, the safety category taxonomy is the same for Llama-Guard-3 and PG. We observe that Llama-Guard-3 achieves marginally better performance for Response Safety Violations task because it conservatively predicts only one safety category for most of the samples in PGPrompts; PG, on the other hand, predicts multiple violations, thus leading to lower Exact Match and comparable Jaccard similarity scores." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 372, + 480, + 396 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 372, + 480, + 396 + ], + "spans": [ + { + "bbox": [ + 104, + 372, + 480, + 396 + ], + "type": "text", + "content": "4.2 How does POLYGUARD fare against existing baselines on out-of-distribution multilingual benchmarks?" + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 108, + 410, + 504, + 534 + ], + "blocks": [ + { + "bbox": [ + 104, + 213, + 506, + 237 + ], + "lines": [ + { + "bbox": [ + 104, + 213, + 506, + 237 + ], + "spans": [ + { + "bbox": [ + 104, + 213, + 506, + 237 + ], + "type": "text", + "content": "Table 1: Evaluation of POLYGUARD models and baselines on POLYGUARDPROMPTS. 
Take-away: PG models outperform baselines on in-distribution data." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 108, + 410, + 504, + 534 + ], + "lines": [ + { + "bbox": [ + 108, + 410, + 504, + 534 + ], + "spans": [ + { + "bbox": [ + 108, + 410, + 504, + 534 + ], + "type": "table", + "html": "
TypeModelRTP-LX En.RTP-LX Mul.Mod. En.Mod. Mul.XS En. (LG)XS Mul. (LG)XS En. (Aegis)XS Mul. (Aegis)MJ En. (LG)MJ Mul. (LG)MJ En. (Aegis)MJ Mul. (Aegis)Avg
Open -WeightAegis-Defensive84.2383.2171.1359.2266.5935.4769.4636.7590.9179.5290.6179.3770.54
MD Judge85.2838.6079.8661.4669.0017.2269.5617.7191.2138.4790.9137.9758.10
Llama Guard 239.4734.9975.8372.5553.7022.3250.5722.5677.5262.3876.8661.5654.19
Llama Guard 348.5144.8778.7373.9860.8425.7057.5026.9879.9278.1479.6777.5261.03
Duo Guard91.8350.4670.8549.4461.1626.0364.8327.3189.1841.8489.2641.4458.64
Closed -SourcePerspective API97.0981.9769.4064.1927.646.6433.926.8553.7945.3753.2344.7348.73
OpenAI Omni87.5274.1074.4368.0858.0222.4860.1123.5282.5966.9482.7366.9463.95
Google Mod.90.4483.2159.6453.8950.4441.8455.7144.7983.1480.8583.6681.0067.38
OursPG Qwen2.591.3483.2174.3969.5172.0735.3374.9337.1393.9386.4493.9786.3374.88
PG Ministrial87.2579.5874.9070.5171.3034.9374.0736.6895.7183.1195.3983.0273.87
PG Smol92.371.5669.363.0070.2833.2274.3835.1994.3973.5993.7273.3470.36
", + "image_path": "c55222dbf21f2abff079f48af27aa803b24e7526403aa98dc764f985f5738d93.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 542, + 507, + 588 + ], + "lines": [ + { + "bbox": [ + 104, + 542, + 507, + 588 + ], + "spans": [ + { + "bbox": [ + 104, + 542, + 507, + 588 + ], + "type": "text", + "content": "Table 2: F1 scores of safety detectors on Multilingual Guardrail Test Suite; metrics are in bold and underlined for the best second-best performing models respectively. Mod.=Moderation, XS=XSafety, MJ=MultiJail, En.=English, Mul.=Multilingual, LG=Llama Guard. Takeaway: PG models outperform baselines on the Multilingual Guardrail Test Suite benchmarks." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 606, + 506, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 606, + 506, + 676 + ], + "spans": [ + { + "bbox": [ + 104, + 606, + 506, + 676 + ], + "type": "text", + "content": "Multilingual Bench: We first benchmark models on datasets inspired by Yang et al. (2024b). This comprises multilingual toxicity and safety datasets, namely RTP-LX (de Wynter et al., 2024), OpenAI Moderation (Markov et al., 2023)," + }, + { + "bbox": [ + 104, + 606, + 506, + 676 + ], + "type": "inline_equation", + "content": "^{10}" + }, + { + "bbox": [ + 104, + 606, + 506, + 676 + ], + "type": "text", + "content": " XSafety (Wang et al., 2023), and MultiJail (Deng et al., 2024). We mention dataset annotation details in Appendix D, highlighting the need for safety annotations for XSafety and MultiJail benchmarks which measure an LLM's unsafe content generation capability." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 677, + 507, + 702 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 507, + 702 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 507, + 702 + ], + "type": "text", + "content": "Patronus AI Bench: We also evaluate models using the recall score on the benchmarks reported by PatronusAI (2024), consisting of toxic/unsafe samples from English and multi-" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 710, + 504, + 733 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 710, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 710, + 504, + 733 + ], + "type": "text", + "content": "10The OpenAI Moderation dataset comprises only English samples and is extended to a multilingual setting using Google Translate." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 751, + 309, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 761 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 761 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "content": "lingual toxicity and safety datasets. We perform our evaluations on all samples instead of a small subset. Appendix E contains details about the benchmark." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 110, + 506, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 110, + 506, + 189 + ], + "spans": [ + { + "bbox": [ + 104, + 110, + 506, + 189 + ], + "type": "text", + "content": "Results show that our PG models outperform the baselines on most datasets, achieving higher scores for the unsafe class (Table 2). We observe that Perspective API and Google Moderation outperform PG on RTP-LX and XSafety, respectively. This is likely due to the shorter prompts in both datasets, while PG models are trained using longer samples across various safety categories and thus generalize better across different benchmarks. PG models also outperform existing detectors on safety datasets in the Patronus AI benchmark and also achieve the best average performance (Table 3)." + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 106, + 200, + 504, + 358 + ], + "blocks": [ + { + "bbox": [ + 106, + 200, + 504, + 358 + ], + "lines": [ + { + "bbox": [ + 106, + 200, + 504, + 358 + ], + "spans": [ + { + "bbox": [ + 106, + 200, + 504, + 358 + ], + "type": "table", + "html": "
TypeModeltoxic-text-enjigsawukr-toxicitythai-toxicity-tweettoxic-text-pttoxic-chatBeaver TailsSalad-DataAvg
Open-WeightAegis-Defensive80.3279.2762.8067.2986.54--91.6477.98
MD Judge68.4573.405.800.8056.8663.5481.4196.6855.87
Llama Guard 223.7320.676.324.8353.5123.1759.2016.1425.95
Llama Guard 340.0327.209.6011.5053.7827.3052.6829.4231.43
Duo Guard93.6593.180.729.2774.2254.1787.5470.7060.43
Closed-SourcePerspective API77.2086.20--93.0015.8923.001.8037.14
OpenAI Omni54.2086.8041.6034.0099.8046.3567.8045.8059.54
Google Mod.95.2098.0086.6041.8097.6069.2777.6027.2074.16
OursPG Qwen2.585.3283.4765.2446.4784.2697.6590.6597.0881.27
PG Ministrial82.6079.1155.5235.7680.5197.3990.5396.8877.29
PG Smol89.5785.7259.1637.2081.8496.1084.6096.4278.83
", + "image_path": "65c3265b462c4ea8c93985a5838b37d9d9f2ecd49b1593fa2b9e5cf51fe3081e.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 422, + 237, + 434 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 422, + 237, + 434 + ], + "spans": [ + { + "bbox": [ + 105, + 422, + 237, + 434 + ], + "type": "text", + "content": "4.3 Are PG models robust?" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 444, + 506, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 444, + 506, + 478 + ], + "spans": [ + { + "bbox": [ + 104, + 444, + 506, + 478 + ], + "type": "text", + "content": "We study the average performance of the PG models trained using 3 datasets: only translated data, only ITW data, and translated + ITW data. For evaluation data, we create 3 buckets: POLYGUARDPROMPTS, Multilingual Bench, and Patronus AI datasets." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 483, + 507, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 483, + 507, + 540 + ], + "spans": [ + { + "bbox": [ + 104, + 483, + 507, + 540 + ], + "type": "text", + "content": "PG models trained on a combination of translated and ITW data show greater robustness across both in-domain and out-of-distribution evaluation benchmarks, thus underscoring the importance of the presence of ITW samples in the training data mix (Table 4). Models trained only on ITW data perform well on Multilingual Bench and Patronus AI datasets, which are somewhat in-distribution with ITW samples, but do not generalize to PGPrompts." 
+ } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 117, + 551, + 493, + 673 + ], + "blocks": [ + { + "bbox": [ + 104, + 365, + 504, + 400 + ], + "lines": [ + { + "bbox": [ + 104, + 365, + 504, + 400 + ], + "spans": [ + { + "bbox": [ + 104, + 365, + 504, + 400 + ], + "type": "text", + "content": "Table 3: Recall scores on unsafe samples from Patronus' benchmarking; metrics for the best performing model are in bold, whereas those for the second-best performing model are underlined. Takeaway: PG models outperform baselines on Patronus AI's benchmarks." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 117, + 551, + 493, + 673 + ], + "lines": [ + { + "bbox": [ + 117, + 551, + 493, + 673 + ], + "spans": [ + { + "bbox": [ + 117, + 551, + 493, + 673 + ], + "type": "table", + "html": "
POLYGUARDTraining DataPGPromptsMultilingual BenchPatronus AI
Qwen2.5Translated84.9574.5679.79
ITW64.6974.6382.26
Translated + ITW83.7974.8881.27
MinistralTranslated84.3273.8677.07
ITW63.1175.3585.76
Translated + ITW83.4473.8777.29
SmolTranslated82.2269.9974.84
ITW59.465.0872.21
Translated + ITW80.0670.3578.82
", + "image_path": "038615ea481e4df5198d2af6b4e813cc8c85be3aa566f2c629cf2b62c4e725fa.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 684, + 504, + 730 + ], + "lines": [ + { + "bbox": [ + 104, + 684, + 504, + 730 + ], + "spans": [ + { + "bbox": [ + 104, + 684, + 504, + 730 + ], + "type": "text", + "content": "Table 4: Average F1 score on POLYGUARDPROMPTS and Multilingual Bench, and Recall on PatronusAI, when models are trained with different training dataset settings. Underlined values represent in-distribution evaluations. Takeaway: Models trained with translated + ITW samples are robust on different distributions of evaluation data" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 301, + 750, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 750, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 301, + 750, + 309, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 182 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 182 + ], + "type": "text", + "content": "Furthermore, we investigate in detail the influence of the presence of ITW data in our training data mix for each benchmark dataset (Figure 5). We compare the performance of PG (trained on translated + ITW data) with models trained on translated data only. 
We observe that the performance of Qwen2.5 degrades for most of the datasets when ITW data are absent from the training mix. The performance differences for Ministrial are more balanced compared to Qwen2.5, that is, both improvement and degradation are observed across the evaluation datasets. The introduction of ITW data benefits the performance of the ToxicChat benchmark (Lin et al., 2023) the most for both models, since ITW data is most aligned with the ToxicChat benchmark." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 195, + 480, + 209 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 195, + 480, + 209 + ], + "spans": [ + { + "bbox": [ + 104, + 195, + 480, + 209 + ], + "type": "text", + "content": "4.4 How does performance vary on English vs Translated vs Code-Switched data?" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 216, + 504, + 261 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 216, + 504, + 261 + ], + "spans": [ + { + "bbox": [ + 104, + 216, + 504, + 261 + ], + "type": "text", + "content": "We study the performance variation of models on code-switched data, which consists of tokens belonging to different languages but in the same document. Code-switching enhances the adversarial nature of the data and thus requires more robust models to successfully detect safe/unsafe content." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 266, + 506, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 266, + 506, + 312 + ], + "spans": [ + { + "bbox": [ + 104, + 266, + 506, + 312 + ], + "type": "text", + "content": "We evaluate models on the Code-Switching Red-Teaming (CSRT) (Yoo et al., 2024) dataset and the translated and code-switched version of Aegis 1.0 (Ghosh et al., 2024) as provided by Yang et al. (2024b). 
Since CSRT also evaluates LLMs' tendency to generate unsafe content, we use the same automatic annotation pipeline as described in Appendix D." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 316, + 504, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 316, + 504, + 373 + ], + "spans": [ + { + "bbox": [ + 104, + 316, + 504, + 373 + ], + "type": "text", + "content": "In all settings, PG models outperform baselines, showing that our moderation models are more robust (Table 5). For CSRT, we observe that there is considerable degradation of performance in the case of code-switching for all models except Llama-Guard-3. For Aegis 1.0, there is a performance drop from English to the translated version. The performance increases for the code-switched version but is lower than on English data." + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 106, + 382, + 504, + 544 + ], + "blocks": [ + { + "bbox": [ + 106, + 382, + 504, + 544 + ], + "lines": [ + { + "bbox": [ + 106, + 382, + 504, + 544 + ], + "spans": [ + { + "bbox": [ + 106, + 382, + 504, + 544 + ], + "type": "table", + "html": "
TypeModelCSRT English (LG)CSRT English (Aegis)CSRT Code-switch (LG)CSRT Code-switch (Aegis)Aegis English*Aegis Translated*Aegis Code-switch*Avg
Open -WeightAegis-Defensive90.9190.6181.3881.5383.8975.1580.3583.40
MD Judge91.2190.9150.0050.0082.9842.5474.0668.81
Llama Guard 277.5276.8665.8864.7960.8251.6959.1665.25
Llama Guard 379.6679.4279.8379.1667.3962.1566.8673.50
Duo Guard89.1852.8289.2652.2883.3759.1073.4971.36
Closed -SourcePerspective API53.7953.2332.5231.7531.1526.1127.2636.54
OpenAI Omni82.8382.9774.2474.0373.3063.8268.1474.19
Google Mod.83.1483.6682.1981.9474.5473.6072.8978.85
OursPG Qwen2.594.1093.7888.5587.8887.8583.0085.1388.61
PG Ministrial95.1995.2290.0289.3586.9681.1883.8188.82
PG Smol94.3993.7284.1383.8684.7172.8980.3284.86
", + "image_path": "7c06a04f5905654a8188566a61f60456844d509c5f416f57e789d1529a3ba559.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 552, + 504, + 609 + ], + "lines": [ + { + "bbox": [ + 104, + 552, + 504, + 609 + ], + "spans": [ + { + "bbox": [ + 104, + 552, + 504, + 609 + ], + "type": "text", + "content": "Table 5: F1 scores comparison on English only, translated, and code-switched data; metrics for the best performing model are in bold, whereas those for the second-best performing model are underlined. * represent results averaged across 3 annotations, LG=Llama Guard Takeaway: All models suffer performance degradation for code-switched data, with PG models outperforming baselines." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 628, + 453, + 641 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 628, + 453, + 641 + ], + "spans": [ + { + "bbox": [ + 104, + 628, + 453, + 641 + ], + "type": "text", + "content": "4.5 How is performance affected by removing low-quality translated data?" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 648, + 506, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 648, + 506, + 706 + ], + "spans": [ + { + "bbox": [ + 104, + 648, + 506, + 706 + ], + "type": "text", + "content": "Data quality plays an important role in the training of any machine learning model. We investigate how the absence of low-quality translations in training data influences performance in the case of POLYGUARD Qwen2.5 and Ministral. Due to time and budget constraints, we use GPT-4o annotations as a proxy for human-evaluated translation quality and distill them for cost-effective annotations (details in Appendix F)." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 709, + 504, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 734 + ], + "type": "text", + "content": "Empirical evaluations show that the elimination of low-quality translations does not necessarily improve model performance (Figure 9, Appendix F) since contrastive trends" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 198, + 79, + 413, + 137 + ], + "blocks": [ + { + "bbox": [ + 198, + 79, + 413, + 137 + ], + "lines": [ + { + "bbox": [ + 198, + 79, + 413, + 137 + ], + "spans": [ + { + "bbox": [ + 198, + 79, + 413, + 137 + ], + "type": "table", + "html": "
ModelAverageStd Dev
POLYGONQwen2.587.018.27
POLYGONMinistral84.0412.25
POLYGONSmol65.2525.02
", + "image_path": "d5b00f408637d7cf518f8a12ec5653f7af824060843b2097368f54d9d0e1a04b.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 144, + 506, + 179 + ], + "lines": [ + { + "bbox": [ + 104, + 144, + 506, + 179 + ], + "spans": [ + { + "bbox": [ + 104, + 144, + 506, + 179 + ], + "type": "text", + "content": "Table 6: Recall scores for POLYGUARD models on human-written samples from the Aya RedTeam benchmark. Takeaway: POLYGUARD models generalize on data from different distributions despite being trained only on machine-translated data." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 205, + 504, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 205, + 504, + 239 + ], + "spans": [ + { + "bbox": [ + 104, + 205, + 504, + 239 + ], + "type": "text", + "content": "are observed for Qwen2.5 and Ministral. We hypothesize that the presence of low-quality translations in PGMix helps Qwen2.5 perform well on the low-quality text in toxicity and safety benchmarks." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 259, + 505, + 274 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 259, + 505, + 274 + ], + "spans": [ + { + "bbox": [ + 104, + 259, + 505, + 274 + ], + "type": "text", + "content": "4.6 Does POLYGUARD superficially align with artifacts of machine-translated text only?" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 283, + 284, + 440 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 283, + 284, + 440 + ], + "spans": [ + { + "bbox": [ + 104, + 283, + 284, + 440 + ], + "type": "text", + "content": "The use of machine-translated data for training POLYGUARD models can lead to the hypothesis that models learn only to rely on machine-translation artifacts in the data to evaluate safety. 
To investigate if this behavior exists, we evaluate our models on the Aya Red-teaming dataset (Ahmadian et al., 2024), which consists of manually created 7,419 samples in 8 languages, thus lacking the noise patterns present in machine-translated texts. We do not observe empirical evidence supporting the hypothesis (Table 6)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 463, + 227, + 491 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 463, + 227, + 491 + ], + "spans": [ + { + "bbox": [ + 105, + 463, + 227, + 491 + ], + "type": "text", + "content": "5 POLYGUARD Runtime Comparison" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 506, + 283, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 506, + 283, + 628 + ], + "spans": [ + { + "bbox": [ + 104, + 506, + 283, + 628 + ], + "type": "text", + "content": "We have trained and open-sourced models of three sizes (0.5B, 7B, and 8B). While all three can run on consumer hardware, the 0.5B can benefit on-device or latency-critical applications. We also test the latency of our models on 7419 samples from the Aya RedTeaming dataset (Ahmadian et al., 2024) on an NVIDIA L40S GPU using VLLM (Table 7), and find that our 0.5B model has a high throughput. However, our 7B and" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 627, + 506, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 627, + 506, + 662 + ], + "spans": [ + { + "bbox": [ + 104, + 627, + 506, + 662 + ], + "type": "text", + "content": "8B models run comparatively slower than their similarly sized Llama Guard counterparts. Compared to Llama Guard, POLYGUARD models solve more tasks, and thus require longer prompts and generate more output tokens, which leads to increased runtime." 
+ } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 294, + 283, + 503, + 563 + ], + "blocks": [ + { + "bbox": [ + 294, + 283, + 503, + 563 + ], + "lines": [ + { + "bbox": [ + 294, + 283, + 503, + 563 + ], + "spans": [ + { + "bbox": [ + 294, + 283, + 503, + 563 + ], + "type": "image", + "image_path": "7aa7eb18842bdeb0a90f34bf39ebc26933d8984b21590840eab966e47578f07f.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 287, + 574, + 504, + 620 + ], + "lines": [ + { + "bbox": [ + 287, + 574, + 504, + 620 + ], + "spans": [ + { + "bbox": [ + 287, + 574, + 504, + 620 + ], + "type": "text", + "content": "Figure 5: Performance difference on removing ITW data Takeaway: Removal of ITW data generally degrades model performance by reducing training data diversity." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 25, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 25, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 25, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 680, + 504, + 733 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 680, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 680, + 504, + 733 + ], + "type": "text", + "content": "11We also use the Aya Red-teaming dataset to assess the need for multilingual safety classifiers by translating it to English via TowerInstruct-7B-v0.2 and then evaluating an English-only classifier (Llama-Guard-3-8B). PG Qwen2.5 significantly outperforms this setup - achieving a higher recall in French (0.916 vs. 0.706), Russian (0.926 vs. 0.669) and Spanish (0.952 vs. 0.681) - highlighting the limitations of relying solely on translation for multilingual safety moderation." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 123, + 80, + 487, + 164 + ], + "blocks": [ + { + "bbox": [ + 123, + 80, + 487, + 164 + ], + "lines": [ + { + "bbox": [ + 123, + 80, + 487, + 164 + ], + "spans": [ + { + "bbox": [ + 123, + 80, + 487, + 164 + ], + "type": "table", + "html": "
ModelSizeInput TokensOutput TokensTime (m:ss)
Llama Guard 28B1575800275362:13
Llama Guard 38B1657409363642:14
POLYGON Smol0.5B18702062393370:31
POLYGON Qwen2.57B18702062430433:27
POLYGON Ministral8B18810522424263:58
", + "image_path": "de2a11dde32c0308543546988d5a694ba9a7d72072127e3414ddf4f43b0c5fca.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 171, + 504, + 205 + ], + "lines": [ + { + "bbox": [ + 104, + 171, + 504, + 205 + ], + "spans": [ + { + "bbox": [ + 104, + 171, + 504, + 205 + ], + "type": "text", + "content": "Table 7: Latency comparison of POLYGUARD models on Aya RedTeaming Takeaway: Smol is highly efficient, whereas Qwen and Ministral are slower than LlamaGuards as POLYGUARD models solve multiple tasks." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 105, + 225, + 284, + 240 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 225, + 284, + 240 + ], + "spans": [ + { + "bbox": [ + 105, + 225, + 284, + 240 + ], + "type": "text", + "content": "6 Background & Related Work" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 250, + 506, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 250, + 506, + 373 + ], + "spans": [ + { + "bbox": [ + 104, + 250, + 506, + 373 + ], + "type": "text", + "content": "Safety Training Datasets and Safety Evaluations AI Safety, the field of research focused on ensuring that AI systems are developed and deployed in a manner that is trustworthy, responsible, reliable, and beneficial to humans (Chen et al., 2024), has become widely studied in recent years (Chua et al., 2024; Hendrycks, 2025; Bengio et al., 2025; Bullwinkel et al., 2025). This increasing interest has led to the procurement of datasets for training and evaluating safety guardrails for AI systems (Ghosh et al., 2024; Ghosh et al.; Han et al., 2024; Lin et al., 2023; Ji et al., 2023; Li et al., 2024). 
Similarly, safety benchmarks have been curated to evaluate the safety risks exhibited by AI systems (Xie et al., 2024; Mazeika et al., 2024; Jain et al., 2024; Kumar et al., 2024; Yoo et al., 2024; Zeng et al., 2024b; Zhang et al., 2024a;b; Tan et al., 2024). However, almost all of the aforementioned datasets are limited to the English or Chinese language only or focus on specific subsets of AI safety Jain et al. (2024)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 384, + 507, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 384, + 507, + 518 + ], + "spans": [ + { + "bbox": [ + 104, + 384, + 507, + 518 + ], + "type": "text", + "content": "Safety Moderation Tools Current open-weight safety systems rely on either proprietary datasets (Inan et al., 2023; Zeng et al., 2024a) or previously mentioned English-centric datasets (Ghosh et al., 2024; Li et al., 2024; Han et al., 2024). Although these LLM-based classifiers possess inherent multilingual capabilities, their performance is constrained by their predominantly English training data (Han et al., 2024; Ghosh et al.). Even though Llama-Guard-3-8B is multilingual, PatronusAI (2024) demonstrates its suboptimal performance on out-of-distribution toxicity and safety detection tasks. Additionally, existing models face structural limitations; most are restricted to binary safety classification (with WildGuardMix (Han et al., 2024) being a notable exception), or ignore the structure of user-LLM interactions by processing only a single text at a time (Aegis 1.0 Ghosh et al. (2024) and DuoGuard Deng et al. (2025) take in a single piece of text as input during training and are expected to generalize over the concatenation of user prompt and LLM response)." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 533, + 189, + 545 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 533, + 189, + 545 + ], + "spans": [ + { + "bbox": [ + 105, + 533, + 189, + 545 + ], + "type": "text", + "content": "7 Conclusion" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 558, + 507, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 558, + 507, + 625 + ], + "spans": [ + { + "bbox": [ + 104, + 558, + 507, + 625 + ], + "type": "text", + "content": "We present POLYGUARDMIX, the first massive multilingual safety detection training dataset, comprising 1.91M user-LLM interactions across 17 languages. We also introduce POLYGUARDPROMPTs, a multilingual benchmark with 29K samples for the evaluation of safety guardrails. Further, we train robust multilingual LLM-based safety detectors, POLYGUARD, which perform better or comparably to existing open-weight and proprietary safety detectors across numerous evaluation benchmarks belonging to different data distributions." 
+ } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 80, + 201, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 80, + 201, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 80, + 201, + 94 + ], + "type": "text", + "content": "Ethics Statement" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 106, + 506, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 106, + 506, + 140 + ], + "spans": [ + { + "bbox": [ + 104, + 106, + 506, + 140 + ], + "type": "text", + "content": "Although POLYGUARD demonstrates state-of-the-art performance for multilingual safety detection, it may occasionally produce incorrect predictions. Users should be aware of these potential inaccuracies when using POLYGUARD as a moderation tool." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 144, + 507, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 144, + 507, + 201 + ], + "spans": [ + { + "bbox": [ + 104, + 144, + 507, + 201 + ], + "type": "text", + "content": "We also acknowledge that our datasets, POLYGUARDMIX and POLYGUARDPROMPTS, contain unsafe/harmful content that may inadvertently facilitate the creation of harmful content. 
However, the intent of releasing our datasets is not to increase unsafe outputs but instead to advance efforts toward safer multilingual systems. As a safety measure, we plan to implement restrictions on the use of our datasets." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 217, + 212, + 231 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 217, + 212, + 231 + ], + "spans": [ + { + "bbox": [ + 105, + 217, + 212, + 231 + ], + "type": "text", + "content": "Acknowledgments" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 240, + 504, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 240, + 504, + 266 + ], + "spans": [ + { + "bbox": [ + 104, + 240, + 504, + 266 + ], + "type": "text", + "content": "This research was supported in part by Google Jigsaw, DSO National Laboratories and Microsoft's Accelerating Foundation Models Research program." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 269, + 505, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 269, + 505, + 304 + ], + "spans": [ + { + "bbox": [ + 104, + 269, + 505, + 304 + ], + "type": "text", + "content": "Data We express our gratitude to the authors whose meticulous efforts were instrumental in the creation of our data set: WildGuardMix (Han et al., 2024), LMSys-Chat-1M (Zheng et al., 2023) and WildChat (Zhao et al., 2024)." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 314, + 506, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 314, + 506, + 373 + ], + "spans": [ + { + "bbox": [ + 104, + 314, + 506, + 373 + ], + "type": "text", + "content": "Software and Models We would like to thank the authors of TowerInstruct-7B-v0.2 (Alves et al., 2024) and NLLB-3.3B (Team et al., 2022) which we use for automatic translations, contributors and maintainers of vLLM (Kwon et al., 2023) and LiteLLM " + }, + { + "bbox": [ + 104, + 314, + 506, + 373 + ], + "type": "inline_equation", + "content": "^{12}" + }, + { + "bbox": [ + 104, + 314, + 506, + 373 + ], + "type": "text", + "content": " which we leverage to generate continuations from models, and OpenRLHF (Hu et al., 2024) which we use to fine-tune models. Finally, we thank Jigsaw for providing access to Perspective API." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 387, + 168, + 399 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 387, + 168, + 399 + ], + "spans": [ + { + "bbox": [ + 105, + 387, + 168, + 399 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 405, + 507, + 715 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 106, + 405, + 507, + 473 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 405, + 507, + 473 + ], + "spans": [ + { + "bbox": [ + 106, + 405, + 507, + 473 + ], + "type": "text", + "content": "Roee Aharoni, Melvin Johnson, and Orhan First. Massively multilingual neural machine translation. In Jill Burstein, Christy Doran, and Thamar Solorio (eds.), Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pp. 3874-3884, Minneapolis, Minnesota, June 2019. Association for Computational Linguistics. 
doi: 10.18653/v1/N19-1388. URL https://aclanthology.org/N19-1388/." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 479, + 507, + 513 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 479, + 507, + 513 + ], + "spans": [ + { + "bbox": [ + 106, + 479, + 507, + 513 + ], + "type": "text", + "content": "Arash Ahmadian, Beyza Ermis, Seraphina Goldfarb-Tarrant, Julia Kreutzer, Marzieh Fadaee, Sara Hooker, et al. The multilingual alignment prism: Aligning global and local preferences to reduce harm. arXiv preprint arXiv:2406.18682, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 518, + 505, + 564 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 518, + 505, + 564 + ], + "spans": [ + { + "bbox": [ + 106, + 518, + 505, + 564 + ], + "type": "text", + "content": "Duarte M Alves, José Pombal, Nuno M Guerreiro, Pedro H Martins, João Alves, Amin Farajian, Ben Peters, Ricardo Rei, Patrick Fernandes, Sweta Agrawal, et al. Tower: An open multilingual large language model for translation-related tasks. arXiv preprint arXiv:2402.17733, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 570, + 507, + 604 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 570, + 507, + 604 + ], + "spans": [ + { + "bbox": [ + 106, + 570, + 507, + 604 + ], + "type": "text", + "content": "Yoshua Bengio, Soren Mindermann, Daniel Privitera, Tamay Besiroglu, Rishi Bommasani, Stephen Casper, Yejin Choi, Philip Fox, Ben Garfinkel, Danielle Goldfarb, et al. International ai safety report. arXiv preprint arXiv:2501.17805, 2025." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 609, + 505, + 644 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 609, + 505, + 644 + ], + "spans": [ + { + "bbox": [ + 106, + 609, + 505, + 644 + ], + "type": "text", + "content": "Blake Bullwinkel, Amanda Minnich, Shiven Chawla, Gary Lopez, Martin Pouliot, Whitney Maxwell, Joris de Gruyter, Katherine Pratt, Saphir Qi, Nina Chikanov, et al. Lessons from red teaming 100 generative ai products. arXiv preprint arXiv:2501.07238, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 650, + 507, + 684 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 650, + 507, + 684 + ], + "spans": [ + { + "bbox": [ + 106, + 650, + 507, + 684 + ], + "type": "text", + "content": "Chen Chen, Ziyao Liu, Weifeng Jiang, Si Qi Goh, and KwoK-Yan Lam. Trustworthy, responsible, and safe ai: A comprehensive architectural framework for ai safety with challenges and mitigations. arXiv preprint arXiv:2408.12935, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 690, + 505, + 715 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 690, + 505, + 715 + ], + "spans": [ + { + "bbox": [ + 106, + 690, + 505, + 715 + ], + "type": "text", + "content": "Jaymari Chua, Yun Li, Shiyi Yang, Chen Wang, and Lina Yao. Ai safety in generative ai large language models: A survey. arXiv preprint arXiv:2407.18369, 2024." 
+ } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 114, + 719, + 277, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 719, + 277, + 732 + ], + "spans": [ + { + "bbox": [ + 114, + 719, + 277, + 732 + ], + "type": "inline_equation", + "content": "^{12}" + }, + { + "bbox": [ + 114, + 719, + 277, + 732 + ], + "type": "text", + "content": "https://github.com/BerriAI/litellm" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 731 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 107, + 81, + 506, + 160 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 506, + 160 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 506, + 160 + ], + "type": "text", + "content": "Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer, and Veselin Stoyanov. Unsupervised cross-lingual representation learning at scale. In Dan Jurafsky, Joyce Chai, Natalie Schluter, and Joel Tetreault (eds.), Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pp. 8440-8451, Online, July 2020. 
Association for Computational Linguistics. doi: 10.18653/v1/2020.acl-main.747. URL https://aclanthology.org/2020.acl-main.747/." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 167, + 506, + 235 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 167, + 506, + 235 + ], + "spans": [ + { + "bbox": [ + 105, + 167, + 506, + 235 + ], + "type": "text", + "content": "Marta Costa-jussà, Eric Smith, Christophe Ropers, Daniel Licht, Jean Maillard, Javier Ferrando, and Carlos Escolano. Toxicity in multilingual machine translation at scale. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Findings of the Association for Computational Linguistics: EMNLP 2023, pp. 9570-9586, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023-findings-emnlp.642. URL https://aclanthology.org/2023-findings-emnlp.642." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 241, + 506, + 277 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 241, + 506, + 277 + ], + "spans": [ + { + "bbox": [ + 105, + 241, + 506, + 277 + ], + "type": "text", + "content": "Aida Mostafazadeh Davani, Sagar Gubbi Venkatesh, Sunipa Dev, Shachi Dave, and Vinodkumar Prabhakaran. Genil: A multilingual dataset on generalizing language. In First Conference on Language Modeling, 2024. URL https://openreview.net/forum?id=kLH4ccaL21." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 283, + 506, + 327 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 283, + 506, + 327 + ], + "spans": [ + { + "bbox": [ + 105, + 283, + 506, + 327 + ], + "type": "text", + "content": "Adrian de Wynter, Ishaan Watts, Nektar Ege Altintoprak, Tua Wongsangaroonsri, Minghui Zhang, Noura Farra, Lena Baur, Samantha Claudet, Pavel Gajdusek, Can Gören, et al. Rtplx: Can llms evaluate toxicity in multilingual scenarios? arXiv preprint arXiv:2404.14397, 2024." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 335, + 504, + 360 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 335, + 504, + 360 + ], + "spans": [ + { + "bbox": [ + 105, + 335, + 504, + 360 + ], + "type": "text", + "content": "Yihe Deng, Yu Yang, Junkai Zhang, Wei Wang, and Bo Li. Duoguard: A two-player rl-driven framework for multilingual llm guardrails. arXiv preprint arXiv:2502.05163, 2025." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 365, + 504, + 401 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 365, + 504, + 401 + ], + "spans": [ + { + "bbox": [ + 105, + 365, + 504, + 401 + ], + "type": "text", + "content": "Yue Deng, Wenxuan Zhang, Sinno Jialin Pan, and Lidong Bing. Multilingual jailbreak challenges in large language models. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=vESNKdEMGp." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 407, + 506, + 443 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 407, + 506, + 443 + ], + "spans": [ + { + "bbox": [ + 105, + 407, + 506, + 443 + ], + "type": "text", + "content": "Jesse Dodge, Maarten Sap, Ana Marasovic, William Agnew, Gabriel Ilharco, Dirk Groeneweld, Margaret Mitchell, and Matt Gardner. Documenting large webtext corpora: A case study on the colossal clean crawled corpus. arXiv preprint arXiv:2104.08758, 2021." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 449, + 506, + 483 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 449, + 506, + 483 + ], + "spans": [ + { + "bbox": [ + 105, + 449, + 506, + 483 + ], + "type": "text", + "content": "Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 490, + 506, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 490, + 506, + 536 + ], + "spans": [ + { + "bbox": [ + 105, + 490, + 506, + 536 + ], + "type": "text", + "content": "Shaona Ghosh, Prasoon Varshney, Makes Narsimhan Sreedhar, Aishwarya Padmakumar, Traian Rebedea, Jibin Rajan Varghese, and Christopher Parisien. Aegis2.0: A diverse ai safety dataset and risks taxonomy for alignment of llm guardrails. In Neurips Safe Generative AI Workshop 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 543, + 504, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 543, + 504, + 578 + ], + "spans": [ + { + "bbox": [ + 105, + 543, + 504, + 578 + ], + "type": "text", + "content": "Shaona Ghosh, Prasoon Varshney, Erick Galinkin, and Christopher Parisien. Aegis: Online adaptive ai content safety moderation with ensemble of llm experts. arXiv preprint arXiv:2404.05993, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 584, + 506, + 619 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 584, + 506, + 619 + ], + "spans": [ + { + "bbox": [ + 105, + 584, + 506, + 619 + ], + "type": "text", + "content": "Daniil Gurgurov, Tanja Bäumel, and Tatiana Anikina. Multilingual large language models and curse of multilinguality. 2024. doi: 10.48550/ARXIV.2406.10602. URL https://arxiv.org/abs/2406.10602." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 625, + 506, + 661 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 625, + 506, + 661 + ], + "spans": [ + { + "bbox": [ + 105, + 625, + 506, + 661 + ], + "type": "text", + "content": "Seungju Han, Kavel Rao, Allyson Ettinger, Liwei Jiang, Bill Yuchen Lin, Nathan Lambert, Yejin Choi, and Nouha Dziri. Wildguard: Open one-stop moderation tools for safety risks, jailbreaks, and refusals of llms. 
arXiv preprint arXiv:2406.18495, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 667, + 400, + 681 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 667, + 400, + 681 + ], + "spans": [ + { + "bbox": [ + 105, + 667, + 400, + 681 + ], + "type": "text", + "content": "Dan Hendrycks. Introduction to ai safety, ethics, and society, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 686, + 506, + 731 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 686, + 506, + 731 + ], + "spans": [ + { + "bbox": [ + 105, + 686, + 506, + 731 + ], + "type": "text", + "content": "Edward J Hu, yelong shen, Phillip Wallis, Zeyuan Allen-Zhu, Yanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. LoRA: Low-rank adaptation of large language models. In International Conference on Learning Representations, 2022. URL https://openreview.net/forum?id=nZeVKeeFYf9." + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 507, + 732 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 105, + 81, + 507, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 507, + 116 + ], + "spans": [ + { + "bbox": [ + 105, + 81, 
+ 507, + 116 + ], + "type": "text", + "content": "Jian Hu, Xibin Wu, Zilin Zhu, Xianyu, Weixun Wang, Dehao Zhang, and Yu Cao. Openrlhf: An easy-to-use, scalable and high-performance rlhf framework. arXiv preprint arXiv:2405.11143, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 124, + 507, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 124, + 507, + 168 + ], + "spans": [ + { + "bbox": [ + 105, + 124, + 507, + 168 + ], + "type": "text", + "content": "Hakan Inan, Kartikeya Upasani, Jianfeng Chi, Rashi Rungta, Krithika Iyer, Yuning Mao, Michael Tontchev, Qing Hu, Brian Fuller, Davide Testuggine, et al. Llama guard: Llm-based input-output safeguard for human-ai conversations. arXiv preprint arXiv:2312.06674, 2023." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 177, + 507, + 212 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 177, + 507, + 212 + ], + "spans": [ + { + "bbox": [ + 105, + 177, + 507, + 212 + ], + "type": "text", + "content": "Devansh Jain, Priyanshu Kumar, Samuel Gehman, Xuhui Zhou, Thomas Hartvigsen, and Maarten Sap. Polyglotoxicityprompts: Multilingual evaluation of neural toxic degeneration in large language models. arXiv preprint arXiv:2405.09373, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 219, + 507, + 275 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 219, + 507, + 275 + ], + "spans": [ + { + "bbox": [ + 105, + 219, + 507, + 275 + ], + "type": "text", + "content": "Jiaming Ji, Mickel Liu, Juntao Dai, Xuehai Pan, Chi Zhang, Ce Bian, Boyuan Chen, Ruiyang Sun, Yizhou Wang, and Yaodong Yang. Beavertails: Towards improved safety alignment of LLM via a human-preference dataset. In Thirty-seventh Conference on Neural Information Processing Systems Datasets and Benchmarks Track, 2023. URL https://openreview.net/forum?id=g0QovXbFw3." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 283, + 507, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 283, + 507, + 350 + ], + "spans": [ + { + "bbox": [ + 105, + 283, + 507, + 350 + ], + "type": "text", + "content": "Tom Kocmi, Rachel Bawden, Ondrej Bojar, Anton Dvorkovich, Christian Federmann, Mark Fishel, Thamme Gowda, Yvette Graham, Roman Grundkiewicz, Barry Haddow, Rebecca Knowles, Philipp Koehn, Christof Monz, Makoto Morishita, Masaaki Nagata, Toshiaki Nakazawa, Michal Novák, Martin Popel, and Maja Popovic. Findings of the 2022 conference on machine translation (wmt22). In Conference on Machine Translation, 2022. URL https://apisemantic scholar.org/CorpusID:256461033." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 357, + 507, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 357, + 507, + 392 + ], + "spans": [ + { + "bbox": [ + 105, + 357, + 507, + 392 + ], + "type": "text", + "content": "Suhas Kotha, Jacob M. Springer, and Aditi Raghunathan. Understanding catastrophic forgetting in language models via implicit inference. ArXiv, abs/2309.10105, 2023. URL https://api_semanticscholar.org/CorpusID:262054014." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 399, + 507, + 444 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 399, + 507, + 444 + ], + "spans": [ + { + "bbox": [ + 105, + 399, + 507, + 444 + ], + "type": "text", + "content": "Priyanshu Kumar, Elaine Lau, Saranya Vijayakumar, Tu Trinh, Scale Red Team, Elaine Chang, Vaughn Robinson, Sean Hendryx, Shuyan Zhou, Matt Fredrikson, et al. Refusal-trained llms are easily jailbroken as browser agents. arXiv preprint arXiv:2410.13886, 2024." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 453, + 507, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 453, + 507, + 498 + ], + "spans": [ + { + "bbox": [ + 105, + 453, + 507, + 498 + ], + "type": "text", + "content": "Woosuk Kwon, Zhuohan Li, Siyuan Zhuang, Ying Sheng, Lianmin Zheng, Cody Hao Yu, Joseph E. Gonzalez, Hao Zhang, and Ion Stoica. Efficient memory management for large language model serving with pagedattention. In Proceedings of the ACM SIGOPS 29th Symposium on Operating Systems Principles, 2023." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 506, + 507, + 573 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 506, + 507, + 573 + ], + "spans": [ + { + "bbox": [ + 105, + 506, + 507, + 573 + ], + "type": "text", + "content": "Lijun Li, Bowen Dong, Ruohui Wang, Xuhao Hu, Wangmeng Zuo, Dahua Lin, Yu Qiao, and Jing Shao. SALAD-bench: A hierarchical and comprehensive safety benchmark for large language models. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar (eds.), Findings of the Association for Computational Linguistics: ACL 2024, pp. 3923-3954, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024-findings-acl.235. URL https://aclanthology.org/2024-findings-acl.235." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 581, + 507, + 649 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 581, + 507, + 649 + ], + "spans": [ + { + "bbox": [ + 105, + 581, + 507, + 649 + ], + "type": "text", + "content": "Zi Lin, Zihan Wang, Yongqi Tong, Yangkun Wang, Yuxin Guo, Yujia Wang, and Jingbo Shang. ToxicChat: Unveiling hidden challenges of toxicity detection in real-world user-AI conversation. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Findings of the Association for Computational Linguistics: EMNLP 2023, pp. 4694-4702, Singapore, December 2023. 
Association for Computational Linguistics. doi: 10.18653/v1/2023.findings-emnlp.311. URL https://aclanthology.org/2023.findings-emnlp.311." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 656, + 507, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 656, + 507, + 677 + ], + "spans": [ + { + "bbox": [ + 105, + 656, + 507, + 677 + ], + "type": "text", + "content": "AI @ Meta Llama Team. The llama 3 herd of models, 2024. URL https://arxiv.org/abs/2407.21783." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 686, + 507, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 686, + 507, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 686, + 507, + 732 + ], + "type": "text", + "content": "Todor Markov, Chong Zhang, Sandhini Agarwal, Florentine Eloundou Nekoul, Theodore Lee, Steven Adler, Angela Jiang, and Lilian Weng. A holistic approach to undesired content detection in the real world. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 37, pp. 15009-15018, 2023." 
+ } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 107, + 81, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 107, + 81, + 506, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 506, + 139 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 506, + 139 + ], + "type": "text", + "content": "Mantas Mazeika, Long Phan, Xuwang Yin, Andy Zou, Zifan Wang, Norman Mu, Elham Sakaehie, Nathaniel Li, Steven Basart, Bo Li, David Forsyth, and Dan Hendrycks. Harmbench: a standardized evaluation framework for automated red teaming and robust refusal. In Proceedings of the 41st International Conference on Machine Learning, ICML'24. JMLR.org, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 144, + 488, + 157 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 144, + 488, + 157 + ], + "spans": [ + { + "bbox": [ + 107, + 144, + 488, + 157 + ], + "type": "text", + "content": "Mistral. Un ministral, des ministraux. URL https://mistral.ai/en/news/ministraux." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 164, + 504, + 186 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 164, + 504, + 186 + ], + "spans": [ + { + "bbox": [ + 107, + 164, + 504, + 186 + ], + "type": "text", + "content": "Andrew Ng. Agentic translation. URL https://github.com/andrewyng/translation-agent." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 194, + 505, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 194, + 505, + 217 + ], + "spans": [ + { + "bbox": [ + 107, + 194, + 505, + 217 + ], + "type": "text", + "content": "PatronusAI. Llama guard is off duty. https://www.patronus.ai/blog/llama-guard-is-off-duty, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 224, + 505, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 224, + 505, + 247 + ], + "spans": [ + { + "bbox": [ + 107, + 224, + 505, + 247 + ], + "type": "text", + "content": "Qwen. Qwen2.5: A party of foundation models! URL https://qwenlm.github.io/blog/qwen2.5/." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 254, + 506, + 321 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 254, + 506, + 321 + ], + "spans": [ + { + "bbox": [ + 107, + 254, + 506, + 321 + ], + "type": "text", + "content": "Maarten Sap, Saadia Gabriel, Lianhui Qin, Dan Jurafsky, Noah A. Smith, and Yejin Choi. Social bias frames: Reasoning about social and power implications of language. In Dan Jurafsky, Joyce Chai, Natalie Schluter, and Joel Tetreault (eds.), Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pp. 5477-5490, Online, July 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.acl-main.486. URL https://aclanthology.org/2020.acl-main.486/." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 328, + 506, + 406 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 328, + 506, + 406 + ], + "spans": [ + { + "bbox": [ + 107, + 328, + 506, + 406 + ], + "type": "text", + "content": "Khetam Al Sharou and Lucia Specia. A taxonomy and study of critical errors in machine translation. In Helena Moniz, Lieve Macken, Andrew Rufener, Loici Barrault, Marta R. Costa-jussa, Christophe Declercq, Maarit Koponen, Ellie Kemp, Spyridon Pilos, Mikel L. Forcada, Carolina Scarton, Joachim Van den Bogaert, Joke Daems, Arda Tezcan, Bram Vanroy, and Margot Fonteyne (eds.), Proceedings of the 23rd Annual Conference of the European Association for Machine Translation, pp. 171-180, Ghent, Belgium, June 2022. European Association for Machine Translation. URL https://aclanthology.org/2022.eamt-1.20." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 413, + 506, + 513 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 413, + 506, + 513 + ], + "spans": [ + { + "bbox": [ + 107, + 413, + 506, + 513 + ], + "type": "text", + "content": "Lucia Specia, Frédéric Blain, Marina Fomicheva, Chrysoula Zerva, Zhenhao Li, Vishrav Chaudhary, and André F. T. Martins. Findings of the WMT 2021 shared task on quality estimation. In Loic Barrault, Ondrej Bojar, Fethi Bougares, Rajen Chatterjee, Marta R. Costa-jussa, Christian Federmann, Mark Fishel, Alexander Fraser, Markus Freitag, Yvette Graham, Roman Grundkiewicz, Paco Guzman, Barry Haddow, Matthias Huck, Antonio Jimeno Yepes, Philipp Koehn, Tom Kocmi, Andre Martins, Makoto Morishita, and Christof Monz (eds.), Proceedings of the Sixth Conference on Machine Translation, pp. 684-725, Online, November 2021. Association for Computational Linguistics. URL https://aclanthology.org/2021.wmt-1.71." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 520, + 505, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 520, + 505, + 555 + ], + "spans": [ + { + "bbox": [ + 107, + 520, + 505, + 555 + ], + "type": "text", + "content": "Yingshui Tan, Boren Zheng, Baihui Zheng, Kerui Cao, Huiyun Jing, Jincheng Wei, Jiaheng Liu, Yancheng He, Wenbo Su, Xiangyong Zhu, et al. Chinese safetyqa: A safety short-form factuality benchmark for large language models. arXiv preprint arXiv:2412.15265, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 561, + 505, + 584 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 561, + 505, + 584 + ], + "spans": [ + { + "bbox": [ + 107, + 561, + 505, + 584 + ], + "type": "text", + "content": "Llama Team. Meta llama guard 2. https://github.com/meta-llama/PurpleLlama/blob/main/Llama-Guard2/MODEL_CARD.md, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 591, + 506, + 691 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 591, + 506, + 691 + ], + "spans": [ + { + "bbox": [ + 107, + 591, + 506, + 691 + ], + "type": "text", + "content": "NLLB Team, Marta R. Costa-jussà, James Cross, Onur Çelebi, Maha Elbayad, Kenneth Heafield, Kevin Heffernan, Elahe Kalbassi, Janice Lam, Daniel Licht, Jean Maillard, Anna Sun, Skyler Wang, Guillaume Wenzek, Al Youngblood, Bapi Akula, Loic Barrault, Gabriel Mejia Gonzalez, Prangthip Hansanti, John Hoffman, Semarley Jarrett, Kaushik Ram Sadagopan, Dirk Rowe, Shannon Spruit, Chau Tran, Pierre Andrews, Necip Fazil Ayan, Shruti Bhosale, Sergey Edunov, Angela Fan, Cynthia Gao, Vedanuj Goswami, Francisco Guzmán, Philipp Koehn, Alexandre Mourachko, Christophe Ropers, Safiyyah Saleem, Holger Schwenk, and Jeff Wang. No language left behind: Scaling human-centered machine translation, 2022." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 698, + 505, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 698, + 505, + 732 + ], + "spans": [ + { + "bbox": [ + 107, + 698, + 505, + 732 + ], + "type": "text", + "content": "Wenxuan Wang, Zhaopeng Tu, Chang Chen, Youliang Yuan, Jen-tse Huang, Wenxiang Jiao, and Michael R Lyu. All languages matter: On the multilingual safety of large language models. arXiv preprint arXiv:2310.00905, 2023." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 588 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 107, + 81, + 506, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 506, + 127 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 506, + 127 + ], + "type": "text", + "content": "Tinghao Xie, Xiangyu Qi, Yi Zeng, Yangsibo Huang, Udari Madhushani Sehwag, Kaixuan Huang, Luxi He, Boyi Wei, Dacheng Li, Ying Sheng, et al. Sorry-bench: Systematically evaluating large language model safety refusal behaviors. arXiv preprint arXiv:2406.14598, 2024." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 133, + 506, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 133, + 506, + 168 + ], + "spans": [ + { + "bbox": [ + 105, + 133, + 506, + 168 + ], + "type": "text", + "content": "Yuemei Xu, Ling Hu, Jiayi Zhao, Zihan Qiu, Yuqi Ye, and Hanwen Gu. A survey on multilingual large language models: Corpora, alignment, and bias. ArXiv, abs/2404.00929, 2024. URL https://api_semanticscholar.org/CorpusID:268819377." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 175, + 505, + 208 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 175, + 505, + 208 + ], + "spans": [ + { + "bbox": [ + 105, + 175, + 505, + 208 + ], + "type": "text", + "content": "An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024a." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 215, + 504, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 215, + 504, + 239 + ], + "spans": [ + { + "bbox": [ + 105, + 215, + 504, + 239 + ], + "type": "text", + "content": "Yahan Yang, Soham Dan, Dan Roth, and Insup Lee. Benchmarking llm guardrails in handling multilingual toxicity. arXiv preprint arXiv:2410.22153, 2024b." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 245, + 504, + 269 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 245, + 504, + 269 + ], + "spans": [ + { + "bbox": [ + 105, + 245, + 504, + 269 + ], + "type": "text", + "content": "Haneul Yoo, Yongjin Yang, and Hwaran Lee. Code-switching red-teaming: Lm evaluation for safety and multilingual understanding. arXiv preprint arXiv:2406.15481, 2024." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 274, + 506, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 274, + 506, + 319 + ], + "spans": [ + { + "bbox": [ + 105, + 274, + 506, + 319 + ], + "type": "text", + "content": "Wenjun Zeng, Yuchi Liu, Ryan Mullins, Ludovic Peran, Joe Fernandez, Hamza Harkous, Karthik Narasimhan, Drew Proud, Piyush Kumar, Bhaktipriya Radharapu, et al. Shieldgemma: Generative ai content moderation based on gemma. arXiv preprint arXiv:2407.21772, 2024a." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 327, + 505, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 327, + 505, + 361 + ], + "spans": [ + { + "bbox": [ + 105, + 327, + 505, + 361 + ], + "type": "text", + "content": "Yi Zeng, Yu Yang, Andy Zhou, Jeffrey Ziwei Tan, Yuheng Tu, Yifan Mai, Kevin Klyman, Minzhou Pan, Ruoxi Jia, Dawn Song, et al. Air-bench 2024: A safety benchmark based on risk categories from regulations and policies. arXiv preprint arXiv:2407.17436, 2024b." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 368, + 504, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 368, + 504, + 403 + ], + "spans": [ + { + "bbox": [ + 105, + 368, + 504, + 403 + ], + "type": "text", + "content": "Hengxiang Zhang, Hongfu Gao, Qiang Hu, Guanhua Chen, Lili Yang, Bingyi Jing, Hongxin Wei, Bing Wang, Haifeng Bai, and Lei Yang. Chinesesa: A chinese benchmark for evaluating safety in large language models. arXiv preprint arXiv:2410.18491, 2024a." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 408, + 504, + 443 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 408, + 504, + 443 + ], + "spans": [ + { + "bbox": [ + 105, + 408, + 504, + 443 + ], + "type": "text", + "content": "Wenjing Zhang, Xuejiao Lei, Zhaoxiang Liu, Meijuan An, Bikun Yang, KaiKai Zhao, Kai Wang, and Shiguo Lian. 
Chisafetybench: A chinese hierarchical safety benchmark for large language models. arXiv preprint arXiv:2406.10311, 2024b." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 449, + 506, + 473 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 449, + 506, + 473 + ], + "spans": [ + { + "bbox": [ + 105, + 449, + 506, + 473 + ], + "type": "text", + "content": "Wenting Zhao, Xiang Ren, Jack Hessel, Claire Cardie, Yejin Choi, and Yuntian Deng. Wildchat: 1m chatgpt interaction logs in the wild. arXiv preprint arXiv:2405.01470, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 479, + 506, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 479, + 506, + 514 + ], + "spans": [ + { + "bbox": [ + 105, + 479, + 506, + 514 + ], + "type": "text", + "content": "Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Tianle Li, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zhuohan Li, Zi Lin, Eric P Xing, et al. Lmsys-chat-1m: A large-scale real-world llm conversation dataset. arXiv preprint arXiv:2309.11998, 2023." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 520, + 506, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 520, + 506, + 588 + ], + "spans": [ + { + "bbox": [ + 105, + 520, + 506, + 588 + ], + "type": "text", + "content": "Xuhui Zhou, Hao Zhu, Akhila Yerukola, Thomas Davidson, Jena D. Hwang, Swabha Swayamdipta, and Maarten Sap. COBRA frames: Contextual reasoning about effects and harms of offensive statements. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Findings of the Association for Computational Linguistics: ACL 2023, pp. 6294–6315, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023-findings-acl.392. URL https://aclanthology.org/2023-findings-acl.392/." 
+ } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 80, + 371, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 80, + 371, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 80, + 371, + 95 + ], + "type": "text", + "content": "A Translation Pipeline and Quality Evaluation" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 111, + 506, + 158 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 111, + 506, + 158 + ], + "spans": [ + { + "bbox": [ + 104, + 111, + 506, + 158 + ], + "type": "text", + "content": "Our training data translation pipeline segments the source text into chunks using *blingfire*13, translates each chunk independently, and reconstructs the target text by concatenating the translated segments. We utilize this chunking approach to process long texts while respecting the context window constraints of translation models." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 162, + 506, + 219 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 162, + 506, + 219 + ], + "spans": [ + { + "bbox": [ + 104, + 162, + 506, + 219 + ], + "type": "text", + "content": "The agentic translation framework, used for translating the test data, implements a three-stage process: (1) initial translation of the source text, (2) critical analysis of potential translation errors, and (3) subsequent refinement of the target text incorporating the identified improvements. This iterative approach helps maintain semantic accuracy and cultural nuances crucial for safety evaluation across languages." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 223, + 506, + 280 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 223, + 506, + 280 + ], + "spans": [ + { + "bbox": [ + 104, + 223, + 506, + 280 + ], + "type": "text", + "content": "Automatic evaluation of translation quality is done by prompting GPT-4o as a judge. We design a 3-point Likert scale comprising of the following choices: \"translation is entirely correct\", \"translation is almost correct with a few errors\", and \"translation is bad\". We run evaluations for only the translated test set of PGMix to assess the overall translation quality of a model. 
The evaluation prompts are shown below:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 120, + 293, + 192, + 306 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 293, + 192, + 306 + ], + "spans": [ + { + "bbox": [ + 120, + 293, + 192, + 306 + ], + "type": "text", + "content": "System Prompt" + } + ] + } + ], + "index": 5 + }, + { + "type": "code", + "bbox": [ + 120, + 315, + 304, + 383 + ], + "blocks": [ + { + "bbox": [ + 120, + 315, + 304, + 383 + ], + "lines": [ + { + "bbox": [ + 120, + 315, + 304, + 383 + ], + "spans": [ + { + "bbox": [ + 120, + 315, + 304, + 383 + ], + "type": "text", + "content": "You are a linguistic expert. Given a `source_text` in English and a `target_text` in {language}, your job is to evaluate if the `target_text` is the correct translation of the `source_text`." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "code_body" + } + ], + "index": 6, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 121, + 403, + 181, + 416 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 403, + 181, + 416 + ], + "spans": [ + { + "bbox": [ + 121, + 403, + 181, + 416 + ], + "type": "text", + "content": "User Prompt" + } + ] + } + ], + "index": 7 + }, + { + "type": "code", + "bbox": [ + 121, + 426, + 224, + 449 + ], + "blocks": [ + { + "bbox": [ + 121, + 426, + 224, + 449 + ], + "lines": [ + { + "bbox": [ + 121, + 426, + 224, + 449 + ], + "spans": [ + { + "bbox": [ + 121, + 426, + 224, + 449 + ], + "type": "text", + "content": "`source_text`: {src} \n`target_text`: {tgt}" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "code_body" + } + ], + "index": 8, + "sub_type": "code", + "guess_lang": "verilog" + }, + { + "bbox": [ + 104, + 467, + 506, + 500 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 467, + 506, + 500 + ], + "spans": [ + { + "bbox": [ + 104, + 467, + 506, + 500 + ], + "type": "text", + "content": "Following is the format of structure 
generations for translation quality evaluation. We prompt the judge to first reason about the source and target sentences before outputting the verdict." + } + ] + } + ], + "index": 9 + }, + { + "type": "code", + "bbox": [ + 106, + 513, + 485, + 635 + ], + "blocks": [ + { + "bbox": [ + 106, + 513, + 485, + 635 + ], + "lines": [ + { + "bbox": [ + 106, + 513, + 485, + 635 + ], + "spans": [ + { + "bbox": [ + 106, + 513, + 485, + 635 + ], + "type": "text", + "content": "class QualityEnum(str, Enum): incorrect " + }, + { + "bbox": [ + 106, + 513, + 485, + 635 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 106, + 513, + 485, + 635 + ], + "type": "text", + "content": " 'translation is bad' almost.correct " + }, + { + "bbox": [ + 106, + 513, + 485, + 635 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 106, + 513, + 485, + 635 + ], + "type": "text", + "content": " 'translation is almost correct with a few errors' entirely.correct " + }, + { + "bbox": [ + 106, + 513, + 485, + 635 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 106, + 513, + 485, + 635 + ], + "type": "text", + "content": " 'translation is entirely correct' \nclass Result(BaseModel): reason: str " + }, + { + "bbox": [ + 106, + 513, + 485, + 635 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 106, + 513, + 485, + 635 + ], + "type": "text", + "content": " Field(description " + }, + { + "bbox": [ + 106, + 513, + 485, + 635 + ], + "type": "inline_equation", + "content": "\\equiv" + }, + { + "bbox": [ + 106, + 513, + 485, + 635 + ], + "type": "text", + "content": " \"brief pointers on why the translation is correct or wrong\") verdict: QualityEnum " + }, + { + "bbox": [ + 106, + 513, + 485, + 635 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 106, + 513, + 485, + 635 + ], + "type": "text", + "content": " Field(description " + }, + { + "bbox": [ + 106, + 513, + 485, + 635 + ], + "type": 
"inline_equation", + "content": "\\equiv" + }, + { + "bbox": [ + 106, + 513, + 485, + 635 + ], + "type": "text", + "content": " \"the verdict about the translation quality\")" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "code_body" + } + ], + "index": 10, + "sub_type": "code", + "guess_lang": "python" + }, + { + "bbox": [ + 104, + 654, + 506, + 701 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 654, + 506, + 701 + ], + "spans": [ + { + "bbox": [ + 104, + 654, + 506, + 701 + ], + "type": "text", + "content": "Tables 8 and 9 show the verdicts of the GPT-4o judge for the human prompt and model response respectively. We observe that TowerInstruct generates higher-quality translations when compared to NLLB for the languages it supports. However, in the case of Hindi (which is not supported by Tower), the quality is poor." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 113, + 719, + 277, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 719, + 277, + 732 + ], + "spans": [ + { + "bbox": [ + 113, + 719, + 277, + 732 + ], + "type": "text", + "content": "13https://pypi.org/project/blingfire" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 106, + 519, + 342 + ], + 
"blocks": [ + { + "bbox": [ + 106, + 106, + 519, + 342 + ], + "lines": [ + { + "bbox": [ + 106, + 106, + 519, + 342 + ], + "spans": [ + { + "bbox": [ + 106, + 106, + 519, + 342 + ], + "type": "table", + "html": "
LanguageModelEntirely CorrectPartially CorrectBadInvalid Judge Verdict
ZHNLLB636688401-
Tower12023601621
ESNLLB1437218682
Tower1374303471
FRNLLB1406245722
Tower1499177472
DENLLB12753481011
Tower1335323661
KONLLB10754901582
Tower12783361092
ITNLLB1384260801
Tower144222756-
PTNLLB146320260-
Tower153214251-
NLNLLB1339306773
Tower139926462-
RUNLLB1379240106-
Tower1406233851
HINLLB147018669-
Tower72516912
", + "image_path": "d98006c005474071287e277efe122673eb39e0c87bc1b6a8e8b92e50ace4d38d.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 106, + 435, + 518, + 670 + ], + "blocks": [ + { + "bbox": [ + 104, + 349, + 504, + 374 + ], + "lines": [ + { + "bbox": [ + 104, + 349, + 504, + 374 + ], + "spans": [ + { + "bbox": [ + 104, + 349, + 504, + 374 + ], + "type": "text", + "content": "Table 8: GPT-4o Judge verdicts for human prompts translation. Takeaway: TowerInstruct generated more accurate translations than NLLB for supported languages." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 435, + 518, + 670 + ], + "lines": [ + { + "bbox": [ + 106, + 435, + 518, + 670 + ], + "spans": [ + { + "bbox": [ + 106, + 435, + 518, + 670 + ], + "type": "table", + "html": "
LanguageModelEntirely CorrectPartially CorrectBadInvalid Judge Verdict
ZHNLLB15311474241
Tower822729174-
ESNLLB858426441-
Tower583105785-
FRNLLB883741101-
Tower481116381-
DENLLB811790124-
Tower625102872-
KONLLB72192084-
Tower7079161011
ITNLLB809566350-
Tower5291103921
PTNLLB8846232162
Tower4891131105-
NLNLLB8287721241
Tower5931049821
RUNLLB906663156-
Tower512112390-
HINLLB128641128
Tower611718
", + "image_path": "8b8df921c7157f55eb7b46dc8c5f5a2e939ccc7fd21b159503d855dc7d135b5f.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 677, + 504, + 702 + ], + "lines": [ + { + "bbox": [ + 104, + 677, + 504, + 702 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 504, + 702 + ], + "type": "text", + "content": "Table 9: GPT-4o Judge verdicts for model generation translation. Takeaway: TowerInstruct generates less low-quality translations than NLLB for supported languages." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 80, + 230, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 80, + 230, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 80, + 230, + 94 + ], + "type": "text", + "content": "B Human Validation" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 110, + 506, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 110, + 506, + 177 + ], + "spans": [ + { + "bbox": [ + 104, + 110, + 506, + 177 + ], + "type": "text", + "content": "We use Prolific14 to collect annotations. 
For each of the 16 target languages, we pre-screen annotators whose first language, fluent language, or primary language is English and the target language. Additionally, we pre-screen annotators with an approval rate of " + }, + { + "bbox": [ + 104, + 110, + 506, + 177 + ], + "type": "inline_equation", + "content": "90 - 100\\%" + }, + { + "bbox": [ + 104, + 110, + 506, + 177 + ], + "type": "text", + "content": " and a submission count between 100 and 10,000. Annotators were compensated at the rate of " + }, + { + "bbox": [ + 104, + 110, + 506, + 177 + ], + "type": "inline_equation", + "content": "\\$12/" + }, + { + "bbox": [ + 104, + 110, + 506, + 177 + ], + "type": "text", + "content": "hr. Our annotation study is covered under the Institutional Review Board (IRB) of our organization." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 181, + 506, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 181, + 506, + 240 + ], + "spans": [ + { + "bbox": [ + 104, + 181, + 506, + 240 + ], + "type": "text", + "content": "We collect 2,400 annotations across 16 languages and 50 data points per language, with each data point annotated by 3 annotators, and each annotator annotating 10 data points. We recruited 191 unique annotators15 via Prolific, spanning across 24 countries. They self-identified as 110 male and 81 female. In terms of ethnicity, they described themselves as 84 White, 79 Black, 12 Mixed, 10 Asian, and 5 Other." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 243, + 506, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 243, + 506, + 301 + ], + "spans": [ + { + "bbox": [ + 104, + 243, + 506, + 301 + ], + "type": "text", + "content": "Figures 6, 7, and 8 present the consent, annotation instructions, and framework questions. The human validation results for each language are shown in Table 10. 
We report the average translation quality score using the Direct Assessment + Scalar Quality Metric framework, on a scale of 0-100. Inter-annotator agreement is computed using Krippendorff's " + }, + { + "bbox": [ + 104, + 243, + 506, + 301 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 243, + 506, + 301 + ], + "type": "text", + "content": " for both source and target language safety labels." + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 113, + 315, + 498, + 517 + ], + "blocks": [ + { + "bbox": [ + 113, + 315, + 498, + 517 + ], + "lines": [ + { + "bbox": [ + 113, + 315, + 498, + 517 + ], + "spans": [ + { + "bbox": [ + 113, + 315, + 498, + 517 + ], + "type": "table", + "html": "
LanguageAvg. Trans- lation ScoreSource Safety αTarget Safety αSource - Target α
Arabic80.990.410.400.96
Chinese78.550.430.420.91
Czech81.110.470.480.96
Dutch77.150.370.330.96
French82.120.480.471.0
German82.670.440.450.92
Hindi84.720.340.370.96
Italian83.210.380.370.91
Japanese76.390.390.360.76
Korean81.550.430.460.96
Polish80.330.390.400.96
Portuguese81.090.460.450.92
Russian80.440.420.430.96
Spanish84.110.450.441.0
Swedish79.660.360.351.0
Thai78.890.410.420.92
", + "image_path": "05148bca329f0174cfa52c7f72c148d82cffe6df95917784369a9249a0046357.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 524, + 504, + 570 + ], + "lines": [ + { + "bbox": [ + 104, + 524, + 504, + 570 + ], + "spans": [ + { + "bbox": [ + 104, + 524, + 504, + 570 + ], + "type": "text", + "content": "Table 10: Human validation results for translation quality and safety labels. Translation scores are on a 0-100 scale, using the DA+SQM framework. Inter-annotator agreement (Krippendorff's " + }, + { + "bbox": [ + 104, + 524, + 504, + 570 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 104, + 524, + 504, + 570 + ], + "type": "text", + "content": ") for source and target safety labels is reported, along with agreement between majority-voted source and target labels." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 602, + 293, + 618 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 602, + 293, + 618 + ], + "spans": [ + { + "bbox": [ + 104, + 602, + 293, + 618 + ], + "type": "text", + "content": "C POLYGUARD Training Details" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 634, + 506, + 680 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 634, + 506, + 680 + ], + "spans": [ + { + "bbox": [ + 104, + 634, + 506, + 680 + ], + "type": "text", + "content": "We train our models using OPENRLHF" + }, + { + "bbox": [ + 104, + 634, + 506, + 680 + ], + "type": "inline_equation", + "content": "^{16}" + }, + { + "bbox": [ + 104, + 634, + 506, + 680 + ], + "type": "text", + "content": " on 8 NVIDIA A6000 GPUs. We set LoRA rank to 8 and alpha to 16. 
We train our models with a total batch size of 128, for a sequence length of 8192, for 1 epoch using a learning rate of " + }, + { + "bbox": [ + 104, + 634, + 506, + 680 + ], + "type": "inline_equation", + "content": "2e - 4" + }, + { + "bbox": [ + 104, + 634, + 506, + 680 + ], + "type": "text", + "content": ". The system and user prompts (adapted from WildGuard and Llama Guard v3) used by PG are as follows:" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 113, + 696, + 237, + 709 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 696, + 237, + 709 + ], + "spans": [ + { + "bbox": [ + 113, + 696, + 237, + 709 + ], + "type": "inline_equation", + "content": "^{14}" + }, + { + "bbox": [ + 113, + 696, + 237, + 709 + ], + "type": "text", + "content": "https://www.prolific.com/" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 114, + 709, + 421, + 721 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 709, + 421, + 721 + ], + "spans": [ + { + "bbox": [ + 114, + 709, + 421, + 721 + ], + "type": "text", + "content": "15some participated in multiple languages, resulting in a lower unique count" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 114, + 720, + 331, + 731 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 720, + 331, + 731 + ], + "spans": [ + { + "bbox": [ + 114, + 720, + 331, + 731 + ], + "type": "text", + "content": "16https://github.com/OpenRLHF/OpenRLHF/tree/main" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 
299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 277, + 122, + 334, + 129 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 277, + 122, + 334, + 129 + ], + "spans": [ + { + "bbox": [ + 277, + 122, + 334, + 129 + ], + "type": "text", + "content": "Instruction and Consent" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 110, + 140, + 137, + 144 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 140, + 137, + 144 + ], + "spans": [ + { + "bbox": [ + 110, + 140, + 137, + 144 + ], + "type": "text", + "content": "Background:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 110, + 146, + 498, + 158 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 146, + 498, + 158 + ], + "spans": [ + { + "bbox": [ + 110, + 146, + 498, + 158 + ], + "type": "text", + "content": "Hi! We are a team of researchers who are passionate about making AI systems safer to use across multiple languages. We are trying to test out a few systems for automatically translating sentences across languages to see how well they preserve the original meaning and if they translate unsafe content as well. We appreciate your help in making AI systems safer across multiple cultures and languages!" 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 110, + 164, + 142, + 169 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 164, + 142, + 169 + ], + "spans": [ + { + "bbox": [ + 110, + 164, + 142, + 169 + ], + "type": "text", + "content": "Task Overview:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 110, + 171, + 498, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 171, + 498, + 182 + ], + "spans": [ + { + "bbox": [ + 110, + 171, + 498, + 182 + ], + "type": "text", + "content": "In this study, we are interested in making AI systems safer to use in multiple languages. You will be presented with a sentence in English and its machine-generated translation in another language. Your task is to judge the quality of the translation, and if either the original sentence/translation contains any unsafe language." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 110, + 189, + 163, + 194 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 189, + 163, + 194 + ], + "spans": [ + { + "bbox": [ + 110, + 189, + 163, + 194 + ], + "type": "text", + "content": "Data collection & sharing:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 110, + 196, + 480, + 208 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 196, + 480, + 208 + ], + "spans": [ + { + "bbox": [ + 110, + 196, + 480, + 208 + ], + "type": "text", + "content": "We will not ask you for your name, and the data collected in this study will be made identifiable to the best of our extent. We will securely store the data on our servers and only share with qualified researchers (e.g., who want to further the study of hate speech detection). If you later decide that you do not want your responses included in this study, please email so we can exclude your work from the analysis." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 110, + 214, + 129, + 219 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 214, + 129, + 219 + ], + "spans": [ + { + "bbox": [ + 110, + 214, + 129, + 219 + ], + "type": "text", + "content": "Contact:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 110, + 220, + 495, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 220, + 495, + 240 + ], + "spans": [ + { + "bbox": [ + 110, + 220, + 495, + 240 + ], + "type": "text", + "content": "If you have any questions about this study, you should feel free to ask them by contacting us (via the MTurk interface or via email at: info@mturk.com). If you have questions later, desire additional information, or wish to withdraw your participation please contact e-mail in accordance with the contact information listed above. If you have questions pertaining to your rights as a research participant; or to report concerns to this study, you should contact" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 110, + 251, + 224, + 256 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 251, + 224, + 256 + ], + "spans": [ + { + "bbox": [ + 110, + 251, + 224, + 256 + ], + "type": "text", + "content": "Please do not use AI systems when answering the tasks." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 110, + 258, + 465, + 263 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 258, + 465, + 263 + ], + "spans": [ + { + "bbox": [ + 110, + 258, + 465, + 263 + ], + "type": "text", + "content": "We will sample some of the responses and manually review them, if we find evidence to show that annotators are not giving proper efforts to this task, we will exclude them from future tasks." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 115, + 270, + 350, + 278 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 270, + 350, + 278 + ], + "spans": [ + { + "bbox": [ + 115, + 270, + 350, + 278 + ], + "type": "text", + "content": "I have read and understood the information above, are 18 years or older, and agree to participate in our study." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 115, + 284, + 129, + 288 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 284, + 129, + 288 + ], + "spans": [ + { + "bbox": [ + 115, + 284, + 129, + 288 + ], + "type": "text", + "content": "Yes" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 116, + 292, + 129, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 292, + 129, + 297 + ], + "spans": [ + { + "bbox": [ + 116, + 292, + 129, + 297 + ], + "type": "text", + "content": "No" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 291, + 335, + 319, + 341 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 335, + 319, + 341 + ], + "spans": [ + { + "bbox": [ + 291, + 335, + 319, + 341 + ], + "type": "text", + "content": "Move forward" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 178, + 354, + 431, + 367 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 178, + 354, + 431, + 367 + ], + "spans": [ + { + "bbox": [ + 178, + 354, + 431, + 367 + ], + "type": "text", + "content": "Figure 6: Annotator Consent. Blacked out for anonymity." 
+ } + ] + } + ], + "index": 16, + "type": "text" + }, + { + "bbox": [ + 115, + 449, + 165, + 456 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 449, + 165, + 456 + ], + "spans": [ + { + "bbox": [ + 115, + 449, + 165, + 456 + ], + "type": "text", + "content": "Annotation Task" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 115, + 465, + 493, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 465, + 493, + 479 + ], + "spans": [ + { + "bbox": [ + 115, + 465, + 493, + 479 + ], + "type": "text", + "content": "You will be presented with 2 sentences: an source original sentence in English, and its automatic translation in Thai. Your task is to assess the quality of the translation and if the source/translated sentences contain unsafe language." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 115, + 483, + 186, + 491 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 483, + 186, + 491 + ], + "spans": [ + { + "bbox": [ + 115, + 483, + 186, + 491 + ], + "type": "text", + "content": "Quality of Translation" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 115, + 494, + 233, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 494, + 233, + 499 + ], + "spans": [ + { + "bbox": [ + 115, + 494, + 233, + 499 + ], + "type": "text", + "content": "Evaluate the quality of the translation in Thai. Consider both:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 121, + 504, + 324, + 517 + ], + "type": "list", + "angle": 0, + "index": 23, + "blocks": [ + { + "bbox": [ + 121, + 504, + 324, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 504, + 324, + 510 + ], + "spans": [ + { + "bbox": [ + 121, + 504, + 324, + 510 + ], + "type": "text", + "content": "- Meaning: Does the translation convey and reflect the same meaning as the source sentence in English?" 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 121, + 510, + 233, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 510, + 233, + 517 + ], + "spans": [ + { + "bbox": [ + 121, + 510, + 233, + 517 + ], + "type": "text", + "content": "- Fluency: Is the translation easy to read and understand?" + } + ] + } + ], + "index": 22 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 115, + 521, + 234, + 527 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 521, + 234, + 527 + ], + "spans": [ + { + "bbox": [ + 115, + 521, + 234, + 527 + ], + "type": "text", + "content": "Rate the translation quality on a continuous scale from 0 to 6." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 115, + 531, + 154, + 538 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 531, + 154, + 538 + ], + "spans": [ + { + "bbox": [ + 115, + 531, + 154, + 538 + ], + "type": "text", + "content": "Rating Scale:" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 121, + 540, + 492, + 578 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 121, + 540, + 456, + 546 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 540, + 456, + 546 + ], + "spans": [ + { + "bbox": [ + 121, + 540, + 456, + 546 + ], + "type": "text", + "content": "0 - No Meaning Preserved: The translation is confusing, makes no sense, or is completely different from the English sentence. Most of the meaning and information is lost." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 121, + 547, + 490, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 547, + 490, + 559 + ], + "spans": [ + { + "bbox": [ + 121, + 547, + 490, + 559 + ], + "type": "text", + "content": "- 2 - Some Meaning Preserved: The translation captures only part of the meaning but misses key details. 
It may be hard to understand because of major mistakes like completely incorrect or irrelevant words or very poor grammar." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 121, + 560, + 492, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 560, + 492, + 571 + ], + "spans": [ + { + "bbox": [ + 121, + 560, + 492, + 571 + ], + "type": "text", + "content": "- 4 - Most Meaning Preserved: The translation retains most of the meaning of the English sentence -- the main idea is clear with just a few small mistakes, like minor grammar issues, awkward wording, or slight changes in meaning." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 121, + 572, + 404, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 572, + 404, + 578 + ], + "spans": [ + { + "bbox": [ + 121, + 572, + 404, + 578 + ], + "type": "text", + "content": "- 6 - Perfect Translation: The translation is clear, natural, and grammatically correct -- same meaning like the source sentence, with no mistakes." + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 116, + 582, + 142, + 589 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 582, + 142, + 589 + ], + "spans": [ + { + "bbox": [ + 116, + 582, + 142, + 589 + ], + "type": "text", + "content": "Key Tip:" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 121, + 591, + 288, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 591, + 288, + 597 + ], + "spans": [ + { + "bbox": [ + 121, + 591, + 288, + 597 + ], + "type": "text", + "content": "- You can move the slider to any point on the scale, not just whole numbers (e.g., 5.3)." 
+ } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 115, + 602, + 211, + 611 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 602, + 211, + 611 + ], + "spans": [ + { + "bbox": [ + 115, + 602, + 211, + 611 + ], + "type": "text", + "content": "Identifying Unsafe Language" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 115, + 613, + 331, + 619 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 613, + 331, + 619 + ], + "spans": [ + { + "bbox": [ + 115, + 613, + 331, + 619 + ], + "type": "text", + "content": "Please check if the original sentence in English or the translation in Thai contain any unsafe or harmful language." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 115, + 623, + 233, + 630 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 623, + 233, + 630 + ], + "spans": [ + { + "bbox": [ + 115, + 623, + 233, + 630 + ], + "type": "text", + "content": "What Counts as Unsafe or Harmful Language?" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 115, + 634, + 393, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 634, + 393, + 639 + ], + "spans": [ + { + "bbox": [ + 115, + 634, + 393, + 639 + ], + "type": "text", + "content": "A sentence is considered unsafe or harmful if it supports, encourages, or facilitates harmful, illegal, or unethical actions. 
This includes BOTH:" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 121, + 644, + 495, + 669 + ], + "type": "list", + "angle": 0, + "index": 39, + "blocks": [ + { + "bbox": [ + 121, + 644, + 481, + 656 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 644, + 481, + 656 + ], + "spans": [ + { + "bbox": [ + 121, + 644, + 481, + 656 + ], + "type": "text", + "content": "Content that is itself harmful: (e.g., promoting violence, supporting crimes, providing inappropriate sexual content, expressing hate speech towards Jews and Muslim communities, revealing confidential information or health records, encouraging misuse of technology)." + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 121, + 657, + 495, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 657, + 495, + 669 + ], + "spans": [ + { + "bbox": [ + 121, + 657, + 495, + 669 + ], + "type": "text", + "content": "Sentences that request harmful content: (e.g., making unauthorized requests to edit medical records and prescription, asking for instructions on illegal activities, ways to create or use dangerous weapons, promoting self-harm, ways to exploit children, how to spread false election information, even if the language seems neutral)." 
+ } + ] + } + ], + "index": 38 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 233, + 681, + 377, + 693 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 681, + 377, + 693 + ], + "spans": [ + { + "bbox": [ + 233, + 681, + 377, + 693 + ], + "type": "text", + "content": "Figure 7: Annotator instructions" + } + ] + } + ], + "index": 40, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 316, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 316, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 316, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 41 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 106, + 80, + 504, + 296 + ], + "blocks": [ + { + "bbox": [ + 106, + 80, + 504, + 296 + ], + "lines": [ + { + "bbox": [ + 106, + 80, + 504, + 296 + ], + "spans": [ + { + "bbox": [ + 106, + 80, + 504, + 296 + ], + "type": "image", + "image_path": "8c1ec5dc3465a48fd1833e0f2501ef6f8b1d046c8f4ff78cbdf04101d4148ee2.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 184, + 304, + 427, + 318 + ], + "lines": [ + { + "bbox": [ + 184, + 304, + 427, + 318 + ], + "spans": [ + { + "bbox": [ + 184, + 304, + 427, + 318 + ], + "type": "text", + "content": "Figure 8: Annotator Framework with Example in Thai" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 106, + 367, + 504, + 730 + ], + "blocks": [ + { + "bbox": [ + 106, + 367, + 504, + 730 + ], + "lines": [ + { + 
"bbox": [ + 106, + 367, + 504, + 730 + ], + "spans": [ + { + "bbox": [ + 106, + 367, + 504, + 730 + ], + "type": "image", + "image_path": "23369c7221938247f171a9f65b265388e08de765f08367b5bf2d80d1302b1041.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 316, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 316, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 316, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 121, + 83, + 181, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 83, + 181, + 95 + ], + "spans": [ + { + "bbox": [ + 121, + 83, + 181, + 95 + ], + "type": "text", + "content": "User Prompt" + } + ] + } + ], + "index": 1 + }, + { + "type": "code", + "bbox": [ + 121, + 106, + 187, + 160 + ], + "blocks": [ + { + "bbox": [ + 121, + 106, + 187, + 160 + ], + "lines": [ + { + "bbox": [ + 121, + 106, + 187, + 160 + ], + "spans": [ + { + "bbox": [ + 121, + 106, + 187, + 160 + ], + "type": "text", + "content": "Human user: {prompt} AI assistant: {response}" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_body" + } + ], + "index": 2, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 104, + 178, + 506, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 178, + 506, + 236 + ], + "spans": [ + { + "bbox": [ + 104, + 178, + 506, + 236 + ], + "type": "text", + "content": "We train our 
models " + }, + { + "bbox": [ + 104, + 178, + 506, + 236 + ], + "type": "inline_equation", + "content": "^{17}" + }, + { + "bbox": [ + 104, + 178, + 506, + 236 + ], + "type": "text", + "content": " to generate formatted responses containing the user input prompt safety label, policies violated, model response refusal, model response safety label, and policies violated (shown below). Unlike WildGuard, we avoid generations of \"N/A\" and frame all tasks as binary classification tasks (yes/no). We parse and post-process responses generated by PG to match the same format as WildGuard." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 122, + 245, + 233, + 256 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 245, + 233, + 256 + ], + "spans": [ + { + "bbox": [ + 122, + 245, + 233, + 256 + ], + "type": "text", + "content": "POLYGON Generation" + } + ] + } + ], + "index": 4 + }, + { + "type": "code", + "bbox": [ + 121, + 265, + 313, + 342 + ], + "blocks": [ + { + "bbox": [ + 121, + 265, + 313, + 342 + ], + "lines": [ + { + "bbox": [ + 121, + 265, + 313, + 342 + ], + "spans": [ + { + "bbox": [ + 121, + 265, + 313, + 342 + ], + "type": "text", + "content": "Harmful request: yes/no \nRequest safety violations: None / List of violated categories \nResponse refusal: yes/no \nHarmful response: yes/no \nResponse safety violations: None / List of violated categories" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "code_body" + } + ], + "index": 5, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 105, + 369, + 437, + 384 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 369, + 437, + 384 + ], + "spans": [ + { + "bbox": [ + 105, + 369, + 437, + 384 + ], + "type": "text", + "content": "D Out-of-Distribution Benchmarking Dataset Annotations" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 396, + 506, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 396, + 506, 
+ 498 + ], + "spans": [ + { + "bbox": [ + 104, + 396, + 506, + 498 + ], + "type": "text", + "content": "In this section, we list the formulation of ground-truth labels for the out-of-distribution benchmarks. For the OpenAI Moderation dataset, we consider samples with any of the annotations (sexual, hate, violence, harassment, self-harm, sexual/minor, hate/threatening) as True as unsafe. For RTP-LX, we consider samples with a Toxicity score above 1 unsafe. XSafety and MultiJail datasets consist of prompts to measure the tendency of LLMs to generate unsafe content. Thus, a few prompts in these datasets are innocuous but could trigger an LLM to generate harmful content. Therefore, we use GPT-4o to determine the safety label of the samples. Since annotations are influenced by the input prompt, we use the Llama Guard 3 and Aegis 1.0 prompts to create two sets of ground-truth labels." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 515, + 266, + 530 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 515, + 266, + 530 + ], + "spans": [ + { + "bbox": [ + 105, + 515, + 266, + 530 + ], + "type": "text", + "content": "E Patronus AI Safety Study" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 542, + 506, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 542, + 506, + 589 + ], + "spans": [ + { + "bbox": [ + 104, + 542, + 506, + 589 + ], + "type": "text", + "content": "Patronus AI benchmarked Llama Guard 3 on a small number of samples (500) from various English and multilingual toxicity and safety datasets illustrating its poor recall of unsafe data points (PatronusAI, 2024). 
Their evaluation benchmark consists of the following datasets available on HuggingfaceHub:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 129, + 600, + 303, + 700 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 129, + 600, + 269, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 600, + 269, + 612 + ], + "spans": [ + { + "bbox": [ + 129, + 600, + 269, + 612 + ], + "type": "text", + "content": "1. nicholasKluge/toxic-text-en" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 129, + 613, + 303, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 613, + 303, + 624 + ], + "spans": [ + { + "bbox": [ + 129, + 613, + 303, + 624 + ], + "type": "text", + "content": "2. Arsive/toxicity_classification_jigsaw" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 129, + 625, + 282, + 637 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 625, + 282, + 637 + ], + "spans": [ + { + "bbox": [ + 129, + 625, + 282, + 637 + ], + "type": "text", + "content": "3. ukr-detect/ukr-toxicity-dataset" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 129, + 638, + 267, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 638, + 267, + 650 + ], + "spans": [ + { + "bbox": [ + 129, + 638, + 267, + 650 + ], + "type": "text", + "content": "4. tmu-nlp/thai_toxicity tweet" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 129, + 651, + 266, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 651, + 266, + 662 + ], + "spans": [ + { + "bbox": [ + 129, + 651, + 266, + 662 + ], + "type": "text", + "content": "5. nicholasKluge/toxic-text-pt" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 129, + 663, + 219, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 663, + 219, + 674 + ], + "spans": [ + { + "bbox": [ + 129, + 663, + 219, + 674 + ], + "type": "text", + "content": "6. 
lmsys/toxic-chat" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 129, + 675, + 272, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 675, + 272, + 688 + ], + "spans": [ + { + "bbox": [ + 129, + 675, + 272, + 688 + ], + "type": "text", + "content": "7. PKU-Alignment/BeaverTails" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 129, + 689, + 266, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 689, + 266, + 700 + ], + "spans": [ + { + "bbox": [ + 129, + 689, + 266, + 700 + ], + "type": "text", + "content": "8. OpenSafetyLab/Salad-Data" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 710, + 504, + 733 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 710, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 710, + 504, + 733 + ], + "type": "inline_equation", + "content": "^{17}" + }, + { + "bbox": [ + 104, + 710, + 504, + 733 + ], + "type": "text", + "content": "Qwen2.5-7B-Instruct and Ministrial-8B-Instruct-2410 are available for modifications under the Apache 2.0 license and Mistral Research License respectively." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 80, + 344, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 80, + 344, + 95 + ], + "spans": [ + { + "bbox": [ + 104, + 80, + 344, + 95 + ], + "type": "text", + "content": "F Influence of low-quality translated data" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 106, + 506, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 106, + 506, + 185 + ], + "spans": [ + { + "bbox": [ + 104, + 106, + 506, + 185 + ], + "type": "text", + "content": "We distill GPT-4o's knowledge of translation quality into a Qwen2.5 7B classifier to filter out samples with low translation quality. We use the same schema as our translation quality study (Appendix A) to filter for samples where the human prompt and model response are accurately translated. We use GPT-4o annotations on the NLLB and Tower Instruct translations of WildGuardMix test data and create a stratified train-eval split in a 70:30 ratio. 
Similar to PG, we train a Qwen2.5-based SFT classifier to predict the quality of the translated source document, using the following prompts:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 121, + 193, + 192, + 205 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 193, + 192, + 205 + ], + "spans": [ + { + "bbox": [ + 121, + 193, + 192, + 205 + ], + "type": "text", + "content": "System Prompt" + } + ] + } + ], + "index": 3 + }, + { + "type": "code", + "bbox": [ + 121, + 216, + 308, + 281 + ], + "blocks": [ + { + "bbox": [ + 121, + 216, + 308, + 281 + ], + "lines": [ + { + "bbox": [ + 121, + 216, + 308, + 281 + ], + "spans": [ + { + "bbox": [ + 121, + 216, + 308, + 281 + ], + "type": "text", + "content": "You are a linguistic expert. Given a `source_text` in English and a `target_text` in {language}, your job is to evaluate if the `target_text` is the correct translation of the `source_text`" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "code_body" + } + ], + "index": 4, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 121, + 299, + 181, + 312 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 299, + 181, + 312 + ], + "spans": [ + { + "bbox": [ + 121, + 299, + 181, + 312 + ], + "type": "text", + "content": "User Prompt" + } + ] + } + ], + "index": 5 + }, + { + "type": "code", + "bbox": [ + 121, + 321, + 239, + 344 + ], + "blocks": [ + { + "bbox": [ + 121, + 321, + 239, + 344 + ], + "lines": [ + { + "bbox": [ + 121, + 321, + 239, + 344 + ], + "spans": [ + { + "bbox": [ + 121, + 321, + 239, + 344 + ], + "type": "text", + "content": "`source_text': {source}\n`target_text': {target}" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "code_body" + } + ], + "index": 6, + "sub_type": "code", + "guess_lang": "python" + }, + { + "bbox": [ + 104, + 357, + 504, + 391 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 357, + 504, + 391 + ], + "spans": [ + { + "bbox": 
[ + 104, + 357, + 504, + 391 + ], + "type": "text", + "content": "The model is trained on 60,346 training samples and achieves an overall accuracy of " + }, + { + "bbox": [ + 104, + 357, + 504, + 391 + ], + "type": "inline_equation", + "content": "82\\%" + }, + { + "bbox": [ + 104, + 357, + 504, + 391 + ], + "type": "text", + "content": " on the validation set of 25,863 samples. A complete evaluation report is shown below in Table 11." + } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 178, + 399, + 433, + 448 + ], + "blocks": [ + { + "bbox": [ + 178, + 399, + 433, + 448 + ], + "lines": [ + { + "bbox": [ + 178, + 399, + 433, + 448 + ], + "spans": [ + { + "bbox": [ + 178, + 399, + 433, + 448 + ], + "type": "table", + "html": "
LabelPrecisionRecallF1Support
Bad7073712066
Partially Correct7663697704
Entirely Correct87939016093
", + "image_path": "c8c5b104c08252fb9df60205d9c2185c775a21889177f63de0f3b67525d663a0.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 171, + 454, + 440, + 468 + ], + "lines": [ + { + "bbox": [ + 171, + 454, + 440, + 468 + ], + "spans": [ + { + "bbox": [ + 171, + 454, + 440, + 468 + ], + "type": "text", + "content": "Table 11: Translation Quality Classifier performance metrics" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 490, + 506, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 490, + 506, + 601 + ], + "spans": [ + { + "bbox": [ + 104, + 490, + 506, + 601 + ], + "type": "text", + "content": "Removal of low-quality training data does not necessarily improve model performance. Intuitively, the presence of poor-quality translated data should harm model performance. However, PG models show contrastive trends when low-quality samples are removed from the training data mix (Figure 9). The performance of Qwen2.5 degrades for most datasets, whereas the performance of Ministrial improves. The performance degradation in the case of Qwen2.5 can be attributed to noisy samples in safety and toxicity evaluation datasets. Harmful text is considered to belong to low-quality data; web-crawls implement word blocklist filters to enhance data quality (Dodge et al., 2021). Thus, we hypothesize that the noise induced by poor translations bridges the gap between training and evaluation data, thus leading to performance improvement." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 617, + 194, + 630 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 617, + 194, + 630 + ], + "spans": [ + { + "bbox": [ + 105, + 617, + 194, + 630 + ], + "type": "text", + "content": "G Limitations" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "text", + "content": "We describe several limitations of our work. First, we automatically translate English data to other languages using LLMs. However, automatic translations can introduce deviations in toxicity and safety risks due to incorrect translations and hallucinations (Specia et al., 2021; Sharou & Specia, 2022; Team et al., 2022; Costa-jussa et al., 2023). Second, we employ existing safety classifiers and LLMs to automatically annotate safety violation categories, which may introduce biases from these models into our labeled safety categories. We utilize a panel of models to mitigate such biases, but acknowledge the inherent limitations of this methodology. 
Third, we follow Llama-Guard-3-8B (Dubey et al., 2024) and define" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 198, + 84, + 413, + 348 + ], + "blocks": [ + { + "bbox": [ + 198, + 84, + 413, + 348 + ], + "lines": [ + { + "bbox": [ + 198, + 84, + 413, + 348 + ], + "spans": [ + { + "bbox": [ + 198, + 84, + 413, + 348 + ], + "type": "image", + "image_path": "e8d291c17038f0e90e663c6a3add7960cc90021d61231e732d1932d341e6d655.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 359, + 506, + 384 + ], + "lines": [ + { + "bbox": [ + 104, + 359, + 506, + 384 + ], + "spans": [ + { + "bbox": [ + 104, + 359, + 506, + 384 + ], + "type": "text", + "content": "Figure 9: Performance difference on removing low-quality data. Takeaway: Removal of low-quality training data does not necessarily improve model performance." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 403, + 506, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 403, + 506, + 462 + ], + "spans": [ + { + "bbox": [ + 104, + 403, + 506, + 462 + ], + "type": "text", + "content": "our safety violation taxonomy according to the MLCommons Safety Taxonomy18. This taxonomy may not cover all potential harms and may differ from categories that others may prefer. Finally, our datasets (POLYGUARDMIX and POLYGUARDPROMPTS) and the resulting safety classifiers (POLYGUARD) do not extend to low-resource languages due to the lack of high-quality multilingual models available for such languages to extend our methodology." + } + ] + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 114, + 719, + 360, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 719, + 360, + 732 + ], + "spans": [ + { + "bbox": [ + 114, + 719, + 360, + 732 + ], + "type": "text", + "content": "18https://mlcommons.org/2024/04/mlc-aisafety-v0-5-poc/" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_04xxx/2504.04383/fd4cbf72-3c61-47c5-a7ef-ba8037d47f6a_content_list.json 
b/data/2025/2504_04xxx/2504.04383/fd4cbf72-3c61-47c5-a7ef-ba8037d47f6a_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..105e0332b5e1ffea5039e9ed8c6164167d076312 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04383/fd4cbf72-3c61-47c5-a7ef-ba8037d47f6a_content_list.json @@ -0,0 +1,1696 @@ +[ + { + "type": "text", + "text": "Retro-Search: Exploring Untaken Paths for Deeper and Efficient Reasoning", + "text_level": 1, + "bbox": [ + 171, + 99, + 825, + 142 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Ximing Lu†‡ Seungju Han†§ David Acuna† Hyunwoo Kim† Jaehun Jung† Shrimai Prabhumoye† Niklas Muennighoff§ Mostofa Patwary† Mohammad Shoeybi† Bryan Catanzaro† Yejin Choi†", + "bbox": [ + 179, + 164, + 816, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "†NVIDIA ‡University of Washington §Stanford University {ximingl, seungjuh, dacunamarrer, hyunwook, jaehunj, yejin}@nvidia.com", + "bbox": [ + 183, + 217, + 730, + 247 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 457, + 282, + 540, + 297 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Large reasoning models, such as OpenAI o1 and DeepSeek-R1, demonstrate remarkable reasoning capabilities via long, elaborate reasoning trajectories. Numerous follow-up studies report that supervised fine-tuning on such reasoning traces, also known as distillation, can be a cost-effective way to boost reasoning capabilities of smaller student models. However, empirical observations reveal that these reasoning trajectories are often suboptimal, switching excessively between different lines of thought, resulting in underthinking, over-thinking, and even degenerate responses. In this work, we introduce Retro-Search, a search algorithm in the spirit of Monte-Carlo Tree Search, for distilling higher quality reasoning paths from large reasoning models. 
Retro-Search retrospectively revises reasoning paths to discover better, yet shorter traces, which can then lead to student models with enhanced reasoning capabilities with shorter, thus faster inference. Our approach can enable two use cases: self-improvement, where models are fine-tuned on their own Retro-Search-ed thought traces, and weak-to-strong improvement, where a weaker model revises stronger model's thought traces via Retro-Search. For self-improving, R1-distill-7B, fine-tuned on its own Retro-Search-ed traces, reduces the average reasoning length by $31.2\\%$ while improving performance by $7.7\\%$ across seven math benchmarks. For weak-to-strong improvement, we retrospectively revise R1-671B's traces from the OpenThoughts dataset (Team, 2025) using R1-distill-32B as the Retro-Search-er, a model $20\\times$ smaller. Qwen2.5-32B, fine-tuned on 40k instances of this refined data, achieves performance comparable to R1-distill-32B, yielding an $11.3\\%$ reduction in reasoning length and a $2.4\\%$ performance improvement compared to fine-tuning on the original OpenThoughts data. More excitingly, R1-distill-7B and R1-distill-32B, fine-tuned on this revised data, achieve new state-of-the-art reasoning performance at the 7B and 32B scales while yielding the highest inference efficiency. Our work counters recently emergent viewpoints that question the relevance of search algorithms in the era of large reasoning models, by demonstrating that there are still opportunities for algorithmic advancements, even for frontier models.", + "bbox": [ + 228, + 316, + 769, + 750 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 171, + 786, + 318, + 801 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent state-of-the-art LLMs, such as OpenAI o1 and DeepSeek-R1, have demonstrated remarkable capabilities in solving complex reasoning problems by scaling test-time compute. 
Test-time scaling enables the model to produce extended reasoning trajectories—an inner monologue akin to an implicit internal search—where the model explores multiple potential solution paths and verifies itself (OpenAI, 2024; DeepSeek-AI et al., 2025; Qwen Team, 2025).", + "bbox": [ + 169, + 820, + 826, + 895 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Preprint. Under review.", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.04383v2 [cs.AI] 15 Apr 2025", + "bbox": [ + 22, + 270, + 60, + 705 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "$\\clubsuit$ First co-authors.", + "bbox": [ + 189, + 907, + 320, + 922 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Question: Given a sequence $a_{n}$ where $a_{n} = -4$ when $n$ is odd, and $a_{n} = 7$ when $n$ is even, write a formula for the $n$ -th term.", + "bbox": [ + 236, + 99, + 758, + 112 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/383b1bfed10a1065b0d70625cf6c18d25e147f3277dd81df30b842b5278ffd08.jpg", + "image_caption": [ + "Figure 1: An example reasoning trace from Retro-Search in weak-to-strong revision. 
A reasoning trace consists of a series of thoughts segmented by transition keywords (e.g., \"alternatively\", \"wait\"), with each thought composed of a sequence of intermediate steps, delimited by '\\n\\nRetro-Search retrospectively revises reasoning trajectories - exploring promising thoughts that were prematurely abandoned to mitigate under-thinking while avoiding redundant thoughts once the correct answer is evident to reduce over-thinking.\\n\\n" + ], + "image_footnote": [], + "bbox": [ + 189, + 118, + 803, + 531 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Reinforcement learning (RL) has been shown to enable this behavior as training progresses, with key \"aha\" moments in the training dynamics where models begin to generate longer responses and spontaneously develop alternative strategies for problem-solving, verification, and self-correction. As a result, average response length tends to grow proportionally with performance (DeepSeek-AI et al., 2025; Zeng et al., 2025a; HuggingFace, 2025).", + "bbox": [ + 169, + 643, + 826, + 717 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "At the same time, contradictory signals have emerged around whether RL is strictly necessary to enable these behaviors. Cost-effective approaches suggest that access to long reasoning traces may be the key. In fact, recent work shows it is possible to replicate or sometimes even surpass o1 and R1 performance on challenging math benchmarks using long reasoning traces and supervised fine-tuning (Muennighoff et al., 2025; Team, 2025).", + "bbox": [ + 169, + 720, + 826, + 792 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "This growing belief—that longer reasoning traces equals better reasoning—has shaped much of the recent progress in training and scaling strategies. However, is longer thinking always better? At the surface level, it may appear so. 
Long thought allows the model to explore alternative solutions paths, define subgoals, backtrack, verify and self-correct. These cognitive behaviors, akin to human problem-solving, have been indeed shown to be beneficial for reasoning models (Gandhi et al., 2025). Furthermore, it is intuitive that complex problems inherently require lengthier deliberations. However, several recent works have demonstrated that longer responses do not always yield better results. In fact, incorrect responses often involve longer reasoning traces marked by frequent switches between", + "bbox": [ + 169, + 797, + 826, + 926 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Preprint. Under review.", + "bbox": [ + 173, + 32, + 346, + 47 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 491, + 946, + 504, + 959 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "different lines of thought where the model prematurely abandons promising directions—a tendency coined by Wang et al. (2025) as under-thinking. On the other hand, over-thinking occurs when the model inefficiently expends resources by engaging in excessive verification or redundant checks after arriving at a final answer, contributing minimally to accuracy improvements Chen et al. (2024).", + "bbox": [ + 174, + 102, + 823, + 172 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Then, is shorter necessarily better? The phenomena of under-thinking and over-thinking have motivated several ad-hoc heuristics that use response length as a proxy for downstream performance (Wang et al., 2025; Fu et al., 2024). For instance, a naive approach to boost a model's reasoning capability is supervised fine-tuning on the shortest reasoning trajectories distilled from large state-of-the-art models such as DeepSeek-R1 671B. However, blind shortening is inherently limited, as length alone may not reliably indicate thoughtfulness or reasoning quality. 
Short responses may overlook nuanced considerations or miss essential parts of the meta-thinking process (Xiang et al., 2025). Furthermore, employing simple length-based heuristics disregards the complexity and semantic coherence of generated content, potentially discarding useful reasoning sequences that are verbose yet insightful.", + "bbox": [ + 174, + 179, + 823, + 320 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our goal is to consolidate these disparate observations on the quality of reasoning trajectories. We ask—if overly long reasoning is not always beneficial, and blind shortening is suboptimal, how can we discourage under-thinking and over-thinking, and collect more efficient and effective solutions? We argue that search is an effective means of eliciting better reasoning-producing trajectories that are both efficient and insightful, yet shorter in length—and can be used to train stronger student models.", + "bbox": [ + 174, + 325, + 823, + 410 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this work, we introduce Retro-Search, a search algorithm in the spirit of Monte-Carlo Tree Search (MCTS) for distilling higher quality reasoning data from large reasoning models. Retro-Search retrospectively revises a given reasoning path by suppressing unnecessary thought switches to collect more efficient and effective alternatives. Figure 1 shows an example of Retro-Search refining a reasoning trace from DeepSeek-R1. 
It expands promising thoughts that were prematurely abandoned to mitigate under-thinking while pruning redundant thoughts once the correct answer becomes evident to reduce over-thinking, resulting in more effective yet shorter reasoning traces.", + "bbox": [ + 174, + 415, + 823, + 527 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Contrary to prior attempts where search struggled to improve reasoning effectively, we show that our method is highly effective in two key settings: (1) Self-improvement—Retro-Search can bootstrap self-improvement in reasoning models, by training a model on its own Retro-Search-ed trajectories. We demonstrate that this simple step, despite not relying on frontier model capabilities, yields significant performance gain (of up to $7.7\\%$ ) while reducing inference time by $31.2\\%$ . (2) Weak-to-strong revision—Retro-Search can revise even the reasoning traces generated by an expensive, frontier reasoning model with a substantially smaller, more efficient model, yet significantly improving the quality of dataset. For example, we revise reasoning traces generated by R1-671B using a $20\\times$ smaller model R1-distill-32B as the Retro-Search-er. Yet after training on this revised data, Qwen2.5-32B achieves performance comparable to R1-distill-32B, yielding an $11.3\\%$ reduction in reasoning length and a $2.4\\%$ performance improvement compared to fine-tuning on the original R1-671B's trajectories. 
And, more excitingly, R1-distill-7B and R1-distill-32B, fin-tuned on this revised data, achieve new state-of-the-art reasoning performance at the 7B and 32B scales while yielding the highest inference time efficiency.", + "bbox": [ + 174, + 534, + 825, + 743 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Method", + "text_level": 1, + "bbox": [ + 174, + 762, + 274, + 779 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We introduce Retro-Search, an MCTS-inspired algorithm that explores untaken steps for deeper and more efficient reasoning. Its goal is to revise and improve a given reasoning path by encouraging continuation instead of prematurely switching to a new thought, ultimately seeking to reach the correct answer more efficiently, i.e. with fewer steps.", + "bbox": [ + 174, + 794, + 823, + 852 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.1 Preliminaries", + "text_level": 1, + "bbox": [ + 174, + 868, + 313, + 883 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Consider a reasoning model $\\mathcal{M}$ that, when given a question $q$ , generates both an intermediate reasoning trajectory $T$ and a final solution $a$ . Formally, given an input question $q \\in \\mathcal{Q}$ ,", + "bbox": [ + 174, + 893, + 823, + 924 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Preprint. Under review.", + "bbox": [ + 173, + 32, + 346, + 47 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 949, + 503, + 959 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/44813566fea66d24ce0070477bbb66e7a3058ad7b42f3c5a6e772553f8cf2ef9.jpg", + "image_caption": [ + "Figure 2: An overview of our Retro-Search algorithm. 
The algorithm iterates through the thoughts and explores untaken paths from steps that come before a thought-switch, which is marked by transition keywords like \"wait\" or \"another approach.\" During the process, it performs multiple rollouts, suppressing these transition keywords in the immediate next step. If the search is successful, the existing trajectory is replaced with the new rollout, and the process continues through the updated trajectory." + ], + "image_footnote": [], + "bbox": [ + 173, + 99, + 823, + 417 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "the model $\\mathcal{M}$ produces $(T, a) \\coloneqq \\mathcal{M}(q)$ , where $T \\in \\mathcal{T}$ denotes the chain of reasoning, or chain of \"thoughts\", and $a \\in \\mathcal{A}$ represents the final solution to $q$ .", + "bbox": [ + 169, + 527, + 823, + 560 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Each trajectory $T$ can be decomposed in to a set of thoughts, i.e., $T := \\{s^1, s^2, \\ldots, s^\\tau\\}$ . Each $s^\\tau$ denotes an individual thought, and each thought may perform distinctive role such as trying out a new solution strategy, reflecting its progress, back-tracking or verifying calculations, etc. In order to differentiate between independent thoughts, we attend to the fact that models often leverage transition keywords (e.g., \"alternatively\") to make a natural transition between thoughts, e.g. $s^\\tau \\rightarrow s^{\\tau+1}$ . We utilize these linguistic markers to segment and extract individual thoughts from the full reasoning trace.", + "bbox": [ + 169, + 565, + 826, + 667 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Each thought $s^{\\tau}$ itself is a sequence of intermediate steps $s_i^\\tau$ s—that is, $s^{\\tau} := \\{s_1^{\\tau}, s_2^{\\tau}, \\dots, s_k^{\\tau}\\}$ . These intermediate steps $s_k^\\tau$ represent atomic units of reasoning within a thought—such as sub-conclusions, calculations, or logical deductions. 
In practice, steps are delimited by '\\n\\n(double newline) characters in the model's output. We adapt the convention of using the superscript $\\tau$ to index the thought, and the subscript $k$ to index the step within that thought. For example, $s_k^\\tau$ refers to the $k$ -th step within the $\\tau$ -th thought.", + "bbox": [ + 169, + 672, + 828, + 762 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Utilizing the notations above, we represent a complete reasoning trajectory $T$ as:", + "bbox": [ + 171, + 766, + 756, + 782 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nT = \\left\\{\\left\\{s _ {1} ^ {1}, s _ {2} ^ {1}, \\dots , s _ {k _ {1}} ^ {1} \\right\\}, \\left\\{s _ {1} ^ {2}, s _ {2} ^ {2}, \\dots , s _ {k _ {2}} ^ {2} \\right\\}, \\dots , a \\right\\} \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 333, + 787, + 825, + 821 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The under-thinking issue: too many shallow thoughts. Previous studies have shown that R1-like models exhibit an under-thinking issue in their reasoning process (Wang et al., 2025). These models frequently abandon promising reasoning paths prematurely, leading to inadequate depth of reasoning on challenging problems. This phenomenon (1) occurs more frequently on harder problems, (2) leads to frequent switching between different thoughts without reaching a conclusion in each, and (3) correlates with incorrect responses due to insufficient exploration of reasoning paths.", + "bbox": [ + 169, + 825, + 826, + 926 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Preprint. Under review.", + "bbox": [ + 173, + 32, + 346, + 47 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The over-thinking issue: too many redundant thoughts. 
Conversely, R1-like models also suffer from an over-thinking issue (Sui et al., 2025; Chen et al., 2024), where they expend excessive compute on questions that are exceptionally simple or for which the answer is already evident. The model tends to generate unnecessary thoughts such as self-doubt and redundant verification, even when it produces the correct answer within its early steps.", + "bbox": [ + 169, + 103, + 823, + 175 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The seemingly contradictory issues of under-thinking and over-thinking share a common cause: unnecessarily initiating a new thought. In under-thinking, the model switches to a new thought without fully exploring a previously promising path. In over-thinking, despite the answer being evident, a new thought is started instead of directly generating the answer.", + "bbox": [ + 169, + 180, + 826, + 238 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "2.2 Retro-Search", + "text_level": 1, + "bbox": [ + 171, + 253, + 308, + 266 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The goal of Retro-Search is to start from a tuple $(q,T)$ generated by $\\mathcal{M}$ , and search for an improved trajectory $T^{*}$ using a revision model $\\widehat{\\mathcal{M}}$ . Here, we focus only on revising $T$ that leads to the correct final answer (i.e., $a = a^{\\star}$ ). Intuitively, we consider $T^{*}$ to be better than $T$ if it leads to the same final answer $a$ with fewer reasoning steps—i.e., by avoiding both over-thinking and under-thinking. 
We specifically consider two settings of Retro-Search, depending on how we set the revision model—(1) Self-Retro-Search, where $\\widehat{\\mathcal{M}}$ is set to be the original model $\\mathcal{M}$ that produced $T$ , and (2) Weak-to-Strong-Retro-Search (W2S-Retro-Search), where $\\widehat{\\mathcal{M}}$ is a smaller, cost-efficient model than $\\mathcal{M}$ .", + "bbox": [ + 169, + 279, + 823, + 401 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Collecting alternative rollouts The core rationale behind Retro-Search is that there may exist an alternative trajectory for a given problem that is shorter than the original trajectory, yet still leads to a correct answer. To discover such a trajectory, we iteratively explore alternative rollouts to investigate what would happen if, instead of starting a new thought $s^{\\tau +1}$ after $s^\\tau$ (i.e., generate $s_1^{\\tau +1}$ ), we continued the current thought $s^\\tau$ . Concretely, for each thought $s^\\tau$ in $T$ (Eq. 1), we generate an alternative rollout using $\\widehat{\\mathcal{M}}$ as:", + "bbox": [ + 169, + 416, + 823, + 507 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\{s _ {k + 1} ^ {\\tau}, \\dots , a \\right\\} \\sim \\widehat {\\mathcal {M}} \\left(s ^ {1}, s ^ {2}, \\dots , \\left\\{s _ {1} ^ {\\tau}, s _ {2} ^ {\\tau}, \\dots , s _ {k} ^ {\\tau} \\right\\}\\right) \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 334, + 512, + 825, + 537 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Importantly, when generating the immediate next step $s_{k+1}^{\\tau}$ , we constrain the model to stay within a single thought by preventing it from initiating a new one in the next step—by prohibiting the usage of thought-transition keywords (e.g., \"alternatively,\" \"wait\") during the decoding process. This encourages deeper exploration of the current thought rather than prematurely switching to different lines of thought. 
Subsequent steps after $s_{k+1}^{\\tau}$ are generated without constraints to allow free on-policy exploration.",
    "bbox": [
      169,
      541,
      823,
      630
    ],
    "page_idx": 4
  },
  {
    "type": "text",
    "text": "Evaluating alternative rollouts To determine whether the alternative rollout $\\{s_{k + 1}^{\\tau},\\ldots ,a\\}$ is better than the existing path $\\{s_1^{\\tau +1},\\dots ,a\\}$ , we define a value function $V(s)$ over the $i$ -th step $s_i$ in the trajectory $\\{s_1,\\dots ,a\\}$ to compare $V(s_{k + 1}^{\\tau})$ with $V(s_{1}^{\\tau +1})$ :",
    "bbox": [
      169,
      643,
      823,
      696
    ],
    "page_idx": 4
  },
  {
    "type": "equation",
    "text": "\n$$\nV \\left(s _ {i}, a ^ {\\star}\\right) := \\gamma^ {N - i} R \\left(a \\left(s _ {i}\\right), a ^ {\\star}\\right) \\tag {3}\n$$\n",
    "text_format": "latex",
    "bbox": [
      393,
      700,
      823,
      720
    ],
    "page_idx": 4
  },
  {
    "type": "text",
    "text": "where $N$ represents the total number of steps in the trajectory $\\{s_1, \\ldots, a\\}$ . Here, we write $a(s_i) \\coloneqq \\{s_i, \\ldots, a\\}$ to explicitly emphasize that the value depends on the specific step $s_i$ and captures the autoregressive dependence of the generated answer $a$ on the continuation from step $s_i$ . The reward function $R(a, a^{\\star})$ is binary, indicating whether the generated answer $a$ matches the ground truth $a^{\\star}$ (i.e., using a verifiable reward). We apply a decay factor $\\gamma$ to discount future rewards, assigning higher value to paths that reach the correct answer in fewer steps. Concretely, we set $\\gamma = 0.9$ in our experiments. 
In what follows, we drop the detailed notation and refer to the above simply as $V(s)$ for clarity.", + "bbox": [ + 169, + 724, + 823, + 837 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "If $V(s_{k+1}^{\\tau}) > V(s_1^{\\tau+1})$ , the rollout reaches the correct final answer in fewer steps, and we replace the existing path $\\{s_1^{\\tau+1}, \\ldots, a\\}$ with the rollout $\\{s_{k+1}^{\\tau}, \\ldots, a\\}$ . This could occur when exploring deeper along the current thought is more effective, thus reducing under-thinking. Alternatively, $s_{k+1}^{\\tau} = a$ indicates that the previous thought steps are already sufficient for the model to generate the correct solution directly, thereby reducing over-thinking.", + "bbox": [ + 169, + 844, + 826, + 925 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Preprint. Under review.", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 946, + 504, + 959 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In contrast, if $V(s_{k+1}^{\\tau}) < V(s_1^{\\tau+1})$ , the existing path is better. The alternative path either reaches a wrong answer or reaches the correct answer with more steps. This suggests that switching to a new thought was effective and necessary, and thus the existing transition should be preserved. In practice, we sample multiple alternative rollouts (two in our experiments) and retain the best one—that is, the rollout with the highest value. We then proceed to examine the next thought in the updated reasoning trajectory. Please see Figure 2 for a concrete example, and Algorithm 1 in Appendix A for more details.", + "bbox": [ + 169, + 102, + 826, + 203 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Retro-Search with Partial Revisions We also propose a more computationally efficient variant of Retro-Search. 
Instead of iteratively applying the revision procedure starting from the first thought, this version randomly samples a position in the trajectory at which to begin the revision. This is particularly useful when revising with larger models—for instance, the R1-32B model in our setting—where full iterative revision would be prohibitively expensive.", + "bbox": [ + 169, + 210, + 826, + 284 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3 Experiments", + "text_level": 1, + "bbox": [ + 171, + 299, + 318, + 316 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.1 Setup", + "text_level": 1, + "bbox": [ + 171, + 328, + 256, + 344 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Data Generation We use 40K math questions from NuminaMath (LI et al., 2024). Specifically, we sample NuminaMath questions from OpenThoughts-114k $^{1}$ dataset, which is the training data of OpenThinker-7B and OpenThinker-32B models. We experiment with two settings: 1) Self-Retro-R1-7B, where we first generate responses using the R1-distilled Qwen2.5-7B model and then revise them with the same model as the Retro-Search-er. 2) W2S-Retro-R1-32B, where we take responses from the DeepSeek-R1 671B model in the OpenThoughts dataset and revise them using a weaker model, R1-distilled Qwen2.5-32B. More details are in Appendix B.", + "bbox": [ + 169, + 349, + 826, + 465 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Model Training We trained four models using data generated by Retro-Search: Qwen2.5-7B-Instruct, R1-distilled Qwen2.5-7B, Qwen2.5-32B-Instruct and R1-distilled Qwen2.5-32B with supervised fine-tuning. All models are fine-tuned for five epochs with learning rate of 1e-5, and sequence length of 16K. 
More details are in Appendix C.", + "bbox": [ + 169, + 473, + 826, + 531 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Baselines We compare our trained models with a total of eleven open-weight models across two model size categories — six 7B models and five 32B models. These include instruction-tuned models such as Qwen2.5-7B-Inst (Yang et al., 2024a), Qwen2.5-Math-7B, Qwen2.5-Math-7B-Inst (Yang et al., 2024b) and Qwen2.5-32B-Inst (Yang et al., 2024a), as well as reasoning models such as OpenR1-Qwen-7B (HuggingFace, 2025), OpenThinker-7B (Team, 2025), R1-distill Qwen2.5-7B (DeepSeek-AI et al., 2025), OpenThinker-32B (Team, 2025), QwQ-32B-Preview (Qwen Team, 2025), Sky-T1-32B-Preview (NovaSky, 2025), and R1-distill Qwen2.5-32B (DeepSeek-AI et al., 2025). More baseline details are in Appendix D.", + "bbox": [ + 169, + 539, + 826, + 652 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Benchmarks and Metrics We evaluate models on seven math-specific benchmarks: AIME25, AIME24, AMC23, GaoKao23English (Zhong et al., 2023), OlympiadBench (He et al., 2024), GSM8K (Cobbe et al., 2021), and MATH500 (Lightman et al., 2023). The first five benchmarks focus on olympiad-level math problems, where AIME25 and AIME24 each contain 30 problems and AMC23 contains 40 problems. GSM8K includes grade school math problems, and MATH500 includes high-school math competition problems.", + "bbox": [ + 169, + 659, + 826, + 744 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For evaluation, we report two metrics: accuracy to measure the performance, and average response length to measure computational efficiency during inference. For accuracy, we use exact match between the model's prediction and the reference answer, with Qwen's official implementation for answer verification. 
For response length, we tokenize the responses using the Qwen2.5-7B-Instruct tokenizer and compute the number of output tokens.", + "bbox": [ + 169, + 750, + 823, + 824 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Metrics are computed individually for each benchmark and then averaged using macro averaging to produce the final scores. Since there is no universally optimal decoding strategy", + "bbox": [ + 169, + 828, + 823, + 859 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Preprint. Under review.", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "1https://huggingface.co/datasets/open-thoughts/OpenThoughts-114k", + "bbox": [ + 192, + 869, + 666, + 883 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "2https://github.com/QwenLM/Qwen2.5-Math/tree/main.", + "bbox": [ + 194, + 883, + 589, + 897 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "Note that evaluation results can significantly vary depending on the specifics of the answer verification, so we recommend to use the same implementation for reproduction.", + "bbox": [ + 173, + 897, + 823, + 924 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/d14aac5efd6cebeeba18a99eb68347d40ffd47dd815884a28ec2388fed42c964.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelsGreedy DecodingSampling (T=0.6, p=0.95)
Accuracy (↑)Length (↓)Accuracy (↑)Length (↓)
Baselines (7B)
Qwen2.5-Math-7B41.1118239.01225
Qwen2.5-Math-7B-Inst53.198252.7985
OpenR1-Qwen-7B67.6946371.77740
OpenThinker-7B53.81447759.19835
Qwen2.5-7B-Inst48.798547.91033
+ R1-7B49.71436555.48959
+ Self-Retro-R1-7B51.7 (+4.1%)11050 (-23.1%)55.8 (+0.7%)8263 (-7.8%)
+ R1-671B51.51430258.49824
+ W2S-Retro-R1-32B55.3 (+7.3%)13569 (-5.1%)57.8 (-1.1%)8940 (-9.0%)
R1-distill-Qwen2.5-7B64.51060071.06831
+ R1-671B68.4941871.77172
+ W2S-Retro-R1-32B70.8 (+3.5%)8800 (-6.6%)73.1 (+2.0%)6535 (-8.9%)
Baselines (32B)
OpenThinker-32B73.0800175.96840
QwQ-32B-Preview70.9516468.35163
Sky-T1-32B-Preview62.0236762.92018
Qwen2.5-32B-Inst56.197555.9761
+ R1-671B76.2707475.66676
+ W2S-Retro-R1-32B74.6 (-2.2%)6809 (-3.7%)77.5 (+2.4%)5923 (-11.3%)
R1-distill Qwen2.5-32B73.1856677.76173
+ R1-671B (12K)80.4647079.86164
+ W2S-Retro-R1-32B (12K)79.9 (-0.6%)6091 (-5.9%)81.0 (+1.5%)5301 (-14.0%)
", + "bbox": [ + 222, + 99, + 777, + 459 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/bb9a243fb822203f65188b845613855c1fc2f1191d2253776422a4b9f3dabfa0.jpg", + "table_caption": [ + "Table 1: Retro-Search provides better training data. Model evaluation results averaged across seven math benchmarks (AIME25, AIME24, AMC23, GaoKao23English, Olympiad-Bench, GSM8K, and MATH500). We report results from two setups: greedy decoding $(\\mathrm{T} = 0)$ and temperature sampling $(\\mathrm{T} = 0.6$ with top-p $= 0.95)$ . $+X$ indicates that the model is fine-tuned with data X. Only when fine-tuning R1-distill Qwen2.5-32B, we used 12K instances, as using more data did not improve results. The results indicate that: (1) models trained with Retro-Search data are more computationally efficient during inference while generally showing better performance; and (2) weak-to-strong Retro-Search enables new SOTA at 7B and 32B scales." + ], + "table_footnote": [], + "table_body": "
Qwen2.5-7B-InstGreedy DecodingSampling (T=0.6, p=0.95)
Accuracy (↑)Length (↓)Accuracy (↑)Length (↓)
+ R1-7B49.71436555.48959
+ R1-7B-Shortest50.31234054.68009
+ Self-Retro-R1-7B51.71105055.88263
", + "bbox": [ + 222, + 606, + 771, + 683 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 2: Simply selecting the shortest path for training is suboptimal for model accuracy. We fine-tuned Qwen2.5-7B-Inst with different training data and compare results. We sample eight responses using R1-distilled Qwen2.5-7B and choose the shortest response.", + "bbox": [ + 169, + 691, + 826, + 737 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "that works well across all models, we report results under two commonly used decoding setups: greedy decoding $(\\mathrm{T} = 0)$ , following Muennighoff et al. (2025), and temperature sampling $(\\mathrm{T} = 0.6$ with top-p $= 0.95)$ , following DeepSeek-AI et al. (2025). We took an average of results from five different seeds for the temperature sampling setup. In Appendix E, we share the full results including the confidence interval of the results.", + "bbox": [ + 169, + 763, + 823, + 835 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.2 Evaluation Results", + "text_level": 1, + "bbox": [ + 171, + 854, + 354, + 868 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Self Retro-Search teaches stronger and more efficient student models than vanilla data generation. We compare fine-tuning the student model, Qwen2.5-7B-Instruct, using data from our Self-Retro-R1-7B against fine-tuning with data sampled from the R1-distilled", + "bbox": [ + 169, + 881, + 823, + 926 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Preprint. Under review.", + "bbox": [ + 173, + 32, + 346, + 47 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 946, + 504, + 959 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/c7b5b308bb1254d7abf9e352c8ba5f851b2bd1d6710ccabfeac76cb9a6c47b22.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelsGreedy DecodingSampling (T=0.6, p=0.95)
Accuracy (↑)Length (↓)Accuracy (↑)Length (↓)
R1-distill Qwen2.5-7B64.51060071.06831
+ Self-Retro-R1-7B69.5 (+7.7%)7295 (-31.2%)70.6 (-0.6%)5406 (-20.9%)
", + "bbox": [ + 222, + 99, + 772, + 162 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 3: Retro-Search allows self-improvement of the models. Fine-tuning the R1-distilled Qwen2.5-7B model with self-revision data (Self-Retro-R1-7B) significantly improves efficiency, while maintaining or even improving accuracy.", + "bbox": [ + 169, + 172, + 826, + 217 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Qwen2.5-7B model before revision, referred to as $R1 - 7B$ in Table 1. Compared to models trained on $R1 - 7B$ , the model trained on Self-Retro- $R1 - 7B$ produces responses that are $23.1\\%$ shorter while improving accuracy by $+4.1\\%$ under greedy decoding.", + "bbox": [ + 169, + 242, + 826, + 287 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We further compare Retro-Search against another baseline, R1-7B-Shortest, which selects the shortest response for model training after sampling eight responses per questions using R1-distilled Qwen2.5-7B. As shown in Table 2, although training with the shortest response can enhance efficiency when compared to R1-7B, it does not improve the model performance as much as our Retro-Search, clearly demonstrating the effectiveness of our Retro-Search.", + "bbox": [ + 169, + 291, + 823, + 363 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Weak-to-Strong Retro-Search enables new SOTA reasoning models at 7B and 32B scales, excelling in both performance and efficiency. While Self-Retro has proven effective, using a large model such as DeepSeek-R1-671B for both generation and revision is computationally implausible. We evaluate the effectiveness of weak-to-strong revision, where DeepSeek-R1-671B's generations are Retro-Search-ed by R1-distilled Qwen2.5-32B, denoted as W2S-Retro-R1-32B. 
We fine-tune student models on this data and compare them to those fine-tuned on unrevised data from DeepSeek-R1-671B, referred to as R1-671B in Table 1.", + "bbox": [ + 169, + 372, + 826, + 470 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "W2S-Retro-R1-32B proves to be effective, enabling new SOTA reasoning models at 7B and 32B scales. We fine-tuned four models — Qwen2.5-7B-Instruct, R1-distilled Qwen2.5-7B, Qwen2.5-32B-Instruct and R1-distilled Qwen2.5-32B — and consistently observed reduced response lengths and improved performance across different setups compared to models fine-tuned on R1-671B. Surprisingly, R1-distilled Qwen2.5-7B and R1-distilled Qwen2.5-32B fine-tuned on W2S-Retro-R1-32B, achieve new SOTA reasoning performance in the sampling setting at the 7B and 32B scales, while yielding the highest inference time efficiency. In addition, Qwen2.5-32B fine-tuned on W2S-Retro-R1-32B, achieves performance comparable to R1-distill-32B, yielding an $11.3\\%$ reduction in reasoning length and a $2.4\\%$ performance improvement compared to fine-tuning on the R1-671B data. Notably, it also outperforms OpenThinker-32B in accuracy while being more efficient (13.4%–14.9% shorter response). This is particularly significant given that OpenThinker-32B is trained on around 2.5 times more data than our W2S-Retro-R1-32B and use DeepSeek-R1 671B for response generation.", + "bbox": [ + 169, + 476, + 826, + 659 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Retro-Search enables self-improvement of R1-distilled models. We fine-tune the R1-distilled Qwen2.5-7B model with our Self-Retro-R1-7B. Results in Table 3 show significant accuracy improvement $(+7.7\\%)$ and response length reduction $(31.2\\%)$ for greedy decoding, compared to R1-distill Qwen2.5-7B. There is a small performance reduction for temperature sampling $(-0.6\\%)$ , but the length reduction is substantial $(20.9\\%)$ . 
As Self-Retro-R1-7B uses R1-distilled Qwen2.5-7B model for response generation, revision, and fine-tuning the model itself, this shows the self-improvement capabilities enabled by Retro-Search.", + "bbox": [ + 169, + 667, + 826, + 767 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "3.3 Analyses", + "text_level": 1, + "bbox": [ + 171, + 785, + 282, + 800 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We quantitatively analyze the reasoning trajectories in the synthesized training data using our Retro-Search, as well as those generated by the fine-tuned student model Qwen2.5-7B. Table 4 reports the average number of transition keywords, number of steps per thought, and the relative location where the solution first appears in the trajectory (with values closer to 1 indicating that the solution is nearer the end). The synthesized reasoning traces from Retro-Search contain significantly fewer transition keywords than those from R1-7B and R1-671B. As a result, thoughts from Retro-Search include more steps than those from R1-7B and 671B, indicating deeper thoughts. Additionally, the solution tends to appear later in", + "bbox": [ + 169, + 811, + 826, + 926 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Preprint. Under review.", + "bbox": [ + 173, + 32, + 346, + 47 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/cbc12952f330df65db4917167473e75d8a0485d4eea129128ca2cadc83a643ca.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Synthesized Training DataStudent Model's Reasoning Trace
#Transition Keywords (↓)#Steps/Thought (↑)Relative Location of Solution (↑)#Transition Keywords (↓)#Steps/Thought (↑)Relative Location of Solution (↑)
R1-7B85.93.70.67229.24.70.59
Self-Retro-R1-7B32.75.30.73183.25.40.64
R1-671B35.33.80.5980.03.00.44
W2S-Retro-R1-32B10.44.90.6070.13.20.48
", + "bbox": [ + 173, + 99, + 823, + 186 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Table 4: The average number of transition keywords, the number of steps per thought, and the relative location of the first appearance of the solution in the reasoning trajectory are taken from both the training data and the fine-tuned student model, Qwen2.5-7B.", + "bbox": [ + 169, + 200, + 823, + 244 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "the trajectory, suggesting that our approach shows less redundant thoughts after the final solution is derived. These trends are also consistent in the reasoning outputs from the student model, showing that Retro-Search reduces both under-thinking and over-thinking.", + "bbox": [ + 169, + 268, + 823, + 313 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4 Related Works", + "text_level": 1, + "bbox": [ + 171, + 329, + 336, + 345 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Test-time compute has emerged as a new axis of scaling for LLM reasoning. While prior research in this direction have focused on parallel scaling—repeated sampling of trajectories followed by aggregation (Brown et al., 2024; Snell et al., 2024; Wu et al., 2025a), recent efforts have focused on sequential scaling—where models are trained to back-track, evaluate, and revise its thought by generating a long, monolithic CoT. Representative models such as O1 and R1 (OpenAI, 2024; DeepSeek-AI et al., 2025) are trained via large-scale reinforcement learning, demonstrating that models can learn to generate long CoTs without relying on bespoke reward models (Lightman et al., 2023; Zhang et al., 2025b), or tree search (Feng et al., 2024; Zhang et al., 2024). Subsequent projects in open-source community aim to replicate these reasoning models (HuggingFace, 2025; Qin et al., 2024). 
These works often utilize frontier reasoning models to generate synthetic long thought traces, and show surprising gains in reasoning capabilities via simple supervised fine-tuning (HuggingFace, 2025; NovaSky, 2025; Muennighoff et al., 2025). Our work builds upon these prior efforts, focusing on (1) better-quality reasoning paths by targeted revision of verbose sub-traces, and (2) demonstrating self-improvement beyond typical strong-to-weak distillation, where smaller models can self-improve in both performance and efficiency.",
    "bbox": [
      169,
      357,
      826,
      580
    ],
    "page_idx": 8
  },
  {
    "type": "text",
    "text": "Meanwhile, concurrent works reveal limitations of reasoning models in their inefficiency of test-time scaling. Longer generation does not necessarily correlate with better accuracy (Zeng et al., 2025b), and in practice, shorter trajectories are more likely to be correct. Models tend to overthink (Cuadron et al., 2025; Sui et al., 2025; Chen et al., 2024), i.e., they generate unnecessarily long trajectories that do not contribute to the performance. Models also exhibit underthinking (Wang et al., 2025)—while they appear to explore diverse plausible paths, models often switch between paths without sufficient exploration on one path. Wu et al. (2025b) suggests the source of inefficiency may lie in the regularities of the training data we use, and theoretically show that training on CoTs that are longer than the optimal length for the model can hurt its performance. Several measures have been proposed to mitigate these findings, such as auxiliary learnable parameters (Bao et al., 2025; Zhang et al., 2025a), calibration (Huang et al., 2025), and decoding-time algorithm (Xu et al., 2025; Misaki et al., 2025). 
Retro-Search aligns with these prior efforts, and importantly revisits the value of search algorithm in improving both the efficiency and performance of test-time scaling.", + "bbox": [ + 169, + 585, + 826, + 782 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5 Conclusions", + "text_level": 1, + "bbox": [ + 171, + 797, + 316, + 813 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In this work, we introduced Retro-Search, a novel algorithm for synthesizing reasoning data designed to equip reasoning models with efficient (shorter average response length) and effective (higher accuracy) test-time scaling. Inspired by the MCTS algorithm, Retro-Search retrospectively revises reasoning trajectories—eliminating unnecessary thought switches (under-thinking) and trimming redundant steps after the correct answer becomes evident (over-thinking). Quantitatively, we show that Retro-Search is highly effective for self-improvement and weak-to-strong revision. Specifically, R1-distill-7B, fine-tuned on its own", + "bbox": [ + 169, + 825, + 826, + 925 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Preprint. Under review.", + "bbox": [ + 173, + 32, + 346, + 47 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 948, + 504, + 958 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Retro-Search-ed traces, reduces the average reasoning length by $31.2\\%$ while improving performance by $7.7\\%$ across seven math benchmarks. Notably, R1-distill-7B and R1-distill-32B, fine-tuned on weak-to-strong Retro-Search-ed reasoning traces from R1-671B, set new state-of-the-art performance at the 7B and 32B scales while yielding the highest reasoning efficiency. 
We hope our work reinvigorates interest in the power of search-based methods for synthetic data in reasoning models—a direction that has recently fallen out of favor, yet holds significant untapped potential.", + "bbox": [ + 174, + 102, + 823, + 200 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Preprint. Under review.", + "bbox": [ + 173, + 32, + 346, + 47 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 948, + 506, + 959 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 174, + 102, + 272, + 116 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Hieu Tran Bao, Nguyen Cong Dat, Nguyen Duc Anh, and Hoang Thanh-Tung. Learning to stop overthinking at test time, 2025. URL https://arxiv.org/abs/2502.10954.", + "Bradley Brown, Jordan Juravsky, Ryan Ehrlich, Ronald Clark, Quoc V. Le, Christopher Ré, and Azalia Mirhoseini. Large language monkeys: Scaling inference compute with repeated sampling, 2024. URL https://arxiv.org/abs/2407.21787.", + "Xingyu Chen, Jiahao Xu, Tian Liang, Zhiwei He, Jianhui Pang, Dian Yu, Linfeng Song, Qiuzhi Liu, Mengfei Zhou, Zhuosheng Zhang, et al. Do not think that much for $2 + 3 = ?$ on the overthinking of o1-like llms. arXiv preprint arXiv:2412.21187, 2024.", + "Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, et al. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021.", + "Alejandro Cuadron, Dacheng Li, Wenjie Ma, Xingyao Wang, Yichuan Wang, Siyuan Zhuang, Shu Liu, Luis Gaspar Schroeder, Tian Xia, Huanzhi Mao, Nicholas Thumiger, Aditya Desai, Ion Stoica, Ana Klimovic, Graham Neubig, and Joseph E. Gonzalez. The danger of overthinking: Examining the reasoning-action dilemma in agentic tasks. ArXiv, abs/2502.08235, 2025. 
URL https://api-semanticscholar.org/CorpusID:276287600.", + "DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, Xiaokang Zhang, Xingkai Yu, Yu Wu, Z. F. Wu, Zhibin Gou, Zhihong Shao, Zhuoshu Li, Ziyi Gao, Aixin Liu, Bing Xue, Bingxuan Wang, Bochao Wu, Bei Feng, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, Damai Dai, Deli Chen, Dongjie Ji, Erhang Li, Fangyun Lin, Fucong Dai, Fuli Luo, Guangbo Hao, Guanting Chen, Guowei Li, H. Zhang, Han Bao, Hanwei Xu, Haocheng Wang, Honghui Ding, Huajian Xin, Huazuo Gao, Hui Qu, Hui Li, Jianzhong Guo, Jia Shi Li, Jiawei Wang, Jingchang Chen, Jingyang Yuan, Junjie Qiu, Junlong Li, J. L. Cai, Jiaqi Ni, Jian Liang, Jin Chen, Kai Dong, Kai Hu, Kaige Gao, Kang Guan, Kexin Huang, Kuai Yu, Lean Wang, Lecong Zhang, Liang Zhao, Litong Wang, Liyue Zhang, Lei Xu, Leyi Xia, Mingchuan Zhang, Minghua Zhang, Minghui Tang, Meng Li, Miaojun Wang, Mingming Li, Ning Tian, Panpan Huang, Peng Zhang, Qiancheng Wang, Qinyu Chen, Qiushi Du, Ruiqi Ge, Ruisong Zhang, Ruizhe Pan, Runji Wang, R. J. Chen, R. L. Jin, Ruyi Chen, Shanghai Lu, Shangyan Zhou, Shanhuang Chen, Shengfeng Ye, Shiyu Wang, Shuiying Yu, Shunfeng Zhou, Shuting Pan, S. S. Li, Shuang Zhou, Shaoqing Wu, Shengfeng Ye, Tao Yun, Tian Pei, Tianyu Sun, T. Wang, Wangding Zeng, Wanjia Zhao, Wen Liu, Wenfeng Liang, Wenjun Gao, Wenqin Yu, Wentao Zhang, W. L. Xiao, Wei An, Xiaodong Liu, Xiaohan Wang, Xiaokang Chen, Xiaotao Nie, Xin Cheng, Xin Liu, Xin Xie, Xingchao Liu, Xinyu Yang, Xinyuan Li, Xuecheng Su, Xuheng Lin, X. Q. Li, Xiangyue Jin, Xiaojin Shen, Xiaosha Chen, Xiaowen Sun, Xiaoxiang Wang, Xinnan Song, Xinyi Zhou, Xianzu Wang, Xinxia Shan, Y. K. Li, Y. Q. Wang, Y. X. 
Wei, Yang Zhang, Yanhong Xu, Yao Li, Yao Zhao, Yaofeng Sun, Yaohui Wang, Yi Yu, Yichao Zhang, Yifan Shi, Yiliang Xiong Ying He Yishi Piao Yisong Wang Yixuan Tan Yiyang Ma Yiyuan Liu Yongqiang Guo Yuan Ou Yuduan Wang Yue Gong Yuheng Zou Yujia He Yunf an Xiong Yuxiang Luo Yuxiang You Yuxuan Liu Yuyang Zhou Y. X. Zhu Yanhong Xu Yanping Huang Yaohui Li Yi Zheng Yuchen Zhu Yunxian Ma Ying Tang Yukun Zha Yuting Yan Z.Z.Ren Zehui Ren,Zhangli Sha Zhe Fu Zhean Xu Zhenda Xie Zhengyan ZhangZhenwen Hao Zhicheng Ma Zhigang Yan Zhiyu Wu Zihui Gu Zijia Zhu Zijun Liu Zilin Li Ziwei Xie Ziyang Song Zizheng Pan Zhen Huang Zhipeng Xu Zhongyu Zhang and Zhen Zhang. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning 2025. URL https://arxiv.org/abs/2501.12948.", + "Xidong Feng, Ziyu Wan, Muning Wen, Stephen Marcus McAleer, Ying Wen, Weinan Zhang, and Jun Wang. Alphazero-like tree-search can guide large language model decoding and training, 2024. URL https://arxiv.org/abs/2309.17179.", + "Yichao Fu, Junda Chen, Siqi Zhu, Zheyu Fu, Zhongdongming Dai, Aurick Qiao, and Hao Zhang. Efficiently serving llm reasoning programs with certainindex. arXiv preprint arXiv:2412.20993, 2024." + ], + "bbox": [ + 174, + 125, + 825, + 922 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Preprint. Under review.", + "bbox": [ + 173, + 32, + 346, + 47 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 488, + 948, + 504, + 959 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Kanishk Gandhi, Ayush Chakravarthy, Anikait Singh, Nathan Lile, and Noah D. Goodman. Cognitive behaviors that enable self-improving reasoners, or, four habits of highly effective stars, 2025. URL https://arxiv.org/abs/2503.01307.", + "Chaoqun He, Renjie Luo, Yuzhuo Bai, Shengding Hu, Zhen Leng Thai, Junhao Shen, Jinyi Hu, Xu Han, Yujie Huang, Yuxiang Zhang, et al. 
Olympiadbench: A challenging benchmark for promoting agi with olympiad-level bilingual multimodal scientific problems. arXiv preprint arXiv:2402.14008, 2024.", + "Chengsong Huang, Langlin Huang, Jixuan Leng, Jiacheng Liu, and Jiaxin Huang. Efficient test-time scaling via self-calibration, 2025. URL https://arxiv.org/abs/2503.00031.", + "HuggingFace. Open r1: A fully open reproduction of deepseek-r1, January 2025. URL https://github.com/huggingface/open-r1.", + "Jia LI, Edward Beeching, Lewis Tunstall, Ben Lipkin, Roman Soletskyi, Shengyi Costa Huang, Kashif Rasul, Longhui Yu, Albert Jiang, Ziju Shen, Zihan Qin, Bin Dong, Li Zhou, Yann Fleureau, Guillaume Lample, and Stanislas Polu. Numinamath. [https://huggingface.co/AI-MO/NuminaMath-CoT](https://github.com/project-numina/aimo-progress-prize/blob/main/report/numina_dataset.pdf), 2024.", + "Hunter Lightman, Vineet Kosaraju, Yura Burda, Harri Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step, 2023. URL https://arxiv.org/abs/2305.20050.", + "Kou Misaki, Yuichi Inoue, Yuki Imajuku, So Kuroki, Taishi Nakamura, and Takuya Akiba. Wider or deeper? scaling llm inference-time compute with adaptive branching tree search, 2025. URL https://arxiv.org/abs/2503.04412.", + "Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling, 2025. URL https://arxiv.org/abs/2501.19393.", + "NovaSky. Sky-t1: Train your own o1 preview model within $450. https://novaskyai.github.io/posts/sky-t1, 2025.", + "OpenAI. Openai o1 system card, 2024. URL https://arxiv.org/abs/2412.16720.", + "Yiwei Qin, Xuefeng Li, Haoyang Zou, Yixiu Liu, Shijie Xia, Zhen Huang, Yixin Ye, Weizhe Yuan, Hector Liu, Yuanzhi Li, and Pengfei Liu. O1 replication journey: A strategic progress report - part 1, 2024. URL https://arxiv.org/abs/2410.18982.", + "Qwen Team. 
Qwq-32b: Embracing the power of reinforcement learning, March 2025. URL https://qwenlm.github.io/blog/qwq-32b/.", + "Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling llm test-time compute optimally can be more effective than scaling model parameters, 2024. URL https:// arxiv.org/abs/2408.03314.", + "Yang Sui, Yu-Neng Chuang, Guanchu Wang, Jiamu Zhang, Tianyi Zhang, Jiayi Yuan, Hongyi Liu, Andrew Wen, Shaochen, Zhong, Hanjie Chen, and Xia Hu. Stop overthinking: A survey on efficient reasoning for large language models, 2025. URL https://arxiv.org/abs/2503.16419.", + "OpenThoughts Team. Open Thoughts. https://open-thoughts.ai, January 2025.", + "Leandro von Werra, Younes Belkada, Lewis Tunstall, Edward Beeching, Tristan Thrush, Nathan Lambert, Shengyi Huang, Kashif Rasul, and Quentin Gallouédec. Trl: Transformer reinforcement learning. https://github.com/huggingface/trl, 2020.", + "Yue Wang, Qiuzhi Liu, Jiahao Xu, Tian Liang, Xingyu Chen, Zhiwei He, Linfeng Song, Dian Yu, Juntao Li, Zhuosheng Zhang, et al. Thoughts are all over the place: On the underthinking of o1-like llms. arXiv preprint arXiv:2501.18585, 2025." + ], + "bbox": [ + 171, + 102, + 826, + 924 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Preprint. Under review.", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Yangzhen Wu, Zhiqing Sun, Shanda Li, Sean Welleck, and Yiming Yang. Inference scaling laws: An empirical analysis of compute-optimal inference for LLM problem-solving. In The Thirteenth International Conference on Learning Representations, 2025a. URL https://openreview.net/forum?id=VNckp7JEHn.", + "Yuyang Wu, Yifei Wang, Tianqi Du, Stefanie Jegelka, and Yisen Wang. When more is less: Understanding chain-of-thought length in llms, 2025b. 
URL https://arxiv.org/abs/2502.07266.", + "Violet Xiang, Charlie Snell, Kanishk Gandhi, Alon Albalak, Anikait Singh, Chase Blagden, Duy Phung, Rafael Rafailov, nathan lile, Dakota Mahan, Louis Castricato, Jan-Philipp Franken, Nick Haber, and Chelsea Finn. Towards system 2 reasoning in llms: Learning how to think with meta chain-of-thought. ArXiv, abs/2501.04682, 2025. URL https://api-semanticscholar.org/CorpusID:275357763.", + "Silei Xu, Wenhao Xie, Lingxiao Zhao, and Pengcheng He. Chain of draft: Thinking faster by writing less, 2025. URL https://arxiv.org/abs/2502.18600.", + "An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024a.", + "An Yang, Beichen Zhang, Binyuan Hui, Bofei Gao, Bowen Yu, Chengpeng Li, Dayiheng Liu, Jianhong Tu, Jingren Zhou, Junyang Lin, et al. Qwen2. 5-math technical report: Toward mathematical expert model via self-improvement. arXiv preprint arXiv:2409.12122, 2024b.", + "Weihao Zeng, Yuzhen Huang, Wei Liu, Keqing He, Qian Liu, Zejun Ma, and Junxian He. 7b model and 8k examples: Emerging reasoning with reinforcement learning is both effective and efficient. https://hkust-nlp.notion.site/simplerl-reason, 2025a. Notion Blog.", + "Zhiyuan Zeng, Qinyuan Cheng, Zhangyue Yin, Yunhua Zhou, and Xipeng Qiu. Revisiting the test-time scaling of o1-like models: Do they truly possess test-time scaling capabilities?, 2025b. URL https://arxiv.org/abs/2502.12215.", + "Dan Zhang, Sining Zhoubian, Ziniu Hu, Yisong Yue, Yuxiao Dong, and Jie Tang. Rest-mcts*: Llm self-training via process reward guided tree search. arXiv preprint arXiv:2406.03816, 2024.", + "Jintian Zhang, Yuqi Zhu, Mengshu Sun, Yujie Luo, Shuofei Qiao, Lun Du, Da Zheng, Huajun Chen, and Ningyu Zhang. Lighthinker: Thinking step-by-step compression, 2025a. 
URL https://arxiv.org/abs/2502.15589.", + "Zhenru Zhang, Chujie Zheng, Yangzhen Wu, Beichen Zhang, Runji Lin, Bowen Yu, Dayiheng Liu, Jingren Zhou, and Junyang Lin. The lessons of developing process reward models in mathematical reasoning, 2025b. URL https://arxiv.org/abs/2501.07301.", + "Wanjun Zhong, Ruixiang Cui, Yiduo Guo, Yaobo Liang, Shuai Lu, Yanlin Wang, Amin Saied, Weizhu Chen, and Nan Duan. Agieval: A human-centric benchmark for evaluating foundation models. arXiv preprint arXiv:2304.06364, 2023." + ], + "bbox": [ + 171, + 102, + 826, + 741 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Preprint. Under review.", + "bbox": [ + 173, + 32, + 346, + 47 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Appendices", + "text_level": 1, + "bbox": [ + 171, + 99, + 359, + 128 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "A Retro-Search Algorithm 15", + "B Data Generation Details 15", + "C Training Details 15", + "DBaselines Details 15", + "E Per-dataset Evaluation Results 17" + ], + "bbox": [ + 173, + 162, + 825, + 314 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Preprint. 
Under review.", + "bbox": [ + 173, + 32, + 346, + 47 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A Retro-Search Algorithm", + "text_level": 1, + "bbox": [ + 171, + 101, + 424, + 119 + ], + "page_idx": 14 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 1 Retro-Search" + ], + "code_body": "Require: Question $q$ , initial reasoning trajectory $T = \\{\\{s_1^1,s_2^1,\\dots ,s_{k_1}^1\\} ,\\{s_1^2,s_2^2,\\dots ,s_{k_2}^2\\} ,\\dots ,a\\}$ , revision model $\\widehat{\\mathcal{M}}$ discount factor $\\gamma$ , ground truth answer $a^\\star$ , and reward function $R(\\cdot ,\\cdot)$ . \nEnsure: Revised trajectory $\\tilde{T}$ that yields answer $a^{*}$ with fewer steps. \n1: Initialize $\\tilde{T}\\gets T$ \n2: Initialize $s^{\\tau}\\gets s^{1}$ from $\\tilde{T}$ \n3: while $s^{\\tau}$ is not the last thought in $\\tilde{T}$ do \n4: $\\{s_{k + 1}^{\\tau},\\ldots ,a\\} \\sim \\widehat{\\mathcal{M}}\\left(s^{1},\\ldots ,\\{s_{1}^{\\tau},s_{2}^{\\tau},\\ldots ,s_{k}^{\\tau}\\}\\right)$ Rollout: transition keywords prohibited in $s_{k + 1}^{\\tau}$ \n5: $V(s_{k + 1}^{\\tau},a^{\\star})\\gets \\gamma^{N - i}R(a(s_{k + 1}^{\\tau}),a^{\\star})$ Compute value of the new step $s_{k + 1}^{\\tau}$ (i.e., $i$ -th step) \n6: if $V(s_{k + 1}^{\\tau}) > V(s_{1}^{\\tau +1})$ then If the value of the new step is higher than the existing one \n7: $\\tilde{T}\\gets \\left\\{s^{1},s^{2},\\dots ,\\{s_{1}^{\\tau},s_{2}^{\\tau},\\dots ,s_{k}^{\\tau}\\} \\{s_{k + 1}^{\\tau},\\dots ,a\\} \\right\\} \\triangleright$ Update the trajectory with the new rollout \n8: $s^{\\tau}\\gets$ the next thought in $\\tilde{T}$ \n9: Return $\\tilde{T}$", + "bbox": [ + 171, + 162, + 828, + 356 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "B Data Generation Details", + "text_level": 1, + "bbox": [ + 171, + 383, + 426, + 398 + ], + 
"page_idx": 14 + }, + { + "type": "text", + "text": "When constructing Self-Retro-R1-7B, we use the default version of Retro-Search, whereas for W2S-Retro-R1-32B, we use Retro-Search with partial revision. When constructing Self-Retro-R1-7B, we generate responses from R1-distill Qwen2.5-7B and filter for those with correct solutions as the base data for Retro-Search to revise. For W2S-Retro-R1-32B, we directly use OpenThought data as the base, since it contains only correct responses from the DeepSeek-R1 671B model.", + "bbox": [ + 169, + 414, + 826, + 500 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "The transition keywords we use to segment thoughts within a reasoning trace are: 'But', 'Wait', 'Alternatively', 'However', 'Hmm', 'Hmmm', 'Not sure', 'Going back', 'Backtrack', 'Trace back', and 'Another'.", + "bbox": [ + 169, + 505, + 826, + 549 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "For data generation during Retro-Search, we use top-p sampling with $p = 0.98$ and temperature $T = 1.0$ . We also tried using temperature $T = 0.6$ and found that data generated with a higher temperature tends to produce a better student model, likely due to the increased diversity in the training data induced by higher-temperature sampling. We set the maximum generation length to be 16384.", + "bbox": [ + 169, + 554, + 826, + 626 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "C Training Details", + "text_level": 1, + "bbox": [ + 171, + 643, + 356, + 662 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We perform supervised fine-tuning of models using HuggingFace TRL (von Werra et al., 2020). For all fine-tuning experiments, we used batch size of 128, five training epochs, and cosine learning rate scheduler with warmup rate of 0.05. We used Adam optimizer with weight decay of 1e-4, with beta1=0.9 and beta2=0.95. We did not conduct hyperparameter search, so there is a potential of finding better hyperparameters. 
With 32 H100 GPUs, fine-tuning 7B model with 40K data took around 90 minutes, and fine-tuning 32B model took 10 hours to finish.", + "bbox": [ + 169, + 676, + 826, + 776 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "D Baselines Details", + "text_level": 1, + "bbox": [ + 171, + 794, + 366, + 810 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "For 7B models, we evaluate six open-weight models as baselines: instruction-tuned models including Qwen2.5-7B-Inst (Yang et al., 2024a), Qwen2.5-Math-7B, and Qwen2.5-Math-7B-Inst (Yang et al., 2024b), as well as reasoning models including OpenR1-Qwen-7B (HuggingFace, 2025), OpenThinker-7B (Team, 2025), and R1-distill Qwen2.5-7B (DeepSeek-AI et al., 2025). These reasoning models are fine-tuned using responses from DeepSeek-R1 671B (DeepSeek-AI et al., 2025). Specifically, the OpenR1-Qwen-7B model is trained on 220K math examples, with questions sourced from NuminaMath, while OpenThinker-7B", + "bbox": [ + 169, + 825, + 826, + 926 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Preprint. Under review.", + "bbox": [ + 171, + 32, + 346, + 47 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/b6d9d97c07316a940460ececeb0a6ecb9c18d34757fe240d3c3dd28e05d0bb75.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
06'6L0F'60S'606'8L05'9900'0010E'8902'9S(2L)1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E -1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E 1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E+1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E=1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E--1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E---1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E—1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E----1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-----1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-------1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E------1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E ----1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E
", + "bbox": [ + 344, + 103, + 648, + 662 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Table 5: Per-dataset evaluation results (accuracies) using greedy decoding.", + "bbox": [ + 230, + 675, + 766, + 690 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "is trained on the OpenThoughts-114K dataset, which includes math, science, and coding problems.", + "bbox": [ + 173, + 777, + 823, + 806 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "For 32B models, we evaluate five open-weight models: instruction-tuned Qwen2.5-32B-Inst (Yang et al., 2024a), as well as reasoning models such as OpenThinker-32B (Team, 2025), QwQ-32B-Preview (Qwen Team, 2025), Sky-T1-32B-Preview (NovaSky, 2025), and R1-distill Qwen2.5-32B (DeepSeek-AI et al., 2025). Both OpenThinker-32B and R1-distill Qwen2.5-32B are fine-tuned using responses generated by DeepSeek-R1 671B, with OpenThinker-32B utilizing the OpenThoughts-114K dataset. Sky-T1-32B-Preview is trained on a 17K dataset consisting of math and coding problems, with responses generated using QwQ-32B-Preview. The training details of the other models are not publicly disclosed.", + "bbox": [ + 173, + 811, + 825, + 924 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Preprint. Under review.", + "bbox": [ + 173, + 32, + 346, + 47 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 949, + 506, + 959 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/84a2d240fa298c6ead41330d5f423055ce725c87cb6fae8cf1f688fc5cd0129a.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
1609€10€€2€9€1€€2€2€81€6€88€11€52€11€(€2€) €2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-
02€9€289€€31€199€€218€082€862€0€29€1€(€2€) €2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€
608981€06691€087€51€958€2€095€1€€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€
€204€68€601€€2€€€100€€86€€27€1€11€1€€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€
0088€100€$991€02€6€€111€6898€969€1€5299€1€€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€ -
81€6€26€€991€€298€€521€6628€5289€0718€1€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2 €
96€Z698€15€€10€€598€8895€7189€9969€€2-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€- €
£268€208€9€956€€2101€5692€1888€11400€€2-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€- €
699E1€2898€02081€€8091€811€11866€1828€€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€ 1
003€1021€8€1297€2€7691€05291€6688€20749€2€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2
05011020€7€397€3€7891€19801€5211288812€2-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€ -
003E187€350€500S8781€5271F0761F8662F€2-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€ 1
9981029905€9981€€561F66991F0029F7269F€2-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€- €1
1008826€72E1986€7296F8108F1699F7299F€2-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€- €1
9958982€87F957F7801F8802F0025F9661F€2-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€- €1
97E2001178F972F679F795F028F978FM1-1.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.1
9919187F97F987F892F969F1086F1160FM1-1.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.1
52606F87F18S1601F869F928F998FM1-1.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.44
£24E1108E86E1708111E602E1889F2719F2M1-1.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.44
00901678F09F572S870E16198024818799FM1-1.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.5
899F6933F99F729S8791F57201F882F89691FM1-1.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.6
78602971E13E6001F5711F98F1F5051FM1-1.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.441
781163669S72015711F0001F92F1F9751FM1-1.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.5
586810185Z979F1691F7291F506F718FM1-1.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4
", + "bbox": [ + 344, + 103, + 648, + 664 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Table 6: Per-dataset evaluation results (response token length) using greedy decoding.", + "bbox": [ + 187, + 676, + 808, + 691 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "E Per-dataset Evaluation Results", + "text_level": 1, + "bbox": [ + 173, + 715, + 477, + 731 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "In Tables 5 and 6, we share the per-dataset evaluation results using greedy decoding, and in Tables 7 and 8, we share results using temperature sampling with top-p=0.95 and T=0.6. We use the max response length of 32,768 tokens for all experiments. For temperature sampling, we use random five seeds and aggregate the results, and we further report the confidence interval to share the deviation of the metrics.", + "bbox": [ + 173, + 747, + 823, + 816 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Preprint. Under review.", + "bbox": [ + 173, + 32, + 346, + 47 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 490, + 948, + 506, + 959 + ], + "page_idx": 16 + }, + { + "type": "table", + "img_path": "images/6c455391b2d122928abedb18a5c13288000920131de32b0aab66a1aed24b147c.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
20'1+30'1852'0+0'1601'0+19'1662'0+91'1854'0+05'9972'1+05'2605'2+20'0405'2+20'09(92)18
16'0+18'682'0+26'3616'0+00'3852'0+10'2816'0+16'2968'0+00'1620'2+99'1464'2+16'29(92)18
16'0+57'2216'0+80'3661'0+09'2652'0+05'3872'0+05'2985'1+05'2616'1+16'2911'2+00'05(92)18
52'1+19'212'0+80'3612'0+14'3882'0+12'1812'0+05'1605'2+86'6612'2+00'0511'2+00'05(92)18
68'0+60'2212'0+95'1652'0+95'6852'0+95'1882'0+95'0985'1+05'0622'2+00'8522'2+00'05(92)18
11'1+89'1212'0+96'1612'0+00'2852'0+91'0812'0+06'0905'2+00'2661'2+20'9526'2+20'14(92)18
12'1+19'052'0+09'0612'0+92'1858'0+12'1482'0+05'6905'2+05'6885'1+16'1994'1+00'44(92)18
80'1+27'1292'0+29'0622'0+27'5852'0+90'2255'0+88'6922'1+00'0605'2+00'0912'2+00'85(92)18
50'1+62'2515'0+02'1805'0+29'2894'0+91'2215'0+05'2900'2+99'2794'1+89'2282'2-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16(92)18
16'0+18'816'0+09'2216'0+09'2262'0+18'1416'0+96'6725'1+00'1488'2+00'0612'2+00'97(92)18
16'0+18'895'0+95'6261'0+02'9809'0+80'2905'0+25'1488'2+00'1900'0+02'9720'2+16'25(92)18
11'1+29'1512'0+01'9211'0+05'5852'0+28'2912'0+85'2799'2+99'9299'2+99'9286'1+00'92(92)18
01'1+19'812'0+80'1811'0+89'9882'0+98'2905'0+26'9721'7+05'6512'2+89'7711'2+16'25(92)18
88'0+16'812'0+91'8602'0+97'8882'0+06'0885'0+19'1918'0+05'1620'2+99'1982'1+16'19(92)18
81'1+19'2294'0+09'2602'0+87'2694'0+06'8212'0+88'3985'0+05'6622'2+99'1982'2-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-(92)18
99'0+98'7972'0+02'8872'0+98'6655'0+02'9472'0+82'3968'0+05'6412'2+22'2282'2-99'77(92)18
81'1+97'8911'0+17'0602'0+88'5682'0+07'0872'0+98'8511'1+05'7868'1+19'9585'2+20'95(92)18
16'0+06'9552'0+26'1811'0+17'5661'0+09'2772'0+86'9785'0+05'6912'2+20'1162'1+20'51(92)18
70'1+50'6552'0+27'5855'0+02'0844'0+26'2782'0+85'6740'2+05'8966'1+86'2702'1+00'87(92)18
06'0+10'1262'0+26'1672'0+27'9872'0+00'5215'0+08'2918'0+05'0622'2+22'9522'2+00'07(92)18
60'1+89'1252'0+80'2681'0+21'5681'0+95'1889'0+89'6502'2+00'1882'2+99'8782'2-99'07(92)18
80'1+99'7572'0+09'2872'0+95'6611'0+80'9981'0+27'6519'2+05'8520'2+99'1182'2-99'07(92)18
98'1+00'6552'0+27'9862'0+87'1462'1+97'5782'0+19'2757'1+05'1920'2+22'9156'0+89'9(92)18
16'0+28'4792'0+27'4782'0+95'8882'0+06'8582'0+05'0572'1+89'2172'1+89'2172'1+89'07(92)18
", + "bbox": [ + 344, + 164, + 648, + 820 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Table 7: Per-dataset evaluation results (accuracies) using temperature sampling (t=0.6 and top-p=0.95). The numbers after $\\pm$ means the $95\\%$ confidence interval.", + "bbox": [ + 171, + 830, + 823, + 861 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Preprint. Under review.", + "bbox": [ + 173, + 32, + 346, + 47 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 948, + 506, + 959 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/6909fe2479fd95798268964aca135d4c6a47fcb9c6917ae729eaa3a308147b7d.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
01I + 10CS11 + 2062εI + 506εE + 28828ε + 1849εII + 3944εE + 820662I + 0601(92I) 2εE-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R -
81I + 191912 + 09FCεI + 59118Z + 28ECΔF + 274212I + 278528Z + 208689Z + 2021(92I) 12I - 9I + 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E
90I + 276912 + 591E9 + 27612 + 27E97 + 010288 + 2855661 + 998629E + 266182E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 22E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 30E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 28E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 20E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 31E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 21E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 32E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 10E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 11E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 15E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 24E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 25E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 33E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 23E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 34E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 
35E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 16E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 27E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 26E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 37E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 36E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 18E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 38E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 17E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 14E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 13E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 19E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 3101I + 107C101I + 107C101I + 107C101I + 107C101I + 107C101I + 107C101I + 107C101I + 107C
00I + 00C11 + 00C6 + 00I10 + 00C8 + 00C10 + 00C8 + 00C10 + 00C8 + 00C8 + 00C
00T + 00T11 + 00T6 + 00T10 + 00T8 + 00T10 + 00T8 + 00T10 + 00T8 + 00T8 + 00T
00Z + 00Z11 + 00Z6 + 00Z10 + 00Z8 + 00Z10 + 00Z8 + 00Z10 + 00Z8 + 00Z8 + 00Z
00Z + 00Z11 + 00Z6 + 00Z10 + 00Z8 + 00Z10 + 00Z8 + 00Z10 + 00Z8 + 00Z
00Z + 00Z11 + 00Z6 + 00Z10 + 00Z8 + 00Z10 + 00Z8 + 00Z10 + 00Z8 + 00Z
00Z + 00Z11 + 10Z6 + 00Z10 + 00Z8 + 00Z10 + 00Z8 + 00Z10 + 00Z8 + 00Z
00Z + 00Z11 + 10Z6 + 00Z10 + 00Z8 + 00Z10 +00Z8 + 00Z10 +00Z8 + 00Z
00Z + 00Z11 + 10Z6 + 00Z10 + 00Z8 + 00Z10 +00Z8 + 00Z10 +00Z8 + 00Z
00Z + 00Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z
00Z + 00Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z
00Z + 00Z11 + 10Z11 = 10Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z
00Z + 00Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z11 +10Z11 +10Z11 +10Z11 +10Z
00Z + 00Z11 + 10Z11 + 10Z11 +10Z11 +10Z11 +10Z11 +10Z11 +10Z11 +10Z
00Z + 00Z11 + 10Z11 +10Z11 +10Z11 +10Z11 +10Z11 +10Z11 +10Z11 +10Z
00Z + 00Z11 + 10Z11 +10Z11 +10Z11 +10Z11 +10Z11 +10Z
00Z + 00Z11 +10Z11 +10Z11 +10Z11 +10Z11 +10Z11 +10Z
", + "bbox": [ + 344, + 171, + 648, + 813 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Table 8: Per-dataset evaluation results (model response token length) using temperature sampling $(t = 0.6$ and top- $p = 0.95)$ . The numbers after $\\pm$ means the $95\\%$ confidence interval.", + "bbox": [ + 171, + 824, + 823, + 853 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "Preprint. Under review.", + "bbox": [ + 173, + 32, + 346, + 47 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 490, + 948, + 506, + 959 + ], + "page_idx": 18 + } +] \ No newline at end of file diff --git a/data/2025/2504_04xxx/2504.04383/fd4cbf72-3c61-47c5-a7ef-ba8037d47f6a_model.json b/data/2025/2504_04xxx/2504.04383/fd4cbf72-3c61-47c5-a7ef-ba8037d47f6a_model.json new file mode 100644 index 0000000000000000000000000000000000000000..064979ca36e6cc0d8d6c170537b7472afb9e6638 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04383/fd4cbf72-3c61-47c5-a7ef-ba8037d47f6a_model.json @@ -0,0 +1,2141 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Preprint. Under review." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.1, + 0.826, + 0.143 + ], + "angle": 0, + "content": "Retro-Search: Exploring Untaken Paths for Deeper and Efficient Reasoning" + }, + { + "type": "text", + "bbox": [ + 0.181, + 0.165, + 0.818, + 0.217 + ], + "angle": 0, + "content": "Ximing Lu†‡ Seungju Han†§ David Acuna† Hyunwoo Kim† Jaehun Jung† Shrimai Prabhumoye† Niklas Muennighoff§ Mostofa Patwary† Mohammad Shoeybi† Bryan Catanzaro† Yejin Choi†" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.218, + 0.731, + 0.248 + ], + "angle": 0, + "content": "†NVIDIA ‡University of Washington §Stanford University {ximingl, seungjuh, dacunamarrer, hyunwook, jaehunj, yejin}@nvidia.com" + }, + { + "type": "title", + "bbox": [ + 0.459, + 0.283, + 0.542, + 0.299 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.317, + 0.77, + 0.751 + ], + "angle": 0, + "content": "Large reasoning models, such as OpenAI o1 and DeepSeek-R1, demonstrate remarkable reasoning capabilities via long, elaborate reasoning trajectories. Numerous follow-up studies report that supervised fine-tuning on such reasoning traces, also known as distillation, can be a cost-effective way to boost reasoning capabilities of smaller student models. However, empirical observations reveal that these reasoning trajectories are often suboptimal, switching excessively between different lines of thought, resulting in underthinking, over-thinking, and even degenerate responses. In this work, we introduce Retro-Search, a search algorithm in the spirit of Monte-Carlo Tree Search, for distilling higher quality reasoning paths from large reasoning models. Retro-Search retrospectively revises reasoning paths to discover better, yet shorter traces, which can then lead to student models with enhanced reasoning capabilities with shorter, thus faster inference. 
Our approach can enable two use cases: self-improvement, where models are fine-tuned on their own Retro-Search-ed thought traces, and weak-to-strong improvement, where a weaker model revises stronger model's thought traces via Retro-Search. For self-improving, R1-distill-7B, fine-tuned on its own Retro-Search-ed traces, reduces the average reasoning length by \\(31.2\\%\\) while improving performance by \\(7.7\\%\\) across seven math benchmarks. For weak-to-strong improvement, we retrospectively revise R1-671B's traces from the OpenThoughts dataset (Team, 2025) using R1-distill-32B as the Retro-Search-er, a model \\(20\\times\\) smaller. Qwen2.5-32B, fine-tuned on 40k instances of this refined data, achieves performance comparable to R1-distill-32B, yielding an \\(11.3\\%\\) reduction in reasoning length and a \\(2.4\\%\\) performance improvement compared to fine-tuning on the original OpenThoughts data. More excitingly, R1-distill-7B and R1-distill-32B, fine-tuned on this revised data, achieve new state-of-the-art reasoning performance at the 7B and 32B scales while yielding the highest inference efficiency. Our work counters recently emergent viewpoints that question the relevance of search algorithms in the era of large reasoning models, by demonstrating that there are still opportunities for algorithmic advancements, even for frontier models." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.787, + 0.32, + 0.803 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.821, + 0.828, + 0.896 + ], + "angle": 0, + "content": "Recent state-of-the-art LLMs, such as OpenAI o1 and DeepSeek-R1, have demonstrated remarkable capabilities in solving complex reasoning problems by scaling test-time compute. 
Test-time scaling enables the model to produce extended reasoning trajectories—an inner monologue akin to an implicit internal search—where the model explores multiple potential solution paths and verifies itself (OpenAI, 2024; DeepSeek-AI et al., 2025; Qwen Team, 2025)." + }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.271, + 0.061, + 0.707 + ], + "angle": 270, + "content": "arXiv:2504.04383v2 [cs.AI] 15 Apr 2025" + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.908, + 0.321, + 0.924 + ], + "angle": 0, + "content": "\\(\\clubsuit\\) First co-authors." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Preprint. Under review." + }, + { + "type": "text", + "bbox": [ + 0.238, + 0.101, + 0.759, + 0.113 + ], + "angle": 0, + "content": "Question: Given a sequence \\(a_{n}\\) where \\(a_{n} = -4\\) when \\(n\\) is odd, and \\(a_{n} = 7\\) when \\(n\\) is even, write a formula for the \\(n\\)-th term." + }, + { + "type": "image", + "bbox": [ + 0.19, + 0.119, + 0.805, + 0.532 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.54, + 0.828, + 0.626 + ], + "angle": 0, + "content": "Figure 1: An example reasoning trace from Retro-Search in weak-to-strong revision. 
A reasoning trace consists of a series of thoughts segmented by transition keywords (e.g., \"alternatively\", \"wait\"), with each thought composed of a sequence of intermediate steps, delimited by '\\n\\n'. Retro-Search retrospectively revises reasoning trajectories - exploring promising thoughts that were prematurely abandoned to mitigate under-thinking while avoiding redundant thoughts once the correct answer is evident to reduce over-thinking."
  },
  {
    "type": "text",
    "bbox": [
      0.17,
      0.645,
      0.828,
      0.718
    ],
    "angle": 0,
    "content": "Reinforcement learning (RL) has been shown to enable this behavior as training progresses, with key \"aha\" moments in the training dynamics where models begin to generate longer responses and spontaneously develop alternative strategies for problem-solving, verification, and self-correction. As a result, average response length tends to grow proportionally with performance (DeepSeek-AI et al., 2025; Zeng et al., 2025a; HuggingFace, 2025)."
  },
  {
    "type": "text",
    "bbox": [
      0.17,
      0.722,
      0.828,
      0.794
    ],
    "angle": 0,
    "content": "At the same time, contradictory signals have emerged around whether RL is strictly necessary to enable these behaviors. Cost-effective approaches suggest that access to long reasoning traces may be the key. In fact, recent work shows it is possible to replicate or sometimes even surpass o1 and R1 performance on challenging math benchmarks using long reasoning traces and supervised fine-tuning (Muennighoff et al., 2025; Team, 2025)."
  },
  {
    "type": "text",
    "bbox": [
      0.17,
      0.799,
      0.828,
      0.927
    ],
    "angle": 0,
    "content": "This growing belief—that longer reasoning traces equals better reasoning—has shaped much of the recent progress in training and scaling strategies. However, is longer thinking always better? At the surface level, it may appear so. 
Long thought allows the model to explore alternative solution paths, define subgoals, backtrack, verify and self-correct. These cognitive behaviors, akin to human problem-solving, have been indeed shown to be beneficial for reasoning models (Gandhi et al., 2025). Furthermore, it is intuitive that complex problems inherently require lengthier deliberations. However, several recent works have demonstrated that longer responses do not always yield better results. In fact, incorrect responses often involve longer reasoning traces marked by frequent switches between"
  },
  {
    "type": "page_number",
    "bbox": [
      0.493,
      0.948,
      0.506,
      0.96
    ],
    "angle": 0,
    "content": "2"
  }
],
[
  {
    "type": "header",
    "bbox": [
      0.174,
      0.034,
      0.347,
      0.048
    ],
    "angle": 0,
    "content": "Preprint. Under review."
  },
  {
    "type": "text",
    "bbox": [
      0.175,
      0.103,
      0.825,
      0.174
    ],
    "angle": 0,
    "content": "different lines of thought where the model prematurely abandons promising directions—a tendency coined by Wang et al. (2025) as under-thinking. On the other hand, over-thinking occurs when the model inefficiently expends resources by engaging in excessive verification or redundant checks after arriving at a final answer, contributing minimally to accuracy improvements (Chen et al., 2024)."
  },
  {
    "type": "text",
    "bbox": [
      0.175,
      0.18,
      0.825,
      0.321
    ],
    "angle": 0,
    "content": "Then, is shorter necessarily better? The phenomena of under-thinking and over-thinking have motivated several ad-hoc heuristics that use response length as a proxy for downstream performance (Wang et al., 2025; Fu et al., 2024). For instance, a naive approach to boost a model's reasoning capability is supervised fine-tuning on the shortest reasoning trajectories distilled from large state-of-the-art models such as DeepSeek-R1 671B. However, blind shortening is inherently limited, as length alone may not reliably indicate thoughtfulness or reasoning quality. 
Short responses may overlook nuanced considerations or miss essential parts of the meta-thinking process (Xiang et al., 2025). Furthermore, employing simple length-based heuristics disregards the complexity and semantic coherence of generated content, potentially discarding useful reasoning sequences that are verbose yet insightful." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.326, + 0.825, + 0.411 + ], + "angle": 0, + "content": "Our goal is to consolidate these disparate observations on the quality of reasoning trajectories. We ask—if overly long reasoning is not always beneficial, and blind shortening is suboptimal, how can we discourage under-thinking and over-thinking, and collect more efficient and effective solutions? We argue that search is an effective means of eliciting better reasoning-producing trajectories that are both efficient and insightful, yet shorter in length—and can be used to train stronger student models." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.416, + 0.825, + 0.529 + ], + "angle": 0, + "content": "In this work, we introduce Retro-Search, a search algorithm in the spirit of Monte-Carlo Tree Search (MCTS) for distilling higher quality reasoning data from large reasoning models. Retro-Search retrospectively revises a given reasoning path by suppressing unnecessary thought switches to collect more efficient and effective alternatives. Figure 1 shows an example of Retro-Search refining a reasoning trace from DeepSeek-R1. It expands promising thoughts that were prematurely abandoned to mitigate under-thinking while pruning redundant thoughts once the correct answer becomes evident to reduce over-thinking, resulting in more effective yet shorter reasoning traces." 
+ }, + { + "type": "text", + "bbox": [ + 0.175, + 0.535, + 0.826, + 0.744 + ], + "angle": 0, + "content": "Contrary to prior attempts where search struggled to improve reasoning effectively, we show that our method is highly effective in two key settings: (1) Self-improvement—Retro-Search can bootstrap self-improvement in reasoning models, by training a model on its own Retro-Search-ed trajectories. We demonstrate that this simple step, despite not relying on frontier model capabilities, yields significant performance gain (of up to \\(7.7\\%\\)) while reducing inference time by \\(31.2\\%\\). (2) Weak-to-strong revision—Retro-Search can revise even the reasoning traces generated by an expensive, frontier reasoning model with a substantially smaller, more efficient model, yet significantly improving the quality of dataset. For example, we revise reasoning traces generated by R1-671B using a \\(20\\times\\) smaller model R1-distill-32B as the Retro-Search-er. Yet after training on this revised data, Qwen2.5-32B achieves performance comparable to R1-distill-32B, yielding an \\(11.3\\%\\) reduction in reasoning length and a \\(2.4\\%\\) performance improvement compared to fine-tuning on the original R1-671B's trajectories. And, more excitingly, R1-distill-7B and R1-distill-32B, fin-tuned on this revised data, achieve new state-of-the-art reasoning performance at the 7B and 32B scales while yielding the highest inference time efficiency." + }, + { + "type": "title", + "bbox": [ + 0.175, + 0.763, + 0.276, + 0.78 + ], + "angle": 0, + "content": "2 Method" + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.795, + 0.825, + 0.853 + ], + "angle": 0, + "content": "We introduce Retro-Search, an MCTS-inspired algorithm that explores untaken steps for deeper and more efficient reasoning. 
Its goal is to revise and improve a given reasoning path by encouraging continuation instead of prematurely switching to a new thought, ultimately seeking to reach the correct answer more efficiently, i.e. with fewer steps." + }, + { + "type": "title", + "bbox": [ + 0.175, + 0.869, + 0.314, + 0.884 + ], + "angle": 0, + "content": "2.1 Preliminaries" + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.895, + 0.825, + 0.925 + ], + "angle": 0, + "content": "Consider a reasoning model \\(\\mathcal{M}\\) that, when given a question \\(q\\), generates both an intermediate reasoning trajectory \\(T\\) and a final solution \\(a\\). Formally, given an input question \\(q \\in \\mathcal{Q}\\)," + }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.95, + 0.504, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Preprint. Under review." + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.101, + 0.825, + 0.419 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.427, + 0.828, + 0.513 + ], + "angle": 0, + "content": "Figure 2: An overview of our Retro-Search algorithm. The algorithm iterates through the thoughts and explores untaken paths from steps that come before a thought-switch, which is marked by transition keywords like \"wait\" or \"another approach.\" During the process, it performs multiple rollouts, suppressing these transition keywords in the immediate next step. If the search is successful, the existing trajectory is replaced with the new rollout, and the process continues through the updated trajectory." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.529, + 0.825, + 0.561 + ], + "angle": 0, + "content": "the model \\(\\mathcal{M}\\) produces \\((T, a) \\coloneqq \\mathcal{M}(q)\\), where \\(T \\in \\mathcal{T}\\) denotes the chain of reasoning, or chain of \"thoughts\", and \\(a \\in \\mathcal{A}\\) represents the final solution to \\(q\\)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.566, + 0.827, + 0.669 + ], + "angle": 0, + "content": "Each trajectory \\( T \\) can be decomposed in to a set of thoughts, i.e., \\( T := \\{s^1, s^2, \\ldots, s^\\tau\\} \\). Each \\( s^\\tau \\) denotes an individual thought, and each thought may perform distinctive role such as trying out a new solution strategy, reflecting its progress, back-tracking or verifying calculations, etc. In order to differentiate between independent thoughts, we attend to the fact that models often leverage transition keywords (e.g., \"alternatively\") to make a natural transition between thoughts, e.g. \\( s^\\tau \\rightarrow s^{\\tau+1} \\). We utilize these linguistic markers to segment and extract individual thoughts from the full reasoning trace." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.673, + 0.829, + 0.763 + ], + "angle": 0, + "content": "Each thought \\( s^{\\tau} \\) itself is a sequence of intermediate steps \\( s_i^\\tau \\)s—that is, \\( s^{\\tau} := \\{s_1^{\\tau}, s_2^{\\tau}, \\dots, s_k^{\\tau}\\} \\). These intermediate steps \\( s_k^\\tau \\) represent atomic units of reasoning within a thought—such as sub-conclusions, calculations, or logical deductions. In practice, steps are delimited by '\\n\\n(double newline) characters in the model's output. We adapt the convention of using the superscript \\( \\tau \\) to index the thought, and the subscript \\( k \\) to index the step within that thought. For example, \\( s_k^\\tau \\) refers to the \\( k \\)-th step within the \\( \\tau \\)-th thought." 
+ }, + { + "type": "text", + "bbox": [ + 0.172, + 0.767, + 0.757, + 0.784 + ], + "angle": 0, + "content": "Utilizing the notations above, we represent a complete reasoning trajectory \\( T \\) as:" + }, + { + "type": "equation", + "bbox": [ + 0.334, + 0.789, + 0.826, + 0.822 + ], + "angle": 0, + "content": "\\[\nT = \\left\\{\\left\\{s _ {1} ^ {1}, s _ {2} ^ {1}, \\dots , s _ {k _ {1}} ^ {1} \\right\\}, \\left\\{s _ {1} ^ {2}, s _ {2} ^ {2}, \\dots , s _ {k _ {2}} ^ {2} \\right\\}, \\dots , a \\right\\} \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.827, + 0.828, + 0.927 + ], + "angle": 0, + "content": "The under-thinking issue: too many shallow thoughts. Previous studies have shown that R1-like models exhibit an under-thinking issue in their reasoning process (Wang et al., 2025). These models frequently abandon promising reasoning paths prematurely, leading to inadequate depth of reasoning on challenging problems. This phenomenon (1) occurs more frequently on harder problems, (2) leads to frequent switching between different thoughts without reaching a conclusion in each, and (3) correlates with incorrect responses due to insufficient exploration of reasoning paths." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.348, + 0.049 + ], + "angle": 0, + "content": "Preprint. Under review." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.825, + 0.176 + ], + "angle": 0, + "content": "The over-thinking issue: too many redundant thoughts. Conversely, R1-like models also suffer from an over-thinking issue (Sui et al., 2025; Chen et al., 2024), where they expend excessive compute on questions that are exceptionally simple or for which the answer is already evident. 
The model tends to generate unnecessary thoughts such as self-doubt and redundant verification, even when it produces the correct answer within its early steps." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.181, + 0.827, + 0.239 + ], + "angle": 0, + "content": "The seemingly contradictory issues of under-thinking and over-thinking share a common cause: unnecessarily initiating a new thought. In under-thinking, the model switches to a new thought without fully exploring a previously promising path. In over-thinking, despite the answer being evident, a new thought is started instead of directly generating the answer." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.254, + 0.31, + 0.267 + ], + "angle": 0, + "content": "2.2 Retro-Search" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.28, + 0.825, + 0.402 + ], + "angle": 0, + "content": "The goal of Retro-Search is to start from a tuple \\((q,T)\\) generated by \\(\\mathcal{M}\\), and search for an improved trajectory \\(T^{*}\\) using a revision model \\(\\widehat{\\mathcal{M}}\\). Here, we focus only on revising \\(T\\) that leads to the correct final answer (i.e., \\(a = a^{\\star}\\)). Intuitively, we consider \\(T^{*}\\) to be better than \\(T\\) if it leads to the same final answer \\(a\\) with fewer reasoning steps—i.e., by avoiding both over-thinking and under-thinking. We specifically consider two settings of Retro-Search, depending on how we set the revision model—(1) Self-Retro-Search, where \\(\\widehat{\\mathcal{M}}\\) is set to be the original model \\(\\mathcal{M}\\) that produced \\(T\\), and (2) Weak-to-Strong-Retro-Search (W2S-Retro-Search), where \\(\\widehat{\\mathcal{M}}\\) is a smaller, cost-efficient model than \\(\\mathcal{M}\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.417, + 0.825, + 0.508 + ], + "angle": 0, + "content": "Collecting alternative rollouts The core rationale behind Retro-Search is that there may exist an alternative trajectory for a given problem that is shorter than the original trajectory, yet still leads to a correct answer. To discover such a trajectory, we iteratively explore alternative rollouts to investigate what would happen if, instead of starting a new thought \\( s^{\\tau +1} \\) after \\( s^\\tau \\) (i.e., generate \\( s_1^{\\tau +1} \\)), we continued the current thought \\( s^\\tau \\). Concretely, for each thought \\( s^\\tau \\) in \\( T \\) (Eq. 1), we generate an alternative rollout using \\( \\widehat{\\mathcal{M}} \\) as:" + }, + { + "type": "equation", + "bbox": [ + 0.336, + 0.513, + 0.826, + 0.539 + ], + "angle": 0, + "content": "\\[\n\\left\\{s _ {k + 1} ^ {\\tau}, \\dots , a \\right\\} \\sim \\widehat {\\mathcal {M}} \\left(s ^ {1}, s ^ {2}, \\dots , \\left\\{s _ {1} ^ {\\tau}, s _ {2} ^ {\\tau}, \\dots , s _ {k} ^ {\\tau} \\right\\}\\right) \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.542, + 0.825, + 0.631 + ], + "angle": 0, + "content": "Importantly, when generating the immediate next step \\( s_{k+1}^{\\tau} \\), we constrain the model to stay within a single thought by preventing it from initiating a new one in the next step—by prohibiting the usage of thought-transition keywords (e.g., \"alternatively,\" \"wait\") during the decoding process. This encourages deeper exploration of the current thought rather than prematurely switching to different lines of thought. Subsequent steps after \\( s_{k+1}^{\\tau} \\) are generated without constraints to allow free on-policy exploration." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.644, + 0.825, + 0.698 + ], + "angle": 0, + "content": "Evaluating alternative rollouts To determine whether the alternative rollout \\(\\{s_{k + 1}^{\\tau},\\ldots ,a\\}\\) is better than the existing path \\(\\{s_1^{\\tau +1},\\dots ,a\\}\\), we define a value function \\(V(s)\\) over the \\(i\\)-th step \\(s_i\\) in the trajectory \\(\\{s_1,\\dots ,a\\}\\) to compare \\(V(s_{k + 1}^{\\tau})\\) with \\(V(s_{1}^{\\tau +1})\\):" + }, + { + "type": "equation", + "bbox": [ + 0.394, + 0.702, + 0.825, + 0.721 + ], + "angle": 0, + "content": "\\[\nV \\left(s _ {i}, a ^ {\\star}\\right) := \\gamma^ {N - i} R \\left(a \\left(s _ {i}\\right), a ^ {\\star}\\right) \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.725, + 0.825, + 0.838 + ], + "angle": 0, + "content": "where \\(N\\) represents the total number of steps in the trajectory \\(\\{s_1, \\ldots, a\\}\\). Here, we write \\(a(s_i) \\coloneqq \\{s_i, \\ldots, a\\}\\) to explicitly emphasize that the value depends on the specific step \\(s_i\\) and captures the autoregressive dependence of the generated answer \\(a\\) on the continuation from step \\(s_i\\). The reward function \\(R(a, a^{\\star})\\) is binary, indicating whether the generated answer \\(a\\) matches the ground truth \\(a^{\\star}\\) (i.e., using a verifiable reward). We apply a decay factor \\(\\gamma\\) to discount future rewards, assigning higher value to paths that reach the correct answer in fewer steps. Concretely, we set to \\(\\gamma = 0.9\\) in our experiments. In what follows, we drop the detailed notation and refer to the above simply as \\(V(s)\\) for clarity." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.845, + 0.827, + 0.926 + ], + "angle": 0, + "content": "If \\( V(s_{k+1}^{\\tau}) > V(s_1^{\\tau+1}) \\), the rollout reaches the correct final answer in fewer steps, and we replace the existing path \\( \\{s_1^{\\tau+1}, \\ldots, a\\} \\) with the rollout \\( \\{s_{k+1}^{\\tau}, \\ldots, a\\} \\). This could occur when exploring deeper along the current thought is more effective, thus reducing under-thinking. Alternatively, \\( s_{k+1}^{\\tau} = a \\) indicates that the previous thought steps are already sufficient for the model to generate the correct solution directly, thereby reducing over-thinking." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.948, + 0.505, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Preprint. Under review." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.103, + 0.827, + 0.204 + ], + "angle": 0, + "content": "In contrast, if \\( V(s_{k+1}^{\\tau}) < V(s_1^{\\tau+1}) \\), the existing path is better. The alternative path either reaches a wrong answer or reaches the correct answer with more steps. This suggests that switching to a new thought was effective and necessary, and thus the existing transition should be preserved. In practice, we sample multiple alternative rollouts (two in our experiments) and retain the best one—that is, the rollout with the highest value. We then proceed to examine the next thought in the updated reasoning trajectory. Please see Figure 2 for a concrete example, and Algorithm 1 in Appendix A for more details." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.212, + 0.828, + 0.285 + ], + "angle": 0, + "content": "Retro-Search with Partial Revisions We also propose a more computationally efficient variant of Retro-Search. 
Instead of iteratively applying the revision procedure starting from the first thought, this version randomly samples a position in the trajectory at which to begin the revision. This is particularly useful when revising with larger models—for instance, the R1-32B model in our setting—where full iterative revision would be prohibitively expensive." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.3, + 0.319, + 0.318 + ], + "angle": 0, + "content": "3 Experiments" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.329, + 0.258, + 0.345 + ], + "angle": 0, + "content": "3.1 Setup" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.351, + 0.828, + 0.466 + ], + "angle": 0, + "content": "Data Generation We use 40K math questions from NuminaMath (LI et al., 2024). Specifically, we sample NuminaMath questions from OpenThoughts-114k\\(^{1}\\) dataset, which is the training data of OpenThinker-7B and OpenThinker-32B models. We experiment with two settings: 1) Self-Retro-R1-7B, where we first generate responses using the R1-distilled Qwen2.5-7B model and then revise them with the same model as the Retro-Search-er. 2) W2S-Retro-R1-32B, where we take responses from the DeepSeek-R1 671B model in the OpenThoughts dataset and revise them using a weaker model, R1-distilled Qwen2.5-32B. More details are in Appendix B." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.474, + 0.827, + 0.532 + ], + "angle": 0, + "content": "Model Training We trained four models using data generated by Retro-Search: Qwen2.5-7B-Instruct, R1-distilled Qwen2.5-7B, Qwen2.5-32B-Instruct and R1-distilled Qwen2.5-32B with supervised fine-tuning. All models are fine-tuned for five epochs with learning rate of 1e-5, and sequence length of 16K. More details are in Appendix C." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.54, + 0.827, + 0.653 + ], + "angle": 0, + "content": "Baselines We compare our trained models with a total of eleven open-weight models across two model size categories — six 7B models and five 32B models. These include instruction-tuned models such as Qwen2.5-7B-Inst (Yang et al., 2024a), Qwen2.5-Math-7B, Qwen2.5-Math-7B-Inst (Yang et al., 2024b) and Qwen2.5-32B-Inst (Yang et al., 2024a), as well as reasoning models such as OpenR1-Qwen-7B (HuggingFace, 2025), OpenThinker-7B (Team, 2025), R1-distill Qwen2.5-7B (DeepSeek-AI et al., 2025), OpenThinker-32B (Team, 2025), QwQ-32B-Preview (Qwen Team, 2025), Sky-T1-32B-Preview (NovaSky, 2025), and R1-distill Qwen2.5-32B (DeepSeek-AI et al., 2025). More baseline details are in Appendix D." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.66, + 0.827, + 0.746 + ], + "angle": 0, + "content": "Benchmarks and Metrics We evaluate models on seven math-specific benchmarks: AIME25, AIME24, AMC23, GaoKao23English (Zhong et al., 2023), OlympiadBench (He et al., 2024), GSM8K (Cobbe et al., 2021), and MATH500 (Lightman et al., 2023). The first five benchmarks focus on olympiad-level math problems, where AIME25 and AIME24 each contain 30 problems and AMC23 contains 40 problems. GSM8K includes grade school math problems, and MATH500 includes high-school math competition problems." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.751, + 0.825, + 0.825 + ], + "angle": 0, + "content": "For evaluation, we report two metrics: accuracy to measure the performance, and average response length to measure computational efficiency during inference. For accuracy, we use exact match between the model's prediction and the reference answer, with Qwen's official implementation for answer verification. For response length, we tokenize the responses using the Qwen2.5-7B-Instruct tokenizer and compute the number of output tokens." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.829, + 0.825, + 0.86 + ], + "angle": 0, + "content": "Metrics are computed individually for each benchmark and then averaged using macro averaging to produce the final scores. Since there is no universally optimal decoding strategy" + }, + { + "type": "page_footnote", + "bbox": [ + 0.194, + 0.87, + 0.668, + 0.884 + ], + "angle": 0, + "content": "1https://huggingface.co/datasets/open-thoughts/OpenThoughts-114k" + }, + { + "type": "page_footnote", + "bbox": [ + 0.195, + 0.884, + 0.59, + 0.898 + ], + "angle": 0, + "content": "2https://github.com/QwenLM/Qwen2.5-Math/tree/main." + }, + { + "type": "page_footnote", + "bbox": [ + 0.174, + 0.898, + 0.825, + 0.925 + ], + "angle": 0, + "content": "Note that evaluation results can significantly vary depending on the specifics of the answer verification, so we recommend to use the same implementation for reproduction." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.87, + 0.825, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Preprint. Under review." + }, + { + "type": "table", + "bbox": [ + 0.223, + 0.101, + 0.778, + 0.46 + ], + "angle": 0, + "content": "
ModelsGreedy DecodingSampling (T=0.6, p=0.95)
Accuracy (↑)Length (↓)Accuracy (↑)Length (↓)
Baselines (7B)
Qwen2.5-Math-7B41.1118239.01225
Qwen2.5-Math-7B-Inst53.198252.7985
OpenR1-Qwen-7B67.6946371.77740
OpenThinker-7B53.81447759.19835
Qwen2.5-7B-Inst48.798547.91033
+ R1-7B49.71436555.48959
+ Self-Retro-R1-7B51.7 (+4.1%)11050 (-23.1%)55.8 (+0.7%)8263 (-7.8%)
+ R1-671B51.51430258.49824
+ W2S-Retro-R1-32B55.3 (+7.3%)13569 (-5.1%)57.8 (-1.1%)8940 (-9.0%)
R1-distill-Qwen2.5-7B64.51060071.06831
+ R1-671B68.4941871.77172
+ W2S-Retro-R1-32B70.8 (+3.5%)8800 (-6.6%)73.1 (+2.0%)6535 (-8.9%)
Baselines (32B)
OpenThinker-32B73.0800175.96840
QwQ-32B-Preview70.9516468.35163
Sky-T1-32B-Preview62.0236762.92018
Qwen2.5-32B-Inst56.197555.9761
+ R1-671B76.2707475.66676
+ W2S-Retro-R1-32B74.6 (-2.2%)6809 (-3.7%)77.5 (+2.4%)5923 (-11.3%)
R1-distill Qwen2.5-32B73.1856677.76173
+ R1-671B (12K)80.4647079.86164
+ W2S-Retro-R1-32B (12K)79.9 (-0.6%)6091 (-5.9%)81.0 (+1.5%)5301 (-14.0%)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.17, + 0.468, + 0.828, + 0.595 + ], + "angle": 0, + "content": "Table 1: Retro-Search provides better training data. Model evaluation results averaged across seven math benchmarks (AIME25, AIME24, AMC23, GaoKao23English, Olympiad-Bench, GSM8K, and MATH500). We report results from two setups: greedy decoding \\((\\mathrm{T} = 0)\\) and temperature sampling \\((\\mathrm{T} = 0.6\\) with top-p \\(= 0.95)\\). \\(+X\\) indicates that the model is fine-tuned with data X. Only when fine-tuning R1-distill Qwen2.5-32B, we used 12K instances, as using more data did not improve results. The results indicate that: (1) models trained with Retro-Search data are more computationally efficient during inference while generally showing better performance; and (2) weak-to-strong Retro-Search enables new SOTA at 7B and 32B scales." + }, + { + "type": "table", + "bbox": [ + 0.223, + 0.607, + 0.772, + 0.684 + ], + "angle": 0, + "content": "
Qwen2.5-7B-InstGreedy DecodingSampling (T=0.6, p=0.95)
Accuracy (↑)Length (↓)Accuracy (↑)Length (↓)
+ R1-7B49.71436555.48959
+ R1-7B-Shortest50.31234054.68009
+ Self-Retro-R1-7B51.71105055.88263
" + }, + { + "type": "table_caption", + "bbox": [ + 0.17, + 0.693, + 0.828, + 0.738 + ], + "angle": 0, + "content": "Table 2: Simply selecting the shortest path for training is suboptimal for model accuracy. We fine-tuned Qwen2.5-7B-Inst with different training data and compare results. We sample eight responses using R1-distilled Qwen2.5-7B and choose the shortest response." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.764, + 0.825, + 0.837 + ], + "angle": 0, + "content": "that works well across all models, we report results under two commonly used decoding setups: greedy decoding \\((\\mathrm{T} = 0)\\), following Muennighoff et al. (2025), and temperature sampling \\((\\mathrm{T} = 0.6\\) with top-p \\(= 0.95)\\), following DeepSeek-AI et al. (2025). We took an average of results from five different seeds for the temperature sampling setup. In Appendix E, we share the full results including the confidence interval of the results." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.855, + 0.355, + 0.869 + ], + "angle": 0, + "content": "3.2 Evaluation Results" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.882, + 0.825, + 0.927 + ], + "angle": 0, + "content": "Self Retro-Search teaches stronger and more efficient student models than vanilla data generation. We compare fine-tuning the student model, Qwen2.5-7B-Instruct, using data from our Self-Retro-R1-7B against fine-tuning with data sampled from the R1-distilled" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.948, + 0.506, + 0.96 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Preprint. Under review." + }, + { + "type": "table", + "bbox": [ + 0.223, + 0.101, + 0.773, + 0.164 + ], + "angle": 0, + "content": "
ModelsGreedy DecodingSampling (T=0.6, p=0.95)
Accuracy (↑)Length (↓)Accuracy (↑)Length (↓)
R1-distill Qwen2.5-7B64.51060071.06831
+ Self-Retro-R1-7B69.5 (+7.7%)7295 (-31.2%)70.6 (-0.6%)5406 (-20.9%)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.17, + 0.173, + 0.828, + 0.218 + ], + "angle": 0, + "content": "Table 3: Retro-Search allows self-improvement of the models. Fine-tuning the R1-distilled Qwen2.5-7B model with self-revision data (Self-Retro-R1-7B) significantly improves efficiency, while maintaining or even improving accuracy." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.243, + 0.827, + 0.288 + ], + "angle": 0, + "content": "Qwen2.5-7B model before revision, referred to as \\( R1 - 7B \\) in Table 1. Compared to models trained on \\( R1 - 7B \\), the model trained on Self-Retro-\\( R1 - 7B \\) produces responses that are \\( 23.1\\% \\) shorter while improving accuracy by \\( +4.1\\% \\) under greedy decoding." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.292, + 0.825, + 0.364 + ], + "angle": 0, + "content": "We further compare Retro-Search against another baseline, R1-7B-Shortest, which selects the shortest response for model training after sampling eight responses per questions using R1-distilled Qwen2.5-7B. As shown in Table 2, although training with the shortest response can enhance efficiency when compared to R1-7B, it does not improve the model performance as much as our Retro-Search, clearly demonstrating the effectiveness of our Retro-Search." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.373, + 0.827, + 0.472 + ], + "angle": 0, + "content": "Weak-to-Strong Retro-Search enables new SOTA reasoning models at 7B and 32B scales, excelling in both performance and efficiency. While Self-Retro has proven effective, using a large model such as DeepSeek-R1-671B for both generation and revision is computationally implausible. We evaluate the effectiveness of weak-to-strong revision, where DeepSeek-R1-671B's generations are Retro-Search-ed by R1-distilled Qwen2.5-32B, denoted as W2S-Retro-R1-32B. 
We fine-tune student models on this data and compare them to those fine-tuned on unrevised data from DeepSeek-R1-671B, referred to as R1-671B in Table 1." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.477, + 0.827, + 0.66 + ], + "angle": 0, + "content": "W2S-Retro-R1-32B proves to be effective, enabling new SOTA reasoning models at 7B and 32B scales. We fine-tuned four models — Qwen2.5-7B-Instruct, R1-distilled Qwen2.5-7B, Qwen2.5-32B-Instruct and R1-distilled Qwen2.5-32B — and consistently observed reduced response lengths and improved performance across different setups compared to models fine-tuned on R1-671B. Surprisingly, R1-distilled Qwen2.5-7B and R1-distilled Qwen2.5-32B fine-tuned on W2S-Retro-R1-32B, achieve new SOTA reasoning performance in the sampling setting at the 7B and 32B scales, while yielding the highest inference time efficiency. In addition, Qwen2.5-32B fine-tuned on W2S-Retro-R1-32B, achieves performance comparable to R1-distill-32B, yielding an \\(11.3\\%\\) reduction in reasoning length and a \\(2.4\\%\\) performance improvement compared to fine-tuning on the R1-671B data. Notably, it also outperforms OpenThinker-32B in accuracy while being more efficient (13.4%–14.9% shorter response). This is particularly significant given that OpenThinker-32B is trained on around 2.5 times more data than our W2S-Retro-R1-32B and use DeepSeek-R1 671B for response generation." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.669, + 0.827, + 0.768 + ], + "angle": 0, + "content": "Retro-Search enables self-improvement of R1-distilled models. We fine-tune the R1-distilled Qwen2.5-7B model with our Self-Retro-R1-7B. Results in Table 3 show significant accuracy improvement \\((+7.7\\%)\\) and response length reduction \\((31.2\\%)\\) for greedy decoding, compared to R1-distill Qwen2.5-7B. There is a small performance reduction for temperature sampling \\((-0.6\\%)\\), but the length reduction is substantial \\((20.9\\%)\\). 
As Self-Retro-R1-7B uses R1-distilled Qwen2.5-7B model for response generation, revision, and fine-tuning the model itself, this shows the self-improvement capabilities enabled by Retro-Search." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.786, + 0.283, + 0.801 + ], + "angle": 0, + "content": "3.3 Analyses" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.813, + 0.827, + 0.927 + ], + "angle": 0, + "content": "We quantitatively analyze the reasoning trajectories in the synthesized training data using our Retro-Search, as well as those generated by the fine-tuned student model Qwen2.5-7B. Table 4 reports the average number of transition keywords, number of steps per thought, and the relative location where the solution first appears in the trajectory (with values closer to 1 indicating that the solution is nearer the end). The synthesized reasoning traces from Retro-Search contain significantly fewer transition keywords than those from R1-7B and R1-671B. As a result, thoughts from Retro-Search include more steps than those from R1-7B and 671B, indicating deeper thoughts. Additionally, the solution tends to appear later in" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.034, + 0.347, + 0.048 + ], + "angle": 0, + "content": "Preprint. Under review." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.101, + 0.825, + 0.187 + ], + "angle": 0, + "content": "
Synthesized Training DataStudent Model's Reasoning Trace
#Transition Keywords (↓)#Steps/Thought (↑)Relative Location of Solution (↑)#Transition Keywords (↓)#Steps/Thought (↑)Relative Location of Solution (↑)
R1-7B85.93.70.67229.24.70.59
Self-Retro-R1-7B32.75.30.73183.25.40.64
R1-671B35.33.80.5980.03.00.44
W2S-Retro-R1-32B10.44.90.6070.13.20.48
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.202, + 0.825, + 0.245 + ], + "angle": 0, + "content": "Table 4: The average number of transition keywords, the number of steps per thought, and the relative location of the first appearance of the solution in the reasoning trajectory are taken from both the training data and the fine-tuned student model, Qwen2.5-7B." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.27, + 0.825, + 0.314 + ], + "angle": 0, + "content": "the trajectory, suggesting that our approach shows less redundant thoughts after the final solution is derived. These trends are also consistent in the reasoning outputs from the student model, showing that Retro-Search reduces both under-thinking and over-thinking." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.33, + 0.338, + 0.346 + ], + "angle": 0, + "content": "4 Related Works" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.358, + 0.827, + 0.581 + ], + "angle": 0, + "content": "Test-time compute has emerged as a new axis of scaling for LLM reasoning. While prior research in this direction have focused on parallel scaling—repeated sampling of trajectories followed by aggregation (Brown et al., 2024; Snell et al., 2024; Wu et al., 2025a), recent efforts have focused on sequential scaling—where models are trained to back-track, evaluate, and revise its thought by generating a long, monolithic CoT. Representative models such as O1 and R1 (OpenAI, 2024; DeepSeek-AI et al., 2025) are trained via large-scale reinforcement learning, demonstrating that models can learn to generate long CoTs without relying on bespoke reward models (Lightman et al., 2023; Zhang et al., 2025b), or tree search (Feng et al., 2024; Zhang et al., 2024). Subsequent projects in open-source community aim to replicate these reasoning models (HuggingFace, 2025; Qin et al., 2024). 
These works often utilize frontier reasoning models to generate synthetic long thought traces, and showing surprising gain in reasoning capabilities via simple supervised fine-tuning (HuggingFace, 2025; NovaSky, 2025; Muennighoff et al., 2025). Our work builds upon these prior efforts, focusing on (1) better-quality reasoning paths by targeted revision of verbose sub-traces, and (2) demonstrating self-improvement beyond typical strong-to-weak distillation, where smaller models can self-improve in both performance and efficiency." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.587, + 0.827, + 0.783 + ], + "angle": 0, + "content": "Meanwhile, concurrent works reveal limitations of reasoning models in their in-efficiency of test-time scaling. Longer generation does not necessarily correlate with better accuracy (Zeng et al., 2025b), and in practice, shorter trajectories are more likely to be correct. Models tend to overthink (Cuadron et al., 2025; Sui et al., 2025; Chen et al., 2024), i.e., they generate unnecessarily long trajectories that do not contribute to the performance. Models also exhibit underthinking (Wang et al., 2025)—while they appear to explore diverse plausible paths, models often switch between paths without sufficient exploration on one path. Wu et al. (2025b) suggests the source of inefficiency may lie in the regularities of the training data we use, and theoretically show that training on CoTs that are longer than the optimal length for the model can hurt its performance. Several measures have been proposed to mitigate these findings, such as auxiliary learnable parameters (Bao et al., 2025; Zhang et al., 2025a), calibration (Huang et al., 2025), and decoding-time algorithm (Xu et al., 2025; Misaki et al., 2025). Retro-Search aligns with these prior efforts, and importantly revisits the value of search algorithm in improving both the efficiency and performance of test-time scaling." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.799, + 0.318, + 0.814 + ], + "angle": 0, + "content": "5 Conclusions" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.827, + 0.827, + 0.926 + ], + "angle": 0, + "content": "In this work, we introduced Retro-Search, a novel algorithm for synthesizing reasoning data designed to equip reasoning models with efficient (shorter average response length) and effective (higher accuracy) test-time scaling. Inspired by the MCTS algorithm, Retro-Search retrospectively revises reasoning trajectories—eliminating unnecessary thought switches (under-thinking) and trimming redundant steps after the correct answer becomes evident (over-thinking). Quantitatively, we show that Retro-Search is highly effective for self-improvement and weak-to-strong revision. Specifically, R1-distill-7B, fine-tuned on its own" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.959 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.034, + 0.347, + 0.048 + ], + "angle": 0, + "content": "Preprint. Under review." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.103, + 0.825, + 0.202 + ], + "angle": 0, + "content": "Retro-Search-ed traces, reduces the average reasoning length by \\(31.2\\%\\) while improving performance by \\(7.7\\%\\) across seven math benchmarks. Notably, R1-distill-7B and R1-distill-32B, fine-tuned on weak-to-strong Retro-Search-ed reasoning traces from R1-671B, set new state-of-the-art performance at the 7B and 32B scales while yielding the highest reasoning efficiency. We hope our work reinvigorates interest in the power of search-based methods for synthetic data in reasoning models—a direction that has recently fallen out of favor, yet holds significant untapped potential." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.034, + 0.347, + 0.048 + ], + "angle": 0, + "content": "Preprint. Under review." + }, + { + "type": "title", + "bbox": [ + 0.176, + 0.103, + 0.274, + 0.117 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.126, + 0.824, + 0.155 + ], + "angle": 0, + "content": "Hieu Tran Bao, Nguyen Cong Dat, Nguyen Duc Anh, and Hoang Thanh-Tung. Learning to stop overthinking at test time, 2025. URL https://arxiv.org/abs/2502.10954." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.163, + 0.825, + 0.206 + ], + "angle": 0, + "content": "Bradley Brown, Jordan Juravsky, Ryan Ehrlich, Ronald Clark, Quoc V. Le, Christopher Ré, and Azalia Mirhoseini. Large language monkeys: Scaling inference compute with repeated sampling, 2024. URL https://arxiv.org/abs/2407.21787." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.214, + 0.826, + 0.257 + ], + "angle": 0, + "content": "Xingyu Chen, Jiahao Xu, Tian Liang, Zhiwei He, Jianhui Pang, Dian Yu, Linfeng Song, Qiuzhi Liu, Mengfei Zhou, Zhuosheng Zhang, et al. Do not think that much for \\(2 + 3 = ?\\) on the overthinking of o1-like llms. arXiv preprint arXiv:2412.21187, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.264, + 0.825, + 0.307 + ], + "angle": 0, + "content": "Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, et al. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.315, + 0.826, + 0.384 + ], + "angle": 0, + "content": "Alejandro Cuadron, Dacheng Li, Wenjie Ma, Xingyao Wang, Yichuan Wang, Siyuan Zhuang, Shu Liu, Luis Gaspar Schroeder, Tian Xia, Huanzhi Mao, Nicholas Thumiger, Aditya Desai, Ion Stoica, Ana Klimovic, Graham Neubig, and Joseph E. Gonzalez. The danger of overthinking: Examining the reasoning-action dilemma in agentic tasks. ArXiv, abs/2502.08235, 2025. URL https://api-semanticscholar.org/CorpusID:276287600." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.393, + 0.826, + 0.823 + ], + "angle": 0, + "content": "DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, Xiaokang Zhang, Xingkai Yu, Yu Wu, Z. F. Wu, Zhibin Gou, Zhihong Shao, Zhuoshu Li, Ziyi Gao, Aixin Liu, Bing Xue, Bingxuan Wang, Bochao Wu, Bei Feng, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, Damai Dai, Deli Chen, Dongjie Ji, Erhang Li, Fangyun Lin, Fucong Dai, Fuli Luo, Guangbo Hao, Guanting Chen, Guowei Li, H. Zhang, Han Bao, Hanwei Xu, Haocheng Wang, Honghui Ding, Huajian Xin, Huazuo Gao, Hui Qu, Hui Li, Jianzhong Guo, Jia Shi Li, Jiawei Wang, Jingchang Chen, Jingyang Yuan, Junjie Qiu, Junlong Li, J. L. Cai, Jiaqi Ni, Jian Liang, Jin Chen, Kai Dong, Kai Hu, Kaige Gao, Kang Guan, Kexin Huang, Kuai Yu, Lean Wang, Lecong Zhang, Liang Zhao, Litong Wang, Liyue Zhang, Lei Xu, Leyi Xia, Mingchuan Zhang, Minghua Zhang, Minghui Tang, Meng Li, Miaojun Wang, Mingming Li, Ning Tian, Panpan Huang, Peng Zhang, Qiancheng Wang, Qinyu Chen, Qiushi Du, Ruiqi Ge, Ruisong Zhang, Ruizhe Pan, Runji Wang, R. J. Chen, R. L. Jin, Ruyi Chen, Shanghai Lu, Shangyan Zhou, Shanhuang Chen, Shengfeng Ye, Shiyu Wang, Shuiying Yu, Shunfeng Zhou, Shuting Pan, S. S. Li, Shuang Zhou, Shaoqing Wu, Shengfeng Ye, Tao Yun, Tian Pei, Tianyu Sun, T. 
Wang, Wangding Zeng, Wanjia Zhao, Wen Liu, Wenfeng Liang, Wenjun Gao, Wenqin Yu, Wentao Zhang, W. L. Xiao, Wei An, Xiaodong Liu, Xiaohan Wang, Xiaokang Chen, Xiaotao Nie, Xin Cheng, Xin Liu, Xin Xie, Xingchao Liu, Xinyu Yang, Xinyuan Li, Xuecheng Su, Xuheng Lin, X. Q. Li, Xiangyue Jin, Xiaojin Shen, Xiaosha Chen, Xiaowen Sun, Xiaoxiang Wang, Xinnan Song, Xinyi Zhou, Xianzu Wang, Xinxia Shan, Y. K. Li, Y. Q. Wang, Y. X. Wei, Yang Zhang, Yanhong Xu, Yao Li, Yao Zhao, Yaofeng Sun, Yaohui Wang, Yi Yu, Yichao Zhang, Yifan Shi, Yiliang Xiong Ying He Yishi Piao Yisong Wang Yixuan Tan Yiyang Ma Yiyuan Liu Yongqiang Guo Yuan Ou Yuduan Wang Yue Gong Yuheng Zou Yujia He Yunf an Xiong Yuxiang Luo Yuxiang You Yuxuan Liu Yuyang Zhou Y. X. Zhu Yanhong Xu Yanping Huang Yaohui Li Yi Zheng Yuchen Zhu Yunxian Ma Ying Tang Yukun Zha Yuting Yan Z.Z.Ren Zehui Ren,Zhangli Sha Zhe Fu Zhean Xu Zhenda Xie Zhengyan ZhangZhenwen Hao Zhicheng Ma Zhigang Yan Zhiyu Wu Zihui Gu Zijia Zhu Zijun Liu Zilin Li Ziwei Xie Ziyang Song Zizheng Pan Zhen Huang Zhipeng Xu Zhongyu Zhang and Zhen Zhang. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning 2025. URL https://arxiv.org/abs/2501.12948." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.831, + 0.825, + 0.874 + ], + "angle": 0, + "content": "Xidong Feng, Ziyu Wan, Muning Wen, Stephen Marcus McAleer, Ying Wen, Weinan Zhang, and Jun Wang. Alphazero-like tree-search can guide large language model decoding and training, 2024. URL https://arxiv.org/abs/2309.17179." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.882, + 0.825, + 0.924 + ], + "angle": 0, + "content": "Yichao Fu, Junda Chen, Siqi Zhu, Zheyu Fu, Zhongdongming Dai, Aurick Qiao, and Hao Zhang. Efficiently serving llm reasoning programs with certainindex. arXiv preprint arXiv:2412.20993, 2024." 
+ }, + { + "type": "list", + "bbox": [ + 0.175, + 0.126, + 0.826, + 0.924 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.348, + 0.049 + ], + "angle": 0, + "content": "Preprint. Under review." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.828, + 0.147 + ], + "angle": 0, + "content": "Kanishk Gandhi, Ayush Chakravarthy, Anikait Singh, Nathan Lile, and Noah D. Goodman. Cognitive behaviors that enable self-improving reasoners, or, four habits of highly effective stars, 2025. URL https://arxiv.org/abs/2503.01307." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.155, + 0.827, + 0.213 + ], + "angle": 0, + "content": "Chaoqun He, Renjie Luo, Yuzhuo Bai, Shengding Hu, Zhen Leng Thai, Junhao Shen, Jinyi Hu, Xu Han, Yujie Huang, Yuxiang Zhang, et al. Olympiadbench: A challenging benchmark for promoting agi with olympiad-level bilingual multimodal scientific problems. arXiv preprint arXiv:2402.14008, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.222, + 0.825, + 0.252 + ], + "angle": 0, + "content": "Chengsong Huang, Langlin Huang, Jixuan Leng, Jiacheng Liu, and Jiaxin Huang. Efficient test-time scaling via self-calibration, 2025. URL https://arxiv.org/abs/2503.00031." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.26, + 0.825, + 0.29 + ], + "angle": 0, + "content": "HuggingFace. Open r1: A fully open reproduction of deepseek-r1, January 2025. URL https://github.com/huggingface/open-r1." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.298, + 0.827, + 0.37 + ], + "angle": 0, + "content": "Jia LI, Edward Beeching, Lewis Tunstall, Ben Lipkin, Roman Soletskyi, Shengyi Costa Huang, Kashif Rasul, Longhui Yu, Albert Jiang, Ziju Shen, Zihan Qin, Bin Dong, Li Zhou, Yann Fleureau, Guillaume Lample, and Stanislas Polu. Numinamath. 
[https://huggingface.co/AI-MO/NuminaMath-CoT](https://github.com/project-numina/aimo-progress-prize/blob/main/report/numina_dataset.pdf), 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.377, + 0.827, + 0.421 + ], + "angle": 0, + "content": "Hunter Lightman, Vineet Kosaraju, Yura Burda, Harri Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step, 2023. URL https://arxiv.org/abs/2305.20050." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.43, + 0.827, + 0.473 + ], + "angle": 0, + "content": "Kou Misaki, Yuichi Inoue, Yuki Imajuku, So Kuroki, Taishi Nakamura, and Takuya Akiba. Wider or deeper? scaling llm inference-time compute with adaptive branching tree search, 2025. URL https://arxiv.org/abs/2503.04412." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.482, + 0.827, + 0.526 + ], + "angle": 0, + "content": "Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling, 2025. URL https://arxiv.org/abs/2501.19393." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.534, + 0.827, + 0.565 + ], + "angle": 0, + "content": "NovaSky. Sky-t1: Train your own o1 preview model within $450. https://novaskyai.github.io/posts/sky-t1, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.573, + 0.761, + 0.589 + ], + "angle": 0, + "content": "OpenAI. Openai o1 system card, 2024. URL https://arxiv.org/abs/2412.16720." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.597, + 0.825, + 0.641 + ], + "angle": 0, + "content": "Yiwei Qin, Xuefeng Li, Haoyang Zou, Yixiu Liu, Shijie Xia, Zhen Huang, Yixin Ye, Weizhe Yuan, Hector Liu, Yuanzhi Li, and Pengfei Liu. O1 replication journey: A strategic progress report - part 1, 2024. URL https://arxiv.org/abs/2410.18982." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.649, + 0.825, + 0.679 + ], + "angle": 0, + "content": "Qwen Team. Qwq-32b: Embracing the power of reinforcement learning, March 2025. URL https://qwenlm.github.io/blog/qwq-32b/." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.687, + 0.827, + 0.73 + ], + "angle": 0, + "content": "Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling llm test-time compute optimally can be more effective than scaling model parameters, 2024. URL https:// arxiv.org/abs/2408.03314." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.739, + 0.825, + 0.796 + ], + "angle": 0, + "content": "Yang Sui, Yu-Neng Chuang, Guanchu Wang, Jiamu Zhang, Tianyi Zhang, Jiayi Yuan, Hongyi Liu, Andrew Wen, Shaochen, Zhong, Hanjie Chen, and Xia Hu. Stop overthinking: A survey on efficient reasoning for large language models, 2025. URL https://arxiv.org/abs/2503.16419." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.805, + 0.75, + 0.822 + ], + "angle": 0, + "content": "OpenThoughts Team. Open Thoughts. https://open-thoughts.ai, January 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.83, + 0.827, + 0.873 + ], + "angle": 0, + "content": "Leandro von Werra, Younes Belkada, Lewis Tunstall, Edward Beeching, Tristan Thrush, Nathan Lambert, Shengyi Huang, Kashif Rasul, and Quentin Gallouédec. Trl: Transformer reinforcement learning. https://github.com/huggingface/trl, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.882, + 0.827, + 0.925 + ], + "angle": 0, + "content": "Yue Wang, Qiuzhi Liu, Jiahao Xu, Tian Liang, Xingyu Chen, Zhiwei He, Linfeng Song, Dian Yu, Juntao Li, Zhuosheng Zhang, et al. Thoughts are all over the place: On the underthinking of o1-like llms. arXiv preprint arXiv:2501.18585, 2025." 
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.828, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Preprint. Under review." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.828, + 0.162 + ], + "angle": 0, + "content": "Yangzhen Wu, Zhiqing Sun, Shanda Li, Sean Welleck, and Yiming Yang. Inference scaling laws: An empirical analysis of compute-optimal inference for LLM problem-solving. In The Thirteenth International Conference on Learning Representations, 2025a. URL https://openreview.net/forum?id=VNckp7JEHn." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.169, + 0.827, + 0.211 + ], + "angle": 0, + "content": "Yuyang Wu, Yifei Wang, Tianqi Du, Stefanie Jegelka, and Yisen Wang. When more is less: Understanding chain-of-thought length in llms, 2025b. URL https://arxiv.org/abs/2502.07266." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.22, + 0.828, + 0.293 + ], + "angle": 0, + "content": "Violet Xiang, Charlie Snell, Kanishk Gandhi, Alon Albalak, Anikait Singh, Chase Blagden, Duy Phung, Rafael Rafailov, nathan lile, Dakota Mahan, Louis Castricato, Jan-Philipp Franken, Nick Haber, and Chelsea Finn. Towards system 2 reasoning in llms: Learning how to think with meta chain-of-thought. ArXiv, abs/2501.04682, 2025. URL https://api-semanticscholar.org/CorpusID:275357763." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.299, + 0.825, + 0.33 + ], + "angle": 0, + "content": "Silei Xu, Wenhao Xie, Lingxiao Zhao, and Pengcheng He. Chain of draft: Thinking faster by writing less, 2025. URL https://arxiv.org/abs/2502.18600." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.338, + 0.827, + 0.38 + ], + "angle": 0, + "content": "An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.389, + 0.828, + 0.433 + ], + "angle": 0, + "content": "An Yang, Beichen Zhang, Binyuan Hui, Bofei Gao, Bowen Yu, Chengpeng Li, Dayiheng Liu, Jianhong Tu, Jingren Zhou, Junyang Lin, et al. Qwen2. 5-math technical report: Toward mathematical expert model via self-improvement. arXiv preprint arXiv:2409.12122, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.441, + 0.825, + 0.485 + ], + "angle": 0, + "content": "Weihao Zeng, Yuzhen Huang, Wei Liu, Keqing He, Qian Liu, Zejun Ma, and Junxian He. 7b model and 8k examples: Emerging reasoning with reinforcement learning is both effective and efficient. https://hkust-nlp.notion.site/simplerl-reason, 2025a. Notion Blog." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.492, + 0.827, + 0.535 + ], + "angle": 0, + "content": "Zhiyuan Zeng, Qinyuan Cheng, Zhangyue Yin, Yunhua Zhou, and Xipeng Qiu. Revisiting the test-time scaling of o1-like models: Do they truly possess test-time scaling capabilities?, 2025b. URL https://arxiv.org/abs/2502.12215." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.544, + 0.828, + 0.586 + ], + "angle": 0, + "content": "Dan Zhang, Sining Zhoubian, Ziniu Hu, Yisong Yue, Yuxiao Dong, and Jie Tang. Rest-mcts*: Llm self-training via process reward guided tree search. arXiv preprint arXiv:2406.03816, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.595, + 0.825, + 0.639 + ], + "angle": 0, + "content": "Jintian Zhang, Yuqi Zhu, Mengshu Sun, Yujie Luo, Shuofei Qiao, Lun Du, Da Zheng, Huajun Chen, and Ningyu Zhang. Lighthinker: Thinking step-by-step compression, 2025a. URL https://arxiv.org/abs/2502.15589." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.647, + 0.825, + 0.69 + ], + "angle": 0, + "content": "Zhenru Zhang, Chujie Zheng, Yangzhen Wu, Beichen Zhang, Runji Lin, Bowen Yu, Dayiheng Liu, Jingren Zhou, and Junyang Lin. The lessons of developing process reward models in mathematical reasoning, 2025b. URL https://arxiv.org/abs/2501.07301." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.698, + 0.825, + 0.742 + ], + "angle": 0, + "content": "Wanjun Zhong, Ruixiang Cui, Yiduo Guo, Yaobo Liang, Shuai Lu, Yanlin Wang, Amin Saied, Weizhu Chen, and Nan Duan. Agieval: A human-centric benchmark for evaluating foundation models. arXiv preprint arXiv:2304.06364, 2023." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.828, + 0.742 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Preprint. Under review." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.1, + 0.361, + 0.13 + ], + "angle": 0, + "content": "Appendices" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.164, + 0.826, + 0.181 + ], + "angle": 0, + "content": "A Retro-Search Algorithm 15" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.199, + 0.826, + 0.214 + ], + "angle": 0, + "content": "B Data Generation Details 15" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.233, + 0.826, + 0.248 + ], + "angle": 0, + "content": "C Training Details 15" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.267, + 0.826, + 0.281 + ], + "angle": 0, + "content": "DBaselines Details 15" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.301, + 0.826, + 0.315 + ], + "angle": 0, + "content": "E Per-dataset Evaluation Results 17" + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.164, + 0.826, + 0.315 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.347, + 0.049 + ], + "angle": 0, + "content": "Preprint. Under review." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.102, + 0.426, + 0.121 + ], + "angle": 0, + "content": "A Retro-Search Algorithm" + }, + { + "type": "code_caption", + "bbox": [ + 0.174, + 0.145, + 0.362, + 0.16 + ], + "angle": 0, + "content": "Algorithm 1 Retro-Search" + }, + { + "type": "algorithm", + "bbox": [ + 0.172, + 0.163, + 0.829, + 0.357 + ], + "angle": 0, + "content": "Require: Question \\(q\\), initial reasoning trajectory \\(T = \\{\\{s_1^1,s_2^1,\\dots ,s_{k_1}^1\\} ,\\{s_1^2,s_2^2,\\dots ,s_{k_2}^2\\} ,\\dots ,a\\}\\), revision model \\(\\widehat{\\mathcal{M}}\\) discount factor \\(\\gamma\\), ground truth answer \\(a^\\star\\), and reward function \\(R(\\cdot ,\\cdot)\\). \nEnsure: Revised trajectory \\(\\tilde{T}\\) that yields answer \\(a^{*}\\) with fewer steps. 
\n1: Initialize \\(\\tilde{T}\\gets T\\) \n2: Initialize \\(s^{\\tau}\\gets s^{1}\\) from \\(\\tilde{T}\\) \n3: while \\(s^{\\tau}\\) is not the last thought in \\(\\tilde{T}\\) do \n4: \\(\\{s_{k + 1}^{\\tau},\\ldots ,a\\} \\sim \\widehat{\\mathcal{M}}\\left(s^{1},\\ldots ,\\{s_{1}^{\\tau},s_{2}^{\\tau},\\ldots ,s_{k}^{\\tau}\\}\\right)\\) Rollout: transition keywords prohibited in \\(s_{k + 1}^{\\tau}\\) \n5: \\(V(s_{k + 1}^{\\tau},a^{\\star})\\gets \\gamma^{N - i}R(a(s_{k + 1}^{\\tau}),a^{\\star})\\) Compute value of the new step \\(s_{k + 1}^{\\tau}\\) (i.e., \\(i\\)-th step) \n6: if \\(V(s_{k + 1}^{\\tau}) > V(s_{1}^{\\tau +1})\\) then If the value of the new step is higher than the existing one \n7: \\(\\tilde{T}\\gets \\left\\{s^{1},s^{2},\\dots ,\\{s_{1}^{\\tau},s_{2}^{\\tau},\\dots ,s_{k}^{\\tau}\\} \\{s_{k + 1}^{\\tau},\\dots ,a\\} \\right\\} \\triangleright\\) Update the trajectory with the new rollout \n8: \\(s^{\\tau}\\gets\\) the next thought in \\(\\tilde{T}\\) \n9: Return \\(\\tilde{T}\\)" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.384, + 0.427, + 0.4 + ], + "angle": 0, + "content": "B Data Generation Details" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.415, + 0.828, + 0.501 + ], + "angle": 0, + "content": "When constructing Self-Retro-R1-7B, we use the default version of Retro-Search, whereas for W2S-Retro-R1-32B, we use Retro-Search with partial revision. When constructing Self-Retro-R1-7B, we generate responses from R1-distill Qwen2.5-7B and filter for those with correct solutions as the base data for Retro-Search to revise. For W2S-Retro-R1-32B, we directly use OpenThought data as the base, since it contains only correct responses from the DeepSeek-R1 671B model." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.506, + 0.828, + 0.55 + ], + "angle": 0, + "content": "The transition keywords we use to segment thoughts within a reasoning trace are: 'But', 'Wait', 'Alternatively', 'However', 'Hmm', 'Hmmm', 'Not sure', 'Going back', 'Backtrack', 'Trace back', and 'Another'." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.555, + 0.828, + 0.627 + ], + "angle": 0, + "content": "For data generation during Retro-Search, we use top-p sampling with \\( p = 0.98 \\) and temperature \\( T = 1.0 \\). We also tried using temperature \\( T = 0.6 \\) and found that data generated with a higher temperature tends to produce a better student model, likely due to the increased diversity in the training data induced by higher-temperature sampling. We set the maximum generation length to be 16384." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.645, + 0.357, + 0.664 + ], + "angle": 0, + "content": "C Training Details" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.677, + 0.828, + 0.777 + ], + "angle": 0, + "content": "We perform supervised fine-tuning of models using HuggingFace TRL (von Werra et al., 2020). For all fine-tuning experiments, we used batch size of 128, five training epochs, and cosine learning rate scheduler with warmup rate of 0.05. We used Adam optimizer with weight decay of 1e-4, with beta1=0.9 and beta2=0.95. We did not conduct hyperparameter search, so there is a potential of finding better hyperparameters. With 32 H100 GPUs, fine-tuning 7B model with 40K data took around 90 minutes, and fine-tuning 32B model took 10 hours to finish." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.795, + 0.367, + 0.811 + ], + "angle": 0, + "content": "D Baselines Details" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.827, + 0.828, + 0.927 + ], + "angle": 0, + "content": "For 7B models, we evaluate six open-weight models as baselines: instruction-tuned models including Qwen2.5-7B-Inst (Yang et al., 2024a), Qwen2.5-Math-7B, and Qwen2.5-Math-7B-Inst (Yang et al., 2024b), as well as reasoning models including OpenR1-Qwen-7B (HuggingFace, 2025), OpenThinker-7B (Team, 2025), and R1-distill Qwen2.5-7B (DeepSeek-AI et al., 2025). These reasoning models are fine-tuned using responses from DeepSeek-R1 671B (DeepSeek-AI et al., 2025). Specifically, the OpenR1-Qwen-7B model is trained on 220K math examples, with questions sourced from NuminaMath, while OpenThinker-7B" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.034, + 0.347, + 0.048 + ], + "angle": 0, + "content": "Preprint. Under review." + }, + { + "type": "table", + "bbox": [ + 0.346, + 0.104, + 0.65, + 0.664 + ], + "angle": 270, + "content": "
06'6L0F'60S'606'8L05'9900'0010E'8902'9S(2L)1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E -1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E 1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E+1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E=1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E--1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E---1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E—1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E----1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-----1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-------1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E------1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E ----1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E
" + }, + { + "type": "table_caption", + "bbox": [ + 0.231, + 0.676, + 0.767, + 0.691 + ], + "angle": 0, + "content": "Table 5: Per-dataset evaluation results (accuracies) using greedy decoding." + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.778, + 0.825, + 0.807 + ], + "angle": 0, + "content": "is trained on the OpenThoughts-114K dataset, which includes math, science, and coding problems." + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.813, + 0.826, + 0.925 + ], + "angle": 0, + "content": "For 32B models, we evaluate five open-weight models: instruction-tuned Qwen2.5-32B-Inst (Yang et al., 2024a), as well as reasoning models such as OpenThinker-32B (Team, 2025), QwQ-32B-Preview (Qwen Team, 2025), Sky-T1-32B-Preview (NovaSky, 2025), and R1-distill Qwen2.5-32B (DeepSeek-AI et al., 2025). Both OpenThinker-32B and R1-distill Qwen2.5-32B are fine-tuned using responses generated by DeepSeek-R1 671B, with OpenThinker-32B utilizing the OpenThoughts-114K dataset. Sky-T1-32B-Preview is trained on a 17K dataset consisting of math and coding problems, with responses generated using QwQ-32B-Preview. The training details of the other models are not publicly disclosed." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.95, + 0.508, + 0.96 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.034, + 0.347, + 0.048 + ], + "angle": 0, + "content": "Preprint. Under review." + }, + { + "type": "table", + "bbox": [ + 0.346, + 0.104, + 0.65, + 0.665 + ], + "angle": 270, + "content": "
1609€10€€2€9€1€€2€2€81€6€88€11€52€11€(€2€) €2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-
02€9€289€€31€199€€218€082€862€0€29€1€(€2€) €2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€
608981€06691€087€51€958€2€095€1€€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€
€204€68€601€€2€€€100€€86€€27€1€11€1€€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€
0088€100€$991€02€6€€111€6898€969€1€5299€1€€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€ -
81€6€26€€991€€298€€521€6628€5289€0718€1€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2 €
96€Z698€15€€10€€598€8895€7189€9969€€2-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€- €
£268€208€9€956€€2101€5692€1888€11400€€2-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€- €
699E1€2898€02081€€8091€811€11866€1828€€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€ 1
003€1021€8€1297€2€7691€05291€6688€20749€2€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2
05011020€7€397€3€7891€19801€5211288812€2-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€ -
003E187€350€500S8781€5271F0761F8662F€2-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€ 1
9981029905€9981€€561F66991F0029F7269F€2-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€- €1
1008826€72E1986€7296F8108F1699F7299F€2-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€- €1
9958982€87F957F7801F8802F0025F9661F€2-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€- €1
97E2001178F972F679F795F028F978FM1-1.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.1
9919187F97F987F892F969F1086F1160FM1-1.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.1
52606F87F18S1601F869F928F998FM1-1.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.44
£24E1108E86E1708111E602E1889F2719F2M1-1.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.44
00901678F09F572S870E16198024818799FM1-1.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.5
899F6933F99F729S8791F57201F882F89691FM1-1.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.6
78602971E13E6001F5711F98F1F5051FM1-1.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.441
781163669S72015711F0001F92F1F9751FM1-1.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.5
586810185Z979F1691F7291F506F718FM1-1.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4
" + }, + { + "type": "table_caption", + "bbox": [ + 0.188, + 0.678, + 0.809, + 0.693 + ], + "angle": 0, + "content": "Table 6: Per-dataset evaluation results (response token length) using greedy decoding." + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.717, + 0.478, + 0.732 + ], + "angle": 0, + "content": "E Per-dataset Evaluation Results" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.748, + 0.825, + 0.817 + ], + "angle": 0, + "content": "In Tables 5 and 6, we share the per-dataset evaluation results using greedy decoding, and in Tables 7 and 8, we share results using temperature sampling with top-p=0.95 and T=0.6. We use the max response length of 32,768 tokens for all experiments. For temperature sampling, we use random five seeds and aggregate the results, and we further report the confidence interval to share the deviation of the metrics." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.034, + 0.347, + 0.048 + ], + "angle": 0, + "content": "Preprint. Under review." + }, + { + "type": "table", + "bbox": [ + 0.346, + 0.165, + 0.65, + 0.821 + ], + "angle": 270, + "content": "
20'1+30'1852'0+0'1601'0+19'1662'0+91'1854'0+05'9972'1+05'2605'2+20'0405'2+20'09(92)18
16'0+18'682'0+26'3616'0+00'3852'0+10'2816'0+16'2968'0+00'1620'2+99'1464'2+16'29(92)18
16'0+57'2216'0+80'3661'0+09'2652'0+05'3872'0+05'2985'1+05'2616'1+16'2911'2+00'05(92)18
52'1+19'212'0+80'3612'0+14'3882'0+12'1812'0+05'1605'2+86'6612'2+00'0511'2+00'05(92)18
68'0+60'2212'0+95'1652'0+95'6852'0+95'1882'0+95'0985'1+05'0622'2+00'8522'2+00'05(92)18
11'1+89'1212'0+96'1612'0+00'2852'0+91'0812'0+06'0905'2+00'2661'2+20'9526'2+20'14(92)18
12'1+19'052'0+09'0612'0+92'1858'0+12'1482'0+05'6905'2+05'6885'1+16'1994'1+00'44(92)18
80'1+27'1292'0+29'0622'0+27'5852'0+90'2255'0+88'6922'1+00'0605'2+00'0912'2+00'85(92)18
50'1+62'2515'0+02'1805'0+29'2894'0+91'2215'0+05'2900'2+99'2794'1+89'2282'2-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16(92)18
16'0+18'816'0+09'2216'0+09'2262'0+18'1416'0+96'6725'1+00'1488'2+00'0612'2+00'97(92)18
16'0+18'895'0+95'6261'0+02'9809'0+80'2905'0+25'1488'2+00'1900'0+02'9720'2+16'25(92)18
11'1+29'1512'0+01'9211'0+05'5852'0+28'2912'0+85'2799'2+99'9299'2+99'9286'1+00'92(92)18
01'1+19'812'0+80'1811'0+89'9882'0+98'2905'0+26'9721'7+05'6512'2+89'7711'2+16'25(92)18
88'0+16'812'0+91'8602'0+97'8882'0+06'0885'0+19'1918'0+05'1620'2+99'1982'1+16'19(92)18
81'1+19'2294'0+09'2602'0+87'2694'0+06'8212'0+88'3985'0+05'6622'2+99'1982'2-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-(92)18
99'0+98'7972'0+02'8872'0+98'6655'0+02'9472'0+82'3968'0+05'6412'2+22'2282'2-99'77(92)18
81'1+97'8911'0+17'0602'0+88'5682'0+07'0872'0+98'8511'1+05'7868'1+19'9585'2+20'95(92)18
16'0+06'9552'0+26'1811'0+17'5661'0+09'2772'0+86'9785'0+05'6912'2+20'1162'1+20'51(92)18
70'1+50'6552'0+27'5855'0+02'0844'0+26'2782'0+85'6740'2+05'8966'1+86'2702'1+00'87(92)18
06'0+10'1262'0+26'1672'0+27'9872'0+00'5215'0+08'2918'0+05'0622'2+22'9522'2+00'07(92)18
60'1+89'1252'0+80'2681'0+21'5681'0+95'1889'0+89'6502'2+00'1882'2+99'8782'2-99'07(92)18
80'1+99'7572'0+09'2872'0+95'6611'0+80'9981'0+27'6519'2+05'8520'2+99'1182'2-99'07(92)18
98'1+00'6552'0+27'9862'0+87'1462'1+97'5782'0+19'2757'1+05'1920'2+22'9156'0+89'9(92)18
16'0+28'4792'0+27'4782'0+95'8882'0+06'8582'0+05'0572'1+89'2172'1+89'2172'1+89'07(92)18
" + }, + { + "type": "table_caption", + "bbox": [ + 0.173, + 0.832, + 0.825, + 0.862 + ], + "angle": 0, + "content": "Table 7: Per-dataset evaluation results (accuracies) using temperature sampling (t=0.6 and top-p=0.95). The numbers after \\(\\pm\\) means the \\(95\\%\\) confidence interval." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.034, + 0.347, + 0.048 + ], + "angle": 0, + "content": "Preprint. Under review." + }, + { + "type": "table", + "bbox": [ + 0.346, + 0.172, + 0.65, + 0.814 + ], + "angle": 270, + "content": "
01I + 10CS11 + 2062εI + 506εE + 28828ε + 1849εII + 3944εE + 820662I + 0601(92I) 2εE-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R -
81I + 191912 + 09FCεI + 59118Z + 28ECΔF + 274212I + 278528Z + 208689Z + 2021(92I) 12I - 9I + 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E
90I + 276912 + 591E9 + 27612 + 27E97 + 010288 + 2855661 + 998629E + 266182E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 22E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 30E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 28E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 20E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 31E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 21E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 32E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 10E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 11E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 15E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 24E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 25E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 33E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 23E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 34E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 
35E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 16E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 27E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 26E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 37E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 36E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 18E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 38E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 17E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 14E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 13E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 19E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 3101I + 107C101I + 107C101I + 107C101I + 107C101I + 107C101I + 107C101I + 107C101I + 107C
00I + 00C11 + 00C6 + 00I10 + 00C8 + 00C10 + 00C8 + 00C10 + 00C8 + 00C8 + 00C
00T + 00T11 + 00T6 + 00T10 + 00T8 + 00T10 + 00T8 + 00T10 + 00T8 + 00T8 + 00T
00Z + 00Z11 + 00Z6 + 00Z10 + 00Z8 + 00Z10 + 00Z8 + 00Z10 + 00Z8 + 00Z8 + 00Z
00Z + 00Z11 + 00Z6 + 00Z10 + 00Z8 + 00Z10 + 00Z8 + 00Z10 + 00Z8 + 00Z
00Z + 00Z11 + 00Z6 + 00Z10 + 00Z8 + 00Z10 + 00Z8 + 00Z10 + 00Z8 + 00Z
00Z + 00Z11 + 10Z6 + 00Z10 + 00Z8 + 00Z10 + 00Z8 + 00Z10 + 00Z8 + 00Z
00Z + 00Z11 + 10Z6 + 00Z10 + 00Z8 + 00Z10 +00Z8 + 00Z10 +00Z8 + 00Z
00Z + 00Z11 + 10Z6 + 00Z10 + 00Z8 + 00Z10 +00Z8 + 00Z10 +00Z8 + 00Z
00Z + 00Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z
00Z + 00Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z
00Z + 00Z11 + 10Z11 = 10Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z
00Z + 00Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z11 +10Z11 +10Z11 +10Z11 +10Z
00Z + 00Z11 + 10Z11 + 10Z11 +10Z11 +10Z11 +10Z11 +10Z11 +10Z11 +10Z
00Z + 00Z11 + 10Z11 +10Z11 +10Z11 +10Z11 +10Z11 +10Z11 +10Z11 +10Z
00Z + 00Z11 + 10Z11 +10Z11 +10Z11 +10Z11 +10Z11 +10Z
00Z + 00Z11 +10Z11 +10Z11 +10Z11 +10Z11 +10Z11 +10Z
" + }, + { + "type": "table_caption", + "bbox": [ + 0.173, + 0.825, + 0.825, + 0.854 + ], + "angle": 0, + "content": "Table 8: Per-dataset evaluation results (model response token length) using temperature sampling \\((t = 0.6\\) and top- \\(p = 0.95)\\). The numbers after \\(\\pm\\) means the \\(95\\%\\) confidence interval." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "19" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_04xxx/2504.04383/fd4cbf72-3c61-47c5-a7ef-ba8037d47f6a_origin.pdf b/data/2025/2504_04xxx/2504.04383/fd4cbf72-3c61-47c5-a7ef-ba8037d47f6a_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..be8f8227def46945c1b3bbbeb20cec0fe978ca42 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04383/fd4cbf72-3c61-47c5-a7ef-ba8037d47f6a_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:321990134a0f6ee138bc5adfe75268ee05d3a5a23d6b55b841a0ab9372b5168d +size 1928744 diff --git a/data/2025/2504_04xxx/2504.04383/full.md b/data/2025/2504_04xxx/2504.04383/full.md new file mode 100644 index 0000000000000000000000000000000000000000..8e5876daa056a34f56c775ee6cf20847b2878b8a --- /dev/null +++ b/data/2025/2504_04xxx/2504.04383/full.md @@ -0,0 +1,260 @@ +# Retro-Search: Exploring Untaken Paths for Deeper and Efficient Reasoning + +Ximing Lu†‡ Seungju Han†§ David Acuna† Hyunwoo Kim† Jaehun Jung† Shrimai Prabhumoye† Niklas Muennighoff§ Mostofa Patwary† Mohammad Shoeybi† Bryan Catanzaro† Yejin Choi† + +†NVIDIA ‡University of Washington §Stanford University {ximingl, seungjuh, dacunamarrer, hyunwook, jaehunj, yejin}@nvidia.com + +# Abstract + +Large reasoning models, such as OpenAI o1 and DeepSeek-R1, demonstrate remarkable reasoning capabilities via long, elaborate reasoning trajectories. 
Numerous follow-up studies report that supervised fine-tuning on such reasoning traces, also known as distillation, can be a cost-effective way to boost reasoning capabilities of smaller student models. However, empirical observations reveal that these reasoning trajectories are often suboptimal, switching excessively between different lines of thought, resulting in under-thinking, over-thinking, and even degenerate responses. In this work, we introduce Retro-Search, a search algorithm in the spirit of Monte-Carlo Tree Search, for distilling higher quality reasoning paths from large reasoning models. Retro-Search retrospectively revises reasoning paths to discover better, yet shorter traces, which can then lead to student models with enhanced reasoning capabilities with shorter, thus faster inference. Our approach can enable two use cases: self-improvement, where models are fine-tuned on their own Retro-Search-ed thought traces, and weak-to-strong improvement, where a weaker model revises a stronger model's thought traces via Retro-Search. For self-improvement, R1-distill-7B, fine-tuned on its own Retro-Search-ed traces, reduces the average reasoning length by $31.2\%$ while improving performance by $7.7\%$ across seven math benchmarks. For weak-to-strong improvement, we retrospectively revise R1-671B's traces from the OpenThoughts dataset (Team, 2025) using R1-distill-32B as the Retro-Search-er, a model $20\times$ smaller. Qwen2.5-32B, fine-tuned on 40k instances of this refined data, achieves performance comparable to R1-distill-32B, yielding an $11.3\%$ reduction in reasoning length and a $2.4\%$ performance improvement compared to fine-tuning on the original OpenThoughts data. More excitingly, R1-distill-7B and R1-distill-32B, fine-tuned on this revised data, achieve new state-of-the-art reasoning performance at the 7B and 32B scales while yielding the highest inference efficiency. 
Our work counters recently emergent viewpoints that question the relevance of search algorithms in the era of large reasoning models, by demonstrating that there are still opportunities for algorithmic advancements, even for frontier models. + +# 1 Introduction + +Recent state-of-the-art LLMs, such as OpenAI o1 and DeepSeek-R1, have demonstrated remarkable capabilities in solving complex reasoning problems by scaling test-time compute. Test-time scaling enables the model to produce extended reasoning trajectories—an inner monologue akin to an implicit internal search—where the model explores multiple potential solution paths and verifies itself (OpenAI, 2024; DeepSeek-AI et al., 2025; Qwen Team, 2025). + +Question: Given a sequence $a_{n}$ where $a_{n} = -4$ when $n$ is odd, and $a_{n} = 7$ when $n$ is even, write a formula for the $n$ -th term. + +![](images/383b1bfed10a1065b0d70625cf6c18d25e147f3277dd81df30b842b5278ffd08.jpg) +Figure 1: An example reasoning trace from Retro-Search in weak-to-strong revision. A reasoning trace consists of a series of thoughts segmented by transition keywords (e.g., "alternatively", "wait"), with each thought composed of a sequence of intermediate steps, delimited by '\n\nRetro-Search retrospectively revises reasoning trajectories - exploring promising thoughts that were prematurely abandoned to mitigate under-thinking while avoiding redundant thoughts once the correct answer is evident to reduce over-thinking.\n\n + +Reinforcement learning (RL) has been shown to enable this behavior as training progresses, with key "aha" moments in the training dynamics where models begin to generate longer responses and spontaneously develop alternative strategies for problem-solving, verification, and self-correction. As a result, average response length tends to grow proportionally with performance (DeepSeek-AI et al., 2025; Zeng et al., 2025a; HuggingFace, 2025). 
+ 

At the same time, contradictory signals have emerged around whether RL is strictly necessary to enable these behaviors. Cost-effective approaches suggest that access to long reasoning traces may be the key. In fact, recent work shows it is possible to replicate or sometimes even surpass o1 and R1 performance on challenging math benchmarks using long reasoning traces and supervised fine-tuning (Muennighoff et al., 2025; Team, 2025). 

This growing belief—that longer reasoning traces equal better reasoning—has shaped much of the recent progress in training and scaling strategies. However, is longer thinking always better? At the surface level, it may appear so. Long thought allows the model to explore alternative solution paths, define subgoals, backtrack, verify and self-correct. These cognitive behaviors, akin to human problem-solving, have indeed been shown to be beneficial for reasoning models (Gandhi et al., 2025). Furthermore, it is intuitive that complex problems inherently require lengthier deliberations. However, several recent works have demonstrated that longer responses do not always yield better results. In fact, incorrect responses often involve longer reasoning traces marked by frequent switches between different lines of thought where the model prematurely abandons promising directions—a tendency coined by Wang et al. (2025) as under-thinking. On the other hand, over-thinking occurs when the model inefficiently expends resources by engaging in excessive verification or redundant checks after arriving at a final answer, contributing minimally to accuracy improvements (Chen et al., 2024). 

Then, is shorter necessarily better? The phenomena of under-thinking and over-thinking have motivated several ad-hoc heuristics that use response length as a proxy for downstream performance (Wang et al., 2025; Fu et al., 2024). 
For instance, a naive approach to boost a model's reasoning capability is supervised fine-tuning on the shortest reasoning trajectories distilled from large state-of-the-art models such as DeepSeek-R1 671B. However, blind shortening is inherently limited, as length alone may not reliably indicate thoughtfulness or reasoning quality. Short responses may overlook nuanced considerations or miss essential parts of the meta-thinking process (Xiang et al., 2025). Furthermore, employing simple length-based heuristics disregards the complexity and semantic coherence of generated content, potentially discarding useful reasoning sequences that are verbose yet insightful. + +Our goal is to consolidate these disparate observations on the quality of reasoning trajectories. We ask—if overly long reasoning is not always beneficial, and blind shortening is suboptimal, how can we discourage under-thinking and over-thinking, and collect more efficient and effective solutions? We argue that search is an effective means of eliciting better reasoning-producing trajectories that are both efficient and insightful, yet shorter in length—and can be used to train stronger student models. + +In this work, we introduce Retro-Search, a search algorithm in the spirit of Monte-Carlo Tree Search (MCTS) for distilling higher quality reasoning data from large reasoning models. Retro-Search retrospectively revises a given reasoning path by suppressing unnecessary thought switches to collect more efficient and effective alternatives. Figure 1 shows an example of Retro-Search refining a reasoning trace from DeepSeek-R1. It expands promising thoughts that were prematurely abandoned to mitigate under-thinking while pruning redundant thoughts once the correct answer becomes evident to reduce over-thinking, resulting in more effective yet shorter reasoning traces. 
+ 

Contrary to prior attempts where search struggled to improve reasoning effectively, we show that our method is highly effective in two key settings: (1) Self-improvement—Retro-Search can bootstrap self-improvement in reasoning models, by training a model on its own Retro-Search-ed trajectories. We demonstrate that this simple step, despite not relying on frontier model capabilities, yields significant performance gain (of up to $7.7\%$ ) while reducing inference time by $31.2\%$ . (2) Weak-to-strong revision—Retro-Search can revise even the reasoning traces generated by an expensive, frontier reasoning model with a substantially smaller, more efficient model, yet significantly improving the quality of the dataset. For example, we revise reasoning traces generated by R1-671B using a $20\times$ smaller model R1-distill-32B as the Retro-Search-er. Yet after training on this revised data, Qwen2.5-32B achieves performance comparable to R1-distill-32B, yielding an $11.3\%$ reduction in reasoning length and a $2.4\%$ performance improvement compared to fine-tuning on the original R1-671B's trajectories. And, more excitingly, R1-distill-7B and R1-distill-32B, fine-tuned on this revised data, achieve new state-of-the-art reasoning performance at the 7B and 32B scales while yielding the highest inference time efficiency. 

# 2 Method 

We introduce Retro-Search, an MCTS-inspired algorithm that explores untaken steps for deeper and more efficient reasoning. Its goal is to revise and improve a given reasoning path by encouraging continuation instead of prematurely switching to a new thought, ultimately seeking to reach the correct answer more efficiently, i.e. with fewer steps. 

# 2.1 Preliminaries 

Consider a reasoning model $\mathcal{M}$ that, when given a question $q$ , generates both an intermediate reasoning trajectory $T$ and a final solution $a$ . 
Formally, given an input question $q \in \mathcal{Q}$ , 

![](images/44813566fea66d24ce0070477bbb66e7a3058ad7b42f3c5a6e772553f8cf2ef9.jpg) 
Figure 2: An overview of our Retro-Search algorithm. The algorithm iterates through the thoughts and explores untaken paths from steps that come before a thought-switch, which is marked by transition keywords like "wait" or "another approach." During the process, it performs multiple rollouts, suppressing these transition keywords in the immediate next step. If the search is successful, the existing trajectory is replaced with the new rollout, and the process continues through the updated trajectory. 

the model $\mathcal{M}$ produces $(T, a) \coloneqq \mathcal{M}(q)$ , where $T \in \mathcal{T}$ denotes the chain of reasoning, or chain of "thoughts", and $a \in \mathcal{A}$ represents the final solution to $q$ . 

Each trajectory $T$ can be decomposed into a set of thoughts, i.e., $T := \{s^1, s^2, \ldots, s^\tau\}$ . Each $s^\tau$ denotes an individual thought, and each thought may perform a distinctive role such as trying out a new solution strategy, reflecting its progress, back-tracking or verifying calculations, etc. In order to differentiate between independent thoughts, we attend to the fact that models often leverage transition keywords (e.g., "alternatively") to make a natural transition between thoughts, e.g. $s^\tau \rightarrow s^{\tau+1}$ . We utilize these linguistic markers to segment and extract individual thoughts from the full reasoning trace. 

Each thought $s^{\tau}$ itself is a sequence of intermediate steps $s_i^\tau$ s—that is, $s^{\tau} := \{s_1^{\tau}, s_2^{\tau}, \dots, s_k^{\tau}\}$ . These intermediate steps $s_k^\tau$ represent atomic units of reasoning within a thought—such as sub-conclusions, calculations, or logical deductions. In practice, steps are delimited by '\n\n' (double newline) characters in the model's output. 
We adopt the convention of using the superscript $\tau$ to index the thought, and the subscript $k$ to index the step within that thought. For example, $s_k^\tau$ refers to the $k$ -th step within the $\tau$ -th thought. 

Utilizing the notations above, we represent a complete reasoning trajectory $T$ as: 

$$
T = \left\{\left\{s _ {1} ^ {1}, s _ {2} ^ {1}, \dots , s _ {k _ {1}} ^ {1} \right\}, \left\{s _ {1} ^ {2}, s _ {2} ^ {2}, \dots , s _ {k _ {2}} ^ {2} \right\}, \dots , a \right\} \tag {1}
$$

The under-thinking issue: too many shallow thoughts. Previous studies have shown that R1-like models exhibit an under-thinking issue in their reasoning process (Wang et al., 2025). These models frequently abandon promising reasoning paths prematurely, leading to inadequate depth of reasoning on challenging problems. This phenomenon (1) occurs more frequently on harder problems, (2) leads to frequent switching between different thoughts without reaching a conclusion in each, and (3) correlates with incorrect responses due to insufficient exploration of reasoning paths. 

The over-thinking issue: too many redundant thoughts. Conversely, R1-like models also suffer from an over-thinking issue (Sui et al., 2025; Chen et al., 2024), where they expend excessive compute on questions that are exceptionally simple or for which the answer is already evident. The model tends to generate unnecessary thoughts such as self-doubt and redundant verification, even when it produces the correct answer within its early steps. 

The seemingly contradictory issues of under-thinking and over-thinking share a common cause: unnecessarily initiating a new thought. In under-thinking, the model switches to a new thought without fully exploring a previously promising path. In over-thinking, despite the answer being evident, a new thought is started instead of directly generating the answer. 
+ +# 2.2 Retro-Search + +The goal of Retro-Search is to start from a tuple $(q,T)$ generated by $\mathcal{M}$ , and search for an improved trajectory $T^{*}$ using a revision model $\widehat{\mathcal{M}}$ . Here, we focus only on revising $T$ that leads to the correct final answer (i.e., $a = a^{\star}$ ). Intuitively, we consider $T^{*}$ to be better than $T$ if it leads to the same final answer $a$ with fewer reasoning steps—i.e., by avoiding both over-thinking and under-thinking. We specifically consider two settings of Retro-Search, depending on how we set the revision model—(1) Self-Retro-Search, where $\widehat{\mathcal{M}}$ is set to be the original model $\mathcal{M}$ that produced $T$ , and (2) Weak-to-Strong-Retro-Search (W2S-Retro-Search), where $\widehat{\mathcal{M}}$ is a smaller, cost-efficient model than $\mathcal{M}$ . + +Collecting alternative rollouts The core rationale behind Retro-Search is that there may exist an alternative trajectory for a given problem that is shorter than the original trajectory, yet still leads to a correct answer. To discover such a trajectory, we iteratively explore alternative rollouts to investigate what would happen if, instead of starting a new thought $s^{\tau +1}$ after $s^\tau$ (i.e., generate $s_1^{\tau +1}$ ), we continued the current thought $s^\tau$ . Concretely, for each thought $s^\tau$ in $T$ (Eq. 1), we generate an alternative rollout using $\widehat{\mathcal{M}}$ as: + +$$ +\left\{s _ {k + 1} ^ {\tau}, \dots , a \right\} \sim \widehat {\mathcal {M}} \left(s ^ {1}, s ^ {2}, \dots , \left\{s _ {1} ^ {\tau}, s _ {2} ^ {\tau}, \dots , s _ {k} ^ {\tau} \right\}\right) \tag {2} +$$ + +Importantly, when generating the immediate next step $s_{k+1}^{\tau}$ , we constrain the model to stay within a single thought by preventing it from initiating a new one in the next step—by prohibiting the usage of thought-transition keywords (e.g., "alternatively," "wait") during the decoding process. 
This encourages deeper exploration of the current thought rather than prematurely switching to different lines of thought. Subsequent steps after $s_{k+1}^{\tau}$ are generated without constraints to allow free on-policy exploration. 

Evaluating alternative rollouts To determine whether the alternative rollout $\{s_{k + 1}^{\tau},\ldots ,a\}$ is better than the existing path $\{s_1^{\tau +1},\dots ,a\}$ , we define a value function $V(s)$ over the $i$ -th step $s_i$ in the trajectory $\{s_1,\dots ,a\}$ to compare $V(s_{k + 1}^{\tau})$ with $V(s_{1}^{\tau +1})$ : 

$$
V \left(s _ {i}, a ^ {\star}\right) := \gamma^ {N - i} R \left(a \left(s _ {i}\right), a ^ {\star}\right) \tag {3}
$$

where $N$ represents the total number of steps in the trajectory $\{s_1, \ldots, a\}$ . Here, we write $a(s_i) \coloneqq \{s_i, \ldots, a\}$ to explicitly emphasize that the value depends on the specific step $s_i$ and captures the autoregressive dependence of the generated answer $a$ on the continuation from step $s_i$ . The reward function $R(a, a^{\star})$ is binary, indicating whether the generated answer $a$ matches the ground truth $a^{\star}$ (i.e., using a verifiable reward). We apply a decay factor $\gamma$ to discount future rewards, assigning higher value to paths that reach the correct answer in fewer steps. Concretely, we set $\gamma = 0.9$ in our experiments. In what follows, we drop the detailed notation and refer to the above simply as $V(s)$ for clarity. 

If $V(s_{k+1}^{\tau}) > V(s_1^{\tau+1})$ , the rollout reaches the correct final answer in fewer steps, and we replace the existing path $\{s_1^{\tau+1}, \ldots, a\}$ with the rollout $\{s_{k+1}^{\tau}, \ldots, a\}$ . This could occur when exploring deeper along the current thought is more effective, thus reducing under-thinking. 
Alternatively, $s_{k+1}^{\tau} = a$ indicates that the previous thought steps are already sufficient for the model to generate the correct solution directly, thereby reducing over-thinking. + +In contrast, if $V(s_{k+1}^{\tau}) < V(s_1^{\tau+1})$ , the existing path is better. The alternative path either reaches a wrong answer or reaches the correct answer with more steps. This suggests that switching to a new thought was effective and necessary, and thus the existing transition should be preserved. In practice, we sample multiple alternative rollouts (two in our experiments) and retain the best one—that is, the rollout with the highest value. We then proceed to examine the next thought in the updated reasoning trajectory. Please see Figure 2 for a concrete example, and Algorithm 1 in Appendix A for more details. + +Retro-Search with Partial Revisions We also propose a more computationally efficient variant of Retro-Search. Instead of iteratively applying the revision procedure starting from the first thought, this version randomly samples a position in the trajectory at which to begin the revision. This is particularly useful when revising with larger models—for instance, the R1-32B model in our setting—where full iterative revision would be prohibitively expensive. + +# 3 Experiments + +# 3.1 Setup + +Data Generation We use 40K math questions from NuminaMath (LI et al., 2024). Specifically, we sample NuminaMath questions from OpenThoughts-114k $^{1}$ dataset, which is the training data of OpenThinker-7B and OpenThinker-32B models. We experiment with two settings: 1) Self-Retro-R1-7B, where we first generate responses using the R1-distilled Qwen2.5-7B model and then revise them with the same model as the Retro-Search-er. 2) W2S-Retro-R1-32B, where we take responses from the DeepSeek-R1 671B model in the OpenThoughts dataset and revise them using a weaker model, R1-distilled Qwen2.5-32B. More details are in Appendix B. 
+ +Model Training We trained four models using data generated by Retro-Search: Qwen2.5-7B-Instruct, R1-distilled Qwen2.5-7B, Qwen2.5-32B-Instruct and R1-distilled Qwen2.5-32B with supervised fine-tuning. All models are fine-tuned for five epochs with learning rate of 1e-5, and sequence length of 16K. More details are in Appendix C. + +Baselines We compare our trained models with a total of eleven open-weight models across two model size categories — six 7B models and five 32B models. These include instruction-tuned models such as Qwen2.5-7B-Inst (Yang et al., 2024a), Qwen2.5-Math-7B, Qwen2.5-Math-7B-Inst (Yang et al., 2024b) and Qwen2.5-32B-Inst (Yang et al., 2024a), as well as reasoning models such as OpenR1-Qwen-7B (HuggingFace, 2025), OpenThinker-7B (Team, 2025), R1-distill Qwen2.5-7B (DeepSeek-AI et al., 2025), OpenThinker-32B (Team, 2025), QwQ-32B-Preview (Qwen Team, 2025), Sky-T1-32B-Preview (NovaSky, 2025), and R1-distill Qwen2.5-32B (DeepSeek-AI et al., 2025). More baseline details are in Appendix D. + +Benchmarks and Metrics We evaluate models on seven math-specific benchmarks: AIME25, AIME24, AMC23, GaoKao23English (Zhong et al., 2023), OlympiadBench (He et al., 2024), GSM8K (Cobbe et al., 2021), and MATH500 (Lightman et al., 2023). The first five benchmarks focus on olympiad-level math problems, where AIME25 and AIME24 each contain 30 problems and AMC23 contains 40 problems. GSM8K includes grade school math problems, and MATH500 includes high-school math competition problems. + +For evaluation, we report two metrics: accuracy to measure the performance, and average response length to measure computational efficiency during inference. For accuracy, we use exact match between the model's prediction and the reference answer, with Qwen's official implementation for answer verification. For response length, we tokenize the responses using the Qwen2.5-7B-Instruct tokenizer and compute the number of output tokens. 
+ +Metrics are computed individually for each benchmark and then averaged using macro averaging to produce the final scores. Since there is no universally optimal decoding strategy + +
ModelsGreedy DecodingSampling (T=0.6, p=0.95)
Accuracy (↑)Length (↓)Accuracy (↑)Length (↓)
Baselines (7B)
Qwen2.5-Math-7B41.1118239.01225
Qwen2.5-Math-7B-Inst53.198252.7985
OpenR1-Qwen-7B67.6946371.77740
OpenThinker-7B53.81447759.19835
Qwen2.5-7B-Inst48.798547.91033
+ R1-7B49.71436555.48959
+ Self-Retro-R1-7B51.7 (+4.1%)11050 (-23.1%)55.8 (+0.7%)8263 (-7.8%)
+ R1-671B51.51430258.49824
+ W2S-Retro-R1-32B55.3 (+7.3%)13569 (-5.1%)57.8 (-1.1%)8940 (-9.0%)
R1-distill-Qwen2.5-7B64.51060071.06831
+ R1-671B68.4941871.77172
+ W2S-Retro-R1-32B70.8 (+3.5%)8800 (-6.6%)73.1 (+2.0%)6535 (-8.9%)
Baselines (32B)
OpenThinker-32B73.0800175.96840
QwQ-32B-Preview70.9516468.35163
Sky-T1-32B-Preview62.0236762.92018
Qwen2.5-32B-Inst56.197555.9761
+ R1-671B76.2707475.66676
+ W2S-Retro-R1-32B74.6 (-2.2%)6809 (-3.7%)77.5 (+2.4%)5923 (-11.3%)
R1-distill Qwen2.5-32B73.1856677.76173
+ R1-671B (12K)80.4647079.86164
+ W2S-Retro-R1-32B (12K)79.9 (-0.6%)6091 (-5.9%)81.0 (+1.5%)5301 (-14.0%)
+ +Table 1: Retro-Search provides better training data. Model evaluation results averaged across seven math benchmarks (AIME25, AIME24, AMC23, GaoKao23English, Olympiad-Bench, GSM8K, and MATH500). We report results from two setups: greedy decoding $(\mathrm{T} = 0)$ and temperature sampling $(\mathrm{T} = 0.6$ with top-p $= 0.95)$ . $+X$ indicates that the model is fine-tuned with data X. Only when fine-tuning R1-distill Qwen2.5-32B, we used 12K instances, as using more data did not improve results. The results indicate that: (1) models trained with Retro-Search data are more computationally efficient during inference while generally showing better performance; and (2) weak-to-strong Retro-Search enables new SOTA at 7B and 32B scales. + +
Qwen2.5-7B-InstGreedy DecodingSampling (T=0.6, p=0.95)
Accuracy (↑)Length (↓)Accuracy (↑)Length (↓)
+ R1-7B49.71436555.48959
+ R1-7B-Shortest50.31234054.68009
+ Self-Retro-R1-7B51.71105055.88263
+ +Table 2: Simply selecting the shortest path for training is suboptimal for model accuracy. We fine-tuned Qwen2.5-7B-Inst with different training data and compare results. We sample eight responses using R1-distilled Qwen2.5-7B and choose the shortest response. + +that works well across all models, we report results under two commonly used decoding setups: greedy decoding $(\mathrm{T} = 0)$ , following Muennighoff et al. (2025), and temperature sampling $(\mathrm{T} = 0.6$ with top-p $= 0.95)$ , following DeepSeek-AI et al. (2025). We took an average of results from five different seeds for the temperature sampling setup. In Appendix E, we share the full results including the confidence interval of the results. + +# 3.2 Evaluation Results + +Self Retro-Search teaches stronger and more efficient student models than vanilla data generation. We compare fine-tuning the student model, Qwen2.5-7B-Instruct, using data from our Self-Retro-R1-7B against fine-tuning with data sampled from the R1-distilled + +
ModelsGreedy DecodingSampling (T=0.6, p=0.95)
Accuracy (↑)Length (↓)Accuracy (↑)Length (↓)
R1-distill Qwen2.5-7B64.51060071.06831
+ Self-Retro-R1-7B69.5 (+7.7%)7295 (-31.2%)70.6 (-0.6%)5406 (-20.9%)
+ +Table 3: Retro-Search allows self-improvement of the models. Fine-tuning the R1-distilled Qwen2.5-7B model with self-revision data (Self-Retro-R1-7B) significantly improves efficiency, while maintaining or even improving accuracy. + +Qwen2.5-7B model before revision, referred to as $R1 - 7B$ in Table 1. Compared to models trained on $R1 - 7B$ , the model trained on Self-Retro- $R1 - 7B$ produces responses that are $23.1\%$ shorter while improving accuracy by $+4.1\%$ under greedy decoding. + +We further compare Retro-Search against another baseline, R1-7B-Shortest, which selects the shortest response for model training after sampling eight responses per question using R1-distilled Qwen2.5-7B. As shown in Table 2, although training with the shortest response can enhance efficiency when compared to R1-7B, it does not improve the model performance as much as our Retro-Search, clearly demonstrating the effectiveness of our Retro-Search. + +Weak-to-Strong Retro-Search enables new SOTA reasoning models at 7B and 32B scales, excelling in both performance and efficiency. While Self-Retro has proven effective, using a large model such as DeepSeek-R1-671B for both generation and revision is computationally impractical. We evaluate the effectiveness of weak-to-strong revision, where DeepSeek-R1-671B's generations are Retro-Search-ed by R1-distilled Qwen2.5-32B, denoted as W2S-Retro-R1-32B. We fine-tune student models on this data and compare them to those fine-tuned on unrevised data from DeepSeek-R1-671B, referred to as R1-671B in Table 1. + +W2S-Retro-R1-32B proves to be effective, enabling new SOTA reasoning models at 7B and 32B scales. We fine-tuned four models — Qwen2.5-7B-Instruct, R1-distilled Qwen2.5-7B, Qwen2.5-32B-Instruct and R1-distilled Qwen2.5-32B — and consistently observed reduced response lengths and improved performance across different setups compared to models fine-tuned on R1-671B. 
Surprisingly, R1-distilled Qwen2.5-7B and R1-distilled Qwen2.5-32B fine-tuned on W2S-Retro-R1-32B achieve new SOTA reasoning performance in the sampling setting at the 7B and 32B scales, while yielding the highest inference time efficiency. In addition, Qwen2.5-32B fine-tuned on W2S-Retro-R1-32B achieves performance comparable to R1-distill-32B, yielding an $11.3\%$ reduction in reasoning length and a $2.4\%$ performance improvement compared to fine-tuning on the R1-671B data. Notably, it also outperforms OpenThinker-32B in accuracy while being more efficient (13.4%–14.9% shorter responses). This is particularly significant given that OpenThinker-32B is trained on around 2.5 times more data than our W2S-Retro-R1-32B and uses DeepSeek-R1 671B for response generation. + +Retro-Search enables self-improvement of R1-distilled models. We fine-tune the R1-distilled Qwen2.5-7B model with our Self-Retro-R1-7B. Results in Table 3 show significant accuracy improvement $(+7.7\%)$ and response length reduction $(31.2\%)$ for greedy decoding, compared to R1-distill Qwen2.5-7B. There is a small performance reduction for temperature sampling $(-0.6\%)$ , but the length reduction is substantial $(20.9\%)$ . As Self-Retro-R1-7B uses R1-distilled Qwen2.5-7B model for response generation, revision, and fine-tuning the model itself, this shows the self-improvement capabilities enabled by Retro-Search. + +# 3.3 Analyses + +We quantitatively analyze the reasoning trajectories in the synthesized training data using our Retro-Search, as well as those generated by the fine-tuned student model Qwen2.5-7B. Table 4 reports the average number of transition keywords, number of steps per thought, and the relative location where the solution first appears in the trajectory (with values closer to 1 indicating that the solution is nearer the end). The synthesized reasoning traces from Retro-Search contain significantly fewer transition keywords than those from R1-7B and R1-671B. 
As a result, thoughts from Retro-Search include more steps than those from R1-7B and 671B, indicating deeper thoughts. Additionally, the solution tends to appear later in + +
Synthesized Training DataStudent Model's Reasoning Trace
#Transition Keywords (↓)#Steps/Thought (↑)Relative Location of Solution (↑)#Transition Keywords (↓)#Steps/Thought (↑)Relative Location of Solution (↑)
R1-7B85.93.70.67229.24.70.59
Self-Retro-R1-7B32.75.30.73183.25.40.64
R1-671B35.33.80.5980.03.00.44
W2S-Retro-R1-32B10.44.90.6070.13.20.48
+ +Table 4: The average number of transition keywords, the number of steps per thought, and the relative location of the first appearance of the solution in the reasoning trajectory are taken from both the training data and the fine-tuned student model, Qwen2.5-7B. + +the trajectory, suggesting that our approach shows less redundant thoughts after the final solution is derived. These trends are also consistent in the reasoning outputs from the student model, showing that Retro-Search reduces both under-thinking and over-thinking. + +# 4 Related Works + +Test-time compute has emerged as a new axis of scaling for LLM reasoning. While prior research in this direction have focused on parallel scaling—repeated sampling of trajectories followed by aggregation (Brown et al., 2024; Snell et al., 2024; Wu et al., 2025a), recent efforts have focused on sequential scaling—where models are trained to back-track, evaluate, and revise its thought by generating a long, monolithic CoT. Representative models such as O1 and R1 (OpenAI, 2024; DeepSeek-AI et al., 2025) are trained via large-scale reinforcement learning, demonstrating that models can learn to generate long CoTs without relying on bespoke reward models (Lightman et al., 2023; Zhang et al., 2025b), or tree search (Feng et al., 2024; Zhang et al., 2024). Subsequent projects in open-source community aim to replicate these reasoning models (HuggingFace, 2025; Qin et al., 2024). These works often utilize frontier reasoning models to generate synthetic long thought traces, and showing surprising gain in reasoning capabilities via simple supervised fine-tuning (HuggingFace, 2025; NovaSky, 2025; Muennighoff et al., 2025). Our work builds upon these prior efforts, focusing on (1) better-quality reasoning paths by targeted revision of verbose sub-traces, and (2) demonstrating self-improvement beyond typical strong-to-weak distillation, where smaller models can self-improve in both performance and efficiency. 
+ +Meanwhile, concurrent works reveal limitations of reasoning models in their inefficiency of test-time scaling. Longer generation does not necessarily correlate with better accuracy (Zeng et al., 2025b), and in practice, shorter trajectories are more likely to be correct. Models tend to overthink (Cuadron et al., 2025; Sui et al., 2025; Chen et al., 2024), i.e., they generate unnecessarily long trajectories that do not contribute to the performance. Models also exhibit underthinking (Wang et al., 2025)—while they appear to explore diverse plausible paths, models often switch between paths without sufficient exploration on one path. Wu et al. (2025b) suggests the source of inefficiency may lie in the regularities of the training data we use, and theoretically show that training on CoTs that are longer than the optimal length for the model can hurt its performance. Several measures have been proposed to mitigate these findings, such as auxiliary learnable parameters (Bao et al., 2025; Zhang et al., 2025a), calibration (Huang et al., 2025), and decoding-time algorithms (Xu et al., 2025; Misaki et al., 2025). Retro-Search aligns with these prior efforts, and importantly revisits the value of search algorithms in improving both the efficiency and performance of test-time scaling. + +# 5 Conclusions + +In this work, we introduced Retro-Search, a novel algorithm for synthesizing reasoning data designed to equip reasoning models with efficient (shorter average response length) and effective (higher accuracy) test-time scaling. Inspired by the MCTS algorithm, Retro-Search retrospectively revises reasoning trajectories—eliminating unnecessary thought switches (under-thinking) and trimming redundant steps after the correct answer becomes evident (over-thinking). Quantitatively, we show that Retro-Search is highly effective for self-improvement and weak-to-strong revision. 
Specifically, R1-distill-7B, fine-tuned on its own + +Retro-Search-ed traces, reduces the average reasoning length by $31.2\%$ while improving performance by $7.7\%$ across seven math benchmarks. Notably, R1-distill-7B and R1-distill-32B, fine-tuned on weak-to-strong Retro-Search-ed reasoning traces from R1-671B, set new state-of-the-art performance at the 7B and 32B scales while yielding the highest reasoning efficiency. We hope our work reinvigorates interest in the power of search-based methods for synthetic data in reasoning models—a direction that has recently fallen out of favor, yet holds significant untapped potential. + +# References + +Hieu Tran Bao, Nguyen Cong Dat, Nguyen Duc Anh, and Hoang Thanh-Tung. Learning to stop overthinking at test time, 2025. URL https://arxiv.org/abs/2502.10954. 
Bradley Brown, Jordan Juravsky, Ryan Ehrlich, Ronald Clark, Quoc V. Le, Christopher Ré, and Azalia Mirhoseini. Large language monkeys: Scaling inference compute with repeated sampling, 2024. URL https://arxiv.org/abs/2407.21787. 
Xingyu Chen, Jiahao Xu, Tian Liang, Zhiwei He, Jianhui Pang, Dian Yu, Linfeng Song, Qiuzhi Liu, Mengfei Zhou, Zhuosheng Zhang, et al. Do not think that much for $2 + 3 = ?$ on the overthinking of o1-like llms. arXiv preprint arXiv:2412.21187, 2024. 
Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, et al. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021. 
Alejandro Cuadron, Dacheng Li, Wenjie Ma, Xingyao Wang, Yichuan Wang, Siyuan Zhuang, Shu Liu, Luis Gaspar Schroeder, Tian Xia, Huanzhi Mao, Nicholas Thumiger, Aditya Desai, Ion Stoica, Ana Klimovic, Graham Neubig, and Joseph E. Gonzalez. The danger of overthinking: Examining the reasoning-action dilemma in agentic tasks. ArXiv, abs/2502.08235, 2025. URL https://api.semanticscholar.org/CorpusID:276287600. 
+DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, Xiaokang Zhang, Xingkai Yu, Yu Wu, Z. F. Wu, Zhibin Gou, Zhihong Shao, Zhuoshu Li, Ziyi Gao, Aixin Liu, Bing Xue, Bingxuan Wang, Bochao Wu, Bei Feng, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, Damai Dai, Deli Chen, Dongjie Ji, Erhang Li, Fangyun Lin, Fucong Dai, Fuli Luo, Guangbo Hao, Guanting Chen, Guowei Li, H. Zhang, Han Bao, Hanwei Xu, Haocheng Wang, Honghui Ding, Huajian Xin, Huazuo Gao, Hui Qu, Hui Li, Jianzhong Guo, Jia Shi Li, Jiawei Wang, Jingchang Chen, Jingyang Yuan, Junjie Qiu, Junlong Li, J. L. Cai, Jiaqi Ni, Jian Liang, Jin Chen, Kai Dong, Kai Hu, Kaige Gao, Kang Guan, Kexin Huang, Kuai Yu, Lean Wang, Lecong Zhang, Liang Zhao, Litong Wang, Liyue Zhang, Lei Xu, Leyi Xia, Mingchuan Zhang, Minghua Zhang, Minghui Tang, Meng Li, Miaojun Wang, Mingming Li, Ning Tian, Panpan Huang, Peng Zhang, Qiancheng Wang, Qinyu Chen, Qiushi Du, Ruiqi Ge, Ruisong Zhang, Ruizhe Pan, Runji Wang, R. J. Chen, R. L. Jin, Ruyi Chen, Shanghai Lu, Shangyan Zhou, Shanhuang Chen, Shengfeng Ye, Shiyu Wang, Shuiying Yu, Shunfeng Zhou, Shuting Pan, S. S. Li, Shuang Zhou, Shaoqing Wu, Shengfeng Ye, Tao Yun, Tian Pei, Tianyu Sun, T. Wang, Wangding Zeng, Wanjia Zhao, Wen Liu, Wenfeng Liang, Wenjun Gao, Wenqin Yu, Wentao Zhang, W. L. Xiao, Wei An, Xiaodong Liu, Xiaohan Wang, Xiaokang Chen, Xiaotao Nie, Xin Cheng, Xin Liu, Xin Xie, Xingchao Liu, Xinyu Yang, Xinyuan Li, Xuecheng Su, Xuheng Lin, X. Q. Li, Xiangyue Jin, Xiaojin Shen, Xiaosha Chen, Xiaowen Sun, Xiaoxiang Wang, Xinnan Song, Xinyi Zhou, Xianzu Wang, Xinxia Shan, Y. K. Li, Y. Q. Wang, Y. X. 
Wei, Yang Zhang, Yanhong Xu, Yao Li, Yao Zhao, Yaofeng Sun, Yaohui Wang, Yi Yu, Yichao Zhang, Yifan Shi, Yiliang Xiong Ying He Yishi Piao Yisong Wang Yixuan Tan Yiyang Ma Yiyuan Liu Yongqiang Guo Yuan Ou Yuduan Wang Yue Gong Yuheng Zou Yujia He Yunf an Xiong Yuxiang Luo Yuxiang You Yuxuan Liu Yuyang Zhou Y. X. Zhu Yanhong Xu Yanping Huang Yaohui Li Yi Zheng Yuchen Zhu Yunxian Ma Ying Tang Yukun Zha Yuting Yan Z.Z.Ren Zehui Ren,Zhangli Sha Zhe Fu Zhean Xu Zhenda Xie Zhengyan ZhangZhenwen Hao Zhicheng Ma Zhigang Yan Zhiyu Wu Zihui Gu Zijia Zhu Zijun Liu Zilin Li Ziwei Xie Ziyang Song Zizheng Pan Zhen Huang Zhipeng Xu Zhongyu Zhang and Zhen Zhang. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning 2025. URL https://arxiv.org/abs/2501.12948. +Xidong Feng, Ziyu Wan, Muning Wen, Stephen Marcus McAleer, Ying Wen, Weinan Zhang, and Jun Wang. Alphazero-like tree-search can guide large language model decoding and training, 2024. URL https://arxiv.org/abs/2309.17179. +Yichao Fu, Junda Chen, Siqi Zhu, Zheyu Fu, Zhongdongming Dai, Aurick Qiao, and Hao Zhang. Efficiently serving llm reasoning programs with certainindex. arXiv preprint arXiv:2412.20993, 2024. + +Kanishk Gandhi, Ayush Chakravarthy, Anikait Singh, Nathan Lile, and Noah D. Goodman. Cognitive behaviors that enable self-improving reasoners, or, four habits of highly effective stars, 2025. URL https://arxiv.org/abs/2503.01307. +Chaoqun He, Renjie Luo, Yuzhuo Bai, Shengding Hu, Zhen Leng Thai, Junhao Shen, Jinyi Hu, Xu Han, Yujie Huang, Yuxiang Zhang, et al. Olympiadbench: A challenging benchmark for promoting agi with olympiad-level bilingual multimodal scientific problems. arXiv preprint arXiv:2402.14008, 2024. +Chengsong Huang, Langlin Huang, Jixuan Leng, Jiacheng Liu, and Jiaxin Huang. Efficient test-time scaling via self-calibration, 2025. URL https://arxiv.org/abs/2503.00031. +HuggingFace. Open r1: A fully open reproduction of deepseek-r1, January 2025. 
URL https://github.com/huggingface/open-r1. 
Jia LI, Edward Beeching, Lewis Tunstall, Ben Lipkin, Roman Soletskyi, Shengyi Costa Huang, Kashif Rasul, Longhui Yu, Albert Jiang, Ziju Shen, Zihan Qin, Bin Dong, Li Zhou, Yann Fleureau, Guillaume Lample, and Stanislas Polu. Numinamath. [https://huggingface.co/AI-MO/NuminaMath-CoT](https://github.com/project-numina/aimo-progress-prize/blob/main/report/numina_dataset.pdf), 2024. 
Hunter Lightman, Vineet Kosaraju, Yura Burda, Harri Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step, 2023. URL https://arxiv.org/abs/2305.20050. 
Kou Misaki, Yuichi Inoue, Yuki Imajuku, So Kuroki, Taishi Nakamura, and Takuya Akiba. Wider or deeper? scaling llm inference-time compute with adaptive branching tree search, 2025. URL https://arxiv.org/abs/2503.04412. 
Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling, 2025. URL https://arxiv.org/abs/2501.19393. 
NovaSky. Sky-t1: Train your own o1 preview model within $450. https://novasky-ai.github.io/posts/sky-t1, 2025. 
OpenAI. Openai o1 system card, 2024. URL https://arxiv.org/abs/2412.16720. 
Yiwei Qin, Xuefeng Li, Haoyang Zou, Yixiu Liu, Shijie Xia, Zhen Huang, Yixin Ye, Weizhe Yuan, Hector Liu, Yuanzhi Li, and Pengfei Liu. O1 replication journey: A strategic progress report - part 1, 2024. URL https://arxiv.org/abs/2410.18982. 
Qwen Team. Qwq-32b: Embracing the power of reinforcement learning, March 2025. URL https://qwenlm.github.io/blog/qwq-32b/. 
Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling llm test-time compute optimally can be more effective than scaling model parameters, 2024. URL https://arxiv.org/abs/2408.03314. 
Yang Sui, Yu-Neng Chuang, Guanchu Wang, Jiamu Zhang, Tianyi Zhang, Jiayi Yuan, Hongyi Liu, Andrew Wen, Shaochen Zhong, Hanjie Chen, and Xia Hu. 
Stop overthinking: A survey on efficient reasoning for large language models, 2025. URL https://arxiv.org/abs/2503.16419. 
OpenThoughts Team. Open Thoughts. https://open-thoughts.ai, January 2025. 
Leandro von Werra, Younes Belkada, Lewis Tunstall, Edward Beeching, Tristan Thrush, Nathan Lambert, Shengyi Huang, Kashif Rasul, and Quentin Gallouédec. Trl: Transformer reinforcement learning. https://github.com/huggingface/trl, 2020. 
Yue Wang, Qiuzhi Liu, Jiahao Xu, Tian Liang, Xingyu Chen, Zhiwei He, Linfeng Song, Dian Yu, Juntao Li, Zhuosheng Zhang, et al. Thoughts are all over the place: On the underthinking of o1-like llms. arXiv preprint arXiv:2501.18585, 2025. + +Yangzhen Wu, Zhiqing Sun, Shanda Li, Sean Welleck, and Yiming Yang. Inference scaling laws: An empirical analysis of compute-optimal inference for LLM problem-solving. In The Thirteenth International Conference on Learning Representations, 2025a. URL https://openreview.net/forum?id=VNckp7JEHn. 
Yuyang Wu, Yifei Wang, Tianqi Du, Stefanie Jegelka, and Yisen Wang. When more is less: Understanding chain-of-thought length in llms, 2025b. URL https://arxiv.org/abs/2502.07266. 
Violet Xiang, Charlie Snell, Kanishk Gandhi, Alon Albalak, Anikait Singh, Chase Blagden, Duy Phung, Rafael Rafailov, nathan lile, Dakota Mahan, Louis Castricato, Jan-Philipp Franken, Nick Haber, and Chelsea Finn. Towards system 2 reasoning in llms: Learning how to think with meta chain-of-thought. ArXiv, abs/2501.04682, 2025. URL https://api.semanticscholar.org/CorpusID:275357763. 
Silei Xu, Wenhao Xie, Lingxiao Zhao, and Pengcheng He. Chain of draft: Thinking faster by writing less, 2025. URL https://arxiv.org/abs/2502.18600. 
An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024a. 
+An Yang, Beichen Zhang, Binyuan Hui, Bofei Gao, Bowen Yu, Chengpeng Li, Dayiheng Liu, Jianhong Tu, Jingren Zhou, Junyang Lin, et al. Qwen2. 5-math technical report: Toward mathematical expert model via self-improvement. arXiv preprint arXiv:2409.12122, 2024b. +Weihao Zeng, Yuzhen Huang, Wei Liu, Keqing He, Qian Liu, Zejun Ma, and Junxian He. 7b model and 8k examples: Emerging reasoning with reinforcement learning is both effective and efficient. https://hkust-nlp.notion.site/simplerl-reason, 2025a. Notion Blog. +Zhiyuan Zeng, Qinyuan Cheng, Zhangyue Yin, Yunhua Zhou, and Xipeng Qiu. Revisiting the test-time scaling of o1-like models: Do they truly possess test-time scaling capabilities?, 2025b. URL https://arxiv.org/abs/2502.12215. +Dan Zhang, Sining Zhoubian, Ziniu Hu, Yisong Yue, Yuxiao Dong, and Jie Tang. Rest-mcts*: Llm self-training via process reward guided tree search. arXiv preprint arXiv:2406.03816, 2024. +Jintian Zhang, Yuqi Zhu, Mengshu Sun, Yujie Luo, Shuofei Qiao, Lun Du, Da Zheng, Huajun Chen, and Ningyu Zhang. Lighthinker: Thinking step-by-step compression, 2025a. URL https://arxiv.org/abs/2502.15589. +Zhenru Zhang, Chujie Zheng, Yangzhen Wu, Beichen Zhang, Runji Lin, Bowen Yu, Dayiheng Liu, Jingren Zhou, and Junyang Lin. The lessons of developing process reward models in mathematical reasoning, 2025b. URL https://arxiv.org/abs/2501.07301. +Wanjun Zhong, Ruixiang Cui, Yiduo Guo, Yaobo Liang, Shuai Lu, Yanlin Wang, Amin Saied, Weizhu Chen, and Nan Duan. Agieval: A human-centric benchmark for evaluating foundation models. arXiv preprint arXiv:2304.06364, 2023. 
+ +# Appendices + +A Retro-Search Algorithm 15 
B Data Generation Details 15 
C Training Details 15 
D Baselines Details 15 
E Per-dataset Evaluation Results 17 + +# A Retro-Search Algorithm + +Algorithm 1 Retro-Search 
Require: Question $q$ , initial reasoning trajectory $T = \{\{s_1^1,s_2^1,\dots ,s_{k_1}^1\} ,\{s_1^2,s_2^2,\dots ,s_{k_2}^2\} ,\dots ,a\}$ , revision model $\widehat{\mathcal{M}}$, discount factor $\gamma$ , ground truth answer $a^\star$ , and reward function $R(\cdot ,\cdot)$ . 
Ensure: Revised trajectory $\tilde{T}$ that yields answer $a^{\star}$ with fewer steps. 
1: Initialize $\tilde{T}\gets T$ 
2: Initialize $s^{\tau}\gets s^{1}$ from $\tilde{T}$ 
3: while $s^{\tau}$ is not the last thought in $\tilde{T}$ do 
4: $\{s_{k + 1}^{\tau},\ldots ,a\} \sim \widehat{\mathcal{M}}\left(s^{1},\ldots ,\{s_{1}^{\tau},s_{2}^{\tau},\ldots ,s_{k}^{\tau}\}\right)$ Rollout: transition keywords prohibited in $s_{k + 1}^{\tau}$ 
5: $V(s_{k + 1}^{\tau},a^{\star})\gets \gamma^{N - i}R(a(s_{k + 1}^{\tau}),a^{\star})$ Compute value of the new step $s_{k + 1}^{\tau}$ (i.e., $i$ -th step) 
6: if $V(s_{k + 1}^{\tau}) > V(s_{1}^{\tau +1})$ then If the value of the new step is higher than the existing one 
7: $\tilde{T}\gets \left\{s^{1},s^{2},\dots ,\{s_{1}^{\tau},s_{2}^{\tau},\dots ,s_{k}^{\tau}\} ,\{s_{k + 1}^{\tau},\dots ,a\} \right\} \triangleright$ Update the trajectory with the new rollout 
8: $s^{\tau}\gets$ the next thought in $\tilde{T}$ 
9: Return $\tilde{T}$ + +# B Data Generation Details + +When constructing Self-Retro-R1-7B, we use the default version of Retro-Search, whereas for W2S-Retro-R1-32B, we use Retro-Search with partial revision. When constructing Self-Retro-R1-7B, we generate responses from R1-distill Qwen2.5-7B and filter for those with correct solutions as the base data for Retro-Search to revise. For W2S-Retro-R1-32B, we directly use OpenThought data as the base, since it contains only correct responses from the DeepSeek-R1 671B model. 
+ +The transition keywords we use to segment thoughts within a reasoning trace are: 'But', 'Wait', 'Alternatively', 'However', 'Hmm', 'Hmmm', 'Not sure', 'Going back', 'Backtrack', 'Trace back', and 'Another'. + +For data generation during Retro-Search, we use top-p sampling with $p = 0.98$ and temperature $T = 1.0$ . We also tried using temperature $T = 0.6$ and found that data generated with a higher temperature tends to produce a better student model, likely due to the increased diversity in the training data induced by higher-temperature sampling. We set the maximum generation length to be 16384. + +# C Training Details + +We perform supervised fine-tuning of models using HuggingFace TRL (von Werra et al., 2020). For all fine-tuning experiments, we used batch size of 128, five training epochs, and cosine learning rate scheduler with warmup rate of 0.05. We used Adam optimizer with weight decay of 1e-4, with beta1=0.9 and beta2=0.95. We did not conduct hyperparameter search, so there is a potential of finding better hyperparameters. With 32 H100 GPUs, fine-tuning 7B model with 40K data took around 90 minutes, and fine-tuning 32B model took 10 hours to finish. + +# D Baselines Details + +For 7B models, we evaluate six open-weight models as baselines: instruction-tuned models including Qwen2.5-7B-Inst (Yang et al., 2024a), Qwen2.5-Math-7B, and Qwen2.5-Math-7B-Inst (Yang et al., 2024b), as well as reasoning models including OpenR1-Qwen-7B (HuggingFace, 2025), OpenThinker-7B (Team, 2025), and R1-distill Qwen2.5-7B (DeepSeek-AI et al., 2025). These reasoning models are fine-tuned using responses from DeepSeek-R1 671B (DeepSeek-AI et al., 2025). Specifically, the OpenR1-Qwen-7B model is trained on 220K math examples, with questions sourced from NuminaMath, while OpenThinker-7B + +
06'6L0F'60S'606'8L05'9900'0010E'8902'9S(2L)1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E -1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E 1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E+1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E=1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E--1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E---1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E—1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E----1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-----1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-------1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E------1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E ----1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E
+ +Table 5: Per-dataset evaluation results (accuracies) using greedy decoding. + +is trained on the OpenThoughts-114K dataset, which includes math, science, and coding problems. + +For 32B models, we evaluate five open-weight models: instruction-tuned Qwen2.5-32B-Inst (Yang et al., 2024a), as well as reasoning models such as OpenThinker-32B (Team, 2025), QwQ-32B-Preview (Qwen Team, 2025), Sky-T1-32B-Preview (NovaSky, 2025), and R1-distill Qwen2.5-32B (DeepSeek-AI et al., 2025). Both OpenThinker-32B and R1-distill Qwen2.5-32B are fine-tuned using responses generated by DeepSeek-R1 671B, with OpenThinker-32B utilizing the OpenThoughts-114K dataset. Sky-T1-32B-Preview is trained on a 17K dataset consisting of math and coding problems, with responses generated using QwQ-32B-Preview. The training details of the other models are not publicly disclosed. + +
1609€10€€2€9€1€€2€2€81€6€88€11€52€11€(€2€) €2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-
02€9€289€€31€199€€218€082€862€0€29€1€(€2€) €2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€
608981€06691€087€51€958€2€095€1€€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€
€204€68€601€€2€€€100€€86€€27€1€11€1€€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€
0088€100€$991€02€6€€111€6898€969€1€5299€1€€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€ -
81€6€26€€991€€298€€521€6628€5289€0718€1€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2 €
96€Z698€15€€10€€598€8895€7189€9969€€2-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€- €
£268€208€9€956€€2101€5692€1888€11400€€2-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€- €
699E1€2898€02081€€8091€811€11866€1828€€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€ 1
003€1021€8€1297€2€7691€05291€6688€20749€2€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2
05011020€7€397€3€7891€19801€5211288812€2-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€ -
003E187€350€500S8781€5271F0761F8662F€2-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€ 1
9981029905€9981€€561F66991F0029F7269F€2-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€- €1
1008826€72E1986€7296F8108F1699F7299F€2-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€- €1
9958982€87F957F7801F8802F0025F9661F€2-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€- €1
97E2001178F972F679F795F028F978FM1-1.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.1
9919187F97F987F892F969F1086F1160FM1-1.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.1
52606F87F18S1601F869F928F998FM1-1.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.44
£24E1108E86E1708111E602E1889F2719F2M1-1.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.44
00901678F09F572S870E16198024818799FM1-1.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.5
899F6933F99F729S8791F57201F882F89691FM1-1.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.6
78602971E13E6001F5711F98F1F5051FM1-1.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.441
781163669S72015711F0001F92F1F9751FM1-1.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.5
586810185Z979F1691F7291F506F718FM1-1.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4
+ +Table 6: Per-dataset evaluation results (response token length) using greedy decoding. + +# E Per-dataset Evaluation Results + +In Tables 5 and 6, we share the per-dataset evaluation results using greedy decoding, and in Tables 7 and 8, we share results using temperature sampling with top-p=0.95 and T=0.6. We use the max response length of 32,768 tokens for all experiments. For temperature sampling, we use random five seeds and aggregate the results, and we further report the confidence interval to share the deviation of the metrics. + +
20'1+30'1852'0+0'1601'0+19'1662'0+91'1854'0+05'9972'1+05'2605'2+20'0405'2+20'09(92)18
16'0+18'682'0+26'3616'0+00'3852'0+10'2816'0+16'2968'0+00'1620'2+99'1464'2+16'29(92)18
16'0+57'2216'0+80'3661'0+09'2652'0+05'3872'0+05'2985'1+05'2616'1+16'2911'2+00'05(92)18
52'1+19'212'0+80'3612'0+14'3882'0+12'1812'0+05'1605'2+86'6612'2+00'0511'2+00'05(92)18
68'0+60'2212'0+95'1652'0+95'6852'0+95'1882'0+95'0985'1+05'0622'2+00'8522'2+00'05(92)18
11'1+89'1212'0+96'1612'0+00'2852'0+91'0812'0+06'0905'2+00'2661'2+20'9526'2+20'14(92)18
12'1+19'052'0+09'0612'0+92'1858'0+12'1482'0+05'6905'2+05'6885'1+16'1994'1+00'44(92)18
80'1+27'1292'0+29'0622'0+27'5852'0+90'2255'0+88'6922'1+00'0605'2+00'0912'2+00'85(92)18
50'1+62'2515'0+02'1805'0+29'2894'0+91'2215'0+05'2900'2+99'2794'1+89'2282'2-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16(92)18
16'0+18'816'0+09'2216'0+09'2262'0+18'1416'0+96'6725'1+00'1488'2+00'0612'2+00'97(92)18
16'0+18'895'0+95'6261'0+02'9809'0+80'2905'0+25'1488'2+00'1900'0+02'9720'2+16'25(92)18
11'1+29'1512'0+01'9211'0+05'5852'0+28'2912'0+85'2799'2+99'9299'2+99'9286'1+00'92(92)18
01'1+19'812'0+80'1811'0+89'9882'0+98'2905'0+26'9721'7+05'6512'2+89'7711'2+16'25(92)18
88'0+16'812'0+91'8602'0+97'8882'0+06'0885'0+19'1918'0+05'1620'2+99'1982'1+16'19(92)18
81'1+19'2294'0+09'2602'0+87'2694'0+06'8212'0+88'3985'0+05'6622'2+99'1982'2-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-(92)18
99'0+98'7972'0+02'8872'0+98'6655'0+02'9472'0+82'3968'0+05'6412'2+22'2282'2-99'77(92)18
81'1+97'8911'0+17'0602'0+88'5682'0+07'0872'0+98'8511'1+05'7868'1+19'9585'2+20'95(92)18
16'0+06'9552'0+26'1811'0+17'5661'0+09'2772'0+86'9785'0+05'6912'2+20'1162'1+20'51(92)18
70'1+50'6552'0+27'5855'0+02'0844'0+26'2782'0+85'6740'2+05'8966'1+86'2702'1+00'87(92)18
06'0+10'1262'0+26'1672'0+27'9872'0+00'5215'0+08'2918'0+05'0622'2+22'9522'2+00'07(92)18
60'1+89'1252'0+80'2681'0+21'5681'0+95'1889'0+89'6502'2+00'1882'2+99'8782'2-99'07(92)18
80'1+99'7572'0+09'2872'0+95'6611'0+80'9981'0+27'6519'2+05'8520'2+99'1182'2-99'07(92)18
98'1+00'6552'0+27'9862'0+87'1462'1+97'5782'0+19'2757'1+05'1920'2+22'9156'0+89'9(92)18
16'0+28'4792'0+27'4782'0+95'8882'0+06'8582'0+05'0572'1+89'2172'1+89'2172'1+89'07(92)18
+ +Table 7: Per-dataset evaluation results (accuracies) using temperature sampling (t=0.6 and top-p=0.95). The numbers after $\pm$ means the $95\%$ confidence interval. + +
01I + 10CS11 + 2062εI + 506εE + 28828ε + 1849εII + 3944εE + 820662I + 0601(92I) 2εE-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R -
81I + 191912 + 09FCεI + 59118Z + 28ECΔF + 274212I + 278528Z + 208689Z + 2021(92I) 12I - 9I + 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E
90I + 276912 + 591E9 + 27612 + 27E97 + 010288 + 2855661 + 998629E + 266182E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 22E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 30E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 28E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 20E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 31E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 21E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 32E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 10E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 11E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 15E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 24E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 25E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 33E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 23E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 34E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 
35E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 16E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 27E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 26E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 37E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 36E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 18E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 38E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 17E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 14E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 13E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 19E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 3101I + 107C101I + 107C101I + 107C101I + 107C101I + 107C101I + 107C101I + 107C101I + 107C
00I + 00C11 + 00C6 + 00I10 + 00C8 + 00C10 + 00C8 + 00C10 + 00C8 + 00C8 + 00C
00T + 00T11 + 00T6 + 00T10 + 00T8 + 00T10 + 00T8 + 00T10 + 00T8 + 00T8 + 00T
00Z + 00Z11 + 00Z6 + 00Z10 + 00Z8 + 00Z10 + 00Z8 + 00Z10 + 00Z8 + 00Z8 + 00Z
00Z + 00Z11 + 00Z6 + 00Z10 + 00Z8 + 00Z10 + 00Z8 + 00Z10 + 00Z8 + 00Z
00Z + 00Z11 + 00Z6 + 00Z10 + 00Z8 + 00Z10 + 00Z8 + 00Z10 + 00Z8 + 00Z
00Z + 00Z11 + 10Z6 + 00Z10 + 00Z8 + 00Z10 + 00Z8 + 00Z10 + 00Z8 + 00Z
00Z + 00Z11 + 10Z6 + 00Z10 + 00Z8 + 00Z10 +00Z8 + 00Z10 +00Z8 + 00Z
00Z + 00Z11 + 10Z6 + 00Z10 + 00Z8 + 00Z10 +00Z8 + 00Z10 +00Z8 + 00Z
00Z + 00Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z
00Z + 00Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z
00Z + 00Z11 + 10Z11 = 10Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z
00Z + 00Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z11 +10Z11 +10Z11 +10Z11 +10Z
00Z + 00Z11 + 10Z11 + 10Z11 +10Z11 +10Z11 +10Z11 +10Z11 +10Z11 +10Z
00Z + 00Z11 + 10Z11 +10Z11 +10Z11 +10Z11 +10Z11 +10Z11 +10Z11 +10Z
00Z + 00Z11 + 10Z11 +10Z11 +10Z11 +10Z11 +10Z11 +10Z
00Z + 00Z11 +10Z11 +10Z11 +10Z11 +10Z11 +10Z11 +10Z
+ +Table 8: Per-dataset evaluation results (model response token length) using temperature sampling $(t = 0.6$ and top- $p = 0.95)$ . The numbers after $\pm$ means the $95\%$ confidence interval. \ No newline at end of file diff --git a/data/2025/2504_04xxx/2504.04383/images/383b1bfed10a1065b0d70625cf6c18d25e147f3277dd81df30b842b5278ffd08.jpg b/data/2025/2504_04xxx/2504.04383/images/383b1bfed10a1065b0d70625cf6c18d25e147f3277dd81df30b842b5278ffd08.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0cf4f7dd815f89f83267bba49054e1d483c1c8e5 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04383/images/383b1bfed10a1065b0d70625cf6c18d25e147f3277dd81df30b842b5278ffd08.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa2d7defe99baa36e4c27e26632d9517f6eb839d44c472453cbd7d772eea2252 +size 234092 diff --git a/data/2025/2504_04xxx/2504.04383/images/44813566fea66d24ce0070477bbb66e7a3058ad7b42f3c5a6e772553f8cf2ef9.jpg b/data/2025/2504_04xxx/2504.04383/images/44813566fea66d24ce0070477bbb66e7a3058ad7b42f3c5a6e772553f8cf2ef9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..395cb7c87854973e9ba189513de9e18cf9a4bb67 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04383/images/44813566fea66d24ce0070477bbb66e7a3058ad7b42f3c5a6e772553f8cf2ef9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fd3460af3791388e719a2810bb7eb69bf4c1ac3b34fd677b6861e7d619457363 +size 133484 diff --git a/data/2025/2504_04xxx/2504.04383/images/4c44eb3a2e950534ddddd9309f5b88b7b0af833f93517fe9c420bcbca10d5301.jpg b/data/2025/2504_04xxx/2504.04383/images/4c44eb3a2e950534ddddd9309f5b88b7b0af833f93517fe9c420bcbca10d5301.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0a25e3d265358ca3c01f16f87cf1680afee4cd2d --- /dev/null +++ b/data/2025/2504_04xxx/2504.04383/images/4c44eb3a2e950534ddddd9309f5b88b7b0af833f93517fe9c420bcbca10d5301.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:ef77cd9eb93fc8008686b89cda4e35ed64831c20e7846d137181aee89dfa5a55 +size 7018 diff --git a/data/2025/2504_04xxx/2504.04383/images/6909fe2479fd95798268964aca135d4c6a47fcb9c6917ae729eaa3a308147b7d.jpg b/data/2025/2504_04xxx/2504.04383/images/6909fe2479fd95798268964aca135d4c6a47fcb9c6917ae729eaa3a308147b7d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..062a46c24d2a3a3e017688aa8dc9d220f3c79108 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04383/images/6909fe2479fd95798268964aca135d4c6a47fcb9c6917ae729eaa3a308147b7d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:40dc666ae0af7e05c9919aa57c57c8423fbe34a7d1f62513f9891341faed0096 +size 189031 diff --git a/data/2025/2504_04xxx/2504.04383/images/6c455391b2d122928abedb18a5c13288000920131de32b0aab66a1aed24b147c.jpg b/data/2025/2504_04xxx/2504.04383/images/6c455391b2d122928abedb18a5c13288000920131de32b0aab66a1aed24b147c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8175ffeaedd9b1fb59c115d0da996efe5740b2a3 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04383/images/6c455391b2d122928abedb18a5c13288000920131de32b0aab66a1aed24b147c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:323990413b2c680307fd05ae37a36752a4cf72aa0701c064c02ddfb2acbcfa78 +size 210827 diff --git a/data/2025/2504_04xxx/2504.04383/images/84a2d240fa298c6ead41330d5f423055ce725c87cb6fae8cf1f688fc5cd0129a.jpg b/data/2025/2504_04xxx/2504.04383/images/84a2d240fa298c6ead41330d5f423055ce725c87cb6fae8cf1f688fc5cd0129a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c33e1ba6064d64dc555471fdd502970a79997a0a --- /dev/null +++ b/data/2025/2504_04xxx/2504.04383/images/84a2d240fa298c6ead41330d5f423055ce725c87cb6fae8cf1f688fc5cd0129a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:861047001cca9c6fc4a904cef9ac69fedfe8acac7214c2058aae8264826bb4f7 +size 130877 diff --git 
a/data/2025/2504_04xxx/2504.04383/images/b6d9d97c07316a940460ececeb0a6ecb9c18d34757fe240d3c3dd28e05d0bb75.jpg b/data/2025/2504_04xxx/2504.04383/images/b6d9d97c07316a940460ececeb0a6ecb9c18d34757fe240d3c3dd28e05d0bb75.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f8cbf0e6f0321b3947ae330a94510cefd3c81321 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04383/images/b6d9d97c07316a940460ececeb0a6ecb9c18d34757fe240d3c3dd28e05d0bb75.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cc1d27f3931152b2c686b08b9a6733403f10b34c129b64691a32454b2585e0d6 +size 132724 diff --git a/data/2025/2504_04xxx/2504.04383/images/bb9a243fb822203f65188b845613855c1fc2f1191d2253776422a4b9f3dabfa0.jpg b/data/2025/2504_04xxx/2504.04383/images/bb9a243fb822203f65188b845613855c1fc2f1191d2253776422a4b9f3dabfa0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dfb79b00963cc6a4085d174447ed8c6fd3c611fe --- /dev/null +++ b/data/2025/2504_04xxx/2504.04383/images/bb9a243fb822203f65188b845613855c1fc2f1191d2253776422a4b9f3dabfa0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ef8bc4bd8efdc8650ce17af50140c3f55e1bde6cdcb2614064f324792ac6bcb8 +size 33021 diff --git a/data/2025/2504_04xxx/2504.04383/images/c7b5b308bb1254d7abf9e352c8ba5f851b2bd1d6710ccabfeac76cb9a6c47b22.jpg b/data/2025/2504_04xxx/2504.04383/images/c7b5b308bb1254d7abf9e352c8ba5f851b2bd1d6710ccabfeac76cb9a6c47b22.jpg new file mode 100644 index 0000000000000000000000000000000000000000..322e8bb9c5740624fd7151a7431871340d7f4aa6 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04383/images/c7b5b308bb1254d7abf9e352c8ba5f851b2bd1d6710ccabfeac76cb9a6c47b22.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c80912fc50de90edb3aa2ff0697bf901d989e5cb5ab0c20438cd8c784c13cce0 +size 30124 diff --git a/data/2025/2504_04xxx/2504.04383/images/cbc12952f330df65db4917167473e75d8a0485d4eea129128ca2cadc83a643ca.jpg 
b/data/2025/2504_04xxx/2504.04383/images/cbc12952f330df65db4917167473e75d8a0485d4eea129128ca2cadc83a643ca.jpg new file mode 100644 index 0000000000000000000000000000000000000000..eed038f3975808d6d050d6b9228055be6e8b7e28 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04383/images/cbc12952f330df65db4917167473e75d8a0485d4eea129128ca2cadc83a643ca.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:15a026678ddb02a2f648c7f3aadedb19d2aa38baace719cfa4e0e333db4e1ed3 +size 40052 diff --git a/data/2025/2504_04xxx/2504.04383/images/d14aac5efd6cebeeba18a99eb68347d40ffd47dd815884a28ec2388fed42c964.jpg b/data/2025/2504_04xxx/2504.04383/images/d14aac5efd6cebeeba18a99eb68347d40ffd47dd815884a28ec2388fed42c964.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f030eb8d0f808232d9850d91340d5a5e5a432ad7 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04383/images/d14aac5efd6cebeeba18a99eb68347d40ffd47dd815884a28ec2388fed42c964.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:23a8ea2853b4f8084115fb9f5fc6957a72815f2e2b12c9f1fa076e972aab11b8 +size 133526 diff --git a/data/2025/2504_04xxx/2504.04383/images/e1d33e0b54431d2957564489168b0d83399724608278631d093b3cda16602854.jpg b/data/2025/2504_04xxx/2504.04383/images/e1d33e0b54431d2957564489168b0d83399724608278631d093b3cda16602854.jpg new file mode 100644 index 0000000000000000000000000000000000000000..072588320b4794d75fa8765580a00e8cb47077bd --- /dev/null +++ b/data/2025/2504_04xxx/2504.04383/images/e1d33e0b54431d2957564489168b0d83399724608278631d093b3cda16602854.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:39d725990beee1129ecac6159f3cda4795d5f3893ee0c0866f4a7dc90d7e7bd9 +size 4813 diff --git a/data/2025/2504_04xxx/2504.04383/images/ebe80d324d27054e5f5b382931663fa16fdbb066f4da6818eac7df3ebe930ba1.jpg b/data/2025/2504_04xxx/2504.04383/images/ebe80d324d27054e5f5b382931663fa16fdbb066f4da6818eac7df3ebe930ba1.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..69845d891bd85251fc3ae385c641b07ef230d8f6 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04383/images/ebe80d324d27054e5f5b382931663fa16fdbb066f4da6818eac7df3ebe930ba1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a6049e5294ce04189a8469e6855830d09dfd12382c7816f08be062f88f61c0db +size 6657 diff --git a/data/2025/2504_04xxx/2504.04383/layout.json b/data/2025/2504_04xxx/2504.04383/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..9129ce2d6ce5d13e36b28059347ff256edc5c6a7 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04383/layout.json @@ -0,0 +1,9393 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 79, + 505, + 113 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 79, + 505, + 113 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 505, + 113 + ], + "type": "text", + "content": "Retro-Search: Exploring Untaken Paths for Deeper and Efficient Reasoning" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 110, + 130, + 500, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 130, + 500, + 171 + ], + "spans": [ + { + "bbox": [ + 110, + 130, + 500, + 171 + ], + "type": "text", + "content": "Ximing Lu†‡ Seungju Han†§ David Acuna† Hyunwoo Kim† Jaehun Jung† Shrimai Prabhumoye† Niklas Muennighoff§ Mostofa Patwary† Mohammad Shoeybi† Bryan Catanzaro† Yejin Choi†" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 112, + 172, + 447, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 172, + 447, + 196 + ], + "spans": [ + { + "bbox": [ + 112, + 172, + 447, + 196 + ], + "type": "text", + "content": "†NVIDIA ‡University of Washington §Stanford University {ximingl, seungjuh, dacunamarrer, hyunwook, jaehunj, yejin}@nvidia.com" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 280, + 224, + 331, + 236 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 280, + 224, + 
331, + 236 + ], + "spans": [ + { + "bbox": [ + 280, + 224, + 331, + 236 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 140, + 251, + 471, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 251, + 471, + 594 + ], + "spans": [ + { + "bbox": [ + 140, + 251, + 471, + 594 + ], + "type": "text", + "content": "Large reasoning models, such as OpenAI o1 and DeepSeek-R1, demonstrate remarkable reasoning capabilities via long, elaborate reasoning trajectories. Numerous follow-up studies report that supervised fine-tuning on such reasoning traces, also known as distillation, can be a cost-effective way to boost reasoning capabilities of smaller student models. However, empirical observations reveal that these reasoning trajectories are often suboptimal, switching excessively between different lines of thought, resulting in underthinking, over-thinking, and even degenerate responses. In this work, we introduce Retro-Search, a search algorithm in the spirit of Monte-Carlo Tree Search, for distilling higher quality reasoning paths from large reasoning models. Retro-Search retrospectively revises reasoning paths to discover better, yet shorter traces, which can then lead to student models with enhanced reasoning capabilities with shorter, thus faster inference. Our approach can enable two use cases: self-improvement, where models are fine-tuned on their own Retro-Search-ed thought traces, and weak-to-strong improvement, where a weaker model revises stronger model's thought traces via Retro-Search. 
For self-improving, R1-distill-7B, fine-tuned on its own Retro-Search-ed traces, reduces the average reasoning length by " + }, + { + "bbox": [ + 140, + 251, + 471, + 594 + ], + "type": "inline_equation", + "content": "31.2\\%" + }, + { + "bbox": [ + 140, + 251, + 471, + 594 + ], + "type": "text", + "content": " while improving performance by " + }, + { + "bbox": [ + 140, + 251, + 471, + 594 + ], + "type": "inline_equation", + "content": "7.7\\%" + }, + { + "bbox": [ + 140, + 251, + 471, + 594 + ], + "type": "text", + "content": " across seven math benchmarks. For weak-to-strong improvement, we retrospectively revise R1-671B's traces from the OpenThoughts dataset (Team, 2025) using R1-distill-32B as the Retro-Search-er, a model " + }, + { + "bbox": [ + 140, + 251, + 471, + 594 + ], + "type": "inline_equation", + "content": "20\\times" + }, + { + "bbox": [ + 140, + 251, + 471, + 594 + ], + "type": "text", + "content": " smaller. Qwen2.5-32B, fine-tuned on 40k instances of this refined data, achieves performance comparable to R1-distill-32B, yielding an " + }, + { + "bbox": [ + 140, + 251, + 471, + 594 + ], + "type": "inline_equation", + "content": "11.3\\%" + }, + { + "bbox": [ + 140, + 251, + 471, + 594 + ], + "type": "text", + "content": " reduction in reasoning length and a " + }, + { + "bbox": [ + 140, + 251, + 471, + 594 + ], + "type": "inline_equation", + "content": "2.4\\%" + }, + { + "bbox": [ + 140, + 251, + 471, + 594 + ], + "type": "text", + "content": " performance improvement compared to fine-tuning on the original OpenThoughts data. More excitingly, R1-distill-7B and R1-distill-32B, fine-tuned on this revised data, achieve new state-of-the-art reasoning performance at the 7B and 32B scales while yielding the highest inference efficiency. 
Our work counters recently emergent viewpoints that question the relevance of search algorithms in the era of large reasoning models, by demonstrating that there are still opportunities for algorithmic advancements, even for frontier models." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 623, + 195, + 635 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 623, + 195, + 635 + ], + "spans": [ + { + "bbox": [ + 105, + 623, + 195, + 635 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 650, + 506, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 650, + 506, + 709 + ], + "spans": [ + { + "bbox": [ + 104, + 650, + 506, + 709 + ], + "type": "text", + "content": "Recent state-of-the-art LLMs, such as OpenAI o1 and DeepSeek-R1, have demonstrated remarkable capabilities in solving complex reasoning problems by scaling test-time compute. Test-time scaling enables the model to produce extended reasoning trajectories—an inner monologue akin to an implicit internal search—where the model explores multiple potential solution paths and verifies itself (OpenAI, 2024; DeepSeek-AI et al., 2025; Qwen Team, 2025)." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Preprint. Under review." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 14, + 214, + 37, + 559 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 214, + 37, + 559 + ], + "spans": [ + { + "bbox": [ + 14, + 214, + 37, + 559 + ], + "type": "text", + "content": "arXiv:2504.04383v2 [cs.AI] 15 Apr 2025" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 116, + 719, + 196, + 731 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 719, + 196, + 731 + ], + "spans": [ + { + "bbox": [ + 116, + 719, + 196, + 731 + ], + "type": "inline_equation", + "content": "\\clubsuit" + }, + { + "bbox": [ + 116, + 719, + 196, + 731 + ], + "type": "text", + "content": " First co-authors." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 145, + 79, + 464, + 89 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 79, + 464, + 89 + ], + "spans": [ + { + "bbox": [ + 145, + 79, + 464, + 89 + ], + "type": "text", + "content": "Question: Given a sequence " + }, + { + "bbox": [ + 145, + 79, + 464, + 89 + ], + "type": "inline_equation", + "content": "a_{n}" + }, + { + "bbox": [ + 145, + 79, + 464, + 89 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 145, + 79, + 464, + 89 + ], + "type": "inline_equation", + "content": "a_{n} = -4" + }, + { + "bbox": [ + 145, + 79, + 464, + 89 + ], + "type": "text", + "content": " when " + }, + { + "bbox": [ + 145, + 79, + 464, + 89 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 145, + 79, + 464, + 89 + ], + "type": "text", + "content": " is odd, and " + }, + { + "bbox": [ + 145, + 79, + 464, + 
89 + ], + "type": "inline_equation", + "content": "a_{n} = 7" + }, + { + "bbox": [ + 145, + 79, + 464, + 89 + ], + "type": "text", + "content": " when " + }, + { + "bbox": [ + 145, + 79, + 464, + 89 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 145, + 79, + 464, + 89 + ], + "type": "text", + "content": " is even, write a formula for the " + }, + { + "bbox": [ + 145, + 79, + 464, + 89 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 145, + 79, + 464, + 89 + ], + "type": "text", + "content": "-th term." + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 116, + 94, + 492, + 421 + ], + "blocks": [ + { + "bbox": [ + 116, + 94, + 492, + 421 + ], + "lines": [ + { + "bbox": [ + 116, + 94, + 492, + 421 + ], + "spans": [ + { + "bbox": [ + 116, + 94, + 492, + 421 + ], + "type": "image", + "image_path": "383b1bfed10a1065b0d70625cf6c18d25e147f3277dd81df30b842b5278ffd08.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 427, + 506, + 495 + ], + "lines": [ + { + "bbox": [ + 104, + 427, + 506, + 495 + ], + "spans": [ + { + "bbox": [ + 104, + 427, + 506, + 495 + ], + "type": "text", + "content": "Figure 1: An example reasoning trace from Retro-Search in weak-to-strong revision. 
A reasoning trace consists of a series of thoughts segmented by transition keywords (e.g., \"alternatively\", \"wait\"), with each thought composed of a sequence of intermediate steps, delimited by '\\n\\nRetro-Search retrospectively revises reasoning trajectories - exploring promising thoughts that were prematurely abandoned to mitigate under-thinking while avoiding redundant thoughts once the correct answer is evident to reduce over-thinking.\\n\\n" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 510, + 506, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 510, + 506, + 568 + ], + "spans": [ + { + "bbox": [ + 104, + 510, + 506, + 568 + ], + "type": "text", + "content": "Reinforcement learning (RL) has been shown to enable this behavior as training progresses, with key \"aha\" moments in the training dynamics where models begin to generate longer responses and spontaneously develop alternative strategies for problem-solving, verification, and self-correction. As a result, average response length tends to grow proportionally with performance (DeepSeek-AI et al., 2025; Zeng et al., 2025a; HuggingFace, 2025)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 571, + 506, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 571, + 506, + 628 + ], + "spans": [ + { + "bbox": [ + 104, + 571, + 506, + 628 + ], + "type": "text", + "content": "At the same time, contradictory signals have emerged around whether RL is strictly necessary to enable these behaviors. Cost-effective approaches suggest that access to long reasoning traces may be the key. In fact, recent work shows it is possible to replicate or sometimes even surpass o1 and R1 performance on challenging math benchmarks using long reasoning traces and supervised fine-tuning (Muennighoff et al., 2025; Team, 2025)." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 632, + 506, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 632, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 632, + 506, + 734 + ], + "type": "text", + "content": "This growing belief—that longer reasoning traces equals better reasoning—has shaped much of the recent progress in training and scaling strategies. However, is longer thinking always better? At the surface level, it may appear so. Long thought allows the model to explore alternative solutions paths, define subgoals, backtrack, verify and self-correct. These cognitive behaviors, akin to human problem-solving, have been indeed shown to be beneficial for reasoning models (Gandhi et al., 2025). Furthermore, it is intuitive that complex problems inherently require lengthier deliberations. However, several recent works have demonstrated that longer responses do not always yield better results. In fact, incorrect responses often involve longer reasoning traces marked by frequent switches between" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "type": "text", + "content": "Preprint. Under review." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 301, + 750, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 750, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 301, + 750, + 309, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 107, + 81, + 504, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 504, + 137 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 504, + 137 + ], + "type": "text", + "content": "different lines of thought where the model prematurely abandons promising directions—a tendency coined by Wang et al. (2025) as under-thinking. On the other hand, over-thinking occurs when the model inefficiently expends resources by engaging in excessive verification or redundant checks after arriving at a final answer, contributing minimally to accuracy improvements Chen et al. (2024)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 142, + 504, + 254 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 142, + 504, + 254 + ], + "spans": [ + { + "bbox": [ + 107, + 142, + 504, + 254 + ], + "type": "text", + "content": "Then, is shorter necessarily better? The phenomena of under-thinking and over-thinking have motivated several ad-hoc heuristics that use response length as a proxy for downstream performance (Wang et al., 2025; Fu et al., 2024). For instance, a naive approach to boost a model's reasoning capability is supervised fine-tuning on the shortest reasoning trajectories distilled from large state-of-the-art models such as DeepSeek-R1 671B. However, blind shortening is inherently limited, as length alone may not reliably indicate thoughtfulness or reasoning quality. Short responses may overlook nuanced considerations or miss essential parts of the meta-thinking process (Xiang et al., 2025). 
Furthermore, employing simple length-based heuristics disregards the complexity and semantic coherence of generated content, potentially discarding useful reasoning sequences that are verbose yet insightful." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 258, + 504, + 325 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 258, + 504, + 325 + ], + "spans": [ + { + "bbox": [ + 107, + 258, + 504, + 325 + ], + "type": "text", + "content": "Our goal is to consolidate these disparate observations on the quality of reasoning trajectories. We ask—if overly long reasoning is not always beneficial, and blind shortening is suboptimal, how can we discourage under-thinking and over-thinking, and collect more efficient and effective solutions? We argue that search is an effective means of eliciting better reasoning-producing trajectories that are both efficient and insightful, yet shorter in length—and can be used to train stronger student models." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 329, + 504, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 329, + 504, + 418 + ], + "spans": [ + { + "bbox": [ + 107, + 329, + 504, + 418 + ], + "type": "text", + "content": "In this work, we introduce Retro-Search, a search algorithm in the spirit of Monte-Carlo Tree Search (MCTS) for distilling higher quality reasoning data from large reasoning models. Retro-Search retrospectively revises a given reasoning path by suppressing unnecessary thought switches to collect more efficient and effective alternatives. Figure 1 shows an example of Retro-Search refining a reasoning trace from DeepSeek-R1. It expands promising thoughts that were prematurely abandoned to mitigate under-thinking while pruning redundant thoughts once the correct answer becomes evident to reduce over-thinking, resulting in more effective yet shorter reasoning traces." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 423, + 505, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 423, + 505, + 589 + ], + "spans": [ + { + "bbox": [ + 107, + 423, + 505, + 589 + ], + "type": "text", + "content": "Contrary to prior attempts where search struggled to improve reasoning effectively, we show that our method is highly effective in two key settings: (1) Self-improvement—Retro-Search can bootstrap self-improvement in reasoning models, by training a model on its own Retro-Search-ed trajectories. We demonstrate that this simple step, despite not relying on frontier model capabilities, yields significant performance gain (of up to " + }, + { + "bbox": [ + 107, + 423, + 505, + 589 + ], + "type": "inline_equation", + "content": "7.7\\%" + }, + { + "bbox": [ + 107, + 423, + 505, + 589 + ], + "type": "text", + "content": ") while reducing inference time by " + }, + { + "bbox": [ + 107, + 423, + 505, + 589 + ], + "type": "inline_equation", + "content": "31.2\\%" + }, + { + "bbox": [ + 107, + 423, + 505, + 589 + ], + "type": "text", + "content": ". (2) Weak-to-strong revision—Retro-Search can revise even the reasoning traces generated by an expensive, frontier reasoning model with a substantially smaller, more efficient model, yet significantly improving the quality of dataset. For example, we revise reasoning traces generated by R1-671B using a " + }, + { + "bbox": [ + 107, + 423, + 505, + 589 + ], + "type": "inline_equation", + "content": "20\\times" + }, + { + "bbox": [ + 107, + 423, + 505, + 589 + ], + "type": "text", + "content": " smaller model R1-distill-32B as the Retro-Search-er. 
Yet after training on this revised data, Qwen2.5-32B achieves performance comparable to R1-distill-32B, yielding an " + }, + { + "bbox": [ + 107, + 423, + 505, + 589 + ], + "type": "inline_equation", + "content": "11.3\\%" + }, + { + "bbox": [ + 107, + 423, + 505, + 589 + ], + "type": "text", + "content": " reduction in reasoning length and a " + }, + { + "bbox": [ + 107, + 423, + 505, + 589 + ], + "type": "inline_equation", + "content": "2.4\\%" + }, + { + "bbox": [ + 107, + 423, + 505, + 589 + ], + "type": "text", + "content": " performance improvement compared to fine-tuning on the original R1-671B's trajectories. And, more excitingly, R1-distill-7B and R1-distill-32B, fin-tuned on this revised data, achieve new state-of-the-art reasoning performance at the 7B and 32B scales while yielding the highest inference time efficiency." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 604, + 168, + 617 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 604, + 168, + 617 + ], + "spans": [ + { + "bbox": [ + 107, + 604, + 168, + 617 + ], + "type": "text", + "content": "2 Method" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 629, + 504, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 629, + 504, + 675 + ], + "spans": [ + { + "bbox": [ + 107, + 629, + 504, + 675 + ], + "type": "text", + "content": "We introduce Retro-Search, an MCTS-inspired algorithm that explores untaken steps for deeper and more efficient reasoning. Its goal is to revise and improve a given reasoning path by encouraging continuation instead of prematurely switching to a new thought, ultimately seeking to reach the correct answer more efficiently, i.e. with fewer steps." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 688, + 192, + 700 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 688, + 192, + 700 + ], + "spans": [ + { + "bbox": [ + 107, + 688, + 192, + 700 + ], + "type": "text", + "content": "2.1 Preliminaries" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 708, + 504, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 708, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 107, + 708, + 504, + 732 + ], + "type": "text", + "content": "Consider a reasoning model " + }, + { + "bbox": [ + 107, + 708, + 504, + 732 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 107, + 708, + 504, + 732 + ], + "type": "text", + "content": " that, when given a question " + }, + { + "bbox": [ + 107, + 708, + 504, + 732 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 107, + 708, + 504, + 732 + ], + "type": "text", + "content": ", generates both an intermediate reasoning trajectory " + }, + { + "bbox": [ + 107, + 708, + 504, + 732 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 107, + 708, + 504, + 732 + ], + "type": "text", + "content": " and a final solution " + }, + { + "bbox": [ + 107, + 708, + 504, + 732 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 107, + 708, + 504, + 732 + ], + "type": "text", + "content": ". Formally, given an input question " + }, + { + "bbox": [ + 107, + 708, + 504, + 732 + ], + "type": "inline_equation", + "content": "q \\in \\mathcal{Q}" + }, + { + "bbox": [ + 107, + 708, + 504, + 732 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "type": "text", + "content": "Preprint. 
Under review." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 752, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 752, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 752, + 308, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 106, + 79, + 504, + 331 + ], + "blocks": [ + { + "bbox": [ + 106, + 79, + 504, + 331 + ], + "lines": [ + { + "bbox": [ + 106, + 79, + 504, + 331 + ], + "spans": [ + { + "bbox": [ + 106, + 79, + 504, + 331 + ], + "type": "image", + "image_path": "44813566fea66d24ce0070477bbb66e7a3058ad7b42f3c5a6e772553f8cf2ef9.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 338, + 506, + 406 + ], + "lines": [ + { + "bbox": [ + 104, + 338, + 506, + 406 + ], + "spans": [ + { + "bbox": [ + 104, + 338, + 506, + 406 + ], + "type": "text", + "content": "Figure 2: An overview of our Retro-Search algorithm. The algorithm iterates through the thoughts and explores untaken paths from steps that come before a thought-switch, which is marked by transition keywords like \"wait\" or \"another approach.\" During the process, it performs multiple rollouts, suppressing these transition keywords in the immediate next step. If the search is successful, the existing trajectory is replaced with the new rollout, and the process continues through the updated trajectory." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 418, + 504, + 444 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 418, + 504, + 444 + ], + "spans": [ + { + "bbox": [ + 104, + 418, + 504, + 444 + ], + "type": "text", + "content": "the model " + }, + { + "bbox": [ + 104, + 418, + 504, + 444 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 418, + 504, + 444 + ], + "type": "text", + "content": " produces " + }, + { + "bbox": [ + 104, + 418, + 504, + 444 + ], + "type": "inline_equation", + "content": "(T, a) \\coloneqq \\mathcal{M}(q)" + }, + { + "bbox": [ + 104, + 418, + 504, + 444 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 418, + 504, + 444 + ], + "type": "inline_equation", + "content": "T \\in \\mathcal{T}" + }, + { + "bbox": [ + 104, + 418, + 504, + 444 + ], + "type": "text", + "content": " denotes the chain of reasoning, or chain of \"thoughts\", and " + }, + { + "bbox": [ + 104, + 418, + 504, + 444 + ], + "type": "inline_equation", + "content": "a \\in \\mathcal{A}" + }, + { + "bbox": [ + 104, + 418, + 504, + 444 + ], + "type": "text", + "content": " represents the final solution to " + }, + { + "bbox": [ + 104, + 418, + 504, + 444 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 104, + 418, + 504, + 444 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 448, + 506, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 448, + 506, + 529 + ], + "spans": [ + { + "bbox": [ + 104, + 448, + 506, + 529 + ], + "type": "text", + "content": "Each trajectory " + }, + { + "bbox": [ + 104, + 448, + 506, + 529 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 448, + 506, + 529 + ], + "type": "text", + "content": " can be decomposed in to a set of thoughts, i.e., " + }, + { + "bbox": [ + 104, + 448, + 506, + 529 + ], + "type": "inline_equation", + "content": "T := \\{s^1, s^2, \\ldots, s^\\tau\\}" + }, + { + "bbox": [ + 104, + 448, + 506, + 529 + ], + "type": "text", + "content": ". Each " + }, + { + "bbox": [ + 104, + 448, + 506, + 529 + ], + "type": "inline_equation", + "content": "s^\\tau" + }, + { + "bbox": [ + 104, + 448, + 506, + 529 + ], + "type": "text", + "content": " denotes an individual thought, and each thought may perform distinctive role such as trying out a new solution strategy, reflecting its progress, back-tracking or verifying calculations, etc. In order to differentiate between independent thoughts, we attend to the fact that models often leverage transition keywords (e.g., \"alternatively\") to make a natural transition between thoughts, e.g. " + }, + { + "bbox": [ + 104, + 448, + 506, + 529 + ], + "type": "inline_equation", + "content": "s^\\tau \\rightarrow s^{\\tau+1}" + }, + { + "bbox": [ + 104, + 448, + 506, + 529 + ], + "type": "text", + "content": ". We utilize these linguistic markers to segment and extract individual thoughts from the full reasoning trace." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 533, + 507, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 533, + 507, + 604 + ], + "spans": [ + { + "bbox": [ + 104, + 533, + 507, + 604 + ], + "type": "text", + "content": "Each thought " + }, + { + "bbox": [ + 104, + 533, + 507, + 604 + ], + "type": "inline_equation", + "content": "s^{\\tau}" + }, + { + "bbox": [ + 104, + 533, + 507, + 604 + ], + "type": "text", + "content": " itself is a sequence of intermediate steps " + }, + { + "bbox": [ + 104, + 533, + 507, + 604 + ], + "type": "inline_equation", + "content": "s_i^\\tau" + }, + { + "bbox": [ + 104, + 533, + 507, + 604 + ], + "type": "text", + "content": "s—that is, " + }, + { + "bbox": [ + 104, + 533, + 507, + 604 + ], + "type": "inline_equation", + "content": "s^{\\tau} := \\{s_1^{\\tau}, s_2^{\\tau}, \\dots, s_k^{\\tau}\\}" + }, + { + "bbox": [ + 104, + 533, + 507, + 604 + ], + "type": "text", + "content": ". These intermediate steps " + }, + { + "bbox": [ + 104, + 533, + 507, + 604 + ], + "type": "inline_equation", + "content": "s_k^\\tau" + }, + { + "bbox": [ + 104, + 533, + 507, + 604 + ], + "type": "text", + "content": " represent atomic units of reasoning within a thought—such as sub-conclusions, calculations, or logical deductions. In practice, steps are delimited by '\\n\\n(double newline) characters in the model's output. We adapt the convention of using the superscript " + }, + { + "bbox": [ + 104, + 533, + 507, + 604 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 533, + 507, + 604 + ], + "type": "text", + "content": " to index the thought, and the subscript " + }, + { + "bbox": [ + 104, + 533, + 507, + 604 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 533, + 507, + 604 + ], + "type": "text", + "content": " to index the step within that thought. 
For example, " + }, + { + "bbox": [ + 104, + 533, + 507, + 604 + ], + "type": "inline_equation", + "content": "s_k^\\tau" + }, + { + "bbox": [ + 104, + 533, + 507, + 604 + ], + "type": "text", + "content": " refers to the " + }, + { + "bbox": [ + 104, + 533, + 507, + 604 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 533, + 507, + 604 + ], + "type": "text", + "content": "-th step within the " + }, + { + "bbox": [ + 104, + 533, + 507, + 604 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 533, + 507, + 604 + ], + "type": "text", + "content": "-th thought." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 607, + 463, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 607, + 463, + 620 + ], + "spans": [ + { + "bbox": [ + 105, + 607, + 463, + 620 + ], + "type": "text", + "content": "Utilizing the notations above, we represent a complete reasoning trajectory " + }, + { + "bbox": [ + 105, + 607, + 463, + 620 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 105, + 607, + 463, + 620 + ], + "type": "text", + "content": " as:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 204, + 624, + 505, + 651 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 204, + 624, + 505, + 651 + ], + "spans": [ + { + "bbox": [ + 204, + 624, + 505, + 651 + ], + "type": "interline_equation", + "content": "T = \\left\\{\\left\\{s _ {1} ^ {1}, s _ {2} ^ {1}, \\dots , s _ {k _ {1}} ^ {1} \\right\\}, \\left\\{s _ {1} ^ {2}, s _ {2} ^ {2}, \\dots , s _ {k _ {2}} ^ {2} \\right\\}, \\dots , a \\right\\} \\tag {1}", + "image_path": "4c44eb3a2e950534ddddd9309f5b88b7b0af833f93517fe9c420bcbca10d5301.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 654, + 506, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 654, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 654, + 506, + 734 + ], + 
"type": "text", + "content": "The under-thinking issue: too many shallow thoughts. Previous studies have shown that R1-like models exhibit an under-thinking issue in their reasoning process (Wang et al., 2025). These models frequently abandon promising reasoning paths prematurely, leading to inadequate depth of reasoning on challenging problems. This phenomenon (1) occurs more frequently on harder problems, (2) leads to frequent switching between different thoughts without reaching a conclusion in each, and (3) correlates with incorrect responses due to insufficient exploration of reasoning paths." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "type": "text", + "content": "Preprint. Under review." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 139 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 139 + ], + "type": "text", + "content": "The over-thinking issue: too many redundant thoughts. Conversely, R1-like models also suffer from an over-thinking issue (Sui et al., 2025; Chen et al., 2024), where they expend excessive compute on questions that are exceptionally simple or for which the answer is already evident. The model tends to generate unnecessary thoughts such as self-doubt and redundant verification, even when it produces the correct answer within its early steps." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 143, + 506, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 143, + 506, + 189 + ], + "spans": [ + { + "bbox": [ + 104, + 143, + 506, + 189 + ], + "type": "text", + "content": "The seemingly contradictory issues of under-thinking and over-thinking share a common cause: unnecessarily initiating a new thought. In under-thinking, the model switches to a new thought without fully exploring a previously promising path. In over-thinking, despite the answer being evident, a new thought is started instead of directly generating the answer." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 201, + 189, + 211 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 201, + 189, + 211 + ], + "spans": [ + { + "bbox": [ + 105, + 201, + 189, + 211 + ], + "type": "text", + "content": "2.2 Retro-Search" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 221, + 504, + 318 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 221, + 504, + 318 + ], + "spans": [ + { + "bbox": [ + 104, + 221, + 504, + 318 + ], + "type": "text", + "content": "The goal of Retro-Search is to start from a tuple " + }, + { + "bbox": [ + 104, + 221, + 504, + 318 + ], + "type": "inline_equation", + "content": "(q,T)" + }, + { + "bbox": [ + 104, + 221, + 504, + 318 + ], + "type": "text", + "content": " generated by " + }, + { + "bbox": [ + 104, + 221, + 504, + 318 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 221, + 504, + 318 + ], + "type": "text", + "content": ", and search for an improved trajectory " + }, + { + "bbox": [ + 104, + 221, + 504, + 318 + ], + "type": "inline_equation", + "content": "T^{*}" + }, + { + "bbox": [ + 104, + 221, + 504, + 318 + ], + "type": "text", + "content": " using a revision model " + }, + { + "bbox": [ + 104, + 221, + 504, + 318 + ], + "type": "inline_equation", + "content": 
"\\widehat{\\mathcal{M}}" + }, + { + "bbox": [ + 104, + 221, + 504, + 318 + ], + "type": "text", + "content": ". Here, we focus only on revising " + }, + { + "bbox": [ + 104, + 221, + 504, + 318 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 221, + 504, + 318 + ], + "type": "text", + "content": " that leads to the correct final answer (i.e., " + }, + { + "bbox": [ + 104, + 221, + 504, + 318 + ], + "type": "inline_equation", + "content": "a = a^{\\star}" + }, + { + "bbox": [ + 104, + 221, + 504, + 318 + ], + "type": "text", + "content": "). Intuitively, we consider " + }, + { + "bbox": [ + 104, + 221, + 504, + 318 + ], + "type": "inline_equation", + "content": "T^{*}" + }, + { + "bbox": [ + 104, + 221, + 504, + 318 + ], + "type": "text", + "content": " to be better than " + }, + { + "bbox": [ + 104, + 221, + 504, + 318 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 221, + 504, + 318 + ], + "type": "text", + "content": " if it leads to the same final answer " + }, + { + "bbox": [ + 104, + 221, + 504, + 318 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 104, + 221, + 504, + 318 + ], + "type": "text", + "content": " with fewer reasoning steps—i.e., by avoiding both over-thinking and under-thinking. 
We specifically consider two settings of Retro-Search, depending on how we set the revision model—(1) Self-Retro-Search, where " + }, + { + "bbox": [ + 104, + 221, + 504, + 318 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathcal{M}}" + }, + { + "bbox": [ + 104, + 221, + 504, + 318 + ], + "type": "text", + "content": " is set to be the original model " + }, + { + "bbox": [ + 104, + 221, + 504, + 318 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 221, + 504, + 318 + ], + "type": "text", + "content": " that produced " + }, + { + "bbox": [ + 104, + 221, + 504, + 318 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 221, + 504, + 318 + ], + "type": "text", + "content": ", and (2) Weak-to-Strong-Retro-Search (W2S-Retro-Search), where " + }, + { + "bbox": [ + 104, + 221, + 504, + 318 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathcal{M}}" + }, + { + "bbox": [ + 104, + 221, + 504, + 318 + ], + "type": "text", + "content": " is a smaller, cost-efficient model than " + }, + { + "bbox": [ + 104, + 221, + 504, + 318 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 221, + 504, + 318 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 330, + 504, + 402 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 330, + 504, + 402 + ], + "spans": [ + { + "bbox": [ + 104, + 330, + 504, + 402 + ], + "type": "text", + "content": "Collecting alternative rollouts The core rationale behind Retro-Search is that there may exist an alternative trajectory for a given problem that is shorter than the original trajectory, yet still leads to a correct answer. 
To discover such a trajectory, we iteratively explore alternative rollouts to investigate what would happen if, instead of starting a new thought " + }, + { + "bbox": [ + 104, + 330, + 504, + 402 + ], + "type": "inline_equation", + "content": "s^{\\tau +1}" + }, + { + "bbox": [ + 104, + 330, + 504, + 402 + ], + "type": "text", + "content": " after " + }, + { + "bbox": [ + 104, + 330, + 504, + 402 + ], + "type": "inline_equation", + "content": "s^\\tau" + }, + { + "bbox": [ + 104, + 330, + 504, + 402 + ], + "type": "text", + "content": " (i.e., generate " + }, + { + "bbox": [ + 104, + 330, + 504, + 402 + ], + "type": "inline_equation", + "content": "s_1^{\\tau +1}" + }, + { + "bbox": [ + 104, + 330, + 504, + 402 + ], + "type": "text", + "content": "), we continued the current thought " + }, + { + "bbox": [ + 104, + 330, + 504, + 402 + ], + "type": "inline_equation", + "content": "s^\\tau" + }, + { + "bbox": [ + 104, + 330, + 504, + 402 + ], + "type": "text", + "content": ". Concretely, for each thought " + }, + { + "bbox": [ + 104, + 330, + 504, + 402 + ], + "type": "inline_equation", + "content": "s^\\tau" + }, + { + "bbox": [ + 104, + 330, + 504, + 402 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 104, + 330, + 504, + 402 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 330, + 504, + 402 + ], + "type": "text", + "content": " (Eq. 
1), we generate an alternative rollout using " + }, + { + "bbox": [ + 104, + 330, + 504, + 402 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathcal{M}}" + }, + { + "bbox": [ + 104, + 330, + 504, + 402 + ], + "type": "text", + "content": " as:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 205, + 406, + 505, + 426 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 205, + 406, + 505, + 426 + ], + "spans": [ + { + "bbox": [ + 205, + 406, + 505, + 426 + ], + "type": "interline_equation", + "content": "\\left\\{s _ {k + 1} ^ {\\tau}, \\dots , a \\right\\} \\sim \\widehat {\\mathcal {M}} \\left(s ^ {1}, s ^ {2}, \\dots , \\left\\{s _ {1} ^ {\\tau}, s _ {2} ^ {\\tau}, \\dots , s _ {k} ^ {\\tau} \\right\\}\\right) \\tag {2}", + "image_path": "ebe80d324d27054e5f5b382931663fa16fdbb066f4da6818eac7df3ebe930ba1.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 429, + 504, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 429, + 504, + 499 + ], + "spans": [ + { + "bbox": [ + 104, + 429, + 504, + 499 + ], + "type": "text", + "content": "Importantly, when generating the immediate next step " + }, + { + "bbox": [ + 104, + 429, + 504, + 499 + ], + "type": "inline_equation", + "content": "s_{k+1}^{\\tau}" + }, + { + "bbox": [ + 104, + 429, + 504, + 499 + ], + "type": "text", + "content": ", we constrain the model to stay within a single thought by preventing it from initiating a new one in the next step—by prohibiting the usage of thought-transition keywords (e.g., \"alternatively,\" \"wait\") during the decoding process. This encourages deeper exploration of the current thought rather than prematurely switching to different lines of thought. 
Subsequent steps after " + }, + { + "bbox": [ + 104, + 429, + 504, + 499 + ], + "type": "inline_equation", + "content": "s_{k+1}^{\\tau}" + }, + { + "bbox": [ + 104, + 429, + 504, + 499 + ], + "type": "text", + "content": " are generated without constraints to allow free on-policy exploration." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 510, + 504, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 510, + 504, + 552 + ], + "spans": [ + { + "bbox": [ + 104, + 510, + 504, + 552 + ], + "type": "text", + "content": "Evaluating alternative rollouts To determine whether the alternative rollout " + }, + { + "bbox": [ + 104, + 510, + 504, + 552 + ], + "type": "inline_equation", + "content": "\\{s_{k + 1}^{\\tau},\\ldots ,a\\}" + }, + { + "bbox": [ + 104, + 510, + 504, + 552 + ], + "type": "text", + "content": " is better than the existing path " + }, + { + "bbox": [ + 104, + 510, + 504, + 552 + ], + "type": "inline_equation", + "content": "\\{s_1^{\\tau +1},\\dots ,a\\}" + }, + { + "bbox": [ + 104, + 510, + 504, + 552 + ], + "type": "text", + "content": ", we define a value function " + }, + { + "bbox": [ + 104, + 510, + 504, + 552 + ], + "type": "inline_equation", + "content": "V(s)" + }, + { + "bbox": [ + 104, + 510, + 504, + 552 + ], + "type": "text", + "content": " over the " + }, + { + "bbox": [ + 104, + 510, + 504, + 552 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 510, + 504, + 552 + ], + "type": "text", + "content": "-th step " + }, + { + "bbox": [ + 104, + 510, + 504, + 552 + ], + "type": "inline_equation", + "content": "s_i" + }, + { + "bbox": [ + 104, + 510, + 504, + 552 + ], + "type": "text", + "content": " in the trajectory " + }, + { + "bbox": [ + 104, + 510, + 504, + 552 + ], + "type": "inline_equation", + "content": "\\{s_1,\\dots ,a\\}" + }, + { + "bbox": [ + 104, + 510, + 504, + 552 + ], + "type": "text", + "content": " to compare " + }, + { + "bbox": [ + 104, + 510, + 504, 
+ 552 + ], + "type": "inline_equation", + "content": "V(s_{k + 1}^{\\tau})" + }, + { + "bbox": [ + 104, + 510, + 504, + 552 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 104, + 510, + 504, + 552 + ], + "type": "inline_equation", + "content": "V(s_{1}^{\\tau +1})" + }, + { + "bbox": [ + 104, + 510, + 504, + 552 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 241, + 555, + 504, + 571 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 241, + 555, + 504, + 571 + ], + "spans": [ + { + "bbox": [ + 241, + 555, + 504, + 571 + ], + "type": "interline_equation", + "content": "V \\left(s _ {i}, a ^ {\\star}\\right) := \\gamma^ {N - i} R \\left(a \\left(s _ {i}\\right), a ^ {\\star}\\right) \\tag {3}", + "image_path": "e1d33e0b54431d2957564489168b0d83399724608278631d093b3cda16602854.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 574, + 504, + 663 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 574, + 504, + 663 + ], + "spans": [ + { + "bbox": [ + 104, + 574, + 504, + 663 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 574, + 504, + 663 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 574, + 504, + 663 + ], + "type": "text", + "content": " represents the total number of steps in the trajectory " + }, + { + "bbox": [ + 104, + 574, + 504, + 663 + ], + "type": "inline_equation", + "content": "\\{s_1, \\ldots, a\\}" + }, + { + "bbox": [ + 104, + 574, + 504, + 663 + ], + "type": "text", + "content": ". 
Here, we write " + }, + { + "bbox": [ + 104, + 574, + 504, + 663 + ], + "type": "inline_equation", + "content": "a(s_i) \\coloneqq \\{s_i, \\ldots, a\\}" + }, + { + "bbox": [ + 104, + 574, + 504, + 663 + ], + "type": "text", + "content": " to explicitly emphasize that the value depends on the specific step " + }, + { + "bbox": [ + 104, + 574, + 504, + 663 + ], + "type": "inline_equation", + "content": "s_i" + }, + { + "bbox": [ + 104, + 574, + 504, + 663 + ], + "type": "text", + "content": " and captures the autoregressive dependence of the generated answer " + }, + { + "bbox": [ + 104, + 574, + 504, + 663 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 104, + 574, + 504, + 663 + ], + "type": "text", + "content": " on the continuation from step " + }, + { + "bbox": [ + 104, + 574, + 504, + 663 + ], + "type": "inline_equation", + "content": "s_i" + }, + { + "bbox": [ + 104, + 574, + 504, + 663 + ], + "type": "text", + "content": ". The reward function " + }, + { + "bbox": [ + 104, + 574, + 504, + 663 + ], + "type": "inline_equation", + "content": "R(a, a^{\\star})" + }, + { + "bbox": [ + 104, + 574, + 504, + 663 + ], + "type": "text", + "content": " is binary, indicating whether the generated answer " + }, + { + "bbox": [ + 104, + 574, + 504, + 663 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 104, + 574, + 504, + 663 + ], + "type": "text", + "content": " matches the ground truth " + }, + { + "bbox": [ + 104, + 574, + 504, + 663 + ], + "type": "inline_equation", + "content": "a^{\\star}" + }, + { + "bbox": [ + 104, + 574, + 504, + 663 + ], + "type": "text", + "content": " (i.e., using a verifiable reward). 
We apply a decay factor " + }, + { + "bbox": [ + 104, + 574, + 504, + 663 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 104, + 574, + 504, + 663 + ], + "type": "text", + "content": " to discount future rewards, assigning higher value to paths that reach the correct answer in fewer steps. Concretely, we set to " + }, + { + "bbox": [ + 104, + 574, + 504, + 663 + ], + "type": "inline_equation", + "content": "\\gamma = 0.9" + }, + { + "bbox": [ + 104, + 574, + 504, + 663 + ], + "type": "text", + "content": " in our experiments. In what follows, we drop the detailed notation and refer to the above simply as " + }, + { + "bbox": [ + 104, + 574, + 504, + 663 + ], + "type": "inline_equation", + "content": "V(s)" + }, + { + "bbox": [ + 104, + 574, + 504, + 663 + ], + "type": "text", + "content": " for clarity." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 669, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 669, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 669, + 506, + 733 + ], + "type": "text", + "content": "If " + }, + { + "bbox": [ + 104, + 669, + 506, + 733 + ], + "type": "inline_equation", + "content": "V(s_{k+1}^{\\tau}) > V(s_1^{\\tau+1})" + }, + { + "bbox": [ + 104, + 669, + 506, + 733 + ], + "type": "text", + "content": ", the rollout reaches the correct final answer in fewer steps, and we replace the existing path " + }, + { + "bbox": [ + 104, + 669, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\{s_1^{\\tau+1}, \\ldots, a\\}" + }, + { + "bbox": [ + 104, + 669, + 506, + 733 + ], + "type": "text", + "content": " with the rollout " + }, + { + "bbox": [ + 104, + 669, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\{s_{k+1}^{\\tau}, \\ldots, a\\}" + }, + { + "bbox": [ + 104, + 669, + 506, + 733 + ], + "type": "text", + "content": ". 
This could occur when exploring deeper along the current thought is more effective, thus reducing under-thinking. Alternatively, " + }, + { + "bbox": [ + 104, + 669, + 506, + 733 + ], + "type": "inline_equation", + "content": "s_{k+1}^{\\tau} = a" + }, + { + "bbox": [ + 104, + 669, + 506, + 733 + ], + "type": "text", + "content": " indicates that the previous thought steps are already sufficient for the model to generate the correct solution directly, thereby reducing over-thinking." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Preprint. Under review." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 81, + 506, + 161 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 81, + 506, + 161 + ], + "spans": [ + { + "bbox": [ + 104, + 81, + 506, + 161 + ], + "type": "text", + "content": "In contrast, if " + }, + { + "bbox": [ + 104, + 81, + 506, + 161 + ], + "type": "inline_equation", + "content": "V(s_{k+1}^{\\tau}) < V(s_1^{\\tau+1})" + }, + { + "bbox": [ + 104, + 81, + 506, + 161 + ], + "type": "text", + "content": ", the existing path is better. The alternative path either reaches a wrong answer or reaches the correct answer with more steps. This suggests that switching to a new thought was effective and necessary, and thus the existing transition should be preserved. 
In practice, we sample multiple alternative rollouts (two in our experiments) and retain the best one—that is, the rollout with the highest value. We then proceed to examine the next thought in the updated reasoning trajectory. Please see Figure 2 for a concrete example, and Algorithm 1 in Appendix A for more details." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 167, + 506, + 225 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 167, + 506, + 225 + ], + "spans": [ + { + "bbox": [ + 104, + 167, + 506, + 225 + ], + "type": "text", + "content": "Retro-Search with Partial Revisions We also propose a more computationally efficient variant of Retro-Search. Instead of iteratively applying the revision procedure starting from the first thought, this version randomly samples a position in the trajectory at which to begin the revision. This is particularly useful when revising with larger models—for instance, the R1-32B model in our setting—where full iterative revision would be prohibitively expensive." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 237, + 195, + 251 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 237, + 195, + 251 + ], + "spans": [ + { + "bbox": [ + 105, + 237, + 195, + 251 + ], + "type": "text", + "content": "3 Experiments" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 260, + 157, + 273 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 260, + 157, + 273 + ], + "spans": [ + { + "bbox": [ + 105, + 260, + 157, + 273 + ], + "type": "text", + "content": "3.1 Setup" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 277, + 506, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 277, + 506, + 369 + ], + "spans": [ + { + "bbox": [ + 104, + 277, + 506, + 369 + ], + "type": "text", + "content": "Data Generation We use 40K math questions from NuminaMath (LI et al., 2024). 
Specifically, we sample NuminaMath questions from OpenThoughts-114k" + }, + { + "bbox": [ + 104, + 277, + 506, + 369 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 104, + 277, + 506, + 369 + ], + "type": "text", + "content": " dataset, which is the training data of OpenThinker-7B and OpenThinker-32B models. We experiment with two settings: 1) Self-Retro-R1-7B, where we first generate responses using the R1-distilled Qwen2.5-7B model and then revise them with the same model as the Retro-Search-er. 2) W2S-Retro-R1-32B, where we take responses from the DeepSeek-R1 671B model in the OpenThoughts dataset and revise them using a weaker model, R1-distilled Qwen2.5-32B. More details are in Appendix B." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 375, + 506, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 375, + 506, + 421 + ], + "spans": [ + { + "bbox": [ + 104, + 375, + 506, + 421 + ], + "type": "text", + "content": "Model Training We trained four models using data generated by Retro-Search: Qwen2.5-7B-Instruct, R1-distilled Qwen2.5-7B, Qwen2.5-32B-Instruct and R1-distilled Qwen2.5-32B with supervised fine-tuning. All models are fine-tuned for five epochs with learning rate of 1e-5, and sequence length of 16K. More details are in Appendix C." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 427, + 506, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 427, + 506, + 517 + ], + "spans": [ + { + "bbox": [ + 104, + 427, + 506, + 517 + ], + "type": "text", + "content": "Baselines We compare our trained models with a total of eleven open-weight models across two model size categories — six 7B models and five 32B models. 
These include instruction-tuned models such as Qwen2.5-7B-Inst (Yang et al., 2024a), Qwen2.5-Math-7B, Qwen2.5-Math-7B-Inst (Yang et al., 2024b) and Qwen2.5-32B-Inst (Yang et al., 2024a), as well as reasoning models such as OpenR1-Qwen-7B (HuggingFace, 2025), OpenThinker-7B (Team, 2025), R1-distill Qwen2.5-7B (DeepSeek-AI et al., 2025), OpenThinker-32B (Team, 2025), QwQ-32B-Preview (Qwen Team, 2025), Sky-T1-32B-Preview (NovaSky, 2025), and R1-distill Qwen2.5-32B (DeepSeek-AI et al., 2025). More baseline details are in Appendix D." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 522, + 506, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 522, + 506, + 590 + ], + "spans": [ + { + "bbox": [ + 104, + 522, + 506, + 590 + ], + "type": "text", + "content": "Benchmarks and Metrics We evaluate models on seven math-specific benchmarks: AIME25, AIME24, AMC23, GaoKao23English (Zhong et al., 2023), OlympiadBench (He et al., 2024), GSM8K (Cobbe et al., 2021), and MATH500 (Lightman et al., 2023). The first five benchmarks focus on olympiad-level math problems, where AIME25 and AIME24 each contain 30 problems and AMC23 contains 40 problems. GSM8K includes grade school math problems, and MATH500 includes high-school math competition problems." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 594, + 504, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 594, + 504, + 653 + ], + "spans": [ + { + "bbox": [ + 104, + 594, + 504, + 653 + ], + "type": "text", + "content": "For evaluation, we report two metrics: accuracy to measure the performance, and average response length to measure computational efficiency during inference. For accuracy, we use exact match between the model's prediction and the reference answer, with Qwen's official implementation for answer verification. For response length, we tokenize the responses using the Qwen2.5-7B-Instruct tokenizer and compute the number of output tokens." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 656, + 504, + 681 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 656, + 504, + 681 + ], + "spans": [ + { + "bbox": [ + 104, + 656, + 504, + 681 + ], + "type": "text", + "content": "Metrics are computed individually for each benchmark and then averaged using macro averaging to produce the final scores. Since there is no universally optimal decoding strategy" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Preprint. Under review." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 118, + 689, + 408, + 700 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 689, + 408, + 700 + ], + "spans": [ + { + "bbox": [ + 118, + 689, + 408, + 700 + ], + "type": "text", + "content": "1https://huggingface.co/datasets/open-thoughts/OpenThoughts-114k" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 119, + 700, + 361, + 711 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 700, + 361, + 711 + ], + "spans": [ + { + "bbox": [ + 119, + 700, + 361, + 711 + ], + "type": "text", + "content": "2https://github.com/QwenLM/Qwen2.5-Math/tree/main." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 711, + 504, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 711, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 106, + 711, + 504, + 732 + ], + "type": "text", + "content": "Note that evaluation results can significantly vary depending on the specifics of the answer verification, so we recommend to use the same implementation for reproduction." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 136, + 79, + 476, + 364 + ], + "blocks": [ + { + "bbox": [ + 136, + 79, + 476, + 364 + ], + "lines": [ + { + "bbox": [ + 136, + 79, + 476, + 364 + ], + "spans": [ + { + "bbox": [ + 136, + 79, + 476, + 364 + ], + "type": "table", + "html": "
ModelsGreedy DecodingSampling (T=0.6, p=0.95)
Accuracy (↑)Length (↓)Accuracy (↑)Length (↓)
Baselines (7B)
Qwen2.5-Math-7B41.1118239.01225
Qwen2.5-Math-7B-Inst53.198252.7985
OpenR1-Qwen-7B67.6946371.77740
OpenThinker-7B53.81447759.19835
Qwen2.5-7B-Inst48.798547.91033
+ R1-7B49.71436555.48959
+ Self-Retro-R1-7B51.7 (+4.1%)11050 (-23.1%)55.8 (+0.7%)8263 (-7.8%)
+ R1-671B51.51430258.49824
+ W2S-Retro-R1-32B55.3 (+7.3%)13569 (-5.1%)57.8 (-1.1%)8940 (-9.0%)
R1-distill-Qwen2.5-7B64.51060071.06831
+ R1-671B68.4941871.77172
+ W2S-Retro-R1-32B70.8 (+3.5%)8800 (-6.6%)73.1 (+2.0%)6535 (-8.9%)
Baselines (32B)
OpenThinker-32B73.0800175.96840
QwQ-32B-Preview70.9516468.35163
Sky-T1-32B-Preview62.0236762.92018
Qwen2.5-32B-Inst56.197555.9761
+ R1-671B76.2707475.66676
+ W2S-Retro-R1-32B74.6 (-2.2%)6809 (-3.7%)77.5 (+2.4%)5923 (-11.3%)
R1-distill Qwen2.5-32B73.1856677.76173
+ R1-671B (12K)80.4647079.86164
+ W2S-Retro-R1-32B (12K)79.9 (-0.6%)6091 (-5.9%)81.0 (+1.5%)5301 (-14.0%)
", + "image_path": "d14aac5efd6cebeeba18a99eb68347d40ffd47dd815884a28ec2388fed42c964.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 136, + 480, + 472, + 541 + ], + "blocks": [ + { + "bbox": [ + 104, + 370, + 506, + 471 + ], + "lines": [ + { + "bbox": [ + 104, + 370, + 506, + 471 + ], + "spans": [ + { + "bbox": [ + 104, + 370, + 506, + 471 + ], + "type": "text", + "content": "Table 1: Retro-Search provides better training data. Model evaluation results averaged across seven math benchmarks (AIME25, AIME24, AMC23, GaoKao23English, Olympiad-Bench, GSM8K, and MATH500). We report results from two setups: greedy decoding " + }, + { + "bbox": [ + 104, + 370, + 506, + 471 + ], + "type": "inline_equation", + "content": "(\\mathrm{T} = 0)" + }, + { + "bbox": [ + 104, + 370, + 506, + 471 + ], + "type": "text", + "content": " and temperature sampling " + }, + { + "bbox": [ + 104, + 370, + 506, + 471 + ], + "type": "inline_equation", + "content": "(\\mathrm{T} = 0.6" + }, + { + "bbox": [ + 104, + 370, + 506, + 471 + ], + "type": "text", + "content": " with top-p " + }, + { + "bbox": [ + 104, + 370, + 506, + 471 + ], + "type": "inline_equation", + "content": "= 0.95)" + }, + { + "bbox": [ + 104, + 370, + 506, + 471 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 104, + 370, + 506, + 471 + ], + "type": "inline_equation", + "content": "+X" + }, + { + "bbox": [ + 104, + 370, + 506, + 471 + ], + "type": "text", + "content": " indicates that the model is fine-tuned with data X. Only when fine-tuning R1-distill Qwen2.5-32B, we used 12K instances, as using more data did not improve results. The results indicate that: (1) models trained with Retro-Search data are more computationally efficient during inference while generally showing better performance; and (2) weak-to-strong Retro-Search enables new SOTA at 7B and 32B scales." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 136, + 480, + 472, + 541 + ], + "lines": [ + { + "bbox": [ + 136, + 480, + 472, + 541 + ], + "spans": [ + { + "bbox": [ + 136, + 480, + 472, + 541 + ], + "type": "table", + "html": "
Qwen2.5-7B-InstGreedy DecodingSampling (T=0.6, p=0.95)
Accuracy (↑)Length (↓)Accuracy (↑)Length (↓)
+ R1-7B49.71436555.48959
+ R1-7B-Shortest50.31234054.68009
+ Self-Retro-R1-7B51.71105055.88263
", + "image_path": "bb9a243fb822203f65188b845613855c1fc2f1191d2253776422a4b9f3dabfa0.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 548, + 506, + 584 + ], + "lines": [ + { + "bbox": [ + 104, + 548, + 506, + 584 + ], + "spans": [ + { + "bbox": [ + 104, + 548, + 506, + 584 + ], + "type": "text", + "content": "Table 2: Simply selecting the shortest path for training is suboptimal for model accuracy. We fine-tuned Qwen2.5-7B-Inst with different training data and compare results. We sample eight responses using R1-distilled Qwen2.5-7B and choose the shortest response." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 605, + 504, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 605, + 504, + 662 + ], + "spans": [ + { + "bbox": [ + 104, + 605, + 504, + 662 + ], + "type": "text", + "content": "that works well across all models, we report results under two commonly used decoding setups: greedy decoding " + }, + { + "bbox": [ + 104, + 605, + 504, + 662 + ], + "type": "inline_equation", + "content": "(\\mathrm{T} = 0)" + }, + { + "bbox": [ + 104, + 605, + 504, + 662 + ], + "type": "text", + "content": ", following Muennighoff et al. (2025), and temperature sampling " + }, + { + "bbox": [ + 104, + 605, + 504, + 662 + ], + "type": "inline_equation", + "content": "(\\mathrm{T} = 0.6" + }, + { + "bbox": [ + 104, + 605, + 504, + 662 + ], + "type": "text", + "content": " with top-p " + }, + { + "bbox": [ + 104, + 605, + 504, + 662 + ], + "type": "inline_equation", + "content": "= 0.95)" + }, + { + "bbox": [ + 104, + 605, + 504, + 662 + ], + "type": "text", + "content": ", following DeepSeek-AI et al. (2025). We took an average of results from five different seeds for the temperature sampling setup. In Appendix E, we share the full results including the confidence interval of the results." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 677, + 217, + 688 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 677, + 217, + 688 + ], + "spans": [ + { + "bbox": [ + 105, + 677, + 217, + 688 + ], + "type": "text", + "content": "3.2 Evaluation Results" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 698, + 504, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 504, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 504, + 734 + ], + "type": "text", + "content": "Self Retro-Search teaches stronger and more efficient student models than vanilla data generation. We compare fine-tuning the student model, Qwen2.5-7B-Instruct, using data from our Self-Retro-R1-7B against fine-tuning with data sampled from the R1-distilled" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "type": "text", + "content": "Preprint. Under review." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 136, + 79, + 473, + 129 + ], + "blocks": [ + { + "bbox": [ + 136, + 79, + 473, + 129 + ], + "lines": [ + { + "bbox": [ + 136, + 79, + 473, + 129 + ], + "spans": [ + { + "bbox": [ + 136, + 79, + 473, + 129 + ], + "type": "table", + "html": "
ModelsGreedy DecodingSampling (T=0.6, p=0.95)
Accuracy (↑)Length (↓)Accuracy (↑)Length (↓)
R1-distill Qwen2.5-7B64.51060071.06831
+ Self-Retro-R1-7B69.5 (+7.7%)7295 (-31.2%)70.6 (-0.6%)5406 (-20.9%)
", + "image_path": "c7b5b308bb1254d7abf9e352c8ba5f851b2bd1d6710ccabfeac76cb9a6c47b22.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 137, + 506, + 172 + ], + "lines": [ + { + "bbox": [ + 104, + 137, + 506, + 172 + ], + "spans": [ + { + "bbox": [ + 104, + 137, + 506, + 172 + ], + "type": "text", + "content": "Table 3: Retro-Search allows self-improvement of the models. Fine-tuning the R1-distilled Qwen2.5-7B model with self-revision data (Self-Retro-R1-7B) significantly improves efficiency, while maintaining or even improving accuracy." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 192, + 506, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 192, + 506, + 228 + ], + "spans": [ + { + "bbox": [ + 104, + 192, + 506, + 228 + ], + "type": "text", + "content": "Qwen2.5-7B model before revision, referred to as " + }, + { + "bbox": [ + 104, + 192, + 506, + 228 + ], + "type": "inline_equation", + "content": "R1 - 7B" + }, + { + "bbox": [ + 104, + 192, + 506, + 228 + ], + "type": "text", + "content": " in Table 1. 
Compared to models trained on " + }, + { + "bbox": [ + 104, + 192, + 506, + 228 + ], + "type": "inline_equation", + "content": "R1 - 7B" + }, + { + "bbox": [ + 104, + 192, + 506, + 228 + ], + "type": "text", + "content": ", the model trained on Self-Retro-" + }, + { + "bbox": [ + 104, + 192, + 506, + 228 + ], + "type": "inline_equation", + "content": "R1 - 7B" + }, + { + "bbox": [ + 104, + 192, + 506, + 228 + ], + "type": "text", + "content": " produces responses that are " + }, + { + "bbox": [ + 104, + 192, + 506, + 228 + ], + "type": "inline_equation", + "content": "23.1\\%" + }, + { + "bbox": [ + 104, + 192, + 506, + 228 + ], + "type": "text", + "content": " shorter while improving accuracy by " + }, + { + "bbox": [ + 104, + 192, + 506, + 228 + ], + "type": "inline_equation", + "content": "+4.1\\%" + }, + { + "bbox": [ + 104, + 192, + 506, + 228 + ], + "type": "text", + "content": " under greedy decoding." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 231, + 504, + 288 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 231, + 504, + 288 + ], + "spans": [ + { + "bbox": [ + 104, + 231, + 504, + 288 + ], + "type": "text", + "content": "We further compare Retro-Search against another baseline, R1-7B-Shortest, which selects the shortest response for model training after sampling eight responses per questions using R1-distilled Qwen2.5-7B. As shown in Table 2, although training with the shortest response can enhance efficiency when compared to R1-7B, it does not improve the model performance as much as our Retro-Search, clearly demonstrating the effectiveness of our Retro-Search." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 295, + 506, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 295, + 506, + 373 + ], + "spans": [ + { + "bbox": [ + 104, + 295, + 506, + 373 + ], + "type": "text", + "content": "Weak-to-Strong Retro-Search enables new SOTA reasoning models at 7B and 32B scales, excelling in both performance and efficiency. While Self-Retro has proven effective, using a large model such as DeepSeek-R1-671B for both generation and revision is computationally implausible. We evaluate the effectiveness of weak-to-strong revision, where DeepSeek-R1-671B's generations are Retro-Search-ed by R1-distilled Qwen2.5-32B, denoted as W2S-Retro-R1-32B. We fine-tune student models on this data and compare them to those fine-tuned on unrevised data from DeepSeek-R1-671B, referred to as R1-671B in Table 1." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 377, + 506, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 377, + 506, + 522 + ], + "spans": [ + { + "bbox": [ + 104, + 377, + 506, + 522 + ], + "type": "text", + "content": "W2S-Retro-R1-32B proves to be effective, enabling new SOTA reasoning models at 7B and 32B scales. We fine-tuned four models — Qwen2.5-7B-Instruct, R1-distilled Qwen2.5-7B, Qwen2.5-32B-Instruct and R1-distilled Qwen2.5-32B — and consistently observed reduced response lengths and improved performance across different setups compared to models fine-tuned on R1-671B. Surprisingly, R1-distilled Qwen2.5-7B and R1-distilled Qwen2.5-32B fine-tuned on W2S-Retro-R1-32B, achieve new SOTA reasoning performance in the sampling setting at the 7B and 32B scales, while yielding the highest inference time efficiency. 
In addition, Qwen2.5-32B fine-tuned on W2S-Retro-R1-32B, achieves performance comparable to R1-distill-32B, yielding an " + }, + { + "bbox": [ + 104, + 377, + 506, + 522 + ], + "type": "inline_equation", + "content": "11.3\\%" + }, + { + "bbox": [ + 104, + 377, + 506, + 522 + ], + "type": "text", + "content": " reduction in reasoning length and a " + }, + { + "bbox": [ + 104, + 377, + 506, + 522 + ], + "type": "inline_equation", + "content": "2.4\\%" + }, + { + "bbox": [ + 104, + 377, + 506, + 522 + ], + "type": "text", + "content": " performance improvement compared to fine-tuning on the R1-671B data. Notably, it also outperforms OpenThinker-32B in accuracy while being more efficient (13.4%–14.9% shorter response). This is particularly significant given that OpenThinker-32B is trained on around 2.5 times more data than our W2S-Retro-R1-32B and use DeepSeek-R1 671B for response generation." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 529, + 506, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 529, + 506, + 608 + ], + "spans": [ + { + "bbox": [ + 104, + 529, + 506, + 608 + ], + "type": "text", + "content": "Retro-Search enables self-improvement of R1-distilled models. We fine-tune the R1-distilled Qwen2.5-7B model with our Self-Retro-R1-7B. Results in Table 3 show significant accuracy improvement " + }, + { + "bbox": [ + 104, + 529, + 506, + 608 + ], + "type": "inline_equation", + "content": "(+7.7\\%)" + }, + { + "bbox": [ + 104, + 529, + 506, + 608 + ], + "type": "text", + "content": " and response length reduction " + }, + { + "bbox": [ + 104, + 529, + 506, + 608 + ], + "type": "inline_equation", + "content": "(31.2\\%)" + }, + { + "bbox": [ + 104, + 529, + 506, + 608 + ], + "type": "text", + "content": " for greedy decoding, compared to R1-distill Qwen2.5-7B. 
There is a small performance reduction for temperature sampling " + }, + { + "bbox": [ + 104, + 529, + 506, + 608 + ], + "type": "inline_equation", + "content": "(-0.6\\%)" + }, + { + "bbox": [ + 104, + 529, + 506, + 608 + ], + "type": "text", + "content": ", but the length reduction is substantial " + }, + { + "bbox": [ + 104, + 529, + 506, + 608 + ], + "type": "inline_equation", + "content": "(20.9\\%)" + }, + { + "bbox": [ + 104, + 529, + 506, + 608 + ], + "type": "text", + "content": ". As Self-Retro-R1-7B uses R1-distilled Qwen2.5-7B model for response generation, revision, and fine-tuning the model itself, this shows the self-improvement capabilities enabled by Retro-Search." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 622, + 173, + 634 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 622, + 173, + 634 + ], + "spans": [ + { + "bbox": [ + 105, + 622, + 173, + 634 + ], + "type": "text", + "content": "3.3 Analyses" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 643, + 506, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 643, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 643, + 506, + 734 + ], + "type": "text", + "content": "We quantitatively analyze the reasoning trajectories in the synthesized training data using our Retro-Search, as well as those generated by the fine-tuned student model Qwen2.5-7B. Table 4 reports the average number of transition keywords, number of steps per thought, and the relative location where the solution first appears in the trajectory (with values closer to 1 indicating that the solution is nearer the end). The synthesized reasoning traces from Retro-Search contain significantly fewer transition keywords than those from R1-7B and R1-671B. As a result, thoughts from Retro-Search include more steps than those from R1-7B and 671B, indicating deeper thoughts. 
Additionally, the solution tends to appear later in" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "type": "text", + "content": "Preprint. Under review." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 79, + 504, + 148 + ], + "blocks": [ + { + "bbox": [ + 106, + 79, + 504, + 148 + ], + "lines": [ + { + "bbox": [ + 106, + 79, + 504, + 148 + ], + "spans": [ + { + "bbox": [ + 106, + 79, + 504, + 148 + ], + "type": "table", + "html": "
Synthesized Training DataStudent Model's Reasoning Trace
#Transition Keywords (↓)#Steps/Thought (↑)Relative Location of Solution (↑)#Transition Keywords (↓)#Steps/Thought (↑)Relative Location of Solution (↑)
R1-7B85.93.70.67229.24.70.59
Self-Retro-R1-7B32.75.30.73183.25.40.64
R1-671B35.33.80.5980.03.00.44
W2S-Retro-R1-32B10.44.90.6070.13.20.48
", + "image_path": "cbc12952f330df65db4917167473e75d8a0485d4eea129128ca2cadc83a643ca.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 159, + 504, + 194 + ], + "lines": [ + { + "bbox": [ + 104, + 159, + 504, + 194 + ], + "spans": [ + { + "bbox": [ + 104, + 159, + 504, + 194 + ], + "type": "text", + "content": "Table 4: The average number of transition keywords, the number of steps per thought, and the relative location of the first appearance of the solution in the reasoning trajectory are taken from both the training data and the fine-tuned student model, Qwen2.5-7B." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 213, + 504, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 213, + 504, + 248 + ], + "spans": [ + { + "bbox": [ + 104, + 213, + 504, + 248 + ], + "type": "text", + "content": "the trajectory, suggesting that our approach shows less redundant thoughts after the final solution is derived. These trends are also consistent in the reasoning outputs from the student model, showing that Retro-Search reduces both under-thinking and over-thinking." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 261, + 206, + 274 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 261, + 206, + 274 + ], + "spans": [ + { + "bbox": [ + 105, + 261, + 206, + 274 + ], + "type": "text", + "content": "4 Related Works" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 283, + 506, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 283, + 506, + 460 + ], + "spans": [ + { + "bbox": [ + 104, + 283, + 506, + 460 + ], + "type": "text", + "content": "Test-time compute has emerged as a new axis of scaling for LLM reasoning. 
While prior research in this direction have focused on parallel scaling—repeated sampling of trajectories followed by aggregation (Brown et al., 2024; Snell et al., 2024; Wu et al., 2025a), recent efforts have focused on sequential scaling—where models are trained to back-track, evaluate, and revise its thought by generating a long, monolithic CoT. Representative models such as O1 and R1 (OpenAI, 2024; DeepSeek-AI et al., 2025) are trained via large-scale reinforcement learning, demonstrating that models can learn to generate long CoTs without relying on bespoke reward models (Lightman et al., 2023; Zhang et al., 2025b), or tree search (Feng et al., 2024; Zhang et al., 2024). Subsequent projects in open-source community aim to replicate these reasoning models (HuggingFace, 2025; Qin et al., 2024). These works often utilize frontier reasoning models to generate synthetic long thought traces, and showing surprising gain in reasoning capabilities via simple supervised fine-tuning (HuggingFace, 2025; NovaSky, 2025; Muennighoff et al., 2025). Our work builds upon these prior efforts, focusing on (1) better-quality reasoning paths by targeted revision of verbose sub-traces, and (2) demonstrating self-improvement beyond typical strong-to-weak distillation, where smaller models can self-improve in both performance and efficiency." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 464, + 506, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 464, + 506, + 620 + ], + "spans": [ + { + "bbox": [ + 104, + 464, + 506, + 620 + ], + "type": "text", + "content": "Meanwhile, concurrent works reveal limitations of reasoning models in their in-efficiency of test-time scaling. Longer generation does not necessarily correlate with better accuracy (Zeng et al., 2025b), and in practice, shorter trajectories are more likely to be correct. 
Models tend to overthink (Cuadron et al., 2025; Sui et al., 2025; Chen et al., 2024), i.e., they generate unnecessarily long trajectories that do not contribute to the performance. Models also exhibit underthinking (Wang et al., 2025)—while they appear to explore diverse plausible paths, models often switch between paths without sufficient exploration on one path. Wu et al. (2025b) suggests the source of inefficiency may lie in the regularities of the training data we use, and theoretically show that training on CoTs that are longer than the optimal length for the model can hurt its performance. Several measures have been proposed to mitigate these findings, such as auxiliary learnable parameters (Bao et al., 2025; Zhang et al., 2025a), calibration (Huang et al., 2025), and decoding-time algorithm (Xu et al., 2025; Misaki et al., 2025). Retro-Search aligns with these prior efforts, and importantly revisits the value of search algorithm in improving both the efficiency and performance of test-time scaling." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 632, + 194, + 644 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 632, + 194, + 644 + ], + "spans": [ + { + "bbox": [ + 105, + 632, + 194, + 644 + ], + "type": "text", + "content": "5 Conclusions" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 654, + 506, + 733 + ], + "type": "text", + "content": "In this work, we introduced Retro-Search, a novel algorithm for synthesizing reasoning data designed to equip reasoning models with efficient (shorter average response length) and effective (higher accuracy) test-time scaling. 
Inspired by the MCTS algorithm, Retro-Search retrospectively revises reasoning trajectories—eliminating unnecessary thought switches (under-thinking) and trimming redundant steps after the correct answer becomes evident (over-thinking). Quantitatively, we show that Retro-Search is highly effective for self-improvement and weak-to-strong revision. Specifically, R1-distill-7B, fine-tuned on its own" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "type": "text", + "content": "Preprint. Under review." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 759 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 759 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 759 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 107, + 81, + 504, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 504, + 159 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 504, + 159 + ], + "type": "text", + "content": "Retro-Search-ed traces, reduces the average reasoning length by " + }, + { + "bbox": [ + 107, + 81, + 504, + 159 + ], + "type": "inline_equation", + "content": "31.2\\%" + }, + { + "bbox": [ + 107, + 81, + 504, + 159 + ], + "type": "text", + "content": " while improving performance by " + }, + { + "bbox": [ + 107, + 81, + 504, + 159 + ], + "type": "inline_equation", + "content": "7.7\\%" + }, + { + "bbox": [ + 107, + 81, + 504, + 159 + ], + "type": "text", + "content": " across seven math benchmarks. 
Notably, R1-distill-7B and R1-distill-32B, fine-tuned on weak-to-strong Retro-Search-ed reasoning traces from R1-671B, set new state-of-the-art performance at the 7B and 32B scales while yielding the highest reasoning efficiency. We hope our work reinvigorates interest in the power of search-based methods for synthetic data in reasoning models—a direction that has recently fallen out of favor, yet holds significant untapped potential." + } + ] + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "type": "text", + "content": "Preprint. Under review." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 107, + 81, + 167, + 92 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 167, + 92 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 167, + 92 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 99, + 505, + 731 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 107, + 99, + 504, + 122 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 99, + 504, + 122 + ], + "spans": [ + { + "bbox": [ + 107, + 99, + 504, + 122 + ], + "type": "text", + "content": "Hieu Tran Bao, Nguyen Cong Dat, Nguyen Duc Anh, and Hoang Thanh-Tung. Learning to stop overthinking at test time, 2025. URL https://arxiv.org/abs/2502.10954." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 129, + 504, + 163 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 129, + 504, + 163 + ], + "spans": [ + { + "bbox": [ + 107, + 129, + 504, + 163 + ], + "type": "text", + "content": "Bradley Brown, Jordan Juravsky, Ryan Ehrlich, Ronald Clark, Quoc V. Le, Christopher Ré, and Azalia Mirhoseini. Large language monkeys: Scaling inference compute with repeated sampling, 2024. URL https://arxiv.org/abs/2407.21787." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 169, + 505, + 203 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 169, + 505, + 203 + ], + "spans": [ + { + "bbox": [ + 107, + 169, + 505, + 203 + ], + "type": "text", + "content": "Xingyu Chen, Jiahao Xu, Tian Liang, Zhiwei He, Jianhui Pang, Dian Yu, Linfeng Song, Qiuzhi Liu, Mengfei Zhou, Zhuosheng Zhang, et al. Do not think that much for " + }, + { + "bbox": [ + 107, + 169, + 505, + 203 + ], + "type": "inline_equation", + "content": "2 + 3 = ?" + }, + { + "bbox": [ + 107, + 169, + 505, + 203 + ], + "type": "text", + "content": " on the overthinking of o1-like llms. arXiv preprint arXiv:2412.21187, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 209, + 504, + 243 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 209, + 504, + 243 + ], + "spans": [ + { + "bbox": [ + 107, + 209, + 504, + 243 + ], + "type": "text", + "content": "Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, et al. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 249, + 505, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 249, + 505, + 304 + ], + "spans": [ + { + "bbox": [ + 107, + 249, + 505, + 304 + ], + "type": "text", + "content": "Alejandro Cuadron, Dacheng Li, Wenjie Ma, Xingyao Wang, Yichuan Wang, Siyuan Zhuang, Shu Liu, Luis Gaspar Schroeder, Tian Xia, Huanzhi Mao, Nicholas Thumiger, Aditya Desai, Ion Stoica, Ana Klimovic, Graham Neubig, and Joseph E. Gonzalez. The danger of overthinking: Examining the reasoning-action dilemma in agentic tasks. ArXiv, abs/2502.08235, 2025. URL https://api-semanticscholar.org/CorpusID:276287600." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 311, + 505, + 651 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 311, + 505, + 651 + ], + "spans": [ + { + "bbox": [ + 107, + 311, + 505, + 651 + ], + "type": "text", + "content": "DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, Xiaokang Zhang, Xingkai Yu, Yu Wu, Z. F. Wu, Zhibin Gou, Zhihong Shao, Zhuoshu Li, Ziyi Gao, Aixin Liu, Bing Xue, Bingxuan Wang, Bochao Wu, Bei Feng, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, Damai Dai, Deli Chen, Dongjie Ji, Erhang Li, Fangyun Lin, Fucong Dai, Fuli Luo, Guangbo Hao, Guanting Chen, Guowei Li, H. Zhang, Han Bao, Hanwei Xu, Haocheng Wang, Honghui Ding, Huajian Xin, Huazuo Gao, Hui Qu, Hui Li, Jianzhong Guo, Jia Shi Li, Jiawei Wang, Jingchang Chen, Jingyang Yuan, Junjie Qiu, Junlong Li, J. L. 
Cai, Jiaqi Ni, Jian Liang, Jin Chen, Kai Dong, Kai Hu, Kaige Gao, Kang Guan, Kexin Huang, Kuai Yu, Lean Wang, Lecong Zhang, Liang Zhao, Litong Wang, Liyue Zhang, Lei Xu, Leyi Xia, Mingchuan Zhang, Minghua Zhang, Minghui Tang, Meng Li, Miaojun Wang, Mingming Li, Ning Tian, Panpan Huang, Peng Zhang, Qiancheng Wang, Qinyu Chen, Qiushi Du, Ruiqi Ge, Ruisong Zhang, Ruizhe Pan, Runji Wang, R. J. Chen, R. L. Jin, Ruyi Chen, Shanghai Lu, Shangyan Zhou, Shanhuang Chen, Shengfeng Ye, Shiyu Wang, Shuiying Yu, Shunfeng Zhou, Shuting Pan, S. S. Li, Shuang Zhou, Shaoqing Wu, Shengfeng Ye, Tao Yun, Tian Pei, Tianyu Sun, T. Wang, Wangding Zeng, Wanjia Zhao, Wen Liu, Wenfeng Liang, Wenjun Gao, Wenqin Yu, Wentao Zhang, W. L. Xiao, Wei An, Xiaodong Liu, Xiaohan Wang, Xiaokang Chen, Xiaotao Nie, Xin Cheng, Xin Liu, Xin Xie, Xingchao Liu, Xinyu Yang, Xinyuan Li, Xuecheng Su, Xuheng Lin, X. Q. Li, Xiangyue Jin, Xiaojin Shen, Xiaosha Chen, Xiaowen Sun, Xiaoxiang Wang, Xinnan Song, Xinyi Zhou, Xianzu Wang, Xinxia Shan, Y. K. Li, Y. Q. Wang, Y. X. Wei, Yang Zhang, Yanhong Xu, Yao Li, Yao Zhao, Yaofeng Sun, Yaohui Wang, Yi Yu, Yichao Zhang, Yifan Shi, Yiliang Xiong Ying He Yishi Piao Yisong Wang Yixuan Tan Yiyang Ma Yiyuan Liu Yongqiang Guo Yuan Ou Yuduan Wang Yue Gong Yuheng Zou Yujia He Yunf an Xiong Yuxiang Luo Yuxiang You Yuxuan Liu Yuyang Zhou Y. X. Zhu Yanhong Xu Yanping Huang Yaohui Li Yi Zheng Yuchen Zhu Yunxian Ma Ying Tang Yukun Zha Yuting Yan Z.Z.Ren Zehui Ren,Zhangli Sha Zhe Fu Zhean Xu Zhenda Xie Zhengyan ZhangZhenwen Hao Zhicheng Ma Zhigang Yan Zhiyu Wu Zihui Gu Zijia Zhu Zijun Liu Zilin Li Ziwei Xie Ziyang Song Zizheng Pan Zhen Huang Zhipeng Xu Zhongyu Zhang and Zhen Zhang. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning 2025. URL https://arxiv.org/abs/2501.12948." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 658, + 504, + 692 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 658, + 504, + 692 + ], + "spans": [ + { + "bbox": [ + 107, + 658, + 504, + 692 + ], + "type": "text", + "content": "Xidong Feng, Ziyu Wan, Muning Wen, Stephen Marcus McAleer, Ying Wen, Weinan Zhang, and Jun Wang. Alphazero-like tree-search can guide large language model decoding and training, 2024. URL https://arxiv.org/abs/2309.17179." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 698, + 504, + 731 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 698, + 504, + 731 + ], + "spans": [ + { + "bbox": [ + 107, + 698, + 504, + 731 + ], + "type": "text", + "content": "Yichao Fu, Junda Chen, Siqi Zhu, Zheyu Fu, Zhongdongming Dai, Aurick Qiao, and Hao Zhang. Efficiently serving llm reasoning programs with certainindex. arXiv preprint arXiv:2412.20993, 2024." + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "type": "text", + "content": "Preprint. Under review." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 309, + 760 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 107, + 81, + 506, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 506, + 116 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 506, + 116 + ], + "type": "text", + "content": "Kanishk Gandhi, Ayush Chakravarthy, Anikait Singh, Nathan Lile, and Noah D. Goodman. Cognitive behaviors that enable self-improving reasoners, or, four habits of highly effective stars, 2025. URL https://arxiv.org/abs/2503.01307." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 506, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 506, + 168 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 506, + 168 + ], + "type": "text", + "content": "Chaoqun He, Renjie Luo, Yuzhuo Bai, Shengding Hu, Zhen Leng Thai, Junhao Shen, Jinyi Hu, Xu Han, Yujie Huang, Yuxiang Zhang, et al. Olympiadbench: A challenging benchmark for promoting agi with olympiad-level bilingual multimodal scientific problems. arXiv preprint arXiv:2402.14008, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 175, + 504, + 199 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 175, + 504, + 199 + ], + "spans": [ + { + "bbox": [ + 107, + 175, + 504, + 199 + ], + "type": "text", + "content": "Chengsong Huang, Langlin Huang, Jixuan Leng, Jiacheng Liu, and Jiaxin Huang. Efficient test-time scaling via self-calibration, 2025. URL https://arxiv.org/abs/2503.00031." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 205, + 504, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 205, + 504, + 229 + ], + "spans": [ + { + "bbox": [ + 107, + 205, + 504, + 229 + ], + "type": "text", + "content": "HuggingFace. Open r1: A fully open reproduction of deepseek-r1, January 2025. URL https://github.com/huggingface/open-r1." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 236, + 506, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 236, + 506, + 293 + ], + "spans": [ + { + "bbox": [ + 106, + 236, + 506, + 293 + ], + "type": "text", + "content": "Jia LI, Edward Beeching, Lewis Tunstall, Ben Lipkin, Roman Soletskyi, Shengyi Costa Huang, Kashif Rasul, Longhui Yu, Albert Jiang, Ziju Shen, Zihan Qin, Bin Dong, Li Zhou, Yann Fleureau, Guillaume Lample, and Stanislas Polu. Numinamath. [https://huggingface.co/AI-MO/NuminaMath-CoT](https://github.com/project-numina/aimo-progress-prize/blob/main/report/numina_dataset.pdf), 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 298, + 506, + 333 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 298, + 506, + 333 + ], + "spans": [ + { + "bbox": [ + 107, + 298, + 506, + 333 + ], + "type": "text", + "content": "Hunter Lightman, Vineet Kosaraju, Yura Burda, Harri Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step, 2023. URL https://arxiv.org/abs/2305.20050." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 340, + 506, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 340, + 506, + 374 + ], + "spans": [ + { + "bbox": [ + 107, + 340, + 506, + 374 + ], + "type": "text", + "content": "Kou Misaki, Yuichi Inoue, Yuki Imajuku, So Kuroki, Taishi Nakamura, and Takuya Akiba. Wider or deeper? scaling llm inference-time compute with adaptive branching tree search, 2025. 
URL https://arxiv.org/abs/2503.04412." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 381, + 506, + 416 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 381, + 506, + 416 + ], + "spans": [ + { + "bbox": [ + 107, + 381, + 506, + 416 + ], + "type": "text", + "content": "Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling, 2025. URL https://arxiv.org/abs/2501.19393." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 422, + 506, + 447 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 422, + 506, + 447 + ], + "spans": [ + { + "bbox": [ + 107, + 422, + 506, + 447 + ], + "type": "text", + "content": "NovaSky. Sky-t1: Train your own o1 preview model within $450. https://novaskyai.github.io/posts/sky-t1, 2025." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 453, + 465, + 466 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 453, + 465, + 466 + ], + "spans": [ + { + "bbox": [ + 107, + 453, + 465, + 466 + ], + "type": "text", + "content": "OpenAI. Openai o1 system card, 2024. URL https://arxiv.org/abs/2412.16720." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 472, + 504, + 507 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 472, + 504, + 507 + ], + "spans": [ + { + "bbox": [ + 107, + 472, + 504, + 507 + ], + "type": "text", + "content": "Yiwei Qin, Xuefeng Li, Haoyang Zou, Yixiu Liu, Shijie Xia, Zhen Huang, Yixin Ye, Weizhe Yuan, Hector Liu, Yuanzhi Li, and Pengfei Liu. O1 replication journey: A strategic progress report - part 1, 2024. URL https://arxiv.org/abs/2410.18982." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 514, + 504, + 537 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 514, + 504, + 537 + ], + "spans": [ + { + "bbox": [ + 107, + 514, + 504, + 537 + ], + "type": "text", + "content": "Qwen Team. Qwq-32b: Embracing the power of reinforcement learning, March 2025. URL https://qwenlm.github.io/blog/qwq-32b/." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 544, + 506, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 544, + 506, + 578 + ], + "spans": [ + { + "bbox": [ + 107, + 544, + 506, + 578 + ], + "type": "text", + "content": "Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling llm test-time compute optimally can be more effective than scaling model parameters, 2024. URL https:// arxiv.org/abs/2408.03314." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 585, + 504, + 630 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 585, + 504, + 630 + ], + "spans": [ + { + "bbox": [ + 107, + 585, + 504, + 630 + ], + "type": "text", + "content": "Yang Sui, Yu-Neng Chuang, Guanchu Wang, Jiamu Zhang, Tianyi Zhang, Jiayi Yuan, Hongyi Liu, Andrew Wen, Shaochen, Zhong, Hanjie Chen, and Xia Hu. Stop overthinking: A survey on efficient reasoning for large language models, 2025. URL https://arxiv.org/abs/2503.16419." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 637, + 459, + 651 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 637, + 459, + 651 + ], + "spans": [ + { + "bbox": [ + 107, + 637, + 459, + 651 + ], + "type": "text", + "content": "OpenThoughts Team. Open Thoughts. https://open-thoughts.ai, January 2025." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 657, + 506, + 691 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 657, + 506, + 691 + ], + "spans": [ + { + "bbox": [ + 107, + 657, + 506, + 691 + ], + "type": "text", + "content": "Leandro von Werra, Younes Belkada, Lewis Tunstall, Edward Beeching, Tristan Thrush, Nathan Lambert, Shengyi Huang, Kashif Rasul, and Quentin Gallouédec. Trl: Transformer reinforcement learning. https://github.com/huggingface/trl, 2020." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 107, + 698, + 506, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 698, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 107, + 698, + 506, + 732 + ], + "type": "text", + "content": "Yue Wang, Qiuzhi Liu, Jiahao Xu, Tian Liang, Xingyu Chen, Zhiwei He, Linfeng Song, Dian Yu, Juntao Li, Zhuosheng Zhang, et al. Thoughts are all over the place: On the underthinking of o1-like llms. arXiv preprint arXiv:2501.18585, 2025." + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Preprint. Under review." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 587 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 107, + 81, + 506, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 506, + 128 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 506, + 128 + ], + "type": "text", + "content": "Yangzhen Wu, Zhiqing Sun, Shanda Li, Sean Welleck, and Yiming Yang. Inference scaling laws: An empirical analysis of compute-optimal inference for LLM problem-solving. In The Thirteenth International Conference on Learning Representations, 2025a. URL https://openreview.net/forum?id=VNckp7JEHn." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 133, + 506, + 167 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 133, + 506, + 167 + ], + "spans": [ + { + "bbox": [ + 105, + 133, + 506, + 167 + ], + "type": "text", + "content": "Yuyang Wu, Yifei Wang, Tianqi Du, Stefanie Jegelka, and Yisen Wang. When more is less: Understanding chain-of-thought length in llms, 2025b. URL https://arxiv.org/abs/2502.07266." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 174, + 506, + 232 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 174, + 506, + 232 + ], + "spans": [ + { + "bbox": [ + 105, + 174, + 506, + 232 + ], + "type": "text", + "content": "Violet Xiang, Charlie Snell, Kanishk Gandhi, Alon Albalak, Anikait Singh, Chase Blagden, Duy Phung, Rafael Rafailov, nathan lile, Dakota Mahan, Louis Castricato, Jan-Philipp Franken, Nick Haber, and Chelsea Finn. 
Towards system 2 reasoning in llms: Learning how to think with meta chain-of-thought. ArXiv, abs/2501.04682, 2025. URL https://api-semanticscholar.org/CorpusID:275357763." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 236, + 504, + 261 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 236, + 504, + 261 + ], + "spans": [ + { + "bbox": [ + 105, + 236, + 504, + 261 + ], + "type": "text", + "content": "Silei Xu, Wenhao Xie, Lingxiao Zhao, and Pengcheng He. Chain of draft: Thinking faster by writing less, 2025. URL https://arxiv.org/abs/2502.18600." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 267, + 506, + 300 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 267, + 506, + 300 + ], + "spans": [ + { + "bbox": [ + 105, + 267, + 506, + 300 + ], + "type": "text", + "content": "An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024a." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 308, + 506, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 308, + 506, + 342 + ], + "spans": [ + { + "bbox": [ + 105, + 308, + 506, + 342 + ], + "type": "text", + "content": "An Yang, Beichen Zhang, Binyuan Hui, Bofei Gao, Bowen Yu, Chengpeng Li, Dayiheng Liu, Jianhong Tu, Jingren Zhou, Junyang Lin, et al. Qwen2. 5-math technical report: Toward mathematical expert model via self-improvement. arXiv preprint arXiv:2409.12122, 2024b." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 349, + 504, + 384 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 349, + 504, + 384 + ], + "spans": [ + { + "bbox": [ + 105, + 349, + 504, + 384 + ], + "type": "text", + "content": "Weihao Zeng, Yuzhen Huang, Wei Liu, Keqing He, Qian Liu, Zejun Ma, and Junxian He. 
7b model and 8k examples: Emerging reasoning with reinforcement learning is both effective and efficient. https://hkust-nlp.notion.site/simplerl-reason, 2025a. Notion Blog." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 389, + 506, + 423 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 389, + 506, + 423 + ], + "spans": [ + { + "bbox": [ + 105, + 389, + 506, + 423 + ], + "type": "text", + "content": "Zhiyuan Zeng, Qinyuan Cheng, Zhangyue Yin, Yunhua Zhou, and Xipeng Qiu. Revisiting the test-time scaling of o1-like models: Do they truly possess test-time scaling capabilities?, 2025b. URL https://arxiv.org/abs/2502.12215." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 430, + 506, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 430, + 506, + 464 + ], + "spans": [ + { + "bbox": [ + 105, + 430, + 506, + 464 + ], + "type": "text", + "content": "Dan Zhang, Sining Zhoubian, Ziniu Hu, Yisong Yue, Yuxiao Dong, and Jie Tang. Rest-mcts*: Llm self-training via process reward guided tree search. arXiv preprint arXiv:2406.03816, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 471, + 504, + 506 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 471, + 504, + 506 + ], + "spans": [ + { + "bbox": [ + 105, + 471, + 504, + 506 + ], + "type": "text", + "content": "Jintian Zhang, Yuqi Zhu, Mengshu Sun, Yujie Luo, Shuofei Qiao, Lun Du, Da Zheng, Huajun Chen, and Ningyu Zhang. Lighthinker: Thinking step-by-step compression, 2025a. URL https://arxiv.org/abs/2502.15589." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 512, + 504, + 546 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 512, + 504, + 546 + ], + "spans": [ + { + "bbox": [ + 105, + 512, + 504, + 546 + ], + "type": "text", + "content": "Zhenru Zhang, Chujie Zheng, Yangzhen Wu, Beichen Zhang, Runji Lin, Bowen Yu, Dayiheng Liu, Jingren Zhou, and Junyang Lin. The lessons of developing process reward models in mathematical reasoning, 2025b. URL https://arxiv.org/abs/2501.07301." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 552, + 504, + 587 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 552, + 504, + 587 + ], + "spans": [ + { + "bbox": [ + 105, + 552, + 504, + 587 + ], + "type": "text", + "content": "Wanjun Zhong, Ruixiang Cui, Yiduo Guo, Yaobo Liang, Shuai Lu, Yanlin Wang, Amin Saied, Weizhu Chen, and Nan Duan. Agieval: A human-centric benchmark for evaluating foundation models. arXiv preprint arXiv:2304.06364, 2023." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "type": "text", + "content": "Preprint. Under review." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 79, + 220, + 102 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 79, + 220, + 102 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 220, + 102 + ], + "type": "text", + "content": "Appendices" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 129, + 505, + 249 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 106, + 129, + 505, + 143 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 129, + 505, + 143 + ], + "spans": [ + { + "bbox": [ + 106, + 129, + 505, + 143 + ], + "type": "text", + "content": "A Retro-Search Algorithm 15" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 157, + 505, + 169 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 157, + 505, + 169 + ], + "spans": [ + { + "bbox": [ + 106, + 157, + 505, + 169 + ], + "type": "text", + "content": "B Data Generation Details 15" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 184, + 505, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 184, + 505, + 196 + ], + "spans": [ + { + "bbox": [ + 106, + 184, + 505, + 196 + ], + "type": "text", + "content": "C Training Details 15" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 211, + 505, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 211, + 505, + 222 + ], + "spans": [ + { + "bbox": [ + 106, + 211, + 505, + 222 + ], + "type": "text", + "content": "DBaselines Details 15" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 238, + 
505, + 249 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 238, + 505, + 249 + ], + "spans": [ + { + "bbox": [ + 106, + 238, + 505, + 249 + ], + "type": "text", + "content": "E Per-dataset Evaluation Results 17" + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "type": "text", + "content": "Preprint. Under review." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 80, + 260, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 80, + 260, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 80, + 260, + 95 + ], + "type": "text", + "content": "A Retro-Search Algorithm" + } + ] + } + ], + "index": 1 + }, + { + "type": "code", + "bbox": [ + 105, + 129, + 507, + 282 + ], + "blocks": [ + { + "bbox": [ + 106, + 114, + 221, + 126 + ], + "lines": [ + { + "bbox": [ + 106, + 114, + 221, + 126 + ], + "spans": [ + { + "bbox": [ + 106, + 114, + 221, + 126 + ], + "type": "text", + "content": "Algorithm 1 Retro-Search" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "lines": [ + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "spans": [ + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "type": "text", + "content": "Require: Question " + }, + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": 
[ + 105, + 129, + 507, + 282 + ], + "type": "text", + "content": ", initial reasoning trajectory " + }, + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "type": "inline_equation", + "content": "T = \\{\\{s_1^1,s_2^1,\\dots ,s_{k_1}^1\\} ,\\{s_1^2,s_2^2,\\dots ,s_{k_2}^2\\} ,\\dots ,a\\}" + }, + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "type": "text", + "content": ", revision model " + }, + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathcal{M}}" + }, + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "type": "text", + "content": " discount factor " + }, + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "type": "text", + "content": ", ground truth answer " + }, + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "type": "inline_equation", + "content": "a^\\star" + }, + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "type": "text", + "content": ", and reward function " + }, + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "type": "inline_equation", + "content": "R(\\cdot ,\\cdot)" + }, + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "type": "text", + "content": ". \nEnsure: Revised trajectory " + }, + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "type": "inline_equation", + "content": "\\tilde{T}" + }, + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "type": "text", + "content": " that yields answer " + }, + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "type": "inline_equation", + "content": "a^{*}" + }, + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "type": "text", + "content": " with fewer steps. 
\n1: Initialize " + }, + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "type": "inline_equation", + "content": "\\tilde{T}\\gets T" + }, + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "type": "text", + "content": " \n2: Initialize " + }, + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "type": "inline_equation", + "content": "s^{\\tau}\\gets s^{1}" + }, + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "type": "inline_equation", + "content": "\\tilde{T}" + }, + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "type": "text", + "content": " \n3: while " + }, + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "type": "inline_equation", + "content": "s^{\\tau}" + }, + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "type": "text", + "content": " is not the last thought in " + }, + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "type": "inline_equation", + "content": "\\tilde{T}" + }, + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "type": "text", + "content": " do \n4: " + }, + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "type": "inline_equation", + "content": "\\{s_{k + 1}^{\\tau},\\ldots ,a\\} \\sim \\widehat{\\mathcal{M}}\\left(s^{1},\\ldots ,\\{s_{1}^{\\tau},s_{2}^{\\tau},\\ldots ,s_{k}^{\\tau}\\}\\right)" + }, + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "type": "text", + "content": " Rollout: transition keywords prohibited in " + }, + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "type": "inline_equation", + "content": "s_{k + 1}^{\\tau}" + }, + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "type": "text", + "content": " \n5: " + }, + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "type": "inline_equation", + "content": "V(s_{k + 1}^{\\tau},a^{\\star})\\gets \\gamma^{N - i}R(a(s_{k + 1}^{\\tau}),a^{\\star})" + }, + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "type": "text", + "content": " Compute value of the new step " + }, + { + "bbox": [ + 
105, + 129, + 507, + 282 + ], + "type": "inline_equation", + "content": "s_{k + 1}^{\\tau}" + }, + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "type": "text", + "content": " (i.e., " + }, + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "type": "text", + "content": "-th step) \n6: if " + }, + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "type": "inline_equation", + "content": "V(s_{k + 1}^{\\tau}) > V(s_{1}^{\\tau +1})" + }, + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "type": "text", + "content": " then If the value of the new step is higher than the existing one \n7: " + }, + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "type": "inline_equation", + "content": "\\tilde{T}\\gets \\left\\{s^{1},s^{2},\\dots ,\\{s_{1}^{\\tau},s_{2}^{\\tau},\\dots ,s_{k}^{\\tau}\\} \\{s_{k + 1}^{\\tau},\\dots ,a\\} \\right\\} \\triangleright" + }, + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "type": "text", + "content": " Update the trajectory with the new rollout \n8: " + }, + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "type": "inline_equation", + "content": "s^{\\tau}\\gets" + }, + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "type": "text", + "content": " the next thought in " + }, + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "type": "inline_equation", + "content": "\\tilde{T}" + }, + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "type": "text", + "content": " \n9: Return " + }, + { + "bbox": [ + 105, + 129, + 507, + 282 + ], + "type": "inline_equation", + "content": "\\tilde{T}" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "code_body" + } + ], + "index": 3, + "sub_type": "algorithm" + }, + { + "bbox": [ + 105, + 304, + 261, + 316 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 304, + 261, + 316 + ], + "spans": [ + { + "bbox": [ + 105, + 304, + 261, + 316 + ], + "type": "text", + "content": "B Data Generation 
Details" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 328, + 506, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 328, + 506, + 396 + ], + "spans": [ + { + "bbox": [ + 104, + 328, + 506, + 396 + ], + "type": "text", + "content": "When constructing Self-Retro-R1-7B, we use the default version of Retro-Search, whereas for W2S-Retro-R1-32B, we use Retro-Search with partial revision. When constructing Self-Retro-R1-7B, we generate responses from R1-distill Qwen2.5-7B and filter for those with correct solutions as the base data for Retro-Search to revise. For W2S-Retro-R1-32B, we directly use OpenThought data as the base, since it contains only correct responses from the DeepSeek-R1 671B model." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 400, + 506, + 435 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 400, + 506, + 435 + ], + "spans": [ + { + "bbox": [ + 104, + 400, + 506, + 435 + ], + "type": "text", + "content": "The transition keywords we use to segment thoughts within a reasoning trace are: 'But', 'Wait', 'Alternatively', 'However', 'Hmm', 'Hmmm', 'Not sure', 'Going back', 'Backtrack', 'Trace back', and 'Another'." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 439, + 506, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 439, + 506, + 496 + ], + "spans": [ + { + "bbox": [ + 104, + 439, + 506, + 496 + ], + "type": "text", + "content": "For data generation during Retro-Search, we use top-p sampling with " + }, + { + "bbox": [ + 104, + 439, + 506, + 496 + ], + "type": "inline_equation", + "content": "p = 0.98" + }, + { + "bbox": [ + 104, + 439, + 506, + 496 + ], + "type": "text", + "content": " and temperature " + }, + { + "bbox": [ + 104, + 439, + 506, + 496 + ], + "type": "inline_equation", + "content": "T = 1.0" + }, + { + "bbox": [ + 104, + 439, + 506, + 496 + ], + "type": "text", + "content": ". 
We also tried using temperature " + }, + { + "bbox": [ + 104, + 439, + 506, + 496 + ], + "type": "inline_equation", + "content": "T = 0.6" + }, + { + "bbox": [ + 104, + 439, + 506, + 496 + ], + "type": "text", + "content": " and found that data generated with a higher temperature tends to produce a better student model, likely due to the increased diversity in the training data induced by higher-temperature sampling. We set the maximum generation length to be 16384." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 510, + 218, + 525 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 510, + 218, + 525 + ], + "spans": [ + { + "bbox": [ + 105, + 510, + 218, + 525 + ], + "type": "text", + "content": "C Training Details" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 536, + 506, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 536, + 506, + 615 + ], + "spans": [ + { + "bbox": [ + 104, + 536, + 506, + 615 + ], + "type": "text", + "content": "We perform supervised fine-tuning of models using HuggingFace TRL (von Werra et al., 2020). For all fine-tuning experiments, we used batch size of 128, five training epochs, and cosine learning rate scheduler with warmup rate of 0.05. We used Adam optimizer with weight decay of 1e-4, with beta1=0.9 and beta2=0.95. We did not conduct hyperparameter search, so there is a potential of finding better hyperparameters. With 32 H100 GPUs, fine-tuning 7B model with 40K data took around 90 minutes, and fine-tuning 32B model took 10 hours to finish." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 629, + 224, + 642 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 629, + 224, + 642 + ], + "spans": [ + { + "bbox": [ + 105, + 629, + 224, + 642 + ], + "type": "text", + "content": "D Baselines Details" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 654, + 506, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 654, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 654, + 506, + 734 + ], + "type": "text", + "content": "For 7B models, we evaluate six open-weight models as baselines: instruction-tuned models including Qwen2.5-7B-Inst (Yang et al., 2024a), Qwen2.5-Math-7B, and Qwen2.5-Math-7B-Inst (Yang et al., 2024b), as well as reasoning models including OpenR1-Qwen-7B (HuggingFace, 2025), OpenThinker-7B (Team, 2025), and R1-distill Qwen2.5-7B (DeepSeek-AI et al., 2025). These reasoning models are fine-tuned using responses from DeepSeek-R1 671B (DeepSeek-AI et al., 2025). Specifically, the OpenR1-Qwen-7B model is trained on 220K math examples, with questions sourced from NuminaMath, while OpenThinker-7B" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 212, + 38 + ], + "type": "text", + "content": "Preprint. Under review." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 211, + 82, + 397, + 525 + ], + "blocks": [ + { + "bbox": [ + 211, + 82, + 397, + 525 + ], + "lines": [ + { + "bbox": [ + 211, + 82, + 397, + 525 + ], + "spans": [ + { + "bbox": [ + 211, + 82, + 397, + 525 + ], + "type": "table", + "html": "
06'6L0F'60S'606'8L05'9900'0010E'8902'9S(2L)1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E -1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E 1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E+1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E=1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E--1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E---1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E—1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E----1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-----1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-------1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E------1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E ----1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E-1R2E
", + "image_path": "b6d9d97c07316a940460ececeb0a6ecb9c18d34757fe240d3c3dd28e05d0bb75.jpg" + } + ] + } + ], + "index": 1, + "angle": 270, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 141, + 535, + 469, + 547 + ], + "lines": [ + { + "bbox": [ + 141, + 535, + 469, + 547 + ], + "spans": [ + { + "bbox": [ + 141, + 535, + 469, + 547 + ], + "type": "text", + "content": "Table 5: Per-dataset evaluation results (accuracies) using greedy decoding." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 106, + 616, + 504, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 616, + 504, + 639 + ], + "spans": [ + { + "bbox": [ + 106, + 616, + 504, + 639 + ], + "type": "text", + "content": "is trained on the OpenThoughts-114K dataset, which includes math, science, and coding problems." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 643, + 505, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 643, + 505, + 732 + ], + "spans": [ + { + "bbox": [ + 106, + 643, + 505, + 732 + ], + "type": "text", + "content": "For 32B models, we evaluate five open-weight models: instruction-tuned Qwen2.5-32B-Inst (Yang et al., 2024a), as well as reasoning models such as OpenThinker-32B (Team, 2025), QwQ-32B-Preview (Qwen Team, 2025), Sky-T1-32B-Preview (NovaSky, 2025), and R1-distill Qwen2.5-32B (DeepSeek-AI et al., 2025). Both OpenThinker-32B and R1-distill Qwen2.5-32B are fine-tuned using responses generated by DeepSeek-R1 671B, with OpenThinker-32B utilizing the OpenThoughts-114K dataset. Sky-T1-32B-Preview is trained on a 17K dataset consisting of math and coding problems, with responses generated using QwQ-32B-Preview. The training details of the other models are not publicly disclosed." 
+ } + ] + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "type": "text", + "content": "Preprint. Under review." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 752, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 752, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 752, + 310, + 760 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 211, + 82, + 397, + 526 + ], + "blocks": [ + { + "bbox": [ + 211, + 82, + 397, + 526 + ], + "lines": [ + { + "bbox": [ + 211, + 82, + 397, + 526 + ], + "spans": [ + { + "bbox": [ + 211, + 82, + 397, + 526 + ], + "type": "table", + "html": "
1609€10€€2€9€1€€2€2€81€6€88€11€52€11€(€2€) €2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-
02€9€289€€31€199€€218€082€862€0€29€1€(€2€) €2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€
608981€06691€087€51€958€2€095€1€€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€
€204€68€601€€2€€€100€€86€€27€1€11€1€€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€
0088€100€$991€02€6€€111€6898€969€1€5299€1€€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€ -
81€6€26€€991€€298€€521€6628€5289€0718€1€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2 €
96€Z698€15€€10€€598€8895€7189€9969€€2-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€- €
£268€208€9€956€€2101€5692€1888€11400€€2-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€- €
699E1€2898€02081€€8091€811€11866€1828€€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€ 1
003€1021€8€1297€2€7691€05291€6688€20749€2€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2€-€2
05011020€7€397€3€7891€19801€5211288812€2-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€ -
003E187€350€500S8781€5271F0761F8662F€2-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€ 1
9981029905€9981€€561F66991F0029F7269F€2-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€- €1
1008826€72E1986€7296F8108F1699F7299F€2-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€- €1
9958982€87F957F7801F8802F0025F9661F€2-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€-€- €1
97E2001178F972F679F795F028F978FM1-1.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.1
9919187F97F987F892F969F1086F1160FM1-1.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.1
52606F87F18S1601F869F928F998FM1-1.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.44
£24E1108E86E1708111E602E1889F2719F2M1-1.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.44
00901678F09F572S870E16198024818799FM1-1.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.5
899F6933F99F729S8791F57201F882F89691FM1-1.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.6
78602971E13E6001F5711F98F1F5051FM1-1.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.441
781163669S72015711F0001F92F1F9751FM1-1.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.5
586810185Z979F1691F7291F506F718FM1-1.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4.4
", + "image_path": "84a2d240fa298c6ead41330d5f423055ce725c87cb6fae8cf1f688fc5cd0129a.jpg" + } + ] + } + ], + "index": 1, + "angle": 270, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 115, + 536, + 495, + 548 + ], + "lines": [ + { + "bbox": [ + 115, + 536, + 495, + 548 + ], + "spans": [ + { + "bbox": [ + 115, + 536, + 495, + 548 + ], + "type": "text", + "content": "Table 6: Per-dataset evaluation results (response token length) using greedy decoding." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 106, + 567, + 292, + 579 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 567, + 292, + 579 + ], + "spans": [ + { + "bbox": [ + 106, + 567, + 292, + 579 + ], + "type": "text", + "content": "E Per-dataset Evaluation Results" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 592, + 504, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 592, + 504, + 647 + ], + "spans": [ + { + "bbox": [ + 106, + 592, + 504, + 647 + ], + "type": "text", + "content": "In Tables 5 and 6, we share the per-dataset evaluation results using greedy decoding, and in Tables 7 and 8, we share results using temperature sampling with top-p=0.95 and T=0.6. We use the max response length of 32,768 tokens for all experiments. For temperature sampling, we use random five seeds and aggregate the results, and we further report the confidence interval to share the deviation of the metrics." + } + ] + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "type": "text", + "content": "Preprint. Under review." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 211, + 130, + 397, + 650 + ], + "blocks": [ + { + "bbox": [ + 211, + 130, + 397, + 650 + ], + "lines": [ + { + "bbox": [ + 211, + 130, + 397, + 650 + ], + "spans": [ + { + "bbox": [ + 211, + 130, + 397, + 650 + ], + "type": "table", + "html": "
20'1+30'1852'0+0'1601'0+19'1662'0+91'1854'0+05'9972'1+05'2605'2+20'0405'2+20'09(92)18
16'0+18'682'0+26'3616'0+00'3852'0+10'2816'0+16'2968'0+00'1620'2+99'1464'2+16'29(92)18
16'0+57'2216'0+80'3661'0+09'2652'0+05'3872'0+05'2985'1+05'2616'1+16'2911'2+00'05(92)18
52'1+19'212'0+80'3612'0+14'3882'0+12'1812'0+05'1605'2+86'6612'2+00'0511'2+00'05(92)18
68'0+60'2212'0+95'1652'0+95'6852'0+95'1882'0+95'0985'1+05'0622'2+00'8522'2+00'05(92)18
11'1+89'1212'0+96'1612'0+00'2852'0+91'0812'0+06'0905'2+00'2661'2+20'9526'2+20'14(92)18
12'1+19'052'0+09'0612'0+92'1858'0+12'1482'0+05'6905'2+05'6885'1+16'1994'1+00'44(92)18
80'1+27'1292'0+29'0622'0+27'5852'0+90'2255'0+88'6922'1+00'0605'2+00'0912'2+00'85(92)18
50'1+62'2515'0+02'1805'0+29'2894'0+91'2215'0+05'2900'2+99'2794'1+89'2282'2-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16(92)18
16'0+18'816'0+09'2216'0+09'2262'0+18'1416'0+96'6725'1+00'1488'2+00'0612'2+00'97(92)18
16'0+18'895'0+95'6261'0+02'9809'0+80'2905'0+25'1488'2+00'1900'0+02'9720'2+16'25(92)18
11'1+29'1512'0+01'9211'0+05'5852'0+28'2912'0+85'2799'2+99'9299'2+99'9286'1+00'92(92)18
01'1+19'812'0+80'1811'0+89'9882'0+98'2905'0+26'9721'7+05'6512'2+89'7711'2+16'25(92)18
88'0+16'812'0+91'8602'0+97'8882'0+06'0885'0+19'1918'0+05'1620'2+99'1982'1+16'19(92)18
81'1+19'2294'0+09'2602'0+87'2694'0+06'8212'0+88'3985'0+05'6622'2+99'1982'2-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-16-(92)18
99'0+98'7972'0+02'8872'0+98'6655'0+02'9472'0+82'3968'0+05'6412'2+22'2282'2-99'77(92)18
81'1+97'8911'0+17'0602'0+88'5682'0+07'0872'0+98'8511'1+05'7868'1+19'9585'2+20'95(92)18
16'0+06'9552'0+26'1811'0+17'5661'0+09'2772'0+86'9785'0+05'6912'2+20'1162'1+20'51(92)18
70'1+50'6552'0+27'5855'0+02'0844'0+26'2782'0+85'6740'2+05'8966'1+86'2702'1+00'87(92)18
06'0+10'1262'0+26'1672'0+27'9872'0+00'5215'0+08'2918'0+05'0622'2+22'9522'2+00'07(92)18
60'1+89'1252'0+80'2681'0+21'5681'0+95'1889'0+89'6502'2+00'1882'2+99'8782'2-99'07(92)18
80'1+99'7572'0+09'2872'0+95'6611'0+80'9981'0+27'6519'2+05'8520'2+99'1182'2-99'07(92)18
98'1+00'6552'0+27'9862'0+87'1462'1+97'5782'0+19'2757'1+05'1920'2+22'9156'0+89'9(92)18
16'0+28'4792'0+27'4782'0+95'8882'0+06'8582'0+05'0572'1+89'2172'1+89'2172'1+89'07(92)18
", + "image_path": "6c455391b2d122928abedb18a5c13288000920131de32b0aab66a1aed24b147c.jpg" + } + ] + } + ], + "index": 1, + "angle": 270, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 658, + 504, + 682 + ], + "lines": [ + { + "bbox": [ + 105, + 658, + 504, + 682 + ], + "spans": [ + { + "bbox": [ + 105, + 658, + 504, + 682 + ], + "type": "text", + "content": "Table 7: Per-dataset evaluation results (accuracies) using temperature sampling (t=0.6 and top-p=0.95). The numbers after " + }, + { + "bbox": [ + 105, + 658, + 504, + 682 + ], + "type": "inline_equation", + "content": "\\pm" + }, + { + "bbox": [ + 105, + 658, + 504, + 682 + ], + "type": "text", + "content": " means the " + }, + { + "bbox": [ + 105, + 658, + 504, + 682 + ], + "type": "inline_equation", + "content": "95\\%" + }, + { + "bbox": [ + 105, + 658, + 504, + 682 + ], + "type": "text", + "content": " confidence interval." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "type": "text", + "content": "Preprint. Under review." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 211, + 136, + 397, + 644 + ], + "blocks": [ + { + "bbox": [ + 211, + 136, + 397, + 644 + ], + "lines": [ + { + "bbox": [ + 211, + 136, + 397, + 644 + ], + "spans": [ + { + "bbox": [ + 211, + 136, + 397, + 644 + ], + "type": "table", + "html": "
01I + 10CS11 + 2062εI + 506εE + 28828ε + 1849εII + 3944εE + 820662I + 0601(92I) 2εE-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R-3R -
81I + 191912 + 09FCεI + 59118Z + 28ECΔF + 274212I + 278528Z + 208689Z + 2021(92I) 12I - 9I + 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E - 22E
90I + 276912 + 591E9 + 27612 + 27E97 + 010288 + 2855661 + 998629E + 266182E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 22E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 30E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 28E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 20E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 31E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 21E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 32E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 10E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 11E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 15E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 24E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 25E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 33E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 23E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 34E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 
35E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 16E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 27E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 26E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 37E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 36E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 18E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 38E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 17E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 14E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 13E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 19E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 29E - 3101I + 107C101I + 107C101I + 107C101I + 107C101I + 107C101I + 107C101I + 107C101I + 107C
00I + 00C11 + 00C6 + 00I10 + 00C8 + 00C10 + 00C8 + 00C10 + 00C8 + 00C8 + 00C
00T + 00T11 + 00T6 + 00T10 + 00T8 + 00T10 + 00T8 + 00T10 + 00T8 + 00T8 + 00T
00Z + 00Z11 + 00Z6 + 00Z10 + 00Z8 + 00Z10 + 00Z8 + 00Z10 + 00Z8 + 00Z8 + 00Z
00Z + 00Z11 + 00Z6 + 00Z10 + 00Z8 + 00Z10 + 00Z8 + 00Z10 + 00Z8 + 00Z
00Z + 00Z11 + 00Z6 + 00Z10 + 00Z8 + 00Z10 + 00Z8 + 00Z10 + 00Z8 + 00Z
00Z + 00Z11 + 10Z6 + 00Z10 + 00Z8 + 00Z10 + 00Z8 + 00Z10 + 00Z8 + 00Z
00Z + 00Z11 + 10Z6 + 00Z10 + 00Z8 + 00Z10 +00Z8 + 00Z10 +00Z8 + 00Z
00Z + 00Z11 + 10Z6 + 00Z10 + 00Z8 + 00Z10 +00Z8 + 00Z10 +00Z8 + 00Z
00Z + 00Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z
00Z + 00Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z
00Z + 00Z11 + 10Z11 = 10Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z
00Z + 00Z11 + 10Z11 + 10Z11 + 10Z11 + 10Z11 +10Z11 +10Z11 +10Z11 +10Z
00Z + 00Z11 + 10Z11 + 10Z11 +10Z11 +10Z11 +10Z11 +10Z11 +10Z11 +10Z
00Z + 00Z11 + 10Z11 +10Z11 +10Z11 +10Z11 +10Z11 +10Z11 +10Z11 +10Z
00Z + 00Z11 + 10Z11 +10Z11 +10Z11 +10Z11 +10Z11 +10Z
00Z + 00Z11 +10Z11 +10Z11 +10Z11 +10Z11 +10Z11 +10Z
", + "image_path": "6909fe2479fd95798268964aca135d4c6a47fcb9c6917ae729eaa3a308147b7d.jpg" + } + ] + } + ], + "index": 1, + "angle": 270, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 653, + 504, + 676 + ], + "lines": [ + { + "bbox": [ + 105, + 653, + 504, + 676 + ], + "spans": [ + { + "bbox": [ + 105, + 653, + 504, + 676 + ], + "type": "text", + "content": "Table 8: Per-dataset evaluation results (model response token length) using temperature sampling " + }, + { + "bbox": [ + 105, + 653, + 504, + 676 + ], + "type": "inline_equation", + "content": "(t = 0.6" + }, + { + "bbox": [ + 105, + 653, + 504, + 676 + ], + "type": "text", + "content": " and top- " + }, + { + "bbox": [ + 105, + 653, + 504, + 676 + ], + "type": "inline_equation", + "content": "p = 0.95)" + }, + { + "bbox": [ + 105, + 653, + 504, + 676 + ], + "type": "text", + "content": ". The numbers after " + }, + { + "bbox": [ + 105, + 653, + 504, + 676 + ], + "type": "inline_equation", + "content": "\\pm" + }, + { + "bbox": [ + 105, + 653, + 504, + 676 + ], + "type": "text", + "content": " means the " + }, + { + "bbox": [ + 105, + 653, + 504, + 676 + ], + "type": "inline_equation", + "content": "95\\%" + }, + { + "bbox": [ + 105, + 653, + 504, + 676 + ], + "type": "text", + "content": " confidence interval." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 212, + 38 + ], + "type": "text", + "content": "Preprint. Under review." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_04xxx/2504.04385/0a01c7e3-29d7-49b8-ab11-a052914cd3dc_content_list.json b/data/2025/2504_04xxx/2504.04385/0a01c7e3-29d7-49b8-ab11-a052914cd3dc_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..7060e88eaff4e843e6c0fe515c577be3a6ba9dab --- /dev/null +++ b/data/2025/2504_04xxx/2504.04385/0a01c7e3-29d7-49b8-ab11-a052914cd3dc_content_list.json @@ -0,0 +1,811 @@ +[ + { + "type": "text", + "text": "Pre-trained Language Models and Few-shot Learning for Medical Entity Extraction", + "text_level": 1, + "bbox": [ + 78, + 68, + 919, + 137 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Xiaokai Wang Santa Clara University Santa Clara, USA", + "bbox": [ + 133, + 143, + 272, + 190 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Guiran Liu \nSan Francisco State University \nSan Francisco, USA", + "bbox": [ + 405, + 143, + 591, + 188 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Binrong Zhu \nSan Francisco State University \nSan Francisco, USA", + "bbox": [ + 700, + 143, + 885, + 188 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jacky He Cornell University New York, USA", + "bbox": [ + 145, + 222, + 259, + 265 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Hongye Zheng \nThe Chinese University of Hong Kong \nHong Kong, China", + "bbox": [ + 383, + 222, + 614, + 267 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Hanlu Zhang* \nStevens Institute of Technology \nHoboken, USA", + "bbox": [ + 700, + 
222, + 888, + 266 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract-This study proposes a medical entity extraction method based on Transformer to enhance the information extraction capability of medical literature. Considering the professionalism and complexity of medical texts, we compare the performance of different pre-trained language models (BERT, BioBERT, PubMedBERT, ClinicalBERT) in medical entity extraction tasks. Experimental results show that PubMedBERT achieves the best performance $(F1\\text{-score} = 88.8\\%)$ , indicating that a language model pre-trained on biomedical literature is more effective in the medical domain. In addition, we analyze the impact of different entity extraction methods (CRF, Span-based, Seq2Seq) and find that the Span-based approach performs best in medical entity extraction tasks $(F1\\text{-score} = 88.6\\%)$ . It demonstrates superior accuracy in identifying entity boundaries. In low-resource scenarios, we further explore the application of Few-shot Learning in medical entity extraction. Experimental results show that even with only 10-shot training samples, the model achieves an F1-score of $79.1\\%$ , verifying the effectiveness of Few-shot Learning under limited data conditions. This study confirms that the combination of pre-trained language models and Few-shot Learning can enhance the accuracy of medical entity extraction. Future research can integrate knowledge graphs and active learning strategies to improve the model's generalization and stability, providing a more effective solution for medical NLP research.", + "bbox": [ + 68, + 330, + 485, + 643 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Keywords- Natural Language Processing, medical named entity recognition, pre-trained language model, Few-shot Learning, information extraction, deep learning", + "bbox": [ + 70, + 656, + 483, + 696 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "I. 
INTRODUCTION", + "text_level": 1, + "bbox": [ + 215, + 705, + 352, + 719 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Medical entity extraction is a key application of Natural Language Processing (NLP) in healthcare. With the rapid growth of biomedical research, the volume of medical literature is increasing exponentially. Each day, thousands of papers are added to databases such as PubMed, Medline, and Embase. Researchers must extract valuable information from this vast amount of data to support medical research, clinical decision-making, and drug development. However, traditional manual literature screening and analysis are time-consuming and labor-intensive, making it difficult to meet the demand for efficient information retrieval in modern medical research [1]. Entity extraction technology provides strong support for medical data mining by automatically identifying structured information", + "bbox": [ + 70, + 724, + 485, + 905 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "from unstructured text, such as disease-drug relationships, gene-phenotype associations, and clinical treatment plans [2]. In recent years, deep learning, particularly the Transformer architecture, has significantly advanced NLP, improving the performance of medical entity extraction tasks.", + "bbox": [ + 511, + 330, + 926, + 400 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Traditional medical entity extraction methods mainly rely on rule-based techniques and classical machine learning models. Rule-based approaches analyze text using predefined regular expressions, knowledge base matching, and expert-defined grammar rules. While these methods achieve high accuracy in specific tasks, their generalization ability is limited, making them ineffective for handling complex syntactic structures and diverse language expressions in medical literature. However, medical texts contain highly specialized terminology and hierarchical semantic structures. 
Traditional models often struggle with feature selection in large-scale literature and fail to capture deep contextual information. Efficient and accurate medical entity extraction has therefore become a central research challenge in medical NLP [3].", + "bbox": [ + 511, + 407, + 926, + 599 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The introduction of Transformer-based models offers a new solution for medical entity extraction. Pre-trained language models such as BERT, RoBERTa, BioBERT, and PubMedBERT leverage self-attention mechanisms to learn long-range dependencies and capture deep semantic representations in medical texts. Beyond NLP, Transformer-based and hybrid architectures have also demonstrated strong performance in other domains. In computer vision and medical imaging, they have been applied to tasks such as 3D spine segmentation [4], skin disease detection [5], and object detection in clinical scans [6], showing enhanced accuracy through attention mechanisms and multi-scale fusion. In the field of multimodal learning, Transformer-CNN architectures have enabled more effective image-text classification through cross-modal feature fusion [7]. Additionally, in human-computer interaction, Transformer-related models have been used for optimizing interface design [8] and improving user experience through graph-based learning and dynamic adaptation [9]. These successes highlight the adaptability and effectiveness of Transformer models across diverse application areas [10-11]. Transformer-based models, such as BioBERT,", + "bbox": [ + 511, + 607, + 926, + 897 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "have significantly improved medical entity recognition, relationship extraction, and text classification tasks. 
Finetuning and transfer learning enhance model adaptability, enabling better understanding of complex medical language structures compared to traditional methods [12].", + "bbox": [ + 70, + 68, + 486, + 137 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "This study proposes a Transformer-based framework to automate and enhance accuracy in medical entity extraction. Effective extraction supports rapid identification of medical evidence, aids clinical decision-making, and accelerates biomedical discoveries. Transformer models also enable multitask learning and generative applications, including summarization and medical reasoning, thereby broadening their utility in medical NLP research [13]. As medical data expands, Transformer models will become increasingly important in precision medicine, diagnostics, and education. However, privacy and ethical issues require ongoing attention. Integrating multi-modal medical data and knowledge graphs with Transformer models may further enhance interpretability and scalability. This research aims to advance medical NLP by improving methods for processing extensive medical literature.", + "bbox": [ + 70, + 143, + 486, + 351 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "II. RELATED WORK", + "text_level": 1, + "bbox": [ + 210, + 362, + 354, + 375 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The development of medical entity extraction has benefited greatly from advances in Transformer-based models and deep learning architectures. Notably, recent works have leveraged hierarchical and multimodal Transformer models to enhance named entity recognition (NER) performance. Tong et al. [14] proposed a semantic fusion framework using hierarchical Transformers, which enables the integration of diverse contextual representations. 
Similarly, prompt-based strategies have been introduced to optimize large language models for specialized entity extraction tasks, achieving better adaptability to domain-specific terminology and limited data scenarios [15].", + "bbox": [ + 70, + 380, + 486, + 532 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Several studies have applied Transformer mechanisms within hybrid or multi-scale frameworks to strengthen deep representation learning. For instance, Hao et al. [16] designed a", + "bbox": [ + 70, + 539, + 486, + 582 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "hybrid convolutional and Transformer-based architecture that captures both local and global dependencies in sequential data, improving the generalization of deep learning models. Multiscale Transformer models further enhance performance by capturing hierarchical information, enabling more nuanced feature extraction [17]. Attention mechanisms, embedded within optimized neural architectures, also contribute to improved semantic segmentation and context-aware classification [18].", + "bbox": [ + 513, + 68, + 928, + 191 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Beyond Transformer-based models, other neural network enhancements have significantly contributed to the advancement of deep feature extraction. Graph neural frameworks, particularly those that utilize self-supervised learning techniques, have been proposed to enhance the representation capabilities of complex data environments [19]. Additionally, dynamic rule mining mechanisms based on Transformer variants have been developed to facilitate adaptive pattern recognition in unstructured data [20].", + "bbox": [ + 513, + 198, + 928, + 324 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Sequential modeling techniques also contribute valuable insights. 
LSTM-based prediction models have demonstrated robustness in handling time-dependent data and adaptive scheduling, providing lessons in efficient learning from limited sequences [21]. Combined with pattern discovery methods, such frameworks support enhanced spatiotemporal learning [22]. Additionally, deep neural network architectures have been effectively used to develop robust predictive systems, emphasizing the importance of carefully designed learning structures in handling heterogeneous and high-dimensional data [23].", + "bbox": [ + 513, + 330, + 928, + 482 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "III. METHOD", + "text_level": 1, + "bbox": [ + 674, + 493, + 771, + 506 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "This study proposes a medical literature information extraction model based on Transformer structure, which aims to automatically extract key entities and their relationships from medical texts. The self-attention mechanism architecture in Transformer is shown in Figure 1.", + "bbox": [ + 513, + 513, + 928, + 584 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/9d3604a26a92766c2afa345c58e441e8a3c3bd362cbb6278471a9b45c536d4a7.jpg", + "image_caption": [ + "Figure 1. Basic structure diagram of self-attention mechanism" + ], + "image_footnote": [], + "bbox": [ + 228, + 595, + 844, + 845 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Given a medical text $X = \\{x_{1}, x_{2}, \\dots, x_{n}\\}$ , where $x_{i}$ represents the i-th word, we first map the text to a high-dimensional vector space using the pre-trained medical domain BERT (such as BioBERT or PubMedBERT) to get the context representation $H = \\{h_{1}, h_{2}, \\dots, h_{n}\\}$ for each word. Transformer computes the relationships between words using a self-attention mechanism. 
The core calculation is as follows:", + "bbox": [ + 66, + 70, + 486, + 185 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {A t t e n t i o n} (Q, K, V) = \\operatorname {s o f t m a x} \\left(\\frac {Q K ^ {T}}{\\sqrt {d _ {k}}}\\right) V\n$$\n", + "text_format": "latex", + "bbox": [ + 102, + 189, + 406, + 234 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Where $Q, K, V$ is the query matrix, the key matrix and the value matrix respectively, and $d_k$ is the scaling factor to stabilize the gradient update. Through multi-layer Transformer calculation, deep representation $H$ of medical text can be obtained for subsequent information extraction tasks.", + "bbox": [ + 66, + 239, + 486, + 325 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In the Medical named Entity recognition (NER) task [24], we use sequence annotation to feed Transformer's output $\\mathrm{H}$ into a Conditional Random Field (CRF) layer to capture medical entity dependencies [25]. For a given label sequence $Y = \\{y_{1}, y_{2}, \\dots, y_{n}\\}$ , define the conditional probability:", + "bbox": [ + 66, + 327, + 486, + 405 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nP (Y \\mid X) = \\frac {\\exp \\left(\\sum_ {i = 1} ^ {n} W y _ {i - 1} , y _ {i} + h _ {i} ^ {T} W y _ {i}\\right)}{\\sum_ {Y ^ {\\prime}} \\exp \\left(\\sum_ {i = 1} ^ {n} W y ^ {\\prime} _ {i - 1} y ^ {\\prime} _ {i} + h _ {i} ^ {T} W y ^ {\\prime} _ {i}\\right)}\n$$\n", + "text_format": "latex", + "bbox": [ + 94, + 414, + 434, + 465 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Where W is the state transition matrix that controls dependencies between labels. 
The loss function is optimized with Negative Log-Likelihood (NLL):", + "bbox": [ + 66, + 469, + 486, + 513 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nL _ {N E R} = - \\sum_ {i = 1} ^ {n} \\log P (Y \\mid X)\n$$\n", + "text_format": "latex", + "bbox": [ + 191, + 518, + 383, + 556 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In relation extraction task, we adopt two-channel Transformer structure to independently model entity to $(e_1,e_2)$ . First, we compute for each entity its context representation:", + "bbox": [ + 66, + 561, + 486, + 626 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nh _ {e _ {i}} = \\frac {1}{| e _ {i} |} \\sum_ {x _ {j} \\in e _ {i}} h _ {j}\n$$\n", + "text_format": "latex", + "bbox": [ + 225, + 630, + 348, + 670 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Then, the entity pair representation is concatenated and the entity relationship score is calculated through a fully connected layer:", + "bbox": [ + 66, + 676, + 486, + 719 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nr _ {e _ {1}, e _ {2}} = W _ {r} \\left[ h _ {e _ {1}} \\right] \\left[ h _ {e _ {2}} \\right] + b _ {r}\n$$\n", + "text_format": "latex", + "bbox": [ + 199, + 723, + 377, + 744 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Finally, cross-entropy loss is used to optimize relational classification:", + "bbox": [ + 66, + 750, + 486, + 777 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nL _ {R E} = - \\sum_ {\\left(e _ {1}, e _ {2}\\right)} y _ {e 1, e 2} \\log P \\left(r _ {e _ {1}, e _ {2}}\\right)\n$$\n", + "text_format": "latex", + "bbox": [ + 178, + 780, + 398, + 815 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Where, $y_{e1,e2}$ is the true label of the entity relationship, and $P(r_{e_1,e_2})$ is the probability of the relationship class predicted by the model. 
Through the joint optimization of NER and RE tasks, the entity recognition and relationship extraction", + "bbox": [ + 66, + 823, + 486, + 898 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "promote each other and improve the accuracy of medical literature information extraction.", + "bbox": [ + 508, + 66, + 926, + 95 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "IV. EXPERIMENT", + "text_level": 1, + "bbox": [ + 656, + 106, + 787, + 119 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "A. Datasets", + "text_level": 1, + "bbox": [ + 509, + 128, + 602, + 142 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "This dataset comprises 6,881 disease entities extracted from PubMed abstracts, all of which were validated by biomedical experts and categorized into four groups—Specific, Composite, Modifier, and Undetermined Diseases. The data is split into training (5,064 instances), validation (787 instances), and test (1,030 instances) sets. A BIO tagging scheme (B-Begin, I-Inside, O-Outside) is used to clearly delineate entity boundaries, and all entities are aligned with the Unified Medical Language System (UMLS). Preprocessing steps include tokenization, stop-word removal, normalization, and Word Piece tokenization for medical terms. To address data imbalance, a Disease Co-occurrence Network was employed alongside data augmentation techniques such as synonym substitution and entity masking. These methods collectively bolster model performance in medical entity extraction tasks.", + "bbox": [ + 508, + 146, + 928, + 354 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "B. 
Experimental Results", + "text_level": 1, + "bbox": [ + 509, + 362, + 684, + 376 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "First, this paper gives the comparative experimental results of different pre-training models, as shown in Table 1.", + "bbox": [ + 508, + 380, + 926, + 409 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Table 1. Performance comparison of different pre-trained language models on medical literature information extraction", + "bbox": [ + 514, + 409, + 921, + 439 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/f369296a1337c06c0f7fcb9a051fbf46e4453366724355b63c834683d53a134b.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelPrecisionRecallF1-Score
Bert85.2%82.7%83.9%
BioBert88.4%86.1%87.2%
PubmedBert89.7%87.9%88.8%
ClinicalBert87.9%85.6%86.7%
", + "bbox": [ + 501, + 450, + 926, + 513 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Experimental results indicate significant differences in the performance of various pre-trained language models in medical entity extraction tasks [26]. Among them, PubMedBERT achieves the highest performance across all evaluation metrics (F1-score = 88.8%). This suggests that its pre-training strategy on large-scale biomedical literature enhances its adaptability to the textual characteristics of medical texts. In comparison, BioBERT also demonstrates high accuracy (88.4%) and recall (86.1%), ranking second only to PubMedBERT. This indicates that BioBERT maintains strong generalization ability in specific medical entity extraction tasks. ClinicalBERT performs slightly worse than BioBERT and PubMedBERT, with a relatively lower recall (85.6%) despite achieving high accuracy (87.9%). This may be attributed to its pre-training on electronic health records (EHRs), which differ in textual style and structure from medical literature.", + "bbox": [ + 508, + 515, + 928, + 734 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "General BERT exhibits the weakest performance, with an F1-score of only $83.9\\%$ . This result highlights the limitations of general-purpose pre-trained language models when processing specialized medical texts. BERT is pre-trained on a general corpus and lacks domain-specific terminology and contextual understanding, making it less effective for medical entity extraction. In contrast, BioBERT and PubMedBERT, pretrained on PubMed literature, improve their comprehension of medical terms, resulting in superior performance. PubMedBERT's advantage over BioBERT may stem from its training approach. While BioBERT fine-tunes BERT on biomedical texts, PubMedBERT is trained from scratch on", + "bbox": [ + 508, + 742, + 929, + 907 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "medical literature. 
This allows PubMedBERT to capture the linguistic distribution and structural patterns of biomedical texts more comprehensively.", + "bbox": [ + 70, + 66, + 486, + 109 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The findings indicate that choosing domain-specific pretrained language models substantially enhances performance in medical entity extraction tasks. PubMedBERT and BioBERT outperform other models in both accuracy and recall. This suggests that generic BERT alone is inadequate for medical Natural Language Processing (NLP) tasks, and domain-adaptive pre-training strategies are crucial for improving model performance. Future research could delve deeper into integrating pre-trained language models with knowledge graph augmentation or multi-task learning to further enhance medical entity extraction capabilities.", + "bbox": [ + 70, + 116, + 486, + 268 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Further, this paper also provides experimental comparative analysis of different named entity recognition methods based on Transformer, and the experimental results are shown in Table 2.", + "bbox": [ + 70, + 275, + 486, + 330 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/c85b7124e86f453b9452fcc538f804ff2969528d9cb8101a4c90216329642fcc.jpg", + "table_caption": [ + "Table 2. Performance comparison of different named entity recognition methods based on Transformer" + ], + "table_footnote": [], + "table_body": "
MethodPrecisionRecallF1-Score
Transformer + CRF88.1%86.3%87.2%
Transformer + Span-based89.4%87.8%88.6%
Transformer + Seq2Seq86.7%85.2%85.9%
", + "bbox": [ + 60, + 378, + 485, + 467 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Experimental results indicate that different entity extraction methods exhibit varying performance within Transformer-based architectures. Among them, the Span-based approach achieves the highest F1-score (88.6%), demonstrating its superior ability to accurately identify entity boundaries in medical texts. Compared to CRF-based token-by-token sequential labeling, the Span-based method directly predicts entity boundaries, making it more effective in extracting complex medical terms and multi-word expressions.", + "bbox": [ + 70, + 487, + 486, + 613 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The Transformer + CRF method scores slightly lower $(87.2\\%)$ but excels at capturing dependencies between entities, making it suitable for structured medical texts. Conversely, the Transformer + Seq2Seq method yields a lower F1-score $(85.9\\%)$ , likely due to the decoder's errors in boundary recognition during text generation. These findings suggest that Span-based and CRF methods are more effective for medical entity extraction. Future studies might explore integrating Span-based techniques with CRF to enhance accuracy and stability. Additionally, the study explores Few-shot Learning for medical entity recognition under low-resource conditions, with experimental results detailed in Figure 2.", + "bbox": [ + 70, + 619, + 486, + 786 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Experimental results indicate that the performance of Few-shot Learning in medical entity extraction improves as the number of training samples increases. When training data is extremely limited (1-shot or 5-shot), the model's precision, recall, and F1-score remain low, with the F1-score ranging between $60\\%$ and $72\\%$ . This suggests that the model struggles to accurately identify medical entities under severe data scarcity. 
The primary challenge lies in the abundance of", + "bbox": [ + 70, + 792, + 486, + 902 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "specialized terminology in medical texts, making it difficult for the model to learn effective patterns from minimal data. However, with 10-shot training, performance improves significantly, with the F1-score reaching $79.1\\%$ . This result demonstrates that even a small increase in labeled data can substantially enhance the model's learning ability. This aligns with the fundamental characteristic of Few-shot Learning, which efficiently leverages limited samples.", + "bbox": [ + 513, + 66, + 926, + 179 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/19658615318995e7d3899b2ed34c9fd6d6f3bd2d7e27c2b33876b9e924816faa.jpg", + "image_caption": [ + "Figure 2. Few-shot Learning Performance on Medical NER" + ], + "image_footnote": [], + "bbox": [ + 516, + 186, + 921, + 400 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "As the number of training samples increases to 20-shot and beyond, the model's precision, recall, and F1-score continue to rise, eventually plateauing after 50-shot. At 50-shot, the F1-score reaches $88.1\\%$ and approaches $89.9\\%$ at 100-shot. This indicates that with a sufficient number of training samples, the model can effectively learn entity representations and achieve high recognition accuracy. However, performance gains slow after 50-shot, suggesting that while Few-shot Learning enhances model performance up to a certain threshold, its marginal benefit diminishes as data volume increases. 
These findings suggest that in medical entity extraction tasks, Few-shot Learning is particularly effective in low-resource settings, whereas traditional supervised learning may offer greater stability when ample labeled data is available.", + "bbox": [ + 513, + 431, + 926, + 625 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Overall, this experiment confirms the effectiveness of Few-shot Learning in medical entity extraction, particularly when labeled data is scarce. Even with a limited number of samples, the model demonstrates significant performance improvements. Future research could explore more advanced Few-shot Learning techniques, such as metric learning-based methods, GPT variants optimized for prompt design, or small-sample learning approaches integrated with knowledge graphs. These strategies could further enhance model generalization, enabling more effective applications in complex medical NLP tasks.", + "bbox": [ + 513, + 632, + 926, + 771 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "V. CONCLUSION", + "text_level": 1, + "bbox": [ + 661, + 781, + 787, + 794 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "This study proposes a medical entity extraction method based on Transformer and examines the effects of different pretrained language models, extraction methods, and Few-shot Learning in low-resource scenarios. Experimental results indicate that PubMedBERT and BioBERT outperform other models in medical text processing, significantly improving entity extraction accuracy. Compared to traditional sequence", + "bbox": [ + 513, + 800, + 926, + 897 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "labeling approaches, the Span-based entity extraction method achieves the best performance, demonstrating that directly predicting entity boundaries enhances the recognition of complex medical terms. 
Additionally, Few-shot Learning exhibits strong adaptability in low-resource conditions, achieving high F1-scores with minimal training data. This highlights its potential for medical NLP applications.", + "bbox": [ + 66, + 66, + 486, + 165 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Despite these promising results, several aspects require further optimization. While the Transformer architecture enhances medical entity extraction, it incurs high computational costs, particularly on large-scale datasets. Future research could explore Knowledge Distillation or Lightweight Transformer variants to improve computational efficiency. This indicates that integrating Active Learning or Data Augmentation strategies may enhance model performance more efficiently. Additionally, medical texts often contain complex contextual relationships. Incorporating Knowledge Graphs into Transformer-based models could further strengthen their understanding of medical terminology.", + "bbox": [ + 66, + 171, + 486, + 338 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Future studies could expand the application of Few-shot Learning in medical NLP, such as developing more effective prompt-based learning techniques. This would enable large language models (LLMs) to achieve high-precision entity extraction with minimal labeled data. Furthermore, real-world medical text data often involves privacy concerns. Optimizing medical entity extraction models while ensuring data security remains a critical research challenge. Finally, by refining Transformer architectures, integrating external medical knowledge, and introducing adaptive learning strategies, medical entity extraction technology could play a more significant role in clinical medicine, drug discovery, and medical literature analysis. 
These advancements would provide intelligent and efficient tools to support medical research and practice.", + "bbox": [ + 66, + 344, + 486, + 551 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 233, + 563, + 320, + 574 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Pagad N. S. and Pradeep N., “Clinical named entity recognition methods: an overview”, Proceedings of the International Conference on Innovative Computing and Communications: Proceedings of ICICC 2021, Volume 2, pp. 151-165, 2022.", + "[2] Navarro D. F., Ijaz K., Rezazadegan D., et al., \"Clinical named entity recognition and relation extraction using natural language processing of medical free text: A systematic review\", International Journal of Medical Informatics, vol. 177, 105122, 2023.", + "[3] Durango M. C., Torres-Silva E. A. and Orozco-Duque A., “Named entity recognition in electronic health records: a methodological review”, Healthcare Informatics Research, vol. 29, no. 4, pp. 286-300, 2023.", + "[4] Y. Xiang, Q. He, T. Xu, R. Hao, J. Hu and H. Zhang, \"Adaptive Transformer Attention and Multi-Scale Fusion for Spine 3D Segmentation\", arXiv preprint arXiv:2503.12853, 2025.", + "[5] T. Xu, Y. Xiang, J. Du and H. Zhang, \"Cross-Scale Attention and Multi-Layer Feature Fusion YOLOv8 for Skin Disease Target Detection in Medical Images\", Journal of Computer Technology and Software, vol. 4, no. 2, 2025.", + "[6] W. He, Y. Zhang, T. Xu, T. An, Y. Liang and B. Zhang, \"Object detection for medical image analysis: Insights from the RT-DETR model\", arXiv preprint arXiv:2501.16469, 2025.", + "[7] M. Li, R. Hao, S. Shi, Z. Yu, Q. He and J. Zhan, “A CNN-Transformer Approach for Image-Text Multimodal Classification with Cross-Modal Feature Fusion”, 2025." + ], + "bbox": [ + 68, + 580, + 486, + 883 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[8] Q. 
Sun, \"Dynamic Optimization of Human-Computer Interaction Interfaces Using Graph Convolutional Networks and Q-Learning\", Transactions on Computational and Scientific Methods, vol. 5, no. 2, 2025.", + "[9] S. Duan, \"Systematic Analysis of User Perception for Interface Design Enhancement\", Journal of Computer Science and Software Applications, vol. 5, no. 2, 2024.", + "[10] J. Hu, T. An, Z. Yu, J. Du and Y. Luo, \"Contrastive Learning for Cold Start Recommendation with Adaptive Feature Fusion\", arXiv preprint arXiv:2502.03664, 2025.", + "[11] A. Liang, “Personalized Multimodal Recommendations Framework Using Contrastive Learning”, Transactions on Computational and Scientific Methods, vol. 4, no. 11, 2024.", + "[12] X. Zhou, Y. Zhang, Z. Wang, M. Lu and X. Liu, \"MAFN: multi-level attention fusion network for multimodal named entity recognition\", Multimedia Tools and Applications, vol. 83, no. 15, pp. 45047-45058, 2024.", + "[13] Y. Xu and Y. Chen, \"Attention-based interactive multi-level feature fusion for named entity recognition\", Scientific Reports, vol. 15, no. 1, 3069, 2025.", + "[14] Z. Tong, Q. Liu, H. Shi, Y. Xia, S. Wu and X. Y. Zhang, \"Semantics Fusion of Hierarchical Transformers for Multimodal Named Entity Recognition\", Proceedings of the International Conference on Intelligent Computing, pp. 414-426, 2024.", + "[15] Y. Hu, Q. Chen, J. Du, et al., \"Improving large language models for clinical named entity recognition via prompt engineering\", Journal of the American Medical Informatics Association, vol. 31, no. 9, pp. 1812-1820, 2024.", + "[16] R. Hao, Y. Xiang, J. Du, Q. He, J. Hu and T. Xu, “A Hybrid CNN-Transformer Model for Heart Disease Prediction Using Life History Data”, arXiv preprint arXiv:2503.02124, 2025.", + "[17] J. Hu, Y. Xiang, Y. Lin, J. Du, H. Zhang and H. Liu, “Multi-Scale Transformer Architecture for Accurate Medical Image Classification”, arXiv preprint arXiv:2502.06243, 2025.", + "[18] X. Li, Q. Lu, Y. Li, M. Li and Y. 
Qi, \"Optimized Unet with Attention Mechanism for Multi-Scale Semantic Segmentation\", arXiv preprint arXiv:2502.03813, 2025.", + "[19] J. Wei, Y. Liu, X. Huang, X. Zhang, W. Liu and X. Yan, \"Self-Supervised Graph Neural Networks for Enhanced Feature Extraction in Heterogeneous Information Networks\", Proceedings of the 2024 5th International Conference on Machine Learning and Computer Application (ICMLCA), pp. 272-276, 2024.", + "[20] J. Liu, Y. Zhang, Y. Sheng, Y. Lou, H. Wang and B. Yang, “Context-Aware Rule Mining Using a Dynamic Transformer-Based Framework”, arXiv preprint arXiv:2503.11125, 2025.", + "[21] J. Zhan, \"Elastic Scheduling of Micro-Modules in Edge Computing Based on LSTM Prediction\", Journal of Computer Technology and Software, vol. 4, no. 2, 2025.", + "[22] Y. Deng, “A hybrid network congestion prediction method integrating association rules and LSTM for enhanced spatiotemporal forecasting”, Transactions on Computational and Scientific Methods, vol. 5, no. 2, 2025.", + "[23] X. Yan, W. Wang, M. Xiao, Y. Li and M. Gao, \"Survival prediction across diverse cancer types using neural networks\", Proceedings of the 2024 7th International Conference on Machine Vision and Applications, pp. 134-138, 2024.", + "[24] K. Pakhale, “Comprehensive overview of named entity recognition: Models, domain-specific applications and challenges”, arXiv preprint arXiv:2309.14084, 2023.", + "[25] M. Afshar, Y. Gao, D. Gupta, E. Croxford and D. Demner-Fushman, \"On the role of the UMLS in supporting diagnosis generation proposed by Large Language Models\", Journal of Biomedical Informatics, 2024.", + "[26] V. S. Carmona, S. Jiang and B. Dong, “A Multilevel Analysis of PubMed-only BERT-based Biomedical Models”, Proceedings of the 6th Clinical Natural Language Processing Workshop, pp. 105-110, 2024." 
+ ], + "bbox": [ + 511, + 66, + 928, + 864 + ], + "page_idx": 4 + } +] \ No newline at end of file diff --git a/data/2025/2504_04xxx/2504.04385/0a01c7e3-29d7-49b8-ab11-a052914cd3dc_model.json b/data/2025/2504_04xxx/2504.04385/0a01c7e3-29d7-49b8-ab11-a052914cd3dc_model.json new file mode 100644 index 0000000000000000000000000000000000000000..9c5cead48d52e60801872df0f1218b64df0254f6 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04385/0a01c7e3-29d7-49b8-ab11-a052914cd3dc_model.json @@ -0,0 +1,1079 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.079, + 0.069, + 0.921, + 0.138 + ], + "angle": 0, + "content": "Pre-trained Language Models and Few-shot Learning for Medical Entity Extraction" + }, + { + "type": "text", + "bbox": [ + 0.134, + 0.145, + 0.274, + 0.191 + ], + "angle": 0, + "content": "Xiaokai Wang Santa Clara University Santa Clara, USA" + }, + { + "type": "text", + "bbox": [ + 0.406, + 0.145, + 0.593, + 0.189 + ], + "angle": 0, + "content": "Guiran Liu \nSan Francisco State University \nSan Francisco, USA" + }, + { + "type": "text", + "bbox": [ + 0.702, + 0.145, + 0.886, + 0.189 + ], + "angle": 0, + "content": "Binrong Zhu \nSan Francisco State University \nSan Francisco, USA" + }, + { + "type": "text", + "bbox": [ + 0.146, + 0.223, + 0.26, + 0.266 + ], + "angle": 0, + "content": "Jacky He Cornell University New York, USA" + }, + { + "type": "text", + "bbox": [ + 0.384, + 0.223, + 0.615, + 0.268 + ], + "angle": 0, + "content": "Hongye Zheng \nThe Chinese University of Hong Kong \nHong Kong, China" + }, + { + "type": "text", + "bbox": [ + 0.702, + 0.223, + 0.889, + 0.267 + ], + "angle": 0, + "content": "Hanlu Zhang* \nStevens Institute of Technology \nHoboken, USA" + }, + { + "type": "text", + "bbox": [ + 0.07, + 0.331, + 0.486, + 0.645 + ], + "angle": 0, + "content": "Abstract-This study proposes a medical entity extraction method based on Transformer to enhance the information extraction capability of medical literature. 
Considering the professionalism and complexity of medical texts, we compare the performance of different pre-trained language models (BERT, BioBERT, PubMedBERT, ClinicalBERT) in medical entity extraction tasks. Experimental results show that PubMedBERT achieves the best performance \\((F1\\text{-score} = 88.8\\%)\\), indicating that a language model pre-trained on biomedical literature is more effective in the medical domain. In addition, we analyze the impact of different entity extraction methods (CRF, Span-based, Seq2Seq) and find that the Span-based approach performs best in medical entity extraction tasks \\((F1\\text{-score} = 88.6\\%)\\). It demonstrates superior accuracy in identifying entity boundaries. In low-resource scenarios, we further explore the application of Few-shot Learning in medical entity extraction. Experimental results show that even with only 10-shot training samples, the model achieves an F1-score of \\(79.1\\%\\), verifying the effectiveness of Few-shot Learning under limited data conditions. This study confirms that the combination of pre-trained language models and Few-shot Learning can enhance the accuracy of medical entity extraction. Future research can integrate knowledge graphs and active learning strategies to improve the model's generalization and stability, providing a more effective solution for medical NLP research." + }, + { + "type": "text", + "bbox": [ + 0.071, + 0.657, + 0.485, + 0.698 + ], + "angle": 0, + "content": "Keywords- Natural Language Processing, medical named entity recognition, pre-trained language model, Few-shot Learning, information extraction, deep learning" + }, + { + "type": "title", + "bbox": [ + 0.217, + 0.707, + 0.353, + 0.72 + ], + "angle": 0, + "content": "I. INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.071, + 0.726, + 0.486, + 0.906 + ], + "angle": 0, + "content": "Medical entity extraction is a key application of Natural Language Processing (NLP) in healthcare. 
With the rapid growth of biomedical research, the volume of medical literature is increasing exponentially. Each day, thousands of papers are added to databases such as PubMed, Medline, and Embase. Researchers must extract valuable information from this vast amount of data to support medical research, clinical decision-making, and drug development. However, traditional manual literature screening and analysis are time-consuming and labor-intensive, making it difficult to meet the demand for efficient information retrieval in modern medical research [1]. Entity extraction technology provides strong support for medical data mining by automatically identifying structured information" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.331, + 0.928, + 0.401 + ], + "angle": 0, + "content": "from unstructured text, such as disease-drug relationships, gene-phenotype associations, and clinical treatment plans [2]. In recent years, deep learning, particularly the Transformer architecture, has significantly advanced NLP, improving the performance of medical entity extraction tasks." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.408, + 0.928, + 0.601 + ], + "angle": 0, + "content": "Traditional medical entity extraction methods mainly rely on rule-based techniques and classical machine learning models. Rule-based approaches analyze text using predefined regular expressions, knowledge base matching, and expert-defined grammar rules. While these methods achieve high accuracy in specific tasks, their generalization ability is limited, making them ineffective for handling complex syntactic structures and diverse language expressions in medical literature. However, medical texts contain highly specialized terminology and hierarchical semantic structures. Traditional models often struggle with feature selection in large-scale literature and fail to capture deep contextual information. 
Efficient and accurate medical entity extraction has therefore become a central research challenge in medical NLP [3]." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.608, + 0.928, + 0.898 + ], + "angle": 0, + "content": "The introduction of Transformer-based models offers a new solution for medical entity extraction. Pre-trained language models such as BERT, RoBERTa, BioBERT, and PubMedBERT leverage self-attention mechanisms to learn long-range dependencies and capture deep semantic representations in medical texts. Beyond NLP, Transformer-based and hybrid architectures have also demonstrated strong performance in other domains. In computer vision and medical imaging, they have been applied to tasks such as 3D spine segmentation [4], skin disease detection [5], and object detection in clinical scans [6], showing enhanced accuracy through attention mechanisms and multi-scale fusion. In the field of multimodal learning, Transformer-CNN architectures have enabled more effective image-text classification through cross-modal feature fusion [7]. Additionally, in human-computer interaction, Transformer-related models have been used for optimizing interface design [8] and improving user experience through graph-based learning and dynamic adaptation [9]. These successes highlight the adaptability and effectiveness of Transformer models across diverse application areas [10-11]. Transformer-based models, such as BioBERT," + } + ], + [ + { + "type": "text", + "bbox": [ + 0.071, + 0.069, + 0.488, + 0.138 + ], + "angle": 0, + "content": "have significantly improved medical entity recognition, relationship extraction, and text classification tasks. Finetuning and transfer learning enhance model adaptability, enabling better understanding of complex medical language structures compared to traditional methods [12]." 
+ }, + { + "type": "text", + "bbox": [ + 0.071, + 0.144, + 0.488, + 0.352 + ], + "angle": 0, + "content": "This study proposes a Transformer-based framework to automate and enhance accuracy in medical entity extraction. Effective extraction supports rapid identification of medical evidence, aids clinical decision-making, and accelerates biomedical discoveries. Transformer models also enable multitask learning and generative applications, including summarization and medical reasoning, thereby broadening their utility in medical NLP research [13]. As medical data expands, Transformer models will become increasingly important in precision medicine, diagnostics, and education. However, privacy and ethical issues require ongoing attention. Integrating multi-modal medical data and knowledge graphs with Transformer models may further enhance interpretability and scalability. This research aims to advance medical NLP by improving methods for processing extensive medical literature." + }, + { + "type": "title", + "bbox": [ + 0.212, + 0.363, + 0.356, + 0.375 + ], + "angle": 0, + "content": "II. RELATED WORK" + }, + { + "type": "text", + "bbox": [ + 0.071, + 0.381, + 0.488, + 0.534 + ], + "angle": 0, + "content": "The development of medical entity extraction has benefited greatly from advances in Transformer-based models and deep learning architectures. Notably, recent works have leveraged hierarchical and multimodal Transformer models to enhance named entity recognition (NER) performance. Tong et al. [14] proposed a semantic fusion framework using hierarchical Transformers, which enables the integration of diverse contextual representations. Similarly, prompt-based strategies have been introduced to optimize large language models for specialized entity extraction tasks, achieving better adaptability to domain-specific terminology and limited data scenarios [15]." 
+ }, + { + "type": "text", + "bbox": [ + 0.071, + 0.54, + 0.488, + 0.583 + ], + "angle": 0, + "content": "Several studies have applied Transformer mechanisms within hybrid or multi-scale frameworks to strengthen deep representation learning. For instance, Hao et al. [16] designed a" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.069, + 0.929, + 0.193 + ], + "angle": 0, + "content": "hybrid convolutional and Transformer-based architecture that captures both local and global dependencies in sequential data, improving the generalization of deep learning models. Multiscale Transformer models further enhance performance by capturing hierarchical information, enabling more nuanced feature extraction [17]. Attention mechanisms, embedded within optimized neural architectures, also contribute to improved semantic segmentation and context-aware classification [18]." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.199, + 0.929, + 0.325 + ], + "angle": 0, + "content": "Beyond Transformer-based models, other neural network enhancements have significantly contributed to the advancement of deep feature extraction. Graph neural frameworks, particularly those that utilize self-supervised learning techniques, have been proposed to enhance the representation capabilities of complex data environments [19]. Additionally, dynamic rule mining mechanisms based on Transformer variants have been developed to facilitate adaptive pattern recognition in unstructured data [20]." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.331, + 0.929, + 0.483 + ], + "angle": 0, + "content": "Sequential modeling techniques also contribute valuable insights. LSTM-based prediction models have demonstrated robustness in handling time-dependent data and adaptive scheduling, providing lessons in efficient learning from limited sequences [21]. Combined with pattern discovery methods, such frameworks support enhanced spatiotemporal learning [22]. 
Additionally, deep neural network architectures have been effectively used to develop robust predictive systems, emphasizing the importance of carefully designed learning structures in handling heterogeneous and high-dimensional data [23]." + }, + { + "type": "title", + "bbox": [ + 0.676, + 0.494, + 0.772, + 0.507 + ], + "angle": 0, + "content": "III. METHOD" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.514, + 0.929, + 0.585 + ], + "angle": 0, + "content": "This study proposes a medical literature information extraction model based on Transformer structure, which aims to automatically extract key entities and their relationships from medical texts. The self-attention mechanism architecture in Transformer is shown in Figure 1." + }, + { + "type": "image", + "bbox": [ + 0.229, + 0.597, + 0.845, + 0.846 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.298, + 0.861, + 0.702, + 0.875 + ], + "angle": 0, + "content": "Figure 1. Basic structure diagram of self-attention mechanism" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.068, + 0.071, + 0.487, + 0.186 + ], + "angle": 0, + "content": "Given a medical text \\( X = \\{x_{1}, x_{2}, \\dots, x_{n}\\} \\), where \\( x_{i} \\) represents the i-th word, we first map the text to a high-dimensional vector space using the pre-trained medical domain BERT (such as BioBERT or PubMedBERT) to get the context representation \\( H = \\{h_{1}, h_{2}, \\dots, h_{n}\\} \\) for each word. Transformer computes the relationships between words using a self-attention mechanism. 
The core calculation is as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.104, + 0.19, + 0.408, + 0.235 + ], + "angle": 0, + "content": "\\[\n\\operatorname {A t t e n t i o n} (Q, K, V) = \\operatorname {s o f t m a x} \\left(\\frac {Q K ^ {T}}{\\sqrt {d _ {k}}}\\right) V\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.068, + 0.24, + 0.487, + 0.326 + ], + "angle": 0, + "content": "Where \\( Q, K, V \\) is the query matrix, the key matrix and the value matrix respectively, and \\( d_k \\) is the scaling factor to stabilize the gradient update. Through multi-layer Transformer calculation, deep representation \\( H \\) of medical text can be obtained for subsequent information extraction tasks." + }, + { + "type": "text", + "bbox": [ + 0.068, + 0.328, + 0.487, + 0.406 + ], + "angle": 0, + "content": "In the Medical named Entity recognition (NER) task [24], we use sequence annotation to feed Transformer's output \\( \\mathrm{H} \\) into a Conditional Random Field (CRF) layer to capture medical entity dependencies [25]. For a given label sequence \\( Y = \\{y_{1}, y_{2}, \\dots, y_{n}\\} \\), define the conditional probability:" + }, + { + "type": "equation", + "bbox": [ + 0.095, + 0.415, + 0.436, + 0.466 + ], + "angle": 0, + "content": "\\[\nP (Y \\mid X) = \\frac {\\exp \\left(\\sum_ {i = 1} ^ {n} W y _ {i - 1} , y _ {i} + h _ {i} ^ {T} W y _ {i}\\right)}{\\sum_ {Y ^ {\\prime}} \\exp \\left(\\sum_ {i = 1} ^ {n} W y ^ {\\prime} _ {i - 1} y ^ {\\prime} _ {i} + h _ {i} ^ {T} W y ^ {\\prime} _ {i}\\right)}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.068, + 0.47, + 0.487, + 0.515 + ], + "angle": 0, + "content": "Where W is the state transition matrix that controls dependencies between labels. 
The loss function is optimized with Negative Log-Likelihood (NLL):" + }, + { + "type": "equation", + "bbox": [ + 0.192, + 0.519, + 0.385, + 0.557 + ], + "angle": 0, + "content": "\\[\nL _ {N E R} = - \\sum_ {i = 1} ^ {n} \\log P (Y \\mid X)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.068, + 0.562, + 0.487, + 0.627 + ], + "angle": 0, + "content": "In relation extraction task, we adopt two-channel Transformer structure to independently model entity to \\((e_1,e_2)\\). First, we compute for each entity its context representation:" + }, + { + "type": "equation", + "bbox": [ + 0.227, + 0.631, + 0.349, + 0.671 + ], + "angle": 0, + "content": "\\[\nh _ {e _ {i}} = \\frac {1}{| e _ {i} |} \\sum_ {x _ {j} \\in e _ {i}} h _ {j}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.068, + 0.677, + 0.487, + 0.72 + ], + "angle": 0, + "content": "Then, the entity pair representation is concatenated and the entity relationship score is calculated through a fully connected layer:" + }, + { + "type": "equation", + "bbox": [ + 0.2, + 0.724, + 0.379, + 0.746 + ], + "angle": 0, + "content": "\\[\nr _ {e _ {1}, e _ {2}} = W _ {r} \\left[ h _ {e _ {1}} \\right] \\left[ h _ {e _ {2}} \\right] + b _ {r}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.068, + 0.75, + 0.487, + 0.778 + ], + "angle": 0, + "content": "Finally, cross-entropy loss is used to optimize relational classification:" + }, + { + "type": "equation", + "bbox": [ + 0.179, + 0.781, + 0.399, + 0.816 + ], + "angle": 0, + "content": "\\[\nL _ {R E} = - \\sum_ {\\left(e _ {1}, e _ {2}\\right)} y _ {e 1, e 2} \\log P \\left(r _ {e _ {1}, e _ {2}}\\right)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.068, + 0.824, + 0.487, + 0.899 + ], + "angle": 0, + "content": "Where, \\( y_{e1,e2} \\) is the true label of the entity relationship, and \\( P(r_{e_1,e_2}) \\) is the probability of the relationship class predicted by the model. 
Through the joint optimization of NER and RE tasks, the entity recognition and relationship extraction" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.068, + 0.928, + 0.096 + ], + "angle": 0, + "content": "promote each other and improve the accuracy of medical literature information extraction." + }, + { + "type": "title", + "bbox": [ + 0.657, + 0.107, + 0.788, + 0.121 + ], + "angle": 0, + "content": "IV. EXPERIMENT" + }, + { + "type": "title", + "bbox": [ + 0.51, + 0.13, + 0.603, + 0.143 + ], + "angle": 0, + "content": "A. Datasets" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.147, + 0.929, + 0.356 + ], + "angle": 0, + "content": "This dataset comprises 6,881 disease entities extracted from PubMed abstracts, all of which were validated by biomedical experts and categorized into four groups—Specific, Composite, Modifier, and Undetermined Diseases. The data is split into training (5,064 instances), validation (787 instances), and test (1,030 instances) sets. A BIO tagging scheme (B-Begin, I-Inside, O-Outside) is used to clearly delineate entity boundaries, and all entities are aligned with the Unified Medical Language System (UMLS). Preprocessing steps include tokenization, stop-word removal, normalization, and Word Piece tokenization for medical terms. To address data imbalance, a Disease Co-occurrence Network was employed alongside data augmentation techniques such as synonym substitution and entity masking. These methods collectively bolster model performance in medical entity extraction tasks." + }, + { + "type": "title", + "bbox": [ + 0.51, + 0.363, + 0.686, + 0.377 + ], + "angle": 0, + "content": "B. Experimental Results" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.381, + 0.928, + 0.41 + ], + "angle": 0, + "content": "First, this paper gives the comparative experimental results of different pre-training models, as shown in Table 1." 
+ }, + { + "type": "text", + "bbox": [ + 0.515, + 0.41, + 0.922, + 0.44 + ], + "angle": 0, + "content": "Table 1. Performance comparison of different pre-trained language models on medical literature information extraction" + }, + { + "type": "table", + "bbox": [ + 0.503, + 0.451, + 0.927, + 0.515 + ], + "angle": 0, + "content": "
ModelPrecisionRecallF1-Score
Bert85.2%82.7%83.9%
BioBert88.4%86.1%87.2%
PubmedBert89.7%87.9%88.8%
ClinicalBert87.9%85.6%86.7%
" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.516, + 0.929, + 0.736 + ], + "angle": 0, + "content": "Experimental results indicate significant differences in the performance of various pre-trained language models in medical entity extraction tasks [26]. Among them, PubMedBERT achieves the highest performance across all evaluation metrics (F1-score = 88.8%). This suggests that its pre-training strategy on large-scale biomedical literature enhances its adaptability to the textual characteristics of medical texts. In comparison, BioBERT also demonstrates high accuracy (88.4%) and recall (86.1%), ranking second only to PubMedBERT. This indicates that BioBERT maintains strong generalization ability in specific medical entity extraction tasks. ClinicalBERT performs slightly worse than BioBERT and PubMedBERT, with a relatively lower recall (85.6%) despite achieving high accuracy (87.9%). This may be attributed to its pre-training on electronic health records (EHRs), which differ in textual style and structure from medical literature." + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.743, + 0.93, + 0.909 + ], + "angle": 0, + "content": "General BERT exhibits the weakest performance, with an F1-score of only \\(83.9\\%\\). This result highlights the limitations of general-purpose pre-trained language models when processing specialized medical texts. BERT is pre-trained on a general corpus and lacks domain-specific terminology and contextual understanding, making it less effective for medical entity extraction. In contrast, BioBERT and PubMedBERT, pretrained on PubMed literature, improve their comprehension of medical terms, resulting in superior performance. PubMedBERT's advantage over BioBERT may stem from its training approach. While BioBERT fine-tunes BERT on biomedical texts, PubMedBERT is trained from scratch on" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.071, + 0.068, + 0.488, + 0.111 + ], + "angle": 0, + "content": "medical literature. 
This allows PubMedBERT to capture the linguistic distribution and structural patterns of biomedical texts more comprehensively." + }, + { + "type": "text", + "bbox": [ + 0.071, + 0.117, + 0.487, + 0.27 + ], + "angle": 0, + "content": "The findings indicate that choosing domain-specific pretrained language models substantially enhances performance in medical entity extraction tasks. PubMedBERT and BioBERT outperform other models in both accuracy and recall. This suggests that generic BERT alone is inadequate for medical Natural Language Processing (NLP) tasks, and domain-adaptive pre-training strategies are crucial for improving model performance. Future research could delve deeper into integrating pre-trained language models with knowledge graph augmentation or multi-task learning to further enhance medical entity extraction capabilities." + }, + { + "type": "text", + "bbox": [ + 0.071, + 0.276, + 0.488, + 0.332 + ], + "angle": 0, + "content": "Further, this paper also provides experimental comparative analysis of different named entity recognition methods based on Transformer, and the experimental results are shown in Table 2." + }, + { + "type": "table_caption", + "bbox": [ + 0.082, + 0.34, + 0.475, + 0.368 + ], + "angle": 0, + "content": "Table 2. Performance comparison of different named entity recognition methods based on Transformer" + }, + { + "type": "table", + "bbox": [ + 0.061, + 0.38, + 0.486, + 0.468 + ], + "angle": 0, + "content": "
MethodPrecisionRecallF1-Score
Transformer + CRF88.1%86.3%87.2%
Transformer + Span-based89.4%87.8%88.6%
Transformer + Seq2Seq86.7%85.2%85.9%
" + }, + { + "type": "text", + "bbox": [ + 0.071, + 0.488, + 0.487, + 0.614 + ], + "angle": 0, + "content": "Experimental results indicate that different entity extraction methods exhibit varying performance within Transformer-based architectures. Among them, the Span-based approach achieves the highest F1-score (88.6%), demonstrating its superior ability to accurately identify entity boundaries in medical texts. Compared to CRF-based token-by-token sequential labeling, the Span-based method directly predicts entity boundaries, making it more effective in extracting complex medical terms and multi-word expressions." + }, + { + "type": "text", + "bbox": [ + 0.071, + 0.62, + 0.487, + 0.787 + ], + "angle": 0, + "content": "The Transformer + CRF method scores slightly lower \\((87.2\\%)\\) but excels at capturing dependencies between entities, making it suitable for structured medical texts. Conversely, the Transformer + Seq2Seq method yields a lower F1-score \\((85.9\\%)\\), likely due to the decoder's errors in boundary recognition during text generation. These findings suggest that Span-based and CRF methods are more effective for medical entity extraction. Future studies might explore integrating Span-based techniques with CRF to enhance accuracy and stability. Additionally, the study explores Few-shot Learning for medical entity recognition under low-resource conditions, with experimental results detailed in Figure 2." + }, + { + "type": "text", + "bbox": [ + 0.071, + 0.793, + 0.487, + 0.904 + ], + "angle": 0, + "content": "Experimental results indicate that the performance of Few-shot Learning in medical entity extraction improves as the number of training samples increases. When training data is extremely limited (1-shot or 5-shot), the model's precision, recall, and F1-score remain low, with the F1-score ranging between \\(60\\%\\) and \\(72\\%\\). This suggests that the model struggles to accurately identify medical entities under severe data scarcity. 
The primary challenge lies in the abundance of" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.068, + 0.928, + 0.18 + ], + "angle": 0, + "content": "specialized terminology in medical texts, making it difficult for the model to learn effective patterns from minimal data. However, with 10-shot training, performance improves significantly, with the F1-score reaching \\(79.1\\%\\). This result demonstrates that even a small increase in labeled data can substantially enhance the model's learning ability. This aligns with the fundamental characteristic of Few-shot Learning, which efficiently leverages limited samples." + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.187, + 0.922, + 0.401 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.527, + 0.411, + 0.914, + 0.426 + ], + "angle": 0, + "content": "Figure 2. Few-shot Learning Performance on Medical NER" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.433, + 0.928, + 0.626 + ], + "angle": 0, + "content": "As the number of training samples increases to 20-shot and beyond, the model's precision, recall, and F1-score continue to rise, eventually plateauing after 50-shot. At 50-shot, the F1-score reaches \\(88.1\\%\\) and approaches \\(89.9\\%\\) at 100-shot. This indicates that with a sufficient number of training samples, the model can effectively learn entity representations and achieve high recognition accuracy. However, performance gains slow after 50-shot, suggesting that while Few-shot Learning enhances model performance up to a certain threshold, its marginal benefit diminishes as data volume increases. These findings suggest that in medical entity extraction tasks, Few-shot Learning is particularly effective in low-resource settings, whereas traditional supervised learning may offer greater stability when ample labeled data is available." 
+ }, + { + "type": "text", + "bbox": [ + 0.514, + 0.633, + 0.928, + 0.772 + ], + "angle": 0, + "content": "Overall, this experiment confirms the effectiveness of Few-shot Learning in medical entity extraction, particularly when labeled data is scarce. Even with a limited number of samples, the model demonstrates significant performance improvements. Future research could explore more advanced Few-shot Learning techniques, such as metric learning-based methods, GPT variants optimized for prompt design, or small-sample learning approaches integrated with knowledge graphs. These strategies could further enhance model generalization, enabling more effective applications in complex medical NLP tasks." + }, + { + "type": "title", + "bbox": [ + 0.663, + 0.782, + 0.788, + 0.795 + ], + "angle": 0, + "content": "V. CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.801, + 0.928, + 0.898 + ], + "angle": 0, + "content": "This study proposes a medical entity extraction method based on Transformer and examines the effects of different pretrained language models, extraction methods, and Few-shot Learning in low-resource scenarios. Experimental results indicate that PubMedBERT and BioBERT outperform other models in medical text processing, significantly improving entity extraction accuracy. Compared to traditional sequence" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.068, + 0.068, + 0.487, + 0.166 + ], + "angle": 0, + "content": "labeling approaches, the Span-based entity extraction method achieves the best performance, demonstrating that directly predicting entity boundaries enhances the recognition of complex medical terms. Additionally, Few-shot Learning exhibits strong adaptability in low-resource conditions, achieving high F1-scores with minimal training data. This highlights its potential for medical NLP applications." 
+ }, + { + "type": "text", + "bbox": [ + 0.068, + 0.172, + 0.487, + 0.339 + ], + "angle": 0, + "content": "Despite these promising results, several aspects require further optimization. While the Transformer architecture enhances medical entity extraction, it incurs high computational costs, particularly on large-scale datasets. Future research could explore Knowledge Distillation or Lightweight Transformer variants to improve computational efficiency. This indicates that integrating Active Learning or Data Augmentation strategies may enhance model performance more efficiently. Additionally, medical texts often contain complex contextual relationships. Incorporating Knowledge Graphs into Transformer-based models could further strengthen their understanding of medical terminology." + }, + { + "type": "text", + "bbox": [ + 0.068, + 0.345, + 0.487, + 0.553 + ], + "angle": 0, + "content": "Future studies could expand the application of Few-shot Learning in medical NLP, such as developing more effective prompt-based learning techniques. This would enable large language models (LLMs) to achieve high-precision entity extraction with minimal labeled data. Furthermore, real-world medical text data often involves privacy concerns. Optimizing medical entity extraction models while ensuring data security remains a critical research challenge. Finally, by refining Transformer architectures, integrating external medical knowledge, and introducing adaptive learning strategies, medical entity extraction technology could play a more significant role in clinical medicine, drug discovery, and medical literature analysis. These advancements would provide intelligent and efficient tools to support medical research and practice." + }, + { + "type": "title", + "bbox": [ + 0.234, + 0.564, + 0.321, + 0.575 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.07, + 0.581, + 0.486, + 0.628 + ], + "angle": 0, + "content": "[1] Pagad N. S. 
and Pradeep N., “Clinical named entity recognition methods: an overview”, Proceedings of the International Conference on Innovative Computing and Communications: Proceedings of ICICC 2021, Volume 2, pp. 151-165, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.071, + 0.63, + 0.485, + 0.676 + ], + "angle": 0, + "content": "[2] Navarro D. F., Ijaz K., Rezazadegan D., et al., \"Clinical named entity recognition and relation extraction using natural language processing of medical free text: A systematic review\", International Journal of Medical Informatics, vol. 177, 105122, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.071, + 0.679, + 0.487, + 0.724 + ], + "angle": 0, + "content": "[3] Durango M. C., Torres-Silva E. A. and Orozco-Duque A., “Named entity recognition in electronic health records: a methodological review”, Healthcare Informatics Research, vol. 29, no. 4, pp. 286-300, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.071, + 0.727, + 0.486, + 0.763 + ], + "angle": 0, + "content": "[4] Y. Xiang, Q. He, T. Xu, R. Hao, J. Hu and H. Zhang, \"Adaptive Transformer Attention and Multi-Scale Fusion for Spine 3D Segmentation\", arXiv preprint arXiv:2503.12853, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.071, + 0.765, + 0.485, + 0.811 + ], + "angle": 0, + "content": "[5] T. Xu, Y. Xiang, J. Du and H. Zhang, \"Cross-Scale Attention and Multi-Layer Feature Fusion YOLOv8 for Skin Disease Target Detection in Medical Images\", Journal of Computer Technology and Software, vol. 4, no. 2, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.071, + 0.813, + 0.486, + 0.849 + ], + "angle": 0, + "content": "[6] W. He, Y. Zhang, T. Xu, T. An, Y. Liang and B. Zhang, \"Object detection for medical image analysis: Insights from the RT-DETR model\", arXiv preprint arXiv:2501.16469, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.071, + 0.851, + 0.486, + 0.885 + ], + "angle": 0, + "content": "[7] M. Li, R. Hao, S. Shi, Z. Yu, Q. He and J. 
Zhan, “A CNN-Transformer Approach for Image-Text Multimodal Classification with Cross-Modal Feature Fusion”, 2025." + }, + { + "type": "list", + "bbox": [ + 0.07, + 0.581, + 0.487, + 0.885 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.067, + 0.928, + 0.114 + ], + "angle": 0, + "content": "[8] Q. Sun, \"Dynamic Optimization of Human-Computer Interaction Interfaces Using Graph Convolutional Networks and Q-Learning\", Transactions on Computational and Scientific Methods, vol. 5, no. 2, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.116, + 0.928, + 0.151 + ], + "angle": 0, + "content": "[9] S. Duan, \"Systematic Analysis of User Perception for Interface Design Enhancement\", Journal of Computer Science and Software Applications, vol. 5, no. 2, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.154, + 0.929, + 0.188 + ], + "angle": 0, + "content": "[10] J. Hu, T. An, Z. Yu, J. Du and Y. Luo, \"Contrastive Learning for Cold Start Recommendation with Adaptive Feature Fusion\", arXiv preprint arXiv:2502.03664, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.191, + 0.928, + 0.226 + ], + "angle": 0, + "content": "[11] A. Liang, “Personalized Multimodal Recommendations Framework Using Contrastive Learning”, Transactions on Computational and Scientific Methods, vol. 4, no. 11, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.228, + 0.929, + 0.274 + ], + "angle": 0, + "content": "[12] X. Zhou, Y. Zhang, Z. Wang, M. Lu and X. Liu, \"MAFN: multi-level attention fusion network for multimodal named entity recognition\", Multimedia Tools and Applications, vol. 83, no. 15, pp. 45047-45058, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.277, + 0.928, + 0.311 + ], + "angle": 0, + "content": "[13] Y. Xu and Y. Chen, \"Attention-based interactive multi-level feature fusion for named entity recognition\", Scientific Reports, vol. 15, no. 1, 3069, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.314, + 0.928, + 0.36 + ], + "angle": 0, + "content": "[14] Z. Tong, Q. Liu, H. Shi, Y. Xia, S. Wu and X. Y. Zhang, \"Semantics Fusion of Hierarchical Transformers for Multimodal Named Entity Recognition\", Proceedings of the International Conference on Intelligent Computing, pp. 414-426, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.363, + 0.928, + 0.408 + ], + "angle": 0, + "content": "[15] Y. Hu, Q. Chen, J. Du, et al., \"Improving large language models for clinical named entity recognition via prompt engineering\", Journal of the American Medical Informatics Association, vol. 31, no. 9, pp. 1812-1820, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.411, + 0.928, + 0.446 + ], + "angle": 0, + "content": "[16] R. Hao, Y. Xiang, J. Du, Q. He, J. Hu and T. Xu, “A Hybrid CNN-Transformer Model for Heart Disease Prediction Using Life History Data”, arXiv preprint arXiv:2503.02124, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.449, + 0.928, + 0.483 + ], + "angle": 0, + "content": "[17] J. Hu, Y. Xiang, Y. Lin, J. Du, H. Zhang and H. Liu, “Multi-Scale Transformer Architecture for Accurate Medical Image Classification”, arXiv preprint arXiv:2502.06243, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.486, + 0.928, + 0.521 + ], + "angle": 0, + "content": "[18] X. Li, Q. Lu, Y. Li, M. Li and Y. Qi, \"Optimized Unet with Attention Mechanism for Multi-Scale Semantic Segmentation\", arXiv preprint arXiv:2502.03813, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.524, + 0.928, + 0.58 + ], + "angle": 0, + "content": "[19] J. Wei, Y. Liu, X. Huang, X. Zhang, W. Liu and X. Yan, \"Self-Supervised Graph Neural Networks for Enhanced Feature Extraction in Heterogeneous Information Networks\", Proceedings of the 2024 5th International Conference on Machine Learning and Computer Application (ICMLCA), pp. 272-276, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.583, + 0.928, + 0.618 + ], + "angle": 0, + "content": "[20] J. Liu, Y. Zhang, Y. Sheng, Y. Lou, H. Wang and B. Yang, “Context-Aware Rule Mining Using a Dynamic Transformer-Based Framework”, arXiv preprint arXiv:2503.11125, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.621, + 0.928, + 0.655 + ], + "angle": 0, + "content": "[21] J. Zhan, \"Elastic Scheduling of Micro-Modules in Edge Computing Based on LSTM Prediction\", Journal of Computer Technology and Software, vol. 4, no. 2, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.657, + 0.928, + 0.702 + ], + "angle": 0, + "content": "[22] Y. Deng, “A hybrid network congestion prediction method integrating association rules and LSTM for enhanced spatiotemporal forecasting”, Transactions on Computational and Scientific Methods, vol. 5, no. 2, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.706, + 0.928, + 0.753 + ], + "angle": 0, + "content": "[23] X. Yan, W. Wang, M. Xiao, Y. Li and M. Gao, \"Survival prediction across diverse cancer types using neural networks\", Proceedings of the 2024 7th International Conference on Machine Vision and Applications, pp. 134-138, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.756, + 0.928, + 0.789 + ], + "angle": 0, + "content": "[24] K. Pakhale, “Comprehensive overview of named entity recognition: Models, domain-specific applications and challenges”, arXiv preprint arXiv:2309.14084, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.792, + 0.928, + 0.828 + ], + "angle": 0, + "content": "[25] M. Afshar, Y. Gao, D. Gupta, E. Croxford and D. Demner-Fushman, \"On the role of the UMLS in supporting diagnosis generation proposed by Large Language Models\", Journal of Biomedical Informatics, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.83, + 0.928, + 0.865 + ], + "angle": 0, + "content": "[26] V. S. Carmona, S. Jiang and B. 
Dong, “A Multilevel Analysis of PubMed-only BERT-based Biomedical Models”, Proceedings of the 6th Clinical Natural Language Processing Workshop, pp. 105-110, 2024." + }, + { + "type": "list", + "bbox": [ + 0.513, + 0.067, + 0.929, + 0.865 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_04xxx/2504.04385/0a01c7e3-29d7-49b8-ab11-a052914cd3dc_origin.pdf b/data/2025/2504_04xxx/2504.04385/0a01c7e3-29d7-49b8-ab11-a052914cd3dc_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..a13337d31edccfb261ca1bd8002ac5963395c80a --- /dev/null +++ b/data/2025/2504_04xxx/2504.04385/0a01c7e3-29d7-49b8-ab11-a052914cd3dc_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:41262b5d9459a65e0681663f08723cdd443ca2b98997a29189f6ef3ec3a975f2 +size 441793 diff --git a/data/2025/2504_04xxx/2504.04385/full.md b/data/2025/2504_04xxx/2504.04385/full.md new file mode 100644 index 0000000000000000000000000000000000000000..0d9d165fe0ab8b237c27ac863161a0c1b48665a3 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04385/full.md @@ -0,0 +1,183 @@ +# Pre-trained Language Models and Few-shot Learning for Medical Entity Extraction + +Xiaokai Wang Santa Clara University Santa Clara, USA + +Guiran Liu +San Francisco State University +San Francisco, USA + +Binrong Zhu +San Francisco State University +San Francisco, USA + +Jacky He Cornell University New York, USA + +Hongye Zheng +The Chinese University of Hong Kong +Hong Kong, China + +Hanlu Zhang* +Stevens Institute of Technology +Hoboken, USA + +Abstract-This study proposes a medical entity extraction method based on Transformer to enhance the information extraction capability of medical literature. Considering the professionalism and complexity of medical texts, we compare the performance of different pre-trained language models (BERT, BioBERT, PubMedBERT, ClinicalBERT) in medical entity extraction tasks. 
Experimental results show that PubMedBERT achieves the best performance $(F1\text{-score} = 88.8\%)$ , indicating that a language model pre-trained on biomedical literature is more effective in the medical domain. In addition, we analyze the impact of different entity extraction methods (CRF, Span-based, Seq2Seq) and find that the Span-based approach performs best in medical entity extraction tasks $(F1\text{-score} = 88.6\%)$ . It demonstrates superior accuracy in identifying entity boundaries. In low-resource scenarios, we further explore the application of Few-shot Learning in medical entity extraction. Experimental results show that even with only 10-shot training samples, the model achieves an F1-score of $79.1\%$ , verifying the effectiveness of Few-shot Learning under limited data conditions. This study confirms that the combination of pre-trained language models and Few-shot Learning can enhance the accuracy of medical entity extraction. Future research can integrate knowledge graphs and active learning strategies to improve the model's generalization and stability, providing a more effective solution for medical NLP research. + +Keywords- Natural Language Processing, medical named entity recognition, pre-trained language model, Few-shot Learning, information extraction, deep learning + +# I. INTRODUCTION + +Medical entity extraction is a key application of Natural Language Processing (NLP) in healthcare. With the rapid growth of biomedical research, the volume of medical literature is increasing exponentially. Each day, thousands of papers are added to databases such as PubMed, Medline, and Embase. Researchers must extract valuable information from this vast amount of data to support medical research, clinical decision-making, and drug development. However, traditional manual literature screening and analysis are time-consuming and labor-intensive, making it difficult to meet the demand for efficient information retrieval in modern medical research [1]. 
Entity extraction technology provides strong support for medical data mining by automatically identifying structured information + +from unstructured text, such as disease-drug relationships, gene-phenotype associations, and clinical treatment plans [2]. In recent years, deep learning, particularly the Transformer architecture, has significantly advanced NLP, improving the performance of medical entity extraction tasks. + +Traditional medical entity extraction methods mainly rely on rule-based techniques and classical machine learning models. Rule-based approaches analyze text using predefined regular expressions, knowledge base matching, and expert-defined grammar rules. While these methods achieve high accuracy in specific tasks, their generalization ability is limited, making them ineffective for handling complex syntactic structures and diverse language expressions in medical literature. However, medical texts contain highly specialized terminology and hierarchical semantic structures. Traditional models often struggle with feature selection in large-scale literature and fail to capture deep contextual information. Efficient and accurate medical entity extraction has therefore become a central research challenge in medical NLP [3]. + +The introduction of Transformer-based models offers a new solution for medical entity extraction. Pre-trained language models such as BERT, RoBERTa, BioBERT, and PubMedBERT leverage self-attention mechanisms to learn long-range dependencies and capture deep semantic representations in medical texts. Beyond NLP, Transformer-based and hybrid architectures have also demonstrated strong performance in other domains. In computer vision and medical imaging, they have been applied to tasks such as 3D spine segmentation [4], skin disease detection [5], and object detection in clinical scans [6], showing enhanced accuracy through attention mechanisms and multi-scale fusion. 
In the field of multimodal learning, Transformer-CNN architectures have enabled more effective image-text classification through cross-modal feature fusion [7]. Additionally, in human-computer interaction, Transformer-related models have been used for optimizing interface design [8] and improving user experience through graph-based learning and dynamic adaptation [9]. These successes highlight the adaptability and effectiveness of Transformer models across diverse application areas [10-11]. Transformer-based models, such as BioBERT, + +have significantly improved medical entity recognition, relationship extraction, and text classification tasks. Finetuning and transfer learning enhance model adaptability, enabling better understanding of complex medical language structures compared to traditional methods [12]. + +This study proposes a Transformer-based framework to automate and enhance accuracy in medical entity extraction. Effective extraction supports rapid identification of medical evidence, aids clinical decision-making, and accelerates biomedical discoveries. Transformer models also enable multitask learning and generative applications, including summarization and medical reasoning, thereby broadening their utility in medical NLP research [13]. As medical data expands, Transformer models will become increasingly important in precision medicine, diagnostics, and education. However, privacy and ethical issues require ongoing attention. Integrating multi-modal medical data and knowledge graphs with Transformer models may further enhance interpretability and scalability. This research aims to advance medical NLP by improving methods for processing extensive medical literature. + +# II. RELATED WORK + +The development of medical entity extraction has benefited greatly from advances in Transformer-based models and deep learning architectures. 
Notably, recent works have leveraged hierarchical and multimodal Transformer models to enhance named entity recognition (NER) performance. Tong et al. [14] proposed a semantic fusion framework using hierarchical Transformers, which enables the integration of diverse contextual representations. Similarly, prompt-based strategies have been introduced to optimize large language models for specialized entity extraction tasks, achieving better adaptability to domain-specific terminology and limited data scenarios [15]. + +Several studies have applied Transformer mechanisms within hybrid or multi-scale frameworks to strengthen deep representation learning. For instance, Hao et al. [16] designed a + +hybrid convolutional and Transformer-based architecture that captures both local and global dependencies in sequential data, improving the generalization of deep learning models. Multiscale Transformer models further enhance performance by capturing hierarchical information, enabling more nuanced feature extraction [17]. Attention mechanisms, embedded within optimized neural architectures, also contribute to improved semantic segmentation and context-aware classification [18]. + +Beyond Transformer-based models, other neural network enhancements have significantly contributed to the advancement of deep feature extraction. Graph neural frameworks, particularly those that utilize self-supervised learning techniques, have been proposed to enhance the representation capabilities of complex data environments [19]. Additionally, dynamic rule mining mechanisms based on Transformer variants have been developed to facilitate adaptive pattern recognition in unstructured data [20]. + +Sequential modeling techniques also contribute valuable insights. LSTM-based prediction models have demonstrated robustness in handling time-dependent data and adaptive scheduling, providing lessons in efficient learning from limited sequences [21]. 
Combined with pattern discovery methods, such frameworks support enhanced spatiotemporal learning [22]. Additionally, deep neural network architectures have been effectively used to develop robust predictive systems, emphasizing the importance of carefully designed learning structures in handling heterogeneous and high-dimensional data [23]. + +# III. METHOD + +This study proposes a medical literature information extraction model based on Transformer structure, which aims to automatically extract key entities and their relationships from medical texts. The self-attention mechanism architecture in Transformer is shown in Figure 1. + +![](images/9d3604a26a92766c2afa345c58e441e8a3c3bd362cbb6278471a9b45c536d4a7.jpg) +Figure 1. Basic structure diagram of self-attention mechanism + +Given a medical text $X = \{x_{1}, x_{2}, \dots, x_{n}\}$ , where $x_{i}$ represents the i-th word, we first map the text to a high-dimensional vector space using the pre-trained medical domain BERT (such as BioBERT or PubMedBERT) to get the context representation $H = \{h_{1}, h_{2}, \dots, h_{n}\}$ for each word. Transformer computes the relationships between words using a self-attention mechanism. The core calculation is as follows: + +$$ +\operatorname {A t t e n t i o n} (Q, K, V) = \operatorname {s o f t m a x} \left(\frac {Q K ^ {T}}{\sqrt {d _ {k}}}\right) V +$$ + +Where $Q, K, V$ is the query matrix, the key matrix and the value matrix respectively, and $d_k$ is the scaling factor to stabilize the gradient update. Through multi-layer Transformer calculation, deep representation $H$ of medical text can be obtained for subsequent information extraction tasks. + +In the Medical named Entity recognition (NER) task [24], we use sequence annotation to feed Transformer's output $\mathrm{H}$ into a Conditional Random Field (CRF) layer to capture medical entity dependencies [25]. 
For a given label sequence $Y = \{y_{1}, y_{2}, \dots, y_{n}\}$ , define the conditional probability: + +$$ +P (Y \mid X) = \frac {\exp \left(\sum_ {i = 1} ^ {n} W y _ {i - 1} , y _ {i} + h _ {i} ^ {T} W y _ {i}\right)}{\sum_ {Y ^ {\prime}} \exp \left(\sum_ {i = 1} ^ {n} W y ^ {\prime} _ {i - 1} y ^ {\prime} _ {i} + h _ {i} ^ {T} W y ^ {\prime} _ {i}\right)} +$$ + +Where W is the state transition matrix that controls dependencies between labels. The loss function is optimized with Negative Log-Likelihood (NLL): + +$$ +L _ {N E R} = - \sum_ {i = 1} ^ {n} \log P (Y \mid X) +$$ + +In relation extraction task, we adopt two-channel Transformer structure to independently model entity to $(e_1,e_2)$ . First, we compute for each entity its context representation: + +$$ +h _ {e _ {i}} = \frac {1}{| e _ {i} |} \sum_ {x _ {j} \in e _ {i}} h _ {j} +$$ + +Then, the entity pair representation is concatenated and the entity relationship score is calculated through a fully connected layer: + +$$ +r _ {e _ {1}, e _ {2}} = W _ {r} \left[ h _ {e _ {1}} \right] \left[ h _ {e _ {2}} \right] + b _ {r} +$$ + +Finally, cross-entropy loss is used to optimize relational classification: + +$$ +L _ {R E} = - \sum_ {\left(e _ {1}, e _ {2}\right)} y _ {e 1, e 2} \log P \left(r _ {e _ {1}, e _ {2}}\right) +$$ + +Where, $y_{e1,e2}$ is the true label of the entity relationship, and $P(r_{e_1,e_2})$ is the probability of the relationship class predicted by the model. Through the joint optimization of NER and RE tasks, the entity recognition and relationship extraction + +promote each other and improve the accuracy of medical literature information extraction. + +# IV. EXPERIMENT + +# A. Datasets + +This dataset comprises 6,881 disease entities extracted from PubMed abstracts, all of which were validated by biomedical experts and categorized into four groups—Specific, Composite, Modifier, and Undetermined Diseases. 
The data is split into training (5,064 instances), validation (787 instances), and test (1,030 instances) sets. A BIO tagging scheme (B-Begin, I-Inside, O-Outside) is used to clearly delineate entity boundaries, and all entities are aligned with the Unified Medical Language System (UMLS). Preprocessing steps include tokenization, stop-word removal, normalization, and Word Piece tokenization for medical terms. To address data imbalance, a Disease Co-occurrence Network was employed alongside data augmentation techniques such as synonym substitution and entity masking. These methods collectively bolster model performance in medical entity extraction tasks. + +# B. Experimental Results + +First, this paper gives the comparative experimental results of different pre-training models, as shown in Table 1. + +Table 1. Performance comparison of different pre-trained language models on medical literature information extraction + +
ModelPrecisionRecallF1-Score
Bert85.2%82.7%83.9%
BioBert88.4%86.1%87.2%
PubmedBert89.7%87.9%88.8%
ClinicalBert87.9%85.6%86.7%
+ +Experimental results indicate significant differences in the performance of various pre-trained language models in medical entity extraction tasks [26]. Among them, PubMedBERT achieves the highest performance across all evaluation metrics (F1-score = 88.8%). This suggests that its pre-training strategy on large-scale biomedical literature enhances its adaptability to the textual characteristics of medical texts. In comparison, BioBERT also demonstrates high accuracy (88.4%) and recall (86.1%), ranking second only to PubMedBERT. This indicates that BioBERT maintains strong generalization ability in specific medical entity extraction tasks. ClinicalBERT performs slightly worse than BioBERT and PubMedBERT, with a relatively lower recall (85.6%) despite achieving high accuracy (87.9%). This may be attributed to its pre-training on electronic health records (EHRs), which differ in textual style and structure from medical literature. + +General BERT exhibits the weakest performance, with an F1-score of only $83.9\%$ . This result highlights the limitations of general-purpose pre-trained language models when processing specialized medical texts. BERT is pre-trained on a general corpus and lacks domain-specific terminology and contextual understanding, making it less effective for medical entity extraction. In contrast, BioBERT and PubMedBERT, pretrained on PubMed literature, improve their comprehension of medical terms, resulting in superior performance. PubMedBERT's advantage over BioBERT may stem from its training approach. While BioBERT fine-tunes BERT on biomedical texts, PubMedBERT is trained from scratch on + +medical literature. This allows PubMedBERT to capture the linguistic distribution and structural patterns of biomedical texts more comprehensively. + +The findings indicate that choosing domain-specific pretrained language models substantially enhances performance in medical entity extraction tasks. 
PubMedBERT and BioBERT outperform other models in both accuracy and recall. This suggests that generic BERT alone is inadequate for medical Natural Language Processing (NLP) tasks, and domain-adaptive pre-training strategies are crucial for improving model performance. Future research could delve deeper into integrating pre-trained language models with knowledge graph augmentation or multi-task learning to further enhance medical entity extraction capabilities. + +Further, this paper also provides experimental comparative analysis of different named entity recognition methods based on Transformer, and the experimental results are shown in Table 2. + +Table 2. Performance comparison of different named entity recognition methods based on Transformer + +
MethodPrecisionRecallF1-Score
Transformer + CRF88.1%86.3%87.2%
Transformer + Span-based89.4%87.8%88.6%
Transformer + Seq2Seq86.7%85.2%85.9%
+ +Experimental results indicate that different entity extraction methods exhibit varying performance within Transformer-based architectures. Among them, the Span-based approach achieves the highest F1-score (88.6%), demonstrating its superior ability to accurately identify entity boundaries in medical texts. Compared to CRF-based token-by-token sequential labeling, the Span-based method directly predicts entity boundaries, making it more effective in extracting complex medical terms and multi-word expressions. + +The Transformer + CRF method scores slightly lower $(87.2\%)$ but excels at capturing dependencies between entities, making it suitable for structured medical texts. Conversely, the Transformer + Seq2Seq method yields a lower F1-score $(85.9\%)$ , likely due to the decoder's errors in boundary recognition during text generation. These findings suggest that Span-based and CRF methods are more effective for medical entity extraction. Future studies might explore integrating Span-based techniques with CRF to enhance accuracy and stability. Additionally, the study explores Few-shot Learning for medical entity recognition under low-resource conditions, with experimental results detailed in Figure 2. + +Experimental results indicate that the performance of Few-shot Learning in medical entity extraction improves as the number of training samples increases. When training data is extremely limited (1-shot or 5-shot), the model's precision, recall, and F1-score remain low, with the F1-score ranging between $60\%$ and $72\%$ . This suggests that the model struggles to accurately identify medical entities under severe data scarcity. The primary challenge lies in the abundance of + +specialized terminology in medical texts, making it difficult for the model to learn effective patterns from minimal data. However, with 10-shot training, performance improves significantly, with the F1-score reaching $79.1\%$ . 
This result demonstrates that even a small increase in labeled data can substantially enhance the model's learning ability. This aligns with the fundamental characteristic of Few-shot Learning, which efficiently leverages limited samples. + +![](images/19658615318995e7d3899b2ed34c9fd6d6f3bd2d7e27c2b33876b9e924816faa.jpg) +Figure 2. Few-shot Learning Performance on Medical NER + +As the number of training samples increases to 20-shot and beyond, the model's precision, recall, and F1-score continue to rise, eventually plateauing after 50-shot. At 50-shot, the F1-score reaches $88.1\%$ and approaches $89.9\%$ at 100-shot. This indicates that with a sufficient number of training samples, the model can effectively learn entity representations and achieve high recognition accuracy. However, performance gains slow after 50-shot, suggesting that while Few-shot Learning enhances model performance up to a certain threshold, its marginal benefit diminishes as data volume increases. These findings suggest that in medical entity extraction tasks, Few-shot Learning is particularly effective in low-resource settings, whereas traditional supervised learning may offer greater stability when ample labeled data is available. + +Overall, this experiment confirms the effectiveness of Few-shot Learning in medical entity extraction, particularly when labeled data is scarce. Even with a limited number of samples, the model demonstrates significant performance improvements. Future research could explore more advanced Few-shot Learning techniques, such as metric learning-based methods, GPT variants optimized for prompt design, or small-sample learning approaches integrated with knowledge graphs. These strategies could further enhance model generalization, enabling more effective applications in complex medical NLP tasks. + +# V. 
CONCLUSION + +This study proposes a medical entity extraction method based on Transformer and examines the effects of different pretrained language models, extraction methods, and Few-shot Learning in low-resource scenarios. Experimental results indicate that PubMedBERT and BioBERT outperform other models in medical text processing, significantly improving entity extraction accuracy. Compared to traditional sequence + +labeling approaches, the Span-based entity extraction method achieves the best performance, demonstrating that directly predicting entity boundaries enhances the recognition of complex medical terms. Additionally, Few-shot Learning exhibits strong adaptability in low-resource conditions, achieving high F1-scores with minimal training data. This highlights its potential for medical NLP applications. + +Despite these promising results, several aspects require further optimization. While the Transformer architecture enhances medical entity extraction, it incurs high computational costs, particularly on large-scale datasets. Future research could explore Knowledge Distillation or Lightweight Transformer variants to improve computational efficiency. This indicates that integrating Active Learning or Data Augmentation strategies may enhance model performance more efficiently. Additionally, medical texts often contain complex contextual relationships. Incorporating Knowledge Graphs into Transformer-based models could further strengthen their understanding of medical terminology. + +Future studies could expand the application of Few-shot Learning in medical NLP, such as developing more effective prompt-based learning techniques. This would enable large language models (LLMs) to achieve high-precision entity extraction with minimal labeled data. Furthermore, real-world medical text data often involves privacy concerns. Optimizing medical entity extraction models while ensuring data security remains a critical research challenge. 
Finally, by refining Transformer architectures, integrating external medical knowledge, and introducing adaptive learning strategies, medical entity extraction technology could play a more significant role in clinical medicine, drug discovery, and medical literature analysis. These advancements would provide intelligent and efficient tools to support medical research and practice. + +# REFERENCES + +[1] Pagad N. S. and Pradeep N., “Clinical named entity recognition methods: an overview”, Proceedings of the International Conference on Innovative Computing and Communications: Proceedings of ICICC 2021, Volume 2, pp. 151-165, 2022. +[2] Navarro D. F., Ijaz K., Rezazadegan D., et al., "Clinical named entity recognition and relation extraction using natural language processing of medical free text: A systematic review", International Journal of Medical Informatics, vol. 177, 105122, 2023. +[3] Durango M. C., Torres-Silva E. A. and Orozco-Duque A., “Named entity recognition in electronic health records: a methodological review”, Healthcare Informatics Research, vol. 29, no. 4, pp. 286-300, 2023. +[4] Y. Xiang, Q. He, T. Xu, R. Hao, J. Hu and H. Zhang, "Adaptive Transformer Attention and Multi-Scale Fusion for Spine 3D Segmentation", arXiv preprint arXiv:2503.12853, 2025. +[5] T. Xu, Y. Xiang, J. Du and H. Zhang, "Cross-Scale Attention and Multi-Layer Feature Fusion YOLOv8 for Skin Disease Target Detection in Medical Images", Journal of Computer Technology and Software, vol. 4, no. 2, 2025. +[6] W. He, Y. Zhang, T. Xu, T. An, Y. Liang and B. Zhang, "Object detection for medical image analysis: Insights from the RT-DETR model", arXiv preprint arXiv:2501.16469, 2025. +[7] M. Li, R. Hao, S. Shi, Z. Yu, Q. He and J. Zhan, “A CNN-Transformer Approach for Image-Text Multimodal Classification with Cross-Modal Feature Fusion”, 2025. + +[8] Q. 
Sun, "Dynamic Optimization of Human-Computer Interaction Interfaces Using Graph Convolutional Networks and Q-Learning", Transactions on Computational and Scientific Methods, vol. 5, no. 2, 2025. +[9] S. Duan, "Systematic Analysis of User Perception for Interface Design Enhancement", Journal of Computer Science and Software Applications, vol. 5, no. 2, 2024. +[10] J. Hu, T. An, Z. Yu, J. Du and Y. Luo, "Contrastive Learning for Cold Start Recommendation with Adaptive Feature Fusion", arXiv preprint arXiv:2502.03664, 2025. +[11] A. Liang, “Personalized Multimodal Recommendations Framework Using Contrastive Learning”, Transactions on Computational and Scientific Methods, vol. 4, no. 11, 2024. +[12] X. Zhou, Y. Zhang, Z. Wang, M. Lu and X. Liu, "MAFN: multi-level attention fusion network for multimodal named entity recognition", Multimedia Tools and Applications, vol. 83, no. 15, pp. 45047-45058, 2024. +[13] Y. Xu and Y. Chen, "Attention-based interactive multi-level feature fusion for named entity recognition", Scientific Reports, vol. 15, no. 1, 3069, 2025. +[14] Z. Tong, Q. Liu, H. Shi, Y. Xia, S. Wu and X. Y. Zhang, "Semantics Fusion of Hierarchical Transformers for Multimodal Named Entity Recognition", Proceedings of the International Conference on Intelligent Computing, pp. 414-426, 2024. +[15] Y. Hu, Q. Chen, J. Du, et al., "Improving large language models for clinical named entity recognition via prompt engineering", Journal of the American Medical Informatics Association, vol. 31, no. 9, pp. 1812-1820, 2024. +[16] R. Hao, Y. Xiang, J. Du, Q. He, J. Hu and T. Xu, “A Hybrid CNN-Transformer Model for Heart Disease Prediction Using Life History Data”, arXiv preprint arXiv:2503.02124, 2025. +[17] J. Hu, Y. Xiang, Y. Lin, J. Du, H. Zhang and H. Liu, “Multi-Scale Transformer Architecture for Accurate Medical Image Classification”, arXiv preprint arXiv:2502.06243, 2025. +[18] X. Li, Q. Lu, Y. Li, M. Li and Y. 
Qi, "Optimized Unet with Attention Mechanism for Multi-Scale Semantic Segmentation", arXiv preprint arXiv:2502.03813, 2025. +[19] J. Wei, Y. Liu, X. Huang, X. Zhang, W. Liu and X. Yan, "Self-Supervised Graph Neural Networks for Enhanced Feature Extraction in Heterogeneous Information Networks", Proceedings of the 2024 5th International Conference on Machine Learning and Computer Application (ICMLCA), pp. 272-276, 2024. +[20] J. Liu, Y. Zhang, Y. Sheng, Y. Lou, H. Wang and B. Yang, “Context-Aware Rule Mining Using a Dynamic Transformer-Based Framework”, arXiv preprint arXiv:2503.11125, 2025. +[21] J. Zhan, "Elastic Scheduling of Micro-Modules in Edge Computing Based on LSTM Prediction", Journal of Computer Technology and Software, vol. 4, no. 2, 2025. +[22] Y. Deng, “A hybrid network congestion prediction method integrating association rules and LSTM for enhanced spatiotemporal forecasting”, Transactions on Computational and Scientific Methods, vol. 5, no. 2, 2025. +[23] X. Yan, W. Wang, M. Xiao, Y. Li and M. Gao, "Survival prediction across diverse cancer types using neural networks", Proceedings of the 2024 7th International Conference on Machine Vision and Applications, pp. 134-138, 2024. +[24] K. Pakhale, “Comprehensive overview of named entity recognition: Models, domain-specific applications and challenges”, arXiv preprint arXiv:2309.14084, 2023. +[25] M. Afshar, Y. Gao, D. Gupta, E. Croxford and D. Demner-Fushman, "On the role of the UMLS in supporting diagnosis generation proposed by Large Language Models", Journal of Biomedical Informatics, 2024. +[26] V. S. Carmona, S. Jiang and B. Dong, “A Multilevel Analysis of PubMed-only BERT-based Biomedical Models”, Proceedings of the 6th Clinical Natural Language Processing Workshop, pp. 105-110, 2024. 
\ No newline at end of file diff --git a/data/2025/2504_04xxx/2504.04385/images/19658615318995e7d3899b2ed34c9fd6d6f3bd2d7e27c2b33876b9e924816faa.jpg b/data/2025/2504_04xxx/2504.04385/images/19658615318995e7d3899b2ed34c9fd6d6f3bd2d7e27c2b33876b9e924816faa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9a9e57be91cd23947f5243b71dd15e369b942dd1 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04385/images/19658615318995e7d3899b2ed34c9fd6d6f3bd2d7e27c2b33876b9e924816faa.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cae40625de3c9e1cfe5574d12596964b74f7cf01cb8ff23711459dbe1efa1f82 +size 32665 diff --git a/data/2025/2504_04xxx/2504.04385/images/37b63494711363b2f6809f3ea20390de142a2e4cb66232eea8691aece3eede5a.jpg b/data/2025/2504_04xxx/2504.04385/images/37b63494711363b2f6809f3ea20390de142a2e4cb66232eea8691aece3eede5a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0b215ee037230d90381800fcf9df1584eabd50fe --- /dev/null +++ b/data/2025/2504_04xxx/2504.04385/images/37b63494711363b2f6809f3ea20390de142a2e4cb66232eea8691aece3eede5a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:368b5b0adfda43b0542ef60a018c277bea4d1a83889daabebf83b6e6b1d5beed +size 3449 diff --git a/data/2025/2504_04xxx/2504.04385/images/4963fc375c3ee3c31101fb038418a8f027e56712e98879de856ce5f64e131925.jpg b/data/2025/2504_04xxx/2504.04385/images/4963fc375c3ee3c31101fb038418a8f027e56712e98879de856ce5f64e131925.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e7b753f00606091cb9402eca2fae110fce240881 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04385/images/4963fc375c3ee3c31101fb038418a8f027e56712e98879de856ce5f64e131925.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:696d592ec90ba65e1950832fee5c0ab5e6b9aa4294d1ab67341f19fbffecbc09 +size 3633 diff --git 
a/data/2025/2504_04xxx/2504.04385/images/776092a475e2b8a912e1ba6785c1da6c5327267b45ddd23298ac0e35218f7741.jpg b/data/2025/2504_04xxx/2504.04385/images/776092a475e2b8a912e1ba6785c1da6c5327267b45ddd23298ac0e35218f7741.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0f4cb17b8d3ddc8ffe03cf4a7b774b09b4f00e45 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04385/images/776092a475e2b8a912e1ba6785c1da6c5327267b45ddd23298ac0e35218f7741.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a8dad2f3ca2cef8b63caa1530c932247af73ba82ac236424763a49438286e508 +size 7804 diff --git a/data/2025/2504_04xxx/2504.04385/images/9d3604a26a92766c2afa345c58e441e8a3c3bd362cbb6278471a9b45c536d4a7.jpg b/data/2025/2504_04xxx/2504.04385/images/9d3604a26a92766c2afa345c58e441e8a3c3bd362cbb6278471a9b45c536d4a7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..45ac297b3b291b43f2289c9878a92f87e2763523 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04385/images/9d3604a26a92766c2afa345c58e441e8a3c3bd362cbb6278471a9b45c536d4a7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c28dfdbe6cad1570e9561a3b48635bf6b597bb99b024fa42702e4bc31f476c84 +size 52297 diff --git a/data/2025/2504_04xxx/2504.04385/images/a034c5803646e35b5eefd953e09391d8b22478718f3167ac68e1c50d13b253ae.jpg b/data/2025/2504_04xxx/2504.04385/images/a034c5803646e35b5eefd953e09391d8b22478718f3167ac68e1c50d13b253ae.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e7d6de49602bc411fb2a3d5768fec64643eecdec --- /dev/null +++ b/data/2025/2504_04xxx/2504.04385/images/a034c5803646e35b5eefd953e09391d8b22478718f3167ac68e1c50d13b253ae.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:30a70dec92d345ac2fb42a02901a69a812828cd9e1ec5d993cb95841e2a443c1 +size 4876 diff --git a/data/2025/2504_04xxx/2504.04385/images/c20347969caa21737f64745163bc45173ab8b668ea1273abb52d6c61919f2057.jpg 
b/data/2025/2504_04xxx/2504.04385/images/c20347969caa21737f64745163bc45173ab8b668ea1273abb52d6c61919f2057.jpg new file mode 100644 index 0000000000000000000000000000000000000000..28aeda4c64fb6e5418a848e0fc73f63cd5f97448 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04385/images/c20347969caa21737f64745163bc45173ab8b668ea1273abb52d6c61919f2057.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1f5b626979c2af6ffca0d43fadd9304309b8cb6176ad5e7808f8ac4d131551fd +size 4678 diff --git a/data/2025/2504_04xxx/2504.04385/images/c85b7124e86f453b9452fcc538f804ff2969528d9cb8101a4c90216329642fcc.jpg b/data/2025/2504_04xxx/2504.04385/images/c85b7124e86f453b9452fcc538f804ff2969528d9cb8101a4c90216329642fcc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..82db93e5e3a4de911606b8baf8b7172bae4d576f --- /dev/null +++ b/data/2025/2504_04xxx/2504.04385/images/c85b7124e86f453b9452fcc538f804ff2969528d9cb8101a4c90216329642fcc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6485b2f5e1fef7aacd8290184e002f4d34bde95c1f8a970c3597aa515c7d3cb7 +size 25551 diff --git a/data/2025/2504_04xxx/2504.04385/images/f369296a1337c06c0f7fcb9a051fbf46e4453366724355b63c834683d53a134b.jpg b/data/2025/2504_04xxx/2504.04385/images/f369296a1337c06c0f7fcb9a051fbf46e4453366724355b63c834683d53a134b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1a78105dcecf5876bdfc6820f7f21bc9d97d32bc --- /dev/null +++ b/data/2025/2504_04xxx/2504.04385/images/f369296a1337c06c0f7fcb9a051fbf46e4453366724355b63c834683d53a134b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e546d5028b220443b6a66b47922124470d0d7f7607b339a6eb7d8a5445bb812d +size 24711 diff --git a/data/2025/2504_04xxx/2504.04385/images/f63c9f1777fc536eb5fce92a53a025da029f12b4081584b5f1ecca27af281aa6.jpg b/data/2025/2504_04xxx/2504.04385/images/f63c9f1777fc536eb5fce92a53a025da029f12b4081584b5f1ecca27af281aa6.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..0be386f0a119d271cc9a8e2acee5fcb77dc9fd36 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04385/images/f63c9f1777fc536eb5fce92a53a025da029f12b4081584b5f1ecca27af281aa6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e62327917a0bfde8dbaf0ed3ecf1d70d577f10d7d5f11dd1607c7d9c3319b259 +size 12947 diff --git a/data/2025/2504_04xxx/2504.04385/layout.json b/data/2025/2504_04xxx/2504.04385/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..33425995eea0f27f8675c5cfd554fe65b669d762 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04385/layout.json @@ -0,0 +1,3715 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 48, + 54, + 563, + 109 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 54, + 563, + 109 + ], + "spans": [ + { + "bbox": [ + 48, + 54, + 563, + 109 + ], + "type": "text", + "content": "Pre-trained Language Models and Few-shot Learning for Medical Entity Extraction" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 82, + 114, + 167, + 151 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 114, + 167, + 151 + ], + "spans": [ + { + "bbox": [ + 82, + 114, + 167, + 151 + ], + "type": "text", + "content": "Xiaokai Wang Santa Clara University Santa Clara, USA" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 248, + 114, + 362, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 248, + 114, + 362, + 149 + ], + "spans": [ + { + "bbox": [ + 248, + 114, + 362, + 149 + ], + "type": "text", + "content": "Guiran Liu \nSan Francisco State University \nSan Francisco, USA" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 429, + 114, + 542, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 429, + 114, + 542, + 149 + ], + "spans": [ + { + "bbox": [ + 429, + 114, + 542, + 149 + ], + "type": "text", + "content": "Binrong Zhu \nSan Francisco State University \nSan Francisco, USA" 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 89, + 176, + 159, + 210 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 176, + 159, + 210 + ], + "spans": [ + { + "bbox": [ + 89, + 176, + 159, + 210 + ], + "type": "text", + "content": "Jacky He Cornell University New York, USA" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 235, + 176, + 376, + 212 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 235, + 176, + 376, + 212 + ], + "spans": [ + { + "bbox": [ + 235, + 176, + 376, + 212 + ], + "type": "text", + "content": "Hongye Zheng \nThe Chinese University of Hong Kong \nHong Kong, China" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 429, + 176, + 544, + 211 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 429, + 176, + 544, + 211 + ], + "spans": [ + { + "bbox": [ + 429, + 176, + 544, + 211 + ], + "type": "text", + "content": "Hanlu Zhang* \nStevens Institute of Technology \nHoboken, USA" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 42, + 262, + 297, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 262, + 297, + 510 + ], + "spans": [ + { + "bbox": [ + 42, + 262, + 297, + 510 + ], + "type": "text", + "content": "Abstract-This study proposes a medical entity extraction method based on Transformer to enhance the information extraction capability of medical literature. Considering the professionalism and complexity of medical texts, we compare the performance of different pre-trained language models (BERT, BioBERT, PubMedBERT, ClinicalBERT) in medical entity extraction tasks. 
Experimental results show that PubMedBERT achieves the best performance " + }, + { + "bbox": [ + 42, + 262, + 297, + 510 + ], + "type": "inline_equation", + "content": "(F1\\text{-score} = 88.8\\%)" + }, + { + "bbox": [ + 42, + 262, + 297, + 510 + ], + "type": "text", + "content": ", indicating that a language model pre-trained on biomedical literature is more effective in the medical domain. In addition, we analyze the impact of different entity extraction methods (CRF, Span-based, Seq2Seq) and find that the Span-based approach performs best in medical entity extraction tasks " + }, + { + "bbox": [ + 42, + 262, + 297, + 510 + ], + "type": "inline_equation", + "content": "(F1\\text{-score} = 88.6\\%)" + }, + { + "bbox": [ + 42, + 262, + 297, + 510 + ], + "type": "text", + "content": ". It demonstrates superior accuracy in identifying entity boundaries. In low-resource scenarios, we further explore the application of Few-shot Learning in medical entity extraction. Experimental results show that even with only 10-shot training samples, the model achieves an F1-score of " + }, + { + "bbox": [ + 42, + 262, + 297, + 510 + ], + "type": "inline_equation", + "content": "79.1\\%" + }, + { + "bbox": [ + 42, + 262, + 297, + 510 + ], + "type": "text", + "content": ", verifying the effectiveness of Few-shot Learning under limited data conditions. This study confirms that the combination of pre-trained language models and Few-shot Learning can enhance the accuracy of medical entity extraction. Future research can integrate knowledge graphs and active learning strategies to improve the model's generalization and stability, providing a more effective solution for medical NLP research." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 43, + 520, + 296, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 520, + 296, + 552 + ], + "spans": [ + { + "bbox": [ + 43, + 520, + 296, + 552 + ], + "type": "text", + "content": "Keywords- Natural Language Processing, medical named entity recognition, pre-trained language model, Few-shot Learning, information extraction, deep learning" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 559, + 216, + 570 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 559, + 216, + 570 + ], + "spans": [ + { + "bbox": [ + 132, + 559, + 216, + 570 + ], + "type": "text", + "content": "I. INTRODUCTION" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 43, + 574, + 297, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 574, + 297, + 717 + ], + "spans": [ + { + "bbox": [ + 43, + 574, + 297, + 717 + ], + "type": "text", + "content": "Medical entity extraction is a key application of Natural Language Processing (NLP) in healthcare. With the rapid growth of biomedical research, the volume of medical literature is increasing exponentially. Each day, thousands of papers are added to databases such as PubMed, Medline, and Embase. Researchers must extract valuable information from this vast amount of data to support medical research, clinical decision-making, and drug development. However, traditional manual literature screening and analysis are time-consuming and labor-intensive, making it difficult to meet the demand for efficient information retrieval in modern medical research [1]. 
Entity extraction technology provides strong support for medical data mining by automatically identifying structured information" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 262, + 567, + 317 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 262, + 567, + 317 + ], + "spans": [ + { + "bbox": [ + 313, + 262, + 567, + 317 + ], + "type": "text", + "content": "from unstructured text, such as disease-drug relationships, gene-phenotype associations, and clinical treatment plans [2]. In recent years, deep learning, particularly the Transformer architecture, has significantly advanced NLP, improving the performance of medical entity extraction tasks." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 323, + 567, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 323, + 567, + 475 + ], + "spans": [ + { + "bbox": [ + 313, + 323, + 567, + 475 + ], + "type": "text", + "content": "Traditional medical entity extraction methods mainly rely on rule-based techniques and classical machine learning models. Rule-based approaches analyze text using predefined regular expressions, knowledge base matching, and expert-defined grammar rules. While these methods achieve high accuracy in specific tasks, their generalization ability is limited, making them ineffective for handling complex syntactic structures and diverse language expressions in medical literature. However, medical texts contain highly specialized terminology and hierarchical semantic structures. Traditional models often struggle with feature selection in large-scale literature and fail to capture deep contextual information. Efficient and accurate medical entity extraction has therefore become a central research challenge in medical NLP [3]." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 481, + 567, + 711 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 481, + 567, + 711 + ], + "spans": [ + { + "bbox": [ + 313, + 481, + 567, + 711 + ], + "type": "text", + "content": "The introduction of Transformer-based models offers a new solution for medical entity extraction. Pre-trained language models such as BERT, RoBERTa, BioBERT, and PubMedBERT leverage self-attention mechanisms to learn long-range dependencies and capture deep semantic representations in medical texts. Beyond NLP, Transformer-based and hybrid architectures have also demonstrated strong performance in other domains. In computer vision and medical imaging, they have been applied to tasks such as 3D spine segmentation [4], skin disease detection [5], and object detection in clinical scans [6], showing enhanced accuracy through attention mechanisms and multi-scale fusion. In the field of multimodal learning, Transformer-CNN architectures have enabled more effective image-text classification through cross-modal feature fusion [7]. Additionally, in human-computer interaction, Transformer-related models have been used for optimizing interface design [8] and improving user experience through graph-based learning and dynamic adaptation [9]. These successes highlight the adaptability and effectiveness of Transformer models across diverse application areas [10-11]. Transformer-based models, such as BioBERT," + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 43, + 54, + 298, + 109 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 54, + 298, + 109 + ], + "spans": [ + { + "bbox": [ + 43, + 54, + 298, + 109 + ], + "type": "text", + "content": "have significantly improved medical entity recognition, relationship extraction, and text classification tasks. 
Finetuning and transfer learning enhance model adaptability, enabling better understanding of complex medical language structures compared to traditional methods [12]." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 43, + 114, + 298, + 278 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 114, + 298, + 278 + ], + "spans": [ + { + "bbox": [ + 43, + 114, + 298, + 278 + ], + "type": "text", + "content": "This study proposes a Transformer-based framework to automate and enhance accuracy in medical entity extraction. Effective extraction supports rapid identification of medical evidence, aids clinical decision-making, and accelerates biomedical discoveries. Transformer models also enable multitask learning and generative applications, including summarization and medical reasoning, thereby broadening their utility in medical NLP research [13]. As medical data expands, Transformer models will become increasingly important in precision medicine, diagnostics, and education. However, privacy and ethical issues require ongoing attention. Integrating multi-modal medical data and knowledge graphs with Transformer models may further enhance interpretability and scalability. This research aims to advance medical NLP by improving methods for processing extensive medical literature." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 129, + 287, + 217, + 297 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 287, + 217, + 297 + ], + "spans": [ + { + "bbox": [ + 129, + 287, + 217, + 297 + ], + "type": "text", + "content": "II. 
RELATED WORK" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 43, + 301, + 298, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 301, + 298, + 422 + ], + "spans": [ + { + "bbox": [ + 43, + 301, + 298, + 422 + ], + "type": "text", + "content": "The development of medical entity extraction has benefited greatly from advances in Transformer-based models and deep learning architectures. Notably, recent works have leveraged hierarchical and multimodal Transformer models to enhance named entity recognition (NER) performance. Tong et al. [14] proposed a semantic fusion framework using hierarchical Transformers, which enables the integration of diverse contextual representations. Similarly, prompt-based strategies have been introduced to optimize large language models for specialized entity extraction tasks, achieving better adaptability to domain-specific terminology and limited data scenarios [15]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 43, + 427, + 298, + 461 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 427, + 298, + 461 + ], + "spans": [ + { + "bbox": [ + 43, + 427, + 298, + 461 + ], + "type": "text", + "content": "Several studies have applied Transformer mechanisms within hybrid or multi-scale frameworks to strengthen deep representation learning. For instance, Hao et al. [16] designed a" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 314, + 54, + 568, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 54, + 568, + 152 + ], + "spans": [ + { + "bbox": [ + 314, + 54, + 568, + 152 + ], + "type": "text", + "content": "hybrid convolutional and Transformer-based architecture that captures both local and global dependencies in sequential data, improving the generalization of deep learning models. Multiscale Transformer models further enhance performance by capturing hierarchical information, enabling more nuanced feature extraction [17]. 
Attention mechanisms, embedded within optimized neural architectures, also contribute to improved semantic segmentation and context-aware classification [18]." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 314, + 157, + 568, + 257 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 157, + 568, + 257 + ], + "spans": [ + { + "bbox": [ + 314, + 157, + 568, + 257 + ], + "type": "text", + "content": "Beyond Transformer-based models, other neural network enhancements have significantly contributed to the advancement of deep feature extraction. Graph neural frameworks, particularly those that utilize self-supervised learning techniques, have been proposed to enhance the representation capabilities of complex data environments [19]. Additionally, dynamic rule mining mechanisms based on Transformer variants have been developed to facilitate adaptive pattern recognition in unstructured data [20]." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 314, + 262, + 568, + 382 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 262, + 568, + 382 + ], + "spans": [ + { + "bbox": [ + 314, + 262, + 568, + 382 + ], + "type": "text", + "content": "Sequential modeling techniques also contribute valuable insights. LSTM-based prediction models have demonstrated robustness in handling time-dependent data and adaptive scheduling, providing lessons in efficient learning from limited sequences [21]. Combined with pattern discovery methods, such frameworks support enhanced spatiotemporal learning [22]. Additionally, deep neural network architectures have been effectively used to develop robust predictive systems, emphasizing the importance of carefully designed learning structures in handling heterogeneous and high-dimensional data [23]." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 413, + 391, + 472, + 401 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 413, + 391, + 472, + 401 + ], + "spans": [ + { + "bbox": [ + 413, + 391, + 472, + 401 + ], + "type": "text", + "content": "III. METHOD" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 314, + 407, + 568, + 463 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 407, + 568, + 463 + ], + "spans": [ + { + "bbox": [ + 314, + 407, + 568, + 463 + ], + "type": "text", + "content": "This study proposes a medical literature information extraction model based on Transformer structure, which aims to automatically extract key entities and their relationships from medical texts. The self-attention mechanism architecture in Transformer is shown in Figure 1." + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 140, + 472, + 517, + 670 + ], + "blocks": [ + { + "bbox": [ + 140, + 472, + 517, + 670 + ], + "lines": [ + { + "bbox": [ + 140, + 472, + 517, + 670 + ], + "spans": [ + { + "bbox": [ + 140, + 472, + 517, + 670 + ], + "type": "image", + "image_path": "9d3604a26a92766c2afa345c58e441e8a3c3bd362cbb6278471a9b45c536d4a7.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 182, + 681, + 429, + 693 + ], + "lines": [ + { + "bbox": [ + 182, + 681, + 429, + 693 + ], + "spans": [ + { + "bbox": [ + 182, + 681, + 429, + 693 + ], + "type": "text", + "content": "Figure 1. 
Basic structure diagram of self-attention mechanism" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 41, + 56, + 298, + 147 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 56, + 298, + 147 + ], + "spans": [ + { + "bbox": [ + 41, + 56, + 298, + 147 + ], + "type": "text", + "content": "Given a medical text " + }, + { + "bbox": [ + 41, + 56, + 298, + 147 + ], + "type": "inline_equation", + "content": "X = \\{x_{1}, x_{2}, \\dots, x_{n}\\}" + }, + { + "bbox": [ + 41, + 56, + 298, + 147 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 41, + 56, + 298, + 147 + ], + "type": "inline_equation", + "content": "x_{i}" + }, + { + "bbox": [ + 41, + 56, + 298, + 147 + ], + "type": "text", + "content": " represents the i-th word, we first map the text to a high-dimensional vector space using the pre-trained medical domain BERT (such as BioBERT or PubMedBERT) to get the context representation " + }, + { + "bbox": [ + 41, + 56, + 298, + 147 + ], + "type": "inline_equation", + "content": "H = \\{h_{1}, h_{2}, \\dots, h_{n}\\}" + }, + { + "bbox": [ + 41, + 56, + 298, + 147 + ], + "type": "text", + "content": " for each word. Transformer computes the relationships between words using a self-attention mechanism. 
The core calculation is as follows:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 63, + 150, + 249, + 186 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 150, + 249, + 186 + ], + "spans": [ + { + "bbox": [ + 63, + 150, + 249, + 186 + ], + "type": "interline_equation", + "content": "\\operatorname {A t t e n t i o n} (Q, K, V) = \\operatorname {s o f t m a x} \\left(\\frac {Q K ^ {T}}{\\sqrt {d _ {k}}}\\right) V", + "image_path": "776092a475e2b8a912e1ba6785c1da6c5327267b45ddd23298ac0e35218f7741.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 41, + 190, + 298, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 190, + 298, + 258 + ], + "spans": [ + { + "bbox": [ + 41, + 190, + 298, + 258 + ], + "type": "text", + "content": "Where " + }, + { + "bbox": [ + 41, + 190, + 298, + 258 + ], + "type": "inline_equation", + "content": "Q, K, V" + }, + { + "bbox": [ + 41, + 190, + 298, + 258 + ], + "type": "text", + "content": " is the query matrix, the key matrix and the value matrix respectively, and " + }, + { + "bbox": [ + 41, + 190, + 298, + 258 + ], + "type": "inline_equation", + "content": "d_k" + }, + { + "bbox": [ + 41, + 190, + 298, + 258 + ], + "type": "text", + "content": " is the scaling factor to stabilize the gradient update. Through multi-layer Transformer calculation, deep representation " + }, + { + "bbox": [ + 41, + 190, + 298, + 258 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 41, + 190, + 298, + 258 + ], + "type": "text", + "content": " of medical text can be obtained for subsequent information extraction tasks." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 41, + 259, + 298, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 259, + 298, + 321 + ], + "spans": [ + { + "bbox": [ + 41, + 259, + 298, + 321 + ], + "type": "text", + "content": "In the Medical named Entity recognition (NER) task [24], we use sequence annotation to feed Transformer's output " + }, + { + "bbox": [ + 41, + 259, + 298, + 321 + ], + "type": "inline_equation", + "content": "\\mathrm{H}" + }, + { + "bbox": [ + 41, + 259, + 298, + 321 + ], + "type": "text", + "content": " into a Conditional Random Field (CRF) layer to capture medical entity dependencies [25]. For a given label sequence " + }, + { + "bbox": [ + 41, + 259, + 298, + 321 + ], + "type": "inline_equation", + "content": "Y = \\{y_{1}, y_{2}, \\dots, y_{n}\\}" + }, + { + "bbox": [ + 41, + 259, + 298, + 321 + ], + "type": "text", + "content": ", define the conditional probability:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 58, + 328, + 266, + 369 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 328, + 266, + 369 + ], + "spans": [ + { + "bbox": [ + 58, + 328, + 266, + 369 + ], + "type": "interline_equation", + "content": "P (Y \\mid X) = \\frac {\\exp \\left(\\sum_ {i = 1} ^ {n} W y _ {i - 1} , y _ {i} + h _ {i} ^ {T} W y _ {i}\\right)}{\\sum_ {Y ^ {\\prime}} \\exp \\left(\\sum_ {i = 1} ^ {n} W y ^ {\\prime} _ {i - 1} y ^ {\\prime} _ {i} + h _ {i} ^ {T} W y ^ {\\prime} _ {i}\\right)}", + "image_path": "f63c9f1777fc536eb5fce92a53a025da029f12b4081584b5f1ecca27af281aa6.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 41, + 372, + 298, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 372, + 298, + 407 + ], + "spans": [ + { + "bbox": [ + 41, + 372, + 298, + 407 + ], + "type": "text", + "content": "Where W is the state transition matrix that controls dependencies between labels. 
The loss function is optimized with Negative Log-Likelihood (NLL):" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 117, + 411, + 235, + 441 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 411, + 235, + 441 + ], + "spans": [ + { + "bbox": [ + 117, + 411, + 235, + 441 + ], + "type": "interline_equation", + "content": "L _ {N E R} = - \\sum_ {i = 1} ^ {n} \\log P (Y \\mid X)", + "image_path": "c20347969caa21737f64745163bc45173ab8b668ea1273abb52d6c61919f2057.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 41, + 445, + 298, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 445, + 298, + 496 + ], + "spans": [ + { + "bbox": [ + 41, + 445, + 298, + 496 + ], + "type": "text", + "content": "In relation extraction task, we adopt two-channel Transformer structure to independently model entity to " + }, + { + "bbox": [ + 41, + 445, + 298, + 496 + ], + "type": "inline_equation", + "content": "(e_1,e_2)" + }, + { + "bbox": [ + 41, + 445, + 298, + 496 + ], + "type": "text", + "content": ". 
First, we compute for each entity its context representation:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 138, + 499, + 213, + 531 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 499, + 213, + 531 + ], + "spans": [ + { + "bbox": [ + 138, + 499, + 213, + 531 + ], + "type": "interline_equation", + "content": "h _ {e _ {i}} = \\frac {1}{| e _ {i} |} \\sum_ {x _ {j} \\in e _ {i}} h _ {j}", + "image_path": "37b63494711363b2f6809f3ea20390de142a2e4cb66232eea8691aece3eede5a.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 41, + 536, + 298, + 570 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 536, + 298, + 570 + ], + "spans": [ + { + "bbox": [ + 41, + 536, + 298, + 570 + ], + "type": "text", + "content": "Then, the entity pair representation is concatenated and the entity relationship score is calculated through a fully connected layer:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 122, + 573, + 231, + 590 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 573, + 231, + 590 + ], + "spans": [ + { + "bbox": [ + 122, + 573, + 231, + 590 + ], + "type": "interline_equation", + "content": "r _ {e _ {1}, e _ {2}} = W _ {r} \\left[ h _ {e _ {1}} \\right] \\left[ h _ {e _ {2}} \\right] + b _ {r}", + "image_path": "4963fc375c3ee3c31101fb038418a8f027e56712e98879de856ce5f64e131925.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 41, + 594, + 298, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 594, + 298, + 616 + ], + "spans": [ + { + "bbox": [ + 41, + 594, + 298, + 616 + ], + "type": "text", + "content": "Finally, cross-entropy loss is used to optimize relational classification:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 109, + 618, + 244, + 646 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 618, + 244, + 646 + ], + "spans": [ + { + "bbox": [ + 109, + 618, + 
244, + 646 + ], + "type": "interline_equation", + "content": "L _ {R E} = - \\sum_ {\\left(e _ {1}, e _ {2}\\right)} y _ {e 1, e 2} \\log P \\left(r _ {e _ {1}, e _ {2}}\\right)", + "image_path": "a034c5803646e35b5eefd953e09391d8b22478718f3167ac68e1c50d13b253ae.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 41, + 652, + 298, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 652, + 298, + 712 + ], + "spans": [ + { + "bbox": [ + 41, + 652, + 298, + 712 + ], + "type": "text", + "content": "Where, " + }, + { + "bbox": [ + 41, + 652, + 298, + 712 + ], + "type": "inline_equation", + "content": "y_{e1,e2}" + }, + { + "bbox": [ + 41, + 652, + 298, + 712 + ], + "type": "text", + "content": " is the true label of the entity relationship, and " + }, + { + "bbox": [ + 41, + 652, + 298, + 712 + ], + "type": "inline_equation", + "content": "P(r_{e_1,e_2})" + }, + { + "bbox": [ + 41, + 652, + 298, + 712 + ], + "type": "text", + "content": " is the probability of the relationship class predicted by the model. Through the joint optimization of NER and RE tasks, the entity recognition and relationship extraction" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 311, + 53, + 567, + 76 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 53, + 567, + 76 + ], + "spans": [ + { + "bbox": [ + 311, + 53, + 567, + 76 + ], + "type": "text", + "content": "promote each other and improve the accuracy of medical literature information extraction." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 402, + 84, + 482, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 402, + 84, + 482, + 95 + ], + "spans": [ + { + "bbox": [ + 402, + 84, + 482, + 95 + ], + "type": "text", + "content": "IV. 
EXPERIMENT" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 312, + 102, + 369, + 113 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 102, + 369, + 113 + ], + "spans": [ + { + "bbox": [ + 312, + 102, + 369, + 113 + ], + "type": "text", + "content": "A. Datasets" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 311, + 116, + 568, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 116, + 568, + 281 + ], + "spans": [ + { + "bbox": [ + 311, + 116, + 568, + 281 + ], + "type": "text", + "content": "This dataset comprises 6,881 disease entities extracted from PubMed abstracts, all of which were validated by biomedical experts and categorized into four groups—Specific, Composite, Modifier, and Undetermined Diseases. The data is split into training (5,064 instances), validation (787 instances), and test (1,030 instances) sets. A BIO tagging scheme (B-Begin, I-Inside, O-Outside) is used to clearly delineate entity boundaries, and all entities are aligned with the Unified Medical Language System (UMLS). Preprocessing steps include tokenization, stop-word removal, normalization, and Word Piece tokenization for medical terms. To address data imbalance, a Disease Co-occurrence Network was employed alongside data augmentation techniques such as synonym substitution and entity masking. These methods collectively bolster model performance in medical entity extraction tasks." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 312, + 287, + 419, + 298 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 287, + 419, + 298 + ], + "spans": [ + { + "bbox": [ + 312, + 287, + 419, + 298 + ], + "type": "text", + "content": "B. 
Experimental Results" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 311, + 301, + 567, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 301, + 567, + 324 + ], + "spans": [ + { + "bbox": [ + 311, + 301, + 567, + 324 + ], + "type": "text", + "content": "First, this paper gives the comparative experimental results of different pre-training models, as shown in Table 1." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 315, + 324, + 564, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 324, + 564, + 348 + ], + "spans": [ + { + "bbox": [ + 315, + 324, + 564, + 348 + ], + "type": "text", + "content": "Table 1. Performance comparison of different pre-trained language models on medical literature information extraction" + } + ] + } + ], + "index": 20 + }, + { + "type": "table", + "bbox": [ + 307, + 357, + 567, + 407 + ], + "blocks": [ + { + "bbox": [ + 307, + 357, + 567, + 407 + ], + "lines": [ + { + "bbox": [ + 307, + 357, + 567, + 407 + ], + "spans": [ + { + "bbox": [ + 307, + 357, + 567, + 407 + ], + "type": "table", + "html": "
ModelPrecisionRecallF1-Score
Bert85.2%82.7%83.9%
BioBert88.4%86.1%87.2%
PubmedBert89.7%87.9%88.8%
ClinicalBert87.9%85.6%86.7%
", + "image_path": "f369296a1337c06c0f7fcb9a051fbf46e4453366724355b63c834683d53a134b.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "table_body" + } + ], + "index": 21 + }, + { + "bbox": [ + 311, + 408, + 568, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 408, + 568, + 582 + ], + "spans": [ + { + "bbox": [ + 311, + 408, + 568, + 582 + ], + "type": "text", + "content": "Experimental results indicate significant differences in the performance of various pre-trained language models in medical entity extraction tasks [26]. Among them, PubMedBERT achieves the highest performance across all evaluation metrics (F1-score = 88.8%). This suggests that its pre-training strategy on large-scale biomedical literature enhances its adaptability to the textual characteristics of medical texts. In comparison, BioBERT also demonstrates high accuracy (88.4%) and recall (86.1%), ranking second only to PubMedBERT. This indicates that BioBERT maintains strong generalization ability in specific medical entity extraction tasks. ClinicalBERT performs slightly worse than BioBERT and PubMedBERT, with a relatively lower recall (85.6%) despite achieving high accuracy (87.9%). This may be attributed to its pre-training on electronic health records (EHRs), which differ in textual style and structure from medical literature." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 311, + 588, + 569, + 719 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 588, + 569, + 719 + ], + "spans": [ + { + "bbox": [ + 311, + 588, + 569, + 719 + ], + "type": "text", + "content": "General BERT exhibits the weakest performance, with an F1-score of only " + }, + { + "bbox": [ + 311, + 588, + 569, + 719 + ], + "type": "inline_equation", + "content": "83.9\\%" + }, + { + "bbox": [ + 311, + 588, + 569, + 719 + ], + "type": "text", + "content": ". 
This result highlights the limitations of general-purpose pre-trained language models when processing specialized medical texts. BERT is pre-trained on a general corpus and lacks domain-specific terminology and contextual understanding, making it less effective for medical entity extraction. In contrast, BioBERT and PubMedBERT, pretrained on PubMed literature, improve their comprehension of medical terms, resulting in superior performance. PubMedBERT's advantage over BioBERT may stem from its training approach. While BioBERT fine-tunes BERT on biomedical texts, PubMedBERT is trained from scratch on" + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 43, + 53, + 298, + 87 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 53, + 298, + 87 + ], + "spans": [ + { + "bbox": [ + 43, + 53, + 298, + 87 + ], + "type": "text", + "content": "medical literature. This allows PubMedBERT to capture the linguistic distribution and structural patterns of biomedical texts more comprehensively." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 43, + 92, + 298, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 92, + 298, + 213 + ], + "spans": [ + { + "bbox": [ + 43, + 92, + 298, + 213 + ], + "type": "text", + "content": "The findings indicate that choosing domain-specific pretrained language models substantially enhances performance in medical entity extraction tasks. PubMedBERT and BioBERT outperform other models in both accuracy and recall. This suggests that generic BERT alone is inadequate for medical Natural Language Processing (NLP) tasks, and domain-adaptive pre-training strategies are crucial for improving model performance. 
Future research could delve deeper into integrating pre-trained language models with knowledge graph augmentation or multi-task learning to further enhance medical entity extraction capabilities." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 43, + 218, + 298, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 218, + 298, + 262 + ], + "spans": [ + { + "bbox": [ + 43, + 218, + 298, + 262 + ], + "type": "text", + "content": "Further, this paper also provides experimental comparative analysis of different named entity recognition methods based on Transformer, and the experimental results are shown in Table 2." + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 37, + 300, + 297, + 370 + ], + "blocks": [ + { + "bbox": [ + 50, + 269, + 290, + 291 + ], + "lines": [ + { + "bbox": [ + 50, + 269, + 290, + 291 + ], + "spans": [ + { + "bbox": [ + 50, + 269, + 290, + 291 + ], + "type": "text", + "content": "Table 2. Performance comparison of different named entity recognition methods based on Transformer" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 37, + 300, + 297, + 370 + ], + "lines": [ + { + "bbox": [ + 37, + 300, + 297, + 370 + ], + "spans": [ + { + "bbox": [ + 37, + 300, + 297, + 370 + ], + "type": "table", + "html": "
MethodPrecisionRecallF1-Score
Transformer + CRF88.1%86.3%87.2%
Transformer + Span-based89.4%87.8%88.6%
Transformer + Seq2Seq86.7%85.2%85.9%
", + "image_path": "c85b7124e86f453b9452fcc538f804ff2969528d9cb8101a4c90216329642fcc.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 43, + 386, + 298, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 386, + 298, + 486 + ], + "spans": [ + { + "bbox": [ + 43, + 386, + 298, + 486 + ], + "type": "text", + "content": "Experimental results indicate that different entity extraction methods exhibit varying performance within Transformer-based architectures. Among them, the Span-based approach achieves the highest F1-score (88.6%), demonstrating its superior ability to accurately identify entity boundaries in medical texts. Compared to CRF-based token-by-token sequential labeling, the Span-based method directly predicts entity boundaries, making it more effective in extracting complex medical terms and multi-word expressions." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 43, + 491, + 298, + 623 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 491, + 298, + 623 + ], + "spans": [ + { + "bbox": [ + 43, + 491, + 298, + 623 + ], + "type": "text", + "content": "The Transformer + CRF method scores slightly lower " + }, + { + "bbox": [ + 43, + 491, + 298, + 623 + ], + "type": "inline_equation", + "content": "(87.2\\%)" + }, + { + "bbox": [ + 43, + 491, + 298, + 623 + ], + "type": "text", + "content": " but excels at capturing dependencies between entities, making it suitable for structured medical texts. Conversely, the Transformer + Seq2Seq method yields a lower F1-score " + }, + { + "bbox": [ + 43, + 491, + 298, + 623 + ], + "type": "inline_equation", + "content": "(85.9\\%)" + }, + { + "bbox": [ + 43, + 491, + 298, + 623 + ], + "type": "text", + "content": ", likely due to the decoder's errors in boundary recognition during text generation. These findings suggest that Span-based and CRF methods are more effective for medical entity extraction. 
Future studies might explore integrating Span-based techniques with CRF to enhance accuracy and stability. Additionally, the study explores Few-shot Learning for medical entity recognition under low-resource conditions, with experimental results detailed in Figure 2." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 43, + 628, + 298, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 628, + 298, + 715 + ], + "spans": [ + { + "bbox": [ + 43, + 628, + 298, + 715 + ], + "type": "text", + "content": "Experimental results indicate that the performance of Few-shot Learning in medical entity extraction improves as the number of training samples increases. When training data is extremely limited (1-shot or 5-shot), the model's precision, recall, and F1-score remain low, with the F1-score ranging between " + }, + { + "bbox": [ + 43, + 628, + 298, + 715 + ], + "type": "inline_equation", + "content": "60\\%" + }, + { + "bbox": [ + 43, + 628, + 298, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 43, + 628, + 298, + 715 + ], + "type": "inline_equation", + "content": "72\\%" + }, + { + "bbox": [ + 43, + 628, + 298, + 715 + ], + "type": "text", + "content": ". This suggests that the model struggles to accurately identify medical entities under severe data scarcity. The primary challenge lies in the abundance of" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 314, + 53, + 567, + 142 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 53, + 567, + 142 + ], + "spans": [ + { + "bbox": [ + 314, + 53, + 567, + 142 + ], + "type": "text", + "content": "specialized terminology in medical texts, making it difficult for the model to learn effective patterns from minimal data. 
However, with 10-shot training, performance improves significantly, with the F1-score reaching " + }, + { + "bbox": [ + 314, + 53, + 567, + 142 + ], + "type": "inline_equation", + "content": "79.1\\%" + }, + { + "bbox": [ + 314, + 53, + 567, + 142 + ], + "type": "text", + "content": ". This result demonstrates that even a small increase in labeled data can substantially enhance the model's learning ability. This aligns with the fundamental characteristic of Few-shot Learning, which efficiently leverages limited samples." + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 316, + 148, + 564, + 317 + ], + "blocks": [ + { + "bbox": [ + 316, + 148, + 564, + 317 + ], + "lines": [ + { + "bbox": [ + 316, + 148, + 564, + 317 + ], + "spans": [ + { + "bbox": [ + 316, + 148, + 564, + 317 + ], + "type": "image", + "image_path": "19658615318995e7d3899b2ed34c9fd6d6f3bd2d7e27c2b33876b9e924816faa.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 322, + 325, + 559, + 337 + ], + "lines": [ + { + "bbox": [ + 322, + 325, + 559, + 337 + ], + "spans": [ + { + "bbox": [ + 322, + 325, + 559, + 337 + ], + "type": "text", + "content": "Figure 2. Few-shot Learning Performance on Medical NER" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 314, + 342, + 567, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 342, + 567, + 495 + ], + "spans": [ + { + "bbox": [ + 314, + 342, + 567, + 495 + ], + "type": "text", + "content": "As the number of training samples increases to 20-shot and beyond, the model's precision, recall, and F1-score continue to rise, eventually plateauing after 50-shot. 
At 50-shot, the F1-score reaches " + }, + { + "bbox": [ + 314, + 342, + 567, + 495 + ], + "type": "inline_equation", + "content": "88.1\\%" + }, + { + "bbox": [ + 314, + 342, + 567, + 495 + ], + "type": "text", + "content": " and approaches " + }, + { + "bbox": [ + 314, + 342, + 567, + 495 + ], + "type": "inline_equation", + "content": "89.9\\%" + }, + { + "bbox": [ + 314, + 342, + 567, + 495 + ], + "type": "text", + "content": " at 100-shot. This indicates that with a sufficient number of training samples, the model can effectively learn entity representations and achieve high recognition accuracy. However, performance gains slow after 50-shot, suggesting that while Few-shot Learning enhances model performance up to a certain threshold, its marginal benefit diminishes as data volume increases. These findings suggest that in medical entity extraction tasks, Few-shot Learning is particularly effective in low-resource settings, whereas traditional supervised learning may offer greater stability when ample labeled data is available." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 314, + 501, + 567, + 611 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 501, + 567, + 611 + ], + "spans": [ + { + "bbox": [ + 314, + 501, + 567, + 611 + ], + "type": "text", + "content": "Overall, this experiment confirms the effectiveness of Few-shot Learning in medical entity extraction, particularly when labeled data is scarce. Even with a limited number of samples, the model demonstrates significant performance improvements. Future research could explore more advanced Few-shot Learning techniques, such as metric learning-based methods, GPT variants optimized for prompt design, or small-sample learning approaches integrated with knowledge graphs. These strategies could further enhance model generalization, enabling more effective applications in complex medical NLP tasks." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 405, + 619, + 482, + 629 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 405, + 619, + 482, + 629 + ], + "spans": [ + { + "bbox": [ + 405, + 619, + 482, + 629 + ], + "type": "text", + "content": "V. CONCLUSION" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 314, + 634, + 567, + 711 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 634, + 567, + 711 + ], + "spans": [ + { + "bbox": [ + 314, + 634, + 567, + 711 + ], + "type": "text", + "content": "This study proposes a medical entity extraction method based on Transformer and examines the effects of different pretrained language models, extraction methods, and Few-shot Learning in low-resource scenarios. Experimental results indicate that PubMedBERT and BioBERT outperform other models in medical text processing, significantly improving entity extraction accuracy. Compared to traditional sequence" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 41, + 53, + 298, + 131 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 53, + 298, + 131 + ], + "spans": [ + { + "bbox": [ + 41, + 53, + 298, + 131 + ], + "type": "text", + "content": "labeling approaches, the Span-based entity extraction method achieves the best performance, demonstrating that directly predicting entity boundaries enhances the recognition of complex medical terms. Additionally, Few-shot Learning exhibits strong adaptability in low-resource conditions, achieving high F1-scores with minimal training data. This highlights its potential for medical NLP applications." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 41, + 136, + 298, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 136, + 298, + 268 + ], + "spans": [ + { + "bbox": [ + 41, + 136, + 298, + 268 + ], + "type": "text", + "content": "Despite these promising results, several aspects require further optimization. While the Transformer architecture enhances medical entity extraction, it incurs high computational costs, particularly on large-scale datasets. Future research could explore Knowledge Distillation or Lightweight Transformer variants to improve computational efficiency. This indicates that integrating Active Learning or Data Augmentation strategies may enhance model performance more efficiently. Additionally, medical texts often contain complex contextual relationships. Incorporating Knowledge Graphs into Transformer-based models could further strengthen their understanding of medical terminology." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 41, + 273, + 298, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 273, + 298, + 437 + ], + "spans": [ + { + "bbox": [ + 41, + 273, + 298, + 437 + ], + "type": "text", + "content": "Future studies could expand the application of Few-shot Learning in medical NLP, such as developing more effective prompt-based learning techniques. This would enable large language models (LLMs) to achieve high-precision entity extraction with minimal labeled data. Furthermore, real-world medical text data often involves privacy concerns. Optimizing medical entity extraction models while ensuring data security remains a critical research challenge. Finally, by refining Transformer architectures, integrating external medical knowledge, and introducing adaptive learning strategies, medical entity extraction technology could play a more significant role in clinical medicine, drug discovery, and medical literature analysis. 
These advancements would provide intelligent and efficient tools to support medical research and practice." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 143, + 446, + 196, + 455 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 446, + 196, + 455 + ], + "spans": [ + { + "bbox": [ + 143, + 446, + 196, + 455 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 42, + 460, + 298, + 700 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 42, + 460, + 297, + 497 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 460, + 297, + 497 + ], + "spans": [ + { + "bbox": [ + 42, + 460, + 297, + 497 + ], + "type": "text", + "content": "[1] Pagad N. S. and Pradeep N., “Clinical named entity recognition methods: an overview”, Proceedings of the International Conference on Innovative Computing and Communications: Proceedings of ICICC 2021, Volume 2, pp. 151-165, 2022." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 43, + 498, + 296, + 535 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 498, + 296, + 535 + ], + "spans": [ + { + "bbox": [ + 43, + 498, + 296, + 535 + ], + "type": "text", + "content": "[2] Navarro D. F., Ijaz K., Rezazadegan D., et al., \"Clinical named entity recognition and relation extraction using natural language processing of medical free text: A systematic review\", International Journal of Medical Informatics, vol. 177, 105122, 2023." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 43, + 537, + 298, + 573 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 537, + 298, + 573 + ], + "spans": [ + { + "bbox": [ + 43, + 537, + 298, + 573 + ], + "type": "text", + "content": "[3] Durango M. C., Torres-Silva E. A. and Orozco-Duque A., “Named entity recognition in electronic health records: a methodological review”, Healthcare Informatics Research, vol. 29, no. 4, pp. 
286-300, 2023." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 43, + 575, + 297, + 604 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 575, + 297, + 604 + ], + "spans": [ + { + "bbox": [ + 43, + 575, + 297, + 604 + ], + "type": "text", + "content": "[4] Y. Xiang, Q. He, T. Xu, R. Hao, J. Hu and H. Zhang, \"Adaptive Transformer Attention and Multi-Scale Fusion for Spine 3D Segmentation\", arXiv preprint arXiv:2503.12853, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 43, + 605, + 296, + 642 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 605, + 296, + 642 + ], + "spans": [ + { + "bbox": [ + 43, + 605, + 296, + 642 + ], + "type": "text", + "content": "[5] T. Xu, Y. Xiang, J. Du and H. Zhang, \"Cross-Scale Attention and Multi-Layer Feature Fusion YOLOv8 for Skin Disease Target Detection in Medical Images\", Journal of Computer Technology and Software, vol. 4, no. 2, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 43, + 643, + 297, + 672 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 643, + 297, + 672 + ], + "spans": [ + { + "bbox": [ + 43, + 643, + 297, + 672 + ], + "type": "text", + "content": "[6] W. He, Y. Zhang, T. Xu, T. An, Y. Liang and B. Zhang, \"Object detection for medical image analysis: Insights from the RT-DETR model\", arXiv preprint arXiv:2501.16469, 2025." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 43, + 673, + 297, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 43, + 673, + 297, + 700 + ], + "spans": [ + { + "bbox": [ + 43, + 673, + 297, + 700 + ], + "type": "text", + "content": "[7] M. Li, R. Hao, S. Shi, Z. Yu, Q. He and J. Zhan, “A CNN-Transformer Approach for Image-Text Multimodal Classification with Cross-Modal Feature Fusion”, 2025." 
+ } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 313, + 53, + 568, + 685 + ], + "type": "list", + "angle": 0, + "index": 31, + "blocks": [ + { + "bbox": [ + 313, + 53, + 567, + 90 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 53, + 567, + 90 + ], + "spans": [ + { + "bbox": [ + 313, + 53, + 567, + 90 + ], + "type": "text", + "content": "[8] Q. Sun, \"Dynamic Optimization of Human-Computer Interaction Interfaces Using Graph Convolutional Networks and Q-Learning\", Transactions on Computational and Scientific Methods, vol. 5, no. 2, 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 314, + 91, + 567, + 119 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 91, + 567, + 119 + ], + "spans": [ + { + "bbox": [ + 314, + 91, + 567, + 119 + ], + "type": "text", + "content": "[9] S. Duan, \"Systematic Analysis of User Perception for Interface Design Enhancement\", Journal of Computer Science and Software Applications, vol. 5, no. 2, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 314, + 121, + 568, + 148 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 121, + 568, + 148 + ], + "spans": [ + { + "bbox": [ + 314, + 121, + 568, + 148 + ], + "type": "text", + "content": "[10] J. Hu, T. An, Z. Yu, J. Du and Y. Luo, \"Contrastive Learning for Cold Start Recommendation with Adaptive Feature Fusion\", arXiv preprint arXiv:2502.03664, 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 314, + 151, + 567, + 178 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 151, + 567, + 178 + ], + "spans": [ + { + "bbox": [ + 314, + 151, + 567, + 178 + ], + "type": "text", + "content": "[11] A. Liang, “Personalized Multimodal Recommendations Framework Using Contrastive Learning”, Transactions on Computational and Scientific Methods, vol. 4, no. 11, 2024." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 180, + 568, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 180, + 568, + 217 + ], + "spans": [ + { + "bbox": [ + 313, + 180, + 568, + 217 + ], + "type": "text", + "content": "[12] X. Zhou, Y. Zhang, Z. Wang, M. Lu and X. Liu, \"MAFN: multi-level attention fusion network for multimodal named entity recognition\", Multimedia Tools and Applications, vol. 83, no. 15, pp. 45047-45058, 2024." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 314, + 219, + 567, + 246 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 219, + 567, + 246 + ], + "spans": [ + { + "bbox": [ + 314, + 219, + 567, + 246 + ], + "type": "text", + "content": "[13] Y. Xu and Y. Chen, \"Attention-based interactive multi-level feature fusion for named entity recognition\", Scientific Reports, vol. 15, no. 1, 3069, 2025." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 248, + 567, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 248, + 567, + 285 + ], + "spans": [ + { + "bbox": [ + 313, + 248, + 567, + 285 + ], + "type": "text", + "content": "[14] Z. Tong, Q. Liu, H. Shi, Y. Xia, S. Wu and X. Y. Zhang, \"Semantics Fusion of Hierarchical Transformers for Multimodal Named Entity Recognition\", Proceedings of the International Conference on Intelligent Computing, pp. 414-426, 2024." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 314, + 287, + 567, + 323 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 287, + 567, + 323 + ], + "spans": [ + { + "bbox": [ + 314, + 287, + 567, + 323 + ], + "type": "text", + "content": "[15] Y. Hu, Q. Chen, J. Du, et al., \"Improving large language models for clinical named entity recognition via prompt engineering\", Journal of the American Medical Informatics Association, vol. 31, no. 9, pp. 1812-1820, 2024." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 314, + 325, + 567, + 353 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 325, + 567, + 353 + ], + "spans": [ + { + "bbox": [ + 314, + 325, + 567, + 353 + ], + "type": "text", + "content": "[16] R. Hao, Y. Xiang, J. Du, Q. He, J. Hu and T. Xu, “A Hybrid CNN-Transformer Model for Heart Disease Prediction Using Life History Data”, arXiv preprint arXiv:2503.02124, 2025." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 314, + 355, + 567, + 382 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 355, + 567, + 382 + ], + "spans": [ + { + "bbox": [ + 314, + 355, + 567, + 382 + ], + "type": "text", + "content": "[17] J. Hu, Y. Xiang, Y. Lin, J. Du, H. Zhang and H. Liu, “Multi-Scale Transformer Architecture for Accurate Medical Image Classification”, arXiv preprint arXiv:2502.06243, 2025." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 314, + 384, + 567, + 412 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 384, + 567, + 412 + ], + "spans": [ + { + "bbox": [ + 314, + 384, + 567, + 412 + ], + "type": "text", + "content": "[18] X. Li, Q. Lu, Y. Li, M. Li and Y. Qi, \"Optimized Unet with Attention Mechanism for Multi-Scale Semantic Segmentation\", arXiv preprint arXiv:2502.03813, 2025." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 314, + 415, + 567, + 459 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 415, + 567, + 459 + ], + "spans": [ + { + "bbox": [ + 314, + 415, + 567, + 459 + ], + "type": "text", + "content": "[19] J. Wei, Y. Liu, X. Huang, X. Zhang, W. Liu and X. Yan, \"Self-Supervised Graph Neural Networks for Enhanced Feature Extraction in Heterogeneous Information Networks\", Proceedings of the 2024 5th International Conference on Machine Learning and Computer Application (ICMLCA), pp. 272-276, 2024." 
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 314, + 461, + 567, + 489 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 461, + 567, + 489 + ], + "spans": [ + { + "bbox": [ + 314, + 461, + 567, + 489 + ], + "type": "text", + "content": "[20] J. Liu, Y. Zhang, Y. Sheng, Y. Lou, H. Wang and B. Yang, “Context-Aware Rule Mining Using a Dynamic Transformer-Based Framework”, arXiv preprint arXiv:2503.11125, 2025." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 314, + 491, + 567, + 518 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 491, + 567, + 518 + ], + "spans": [ + { + "bbox": [ + 314, + 491, + 567, + 518 + ], + "type": "text", + "content": "[21] J. Zhan, \"Elastic Scheduling of Micro-Modules in Edge Computing Based on LSTM Prediction\", Journal of Computer Technology and Software, vol. 4, no. 2, 2025." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 314, + 520, + 567, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 520, + 567, + 555 + ], + "spans": [ + { + "bbox": [ + 314, + 520, + 567, + 555 + ], + "type": "text", + "content": "[22] Y. Deng, “A hybrid network congestion prediction method integrating association rules and LSTM for enhanced spatiotemporal forecasting”, Transactions on Computational and Scientific Methods, vol. 5, no. 2, 2025." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 314, + 559, + 567, + 596 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 559, + 567, + 596 + ], + "spans": [ + { + "bbox": [ + 314, + 559, + 567, + 596 + ], + "type": "text", + "content": "[23] X. Yan, W. Wang, M. Xiao, Y. Li and M. Gao, \"Survival prediction across diverse cancer types using neural networks\", Proceedings of the 2024 7th International Conference on Machine Vision and Applications, pp. 134-138, 2024." 
+ } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 314, + 598, + 567, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 598, + 567, + 624 + ], + "spans": [ + { + "bbox": [ + 314, + 598, + 567, + 624 + ], + "type": "text", + "content": "[24] K. Pakhale, “Comprehensive overview of named entity recognition: Models, domain-specific applications and challenges”, arXiv preprint arXiv:2309.14084, 2023." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 314, + 627, + 567, + 655 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 627, + 567, + 655 + ], + "spans": [ + { + "bbox": [ + 314, + 627, + 567, + 655 + ], + "type": "text", + "content": "[25] M. Afshar, Y. Gao, D. Gupta, E. Croxford and D. Demner-Fushman, \"On the role of the UMLS in supporting diagnosis generation proposed by Large Language Models\", Journal of Biomedical Informatics, 2024." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 314, + 657, + 567, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 657, + 567, + 685 + ], + "spans": [ + { + "bbox": [ + 314, + 657, + 567, + 685 + ], + "type": "text", + "content": "[26] V. S. Carmona, S. Jiang and B. Dong, “A Multilevel Analysis of PubMed-only BERT-based Biomedical Models”, Proceedings of the 6th Clinical Natural Language Processing Workshop, pp. 105-110, 2024." 
+ } + ] + } + ], + "index": 30 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_04xxx/2504.04416/4ab48d87-f77d-4021-9081-0dbea7a7ea19_content_list.json b/data/2025/2504_04xxx/2504.04416/4ab48d87-f77d-4021-9081-0dbea7a7ea19_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..4410c4ad4bf14c968103af62b0b055918c7fd07c --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/4ab48d87-f77d-4021-9081-0dbea7a7ea19_content_list.json @@ -0,0 +1,4257 @@ +[ + { + "type": "text", + "text": "SIGACT News Complexity Theory Column", + "text_level": 1, + "bbox": [ + 289, + 87, + 709, + 109 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Meta-Mathematics of Computational Complexity Theory", + "text_level": 1, + "bbox": [ + 209, + 114, + 787, + 138 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Igor C. Oliveira1", + "bbox": [ + 416, + 148, + 576, + 169 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/9070a982ca734a985fe9029753fd5664fa385b28312c9e502177b89de18d5c68.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 442, + 178, + 553, + 281 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 465, + 295, + 532, + 309 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We survey results on the formalization and independence of mathematical statements related to major open problems in computational complexity theory. Our primary focus is on recent findings concerning the (un)provability of complexity bounds within theories of bounded arithmetic. 
This includes the techniques employed and related open problems, such as the (non)existence of a feasible proof that $\\mathsf{P} = \\mathsf{NP}$ .", + "bbox": [ + 155, + 319, + 839, + 381 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Contents", + "text_level": 1, + "bbox": [ + 112, + 393, + 209, + 411 + ], + "page_idx": 0 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1 Introduction 2", + "2 Preliminaries 3" + ], + "bbox": [ + 114, + 425, + 883, + 465 + ], + "page_idx": 0 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "2.1 Complexity Theory 3", + "2.2 Theories of Bounded Arithmetic 3" + ], + "bbox": [ + 138, + 468, + 883, + 497 + ], + "page_idx": 0 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "2.2.1 $\\mathrm{PV}_1$ 4", + "2.2.2 $\\mathbf{S}_2^1,\\mathbf{T}_2^1$ , and Beyond 4", + "2.2.3 $\\mathsf{APC}_1$ 6" + ], + "bbox": [ + 174, + 498, + 883, + 542 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "3 Auxiliary Definitions and Results 6", + "bbox": [ + 114, + 556, + 883, + 570 + ], + "page_idx": 0 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "3.1 Witnessing Theorems 6", + "3.2 Bounded Arithmetic and Propositional Proofs 7", + "3.3 Cuts of Models of Bounded Arithmetic 8" + ], + "bbox": [ + 138, + 571, + 883, + 614 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "4 The Strength of Bounded Arithmetic 9", + "bbox": [ + 114, + 628, + 883, + 643 + ], + "page_idx": 0 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "4.1 Formalization of Results from Algorithms and Complexity 9", + "4.2 Concrete Example: Subbotovskaya's Formula Lower Bound in $\\mathsf{PV}_1$ 10" + ], + "bbox": [ + 138, + 645, + 883, + 674 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "5 Unprovability of Complexity Bounds 14", + "bbox": [ + 114, + 686, + 883, + 700 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "5.1 Unprovability of Upper 
Bounds 14", + "bbox": [ + 138, + 703, + 883, + 715 + ], + "page_idx": 0 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "5.1.1 LEARN-Uniform Circuits and Unprovability 14", + "5.1.2 $\\mathsf{P} = \\mathsf{NP}$ and Propositional Proof Complexity 17" + ], + "bbox": [ + 174, + 718, + 883, + 747 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "5.2 Unprovability of Lower Bounds 18", + "bbox": [ + 138, + 748, + 883, + 761 + ], + "page_idx": 0 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "5.2.1 Average-Case Circuit Lower Bounds 18", + "5.2.2 Extended Frege Lower Bounds 21" + ], + "bbox": [ + 174, + 763, + 883, + 791 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "5.3 Connection Between Upper Bounds and Lower Bounds 22", + "bbox": [ + 138, + 792, + 883, + 806 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "6 Additional Recent Developments 23", + "bbox": [ + 114, + 820, + 883, + 835 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.04416v1 [cs.CC] 6 Apr 2025", + "bbox": [ + 22, + 268, + 60, + 700 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "1Department of Computer Science, University of Warwick, UK. Email: igor.oliveira@warwick.ac.uk.", + "bbox": [ + 135, + 845, + 800, + 859 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 898, + 503, + 909 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 112, + 88, + 282, + 108 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The investigation of the inherent complexity of computational tasks is a central research direction in theoretical computer science. While unconditional results are known in a variety of restricted contexts (i.e., with respect to weak models of computation), despite significant efforts, several central questions of the field remain wide open. 
Prominent examples include the relation between complexity classes P and NP, understanding the power of non-uniform Boolean circuits, and bounding the length of proofs in propositional proof systems such as Frege and extended Frege.", + "bbox": [ + 109, + 122, + 883, + 224 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The investigation of the difficulty of settling these problems has long been an important and influential area of research by itself (e.g., barrier results such as [BGS75, RR97, AW09, $\\mathrm{CHO}^{+}22$ ]). Unfortunately, these results tend to be ad-hoc and do not consider a standard and robust notion of proof. In order to build a general theory, several works have considered provability in the usual sense of mathematical logic. Most importantly, this enables a deeper investigation of complexity theory that considers not only the running time of a program or the size of a circuit but also the feasibility of proving their existence and correctness. In particular, we can explore the fundamental question of what can and cannot be feasibly computed, along with the meta-question of what lower and upper bounds can and cannot be feasibly proven.", + "bbox": [ + 109, + 224, + 883, + 362 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "A fundamental goal of this research is to", + "bbox": [ + 140, + 363, + 436, + 378 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "$(\\star)$ identify a suitable logical theory capable of formalizing most, if not all, known results in algorithms and complexity, and determine whether the major open problems mentioned above are provable or unprovable within this theory.2", + "bbox": [ + 109, + 388, + 883, + 441 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Although we are still far from reaching this goal, progress has been made in understanding the (un)provability of statements concerning the complexity of computations within certain fragments of Peano Arithmetic, collectively 
known as Bounded Arithmetic. These theories are designed to capture proofs that manipulate and reason with concepts from a specified complexity class. For instance, a proof by induction whose inductive hypothesis can be expressed as an NP predicate is one such example. The earliest theory of this kind was $\\mathsf{I}\\Delta_0$ , introduced by Parikh [Par71], who explored the intuitive concept of feasibility in arithmetic and addressed the infeasibility of exponentiation. The relationship between Parikh's theory and computational complexity was fully recognized and advanced by Paris and Wilkie in a series of influential papers during the 1980s (see [WP87]). Other significant theories include Cook's theory $\\mathsf{PV}_1$ [Coo75], which formalizes polynomial-time reasoning; Jerabek's theory $\\mathsf{APC}_1$ [Jer04, Jer05, Jer07], which extends $\\mathsf{PV}_1$ by incorporating the dual weak pigeonhole principle for polynomial-time functions and formalizes probabilistic polynomial-time reasoning; and Buss's theories $\\mathsf{S}_2^i$ and $\\mathsf{T}_2^i$ [Bus86], which include induction principles corresponding to various levels of the polynomial-time hierarchy.", + "bbox": [ + 109, + 450, + 883, + 672 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "These theories are capable of formalizing advanced results. For instance, it is known that $\\mathrm{PV}_1$ can prove the PCP Theorem [Pic15b], while $\\mathrm{APC}_1$ can establish several significant circuit lower bounds [MP20], including monotone circuit lower bounds for $k$ -Clique and bounded-depth circuit lower bounds for the Parity function. 
Further examples include the explicit construction of expander graphs [BKKK20] and the correctness of randomized polynomial-time matching algorithms [LC11], among many others.", + "bbox": [ + 109, + 672, + 883, + 758 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Given the expressive power of these theories, even if we are not yet able to establish a breakthrough result of the magnitude of $(\\star)$ , determining the (un)provability of complexity bounds of interest in theories of bounded arithmetic still represents significant progress towards our understanding of the power and limits of feasible computations and proofs. This survey aims to provide an introduction to some of these results,", + "bbox": [ + 109, + 758, + 883, + 828 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "As we elaborate in Section 5, the unprovability of a statement is equivalent to the consistency of its negation, which can be at least as important.", + "bbox": [ + 109, + 835, + 885, + 866 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 898, + 504, + 909 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "the underlying techniques, and related open problems. While our primary focus is on recent developments, in order to provide a broader perspective we also cover some classical results. 
Due to space limitations, the survey is not exhaustive, and several references had to be omitted (although some recent developments are mentioned in Section 6).", + "bbox": [ + 109, + 90, + 880, + 157 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Preliminaries", + "text_level": 1, + "bbox": [ + 112, + 184, + 289, + 202 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.1 Complexity Theory", + "text_level": 1, + "bbox": [ + 112, + 218, + 323, + 234 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We will rely on a few additional standard definitions from complexity theory, such as basic complexity classes, Boolean circuits and formulas, and propositional proof systems. These can be found in textbooks such as [AB09] and [Kra19]. Below we only establish notation and review a classical result that offers a convenient way to talk about polynomial-time computations in some logical theories.", + "bbox": [ + 109, + 244, + 883, + 311 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We use $\\mathsf{SIZE}[s]$ to denote the set of languages computed by Boolean circuits of size $s(n)$ .", + "bbox": [ + 138, + 313, + 782, + 330 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In theoretical computer science, one typically considers functions and predicates that operate over binary strings. This is equivalent to operations on integers, by identifying each non-negative integer with its binary representation. Let $\\mathbb{N}$ denote the set of non-negative integers. For $a\\in \\mathbb{N}$ , we let $|a|\\triangleq \\lceil \\log_2(a + 1)\\rceil$ denote the length of the binary representation of $a$ . For a constant $k\\geq 1$ , we say that a function $f\\colon \\mathbb{N}^k\\to \\mathbb{N}$ is computable in polynomial time if $f(x_{1},\\ldots ,x_{k})$ can be computed in time polynomial in $|x_{1}|,\\ldots ,|x_{k}|$ . 
(For convenience, we might write $|\\vec{x} |\\triangleq |x_1|,\\dots ,|x_k|.$ ) Recall that FP denotes the set of polynomial time functions. While the definition of polynomial time refers to a machine model, FP can also be introduced in a machine independent way as the closure of a set of base functions under composition and limited recursion on notation. In more detail, we can consider the following class $\\mathcal{F}$ of base functions:", + "bbox": [ + 109, + 330, + 883, + 484 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} c (x) \\triangleq 0, \\quad s (x) \\triangleq x + 1, \\quad a (x) \\triangleq \\lfloor x / 2 \\rfloor , \\quad d (x) \\triangleq 2 \\cdot x, \\quad \\pi_ {\\ell} ^ {i} (x _ {1}, \\ldots , x _ {\\ell}) \\triangleq x _ {i}, \\quad x \\# y \\triangleq 2 ^ {| x | \\cdot | y |}, \\\\ x \\leq y \\triangleq \\left\\{ \\begin{array}{l l} 1 & \\text {i f} x \\leq y \\\\ 0 & \\text {o t h e r w i s e ,} \\end{array} \\right. \\quad \\text {C h o i c e} (x, y, z) \\triangleq \\left\\{ \\begin{array}{l l} y & \\text {i f} x > 0 \\\\ z & \\text {o t h e r w i s e .} \\end{array} \\right. 
\\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 129, + 496, + 866, + 571 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We say that a function $f(\\vec{x},y)$ is defined from functions $g(\\vec{x})$ , $h(\\vec{x},y,z)$ , and $k(\\vec{x},y)$ by limited recursion on notation if", + "bbox": [ + 109, + 583, + 880, + 614 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nf (\\vec {x}, 0) = g (\\vec {x})\n$$\n", + "text_format": "latex", + "bbox": [ + 375, + 631, + 509, + 647 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nf (\\vec {x}, y) = h (\\vec {x}, y, f (\\vec {x}, \\lfloor y / 2 \\rfloor))\n$$\n", + "text_format": "latex", + "bbox": [ + 375, + 652, + 620, + 669 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nf (\\vec {x}, y) \\leq k (\\vec {x}, y)\n$$\n", + "text_format": "latex", + "bbox": [ + 375, + 672, + 526, + 689 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "for every sequence $(\\vec{x},y)$ of natural numbers. Cobham [Cob65] proved that FP is the least class of functions that contains $\\mathcal{F}$ and is closed under composition and limited recursion on notation.", + "bbox": [ + 109, + 703, + 880, + 736 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2 Theories of Bounded Arithmetic", + "text_level": 1, + "bbox": [ + 112, + 757, + 431, + 773 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Bounded arithmetic has a long and rich history (see [Bus97] for an introduction, and [HP93, Kra95, CN10] for a detailed treatment). The correspondence between the theories and complexity classes manifests in multiple ways. 
For instance, witnessing results show that every provably total function in a given theory $\\mathsf{T}_{\\mathcal{C}}$ (i.e., when $\\forall x \\exists!y \\psi(x,y)$ is provable, for certain formulas $\\psi$ ) is computable within the corresponding complexity class $\\mathcal{C}$ (i.e., the function $y = f(x)$ is in $\\mathcal{C}$ ). There is also a close connection between", + "bbox": [ + 109, + 785, + 880, + 869 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 898, + 503, + 909 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "theories of bounded arithmetic and propositional proof systems, e.g., propositional translations between proofs of certain sentences in $\\mathsf{PV}_1$ or $\\mathsf{S}_2^1$ and polynomial-size proofs in the extended Frege proof system of the corresponding propositional formulas. We review some related results in Section 3.1 and Section 3.2, respectively. In this section, we provide an overview of some widely investigated theories of bounded arithmetic: $\\mathsf{PV}_1$ , $\\mathsf{S}_2^1$ , $\\mathsf{T}_2^1$ , and $\\mathsf{APC}_1$ . We assume basic familiarity with first-order logic. Results claimed below without reference can be found in [Kra95].", + "bbox": [ + 109, + 90, + 887, + 196 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.2.1 PV", + "text_level": 1, + "bbox": [ + 112, + 214, + 202, + 231 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "$\\mathsf{PV}_1$ [Coo75] (see also [KPT91]) is a first-order theory whose intended model is the set $\\mathbb{N}$ of natural numbers, together with the standard interpretation for constants and functions symbols such as $0, +, \\times, \\text{etc.}$ . The vocabulary (language) of $\\mathsf{PV}_1$ , denoted $\\mathcal{L}_{\\mathsf{PV}_1}$ , contains a function symbol for each polynomial-time algorithm $f: \\mathbb{N}^k \\to \\mathbb{N}$ (where $k$ is any constant). 
These function symbols, and the axioms defining them, are obtained through Cobham's characterization of polynomial-time functions discussed in Section 2.1.", + "bbox": [ + 109, + 239, + 883, + 325 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "$\\mathrm{PV}_1$ also postulates an induction axiom scheme that simulates binary search, and one can show that it admits induction over quantifier-free formulas (i.e., polynomial-time predicates). We discuss induction axioms in more detail in Section 2.2.2.", + "bbox": [ + 109, + 325, + 883, + 376 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We will use later in the text that $\\mathsf{PV}_1$ admits a formulation where all axioms are universal formulas (i.e., $\\forall \\vec{x}\\phi (\\vec{x})$ , where $\\phi$ is a quantifier-free formula). In other words, $\\mathsf{PV}_1$ is a universal theory.", + "bbox": [ + 111, + 377, + 883, + 411 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "While the details of the definition of $\\mathrm{PV}_1$ are fairly technical (see, e.g., the longer overview in [CLO24b] or the exposition in [Kra95]), such details are often not needed. In particular, $\\mathrm{PV}_1$ has an equivalent formalization that does not require Cobham's result [Jef06].", + "bbox": [ + 109, + 411, + 883, + 463 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.2.2 $\\mathsf{S}_2^1,\\mathsf{T}_2^1$ , and Beyond", + "text_level": 1, + "bbox": [ + 112, + 481, + 318, + 500 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "While $\\mathrm{PV}_1$ can be related to polynomial-time computations and feasible proofs, Buss [Bus86] introduced a hierarchy of theories with close ties to the different levels of the polynomial hierarchy. 
To specify the theories, we will need a few definitions.", + "bbox": [ + 109, + 508, + 883, + 559 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The language $\\mathcal{L}_{\\mathsf{B}}$ of these theories contains the predicate symbols $=$ and $\\leq$ , the constant symbols 0 and 1, and function symbols $S$ (successor), $+$ , $\\cdot$ , $\\lfloor x / 2 \\rfloor$ , $|x|$ (interpreted as the length of $x$ as in Section 2.1), and $\\#$ (\"smash\"; interpreted as $x \\# y = 2^{|x| \\cdot |y|}$ ).", + "bbox": [ + 111, + 561, + 883, + 612 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "A bounded quantifier is a quantifier of the form $Qy \\leq t$ , where $Q \\in \\{\\exists, \\forall\\}$ and $t$ is a term not involving $y$ . Similarly, a sharply bounded quantifier is one of the form $Qy \\leq |t|$ . Formally, such quantifiers are simply abbreviations. For instance,", + "bbox": [ + 109, + 612, + 883, + 662 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\forall y \\leq t (\\vec {x}) \\varphi (\\vec {x}, y) \\triangleq \\forall y (y \\leq t (\\vec {x}) \\rightarrow \\varphi (\\vec {x}, y)), a n d\n$$\n", + "text_format": "latex", + "bbox": [ + 292, + 676, + 700, + 695 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\exists y \\leq t (\\vec {x}) \\varphi (\\vec {x}, y) \\triangleq \\exists y (y \\leq t (\\vec {x}) \\wedge \\varphi (\\vec {x}, y)).\n$$\n", + "text_format": "latex", + "bbox": [ + 295, + 696, + 658, + 715 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "A formula where each quantifier appears bounded (resp., sharply bounded) is said to be a bounded (resp., sharply bounded) formula. It is not hard to show that every sharply bounded formula defines a polynomial-time predicate over the standard model $\\mathbb{N}$ under its usual operations. 
On the other hand, bounded quantifiers allow us to define predicates in NP, coNP, and beyond.", + "bbox": [ + 109, + 729, + 883, + 797 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We can introduce a hierarchy of formulas by counting alternations of bounded quantifiers. The class $\\Pi_0^b = \\Sigma_0^b$ contains the sharply bounded formulas. We then recursively define, for each $i\\geq 1$ , the classes $\\Sigma_i^b$ and $\\Pi_{i}^{b}$ according to the quantifier structure of the sentence, ignoring the appearance of sharply bounded quantifiers. For instance, if $\\varphi \\in \\Sigma_0^b$ and $\\psi \\triangleq \\exists y\\leq t(\\vec{x})\\varphi (y,\\vec{x})$ , then $\\psi \\in \\Sigma_1^b$ (see, e.g., [Kra95] for the", + "bbox": [ + 111, + 797, + 883, + 868 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 898, + 504, + 909 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "technical details in the general case). As alluded to above, it is known that, for each $i \\geq 1$ , a predicate $P(\\vec{x})$ is in $\\Sigma_i^p$ (the $i$ -th level of the polynomial hierarchy) if and only if there is a $\\Sigma_i^b$ -formula that agrees with it over $\\mathbb{N}$ .", + "bbox": [ + 109, + 90, + 880, + 138 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The theories introduced by Buss share a common set BASIC of finitely many axioms postulating the expected arithmetic behavior of the constants, predicates, and function symbols, e.g., $x + y = y + x$ and $|1| = 1$ (see, e.g., [Kra95, Page 68] for the complete list). The only difference among the theories is the kind of induction axiom scheme that each of them postulates.", + "bbox": [ + 111, + 142, + 880, + 210 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Theory $\\mathsf{T}_2^1$ . 
This is a theory in the language $\\mathcal{L}_{\\mathbb{B}}$ extending BASIC by the induction axiom IND", + "bbox": [ + 111, + 229, + 805, + 247 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\varphi (0) \\wedge \\forall x (\\varphi (x) \\rightarrow \\varphi (x + 1)) \\rightarrow \\forall x \\varphi (x)\n$$\n", + "text_format": "latex", + "bbox": [ + 338, + 258, + 656, + 277 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "for all $\\Sigma_1^b$ -formulas $\\varphi(a)$ . The formula $\\varphi(a)$ may contain other free variables in addition to $a$ .", + "bbox": [ + 111, + 287, + 784, + 306 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We say that $\\mathsf{T}_2^1$ supports induction for NP predicates. Intuitively, this means that we can aim to prove a result in $\\mathsf{T}_2^1$ by induction, provided the induction hypothesis is defined by a predicate computable in NP. This definition can be extended to a theory that postulates induction for $\\Sigma_i^b$ -formulas, which gives rise to the theory $\\mathsf{T}_2^i$ .", + "bbox": [ + 109, + 321, + 880, + 391 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Theory $S_2^1$ . This is a theory in the language $\\mathcal{L}_{\\mathsf{B}}$ extending BASIC by the polynomial induction axiom PIND", + "bbox": [ + 109, + 410, + 880, + 441 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\varphi (0) \\wedge \\forall x (\\varphi (\\lfloor x / 2 \\rfloor) \\rightarrow \\varphi (x)) \\rightarrow \\forall x \\varphi (x)\n$$\n", + "text_format": "latex", + "bbox": [ + 336, + 444, + 658, + 463 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "for all $\\Sigma_1^b$ -formulas $\\varphi(a)$ . 
The formula $\\varphi(a)$ may contain other free variables in addition to $a$ .", + "bbox": [ + 111, + 468, + 782, + 486 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Intuitively, polynomial induction reduces the proof of $\\varphi(x)$ to proving $\\varphi(\\lfloor x/2 \\rfloor)$ . Unlike the standard induction axiom, this approach allows us to reach the base case in just $\\mathrm{poly}(n)$ steps when starting with an integer $x$ represented by $\\mathrm{poly}(n)$ bits. This has implications for the efficiency of translating certain proofs in $\\mathsf{S}_2^1$ into sequences of propositional proofs and for the extraction of polynomial-time algorithms from proofs (see Section 3.1 and Section 3.2). Analogously to $\\mathsf{T}_2^i$ , we can define the theories $\\mathsf{S}_2^i$ via polynomial induction for $\\Sigma_i^b$ -formulas.", + "bbox": [ + 109, + 503, + 880, + 604 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "It is known that $\\mathsf{PV}_1$ is essentially equivalent to $\\mathsf{T}_2^0$ under an appropriate vocabulary and axioms [Jer'06], and that $\\mathsf{S}_2^i \\subseteq \\mathsf{T}_2^i \\subseteq \\mathsf{S}_2^{i+1}$ for every $i \\geq 1$ .", + "bbox": [ + 111, + 606, + 879, + 638 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "When stating and proving results in $\\mathsf{S}_2^1$ , it is convenient to employ a more expressive vocabulary under which any polynomial-time function can be easily described. Moreover, it is possible to achieve this in a conservative way, i.e., without increasing the power of the theory. In more detail, let $\\Gamma$ be a set of $\\mathcal{L}_{\\mathsf{B}}$ -formulas. 
We say that a polynomial-time function $f\\colon \\mathbb{N}^k\\to \\mathbb{N}$ is $\\Gamma$ -definable in $\\mathsf{S}_2^1$ if there is a formula $\\psi (\\vec{x},y)\\in \\Gamma$ for which the following conditions hold:", + "bbox": [ + 111, + 641, + 880, + 726 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(i) For every $\\vec{a} \\in \\mathbb{N}^k$ , $f(\\vec{a}) = b$ if and only if $\\mathbb{N} \\models \\psi(\\vec{a}, b)$ .", + "(ii) $\\mathsf{S}_2^1\\vdash \\forall \\vec{x}\\, \\exists y\\left(\\psi (\\vec{x},y)\\land \\forall z\\left(\\psi (\\vec{x},z)\\to y = z\\right)\\right)$ ." + ], + "bbox": [ + 125, + 734, + 555, + 780 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Every function $f \\in \\mathsf{FP}$ is $\\Sigma_1^b$ -definable in $S_2^1$ . By adding all functions in $\\mathsf{FP}$ to the vocabulary of $S_2^1$ and by extending $S_2^1$ with their defining axioms (i.e., $\\forall x \\varphi(x, f(x))$ ), we obtain a theory $S_2^1(\\mathcal{L}_{\\mathsf{PV}})$ that can refer to polynomial-time predicates using quantifier-free formulas. $S_2^1(\\mathcal{L}_{\\mathsf{PV}})$ proves the polynomial induction scheme for both $\\Sigma_1^b$ -formulas and $\\Pi_1^b$ -formulas in the extended vocabulary. $S_2^1(\\mathcal{L}_{\\mathsf{PV}})$ is conservative over $S_2^1$ , in the sense that any $\\mathcal{L}_{\\mathsf{B}}$ -sentence provable in $S_2^1(\\mathcal{L}_{\\mathsf{PV}})$ is also provable in $S_2^1$ .", + "bbox": [ + 109, + 787, + 880, + 875 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 898, + 503, + 909 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "A $\\forall \\Sigma_{i}^{b}$ -sentence is simply a sentence $\\psi = \\forall \\vec{x} \\varphi(\\vec{x})$ where $\\varphi \\in \\Sigma_{i}^{b}$ . Every $\\forall \\Sigma_{1}^{b}$ -sentence provable in $S_{2}^{1}(\\mathcal{L}_{\\mathsf{PV}})$ is also provable in $\\mathsf{PV}_1$ . 
In other words, $S_{2}^{1}(\\mathcal{L}_{\\mathsf{PV}})$ is $\\forall \\Sigma_{1}^{b}$ -conservative over $\\mathsf{PV}_1$ . On the other hand, it is known that if $S_{2}^{1}(\\mathcal{L}_{\\mathsf{PV}}) = \\mathsf{PV}_1$ , then the polynomial-time hierarchy collapses.", + "bbox": [ + 109, + 90, + 880, + 143 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "2.2.3 APC", + "text_level": 1, + "bbox": [ + 112, + 162, + 212, + 178 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In order to formalize probabilistic methods and randomized algorithms, Jeřábek [Jeř04, Jeř05, Jeř07] formulated the theory $\\mathsf{APC}_1$ (this terminology is from [BKT14]) by extending $\\mathsf{PV}_1$ with the dual Weak Pigeonhole Principle (dWPHP) for $\\mathsf{PV}_1$ functions:", + "bbox": [ + 109, + 189, + 883, + 241 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathsf {A P C} _ {1} \\triangleq \\mathsf {P V} _ {1} \\cup \\{\\mathsf {d W P H P} (f) \\mid f \\in \\mathcal {L} _ {\\mathsf {P V}} \\}.\n$$\n", + "text_format": "latex", + "bbox": [ + 341, + 253, + 653, + 272 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Informally, each sentence $\\mathrm{dWPHP}(f)$ postulates that, for every length $n = |N|$ , there is $y < (1 + 1/n) \\cdot N$ such that $f(x) \\neq y$ for every $x < N$ .", + "bbox": [ + 109, + 285, + 883, + 318 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "It is known that the dual Weak Pigeonhole Principle for polynomial-time predicates can be proved in $\\mathsf{T}_2^2$ [MPW02], and consequently $\\mathsf{APC}_1 \\subseteq \\mathsf{T}_2^2(\\mathcal{L}_{\\mathsf{PV}})$ .", + "bbox": [ + 111, + 319, + 883, + 354 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3 Auxiliary Definitions and Results", + "text_level": 1, + "bbox": [ + 111, + 378, + 485, + 398 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.1 Witnessing Theorems", + "text_level": 1, + "bbox": [ + 112, + 412, + 344, + 430 + 
], + "page_idx": 5 + }, + { + "type": "text", + "text": "Suppose a sentence $\\psi$ of a certain syntactic form admits a proof in a theory $T$ over a vocabulary $\\mathcal{L}$ . A witnessing theorem allows us to extract computational information from any such proof, by showing that an existential quantifier in $\\psi$ can be witnessed by $\\mathcal{L}$ -terms. The simplest example of such a result is stated next.", + "bbox": [ + 109, + 439, + 883, + 491 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Theorem 3.1 (Herbrand's Theorem (see, e.g., [Bus94, McK10])). Let $T$ be a universal theory over a vocabulary $\\mathcal{L}$ . Let $\\varphi(x,y)$ be a quantifier-free $\\mathcal{L}$ -formula, and suppose that $T \\vdash \\forall x \\exists y \\varphi(x,y)$ . There is a constant $k \\geq 1$ and $\\mathcal{L}$ -terms $t_1(x),\\ldots ,t_k(x)$ such that", + "bbox": [ + 109, + 502, + 883, + 554 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nT \\vdash \\varphi (x, t _ {1} (x)) \\lor \\varphi (x, t _ {2} (x)) \\lor \\dots \\lor \\varphi (x, t _ {k} (x)).\n$$\n", + "text_format": "latex", + "bbox": [ + 308, + 566, + 684, + 585 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "As an immediate consequence, if we apply Theorem 3.1 to $T \\triangleq \\mathrm{PV}_1$ , we obtain $\\mathcal{L}_{\\mathrm{PV}}$ -terms (corresponding to polynomial-time functions over $\\mathbb{N}$ ) such that, given $a \\in \\mathbb{N}$ , at least one of them produces a witness $b \\in \\mathbb{N}$ such that $\\mathbb{N} \\models \\varphi(a, b)$ .", + "bbox": [ + 109, + 598, + 883, + 648 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Next, we consider the provability of more complex sentences in a universal theory.", + "bbox": [ + 138, + 650, + 736, + 667 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Theorem 3.2 (KPT Theorem [KPT91]). 
Let $T$ be a universal theory with vocabulary $\\mathcal{L}$ , $\\varphi(w, u, v)$ be a quantifier-free $\\mathcal{L}$ -formula, and suppose that $T \\vdash \\forall w \\exists u \\forall v \\varphi(w, u, v)$ . Then there exist a constant $k \\geq 1$ and $\\mathcal{L}$ -terms $t_1, \\ldots, t_k$ such that", + "bbox": [ + 109, + 678, + 883, + 729 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nT \\vdash \\varphi (w, t _ {1} (w), v _ {1}) \\vee \\varphi (w, t _ {2} (w, v _ {1}), v _ {2}) \\vee \\dots \\vee \\varphi (w, t _ {k} (w, v _ {1}, \\dots , v _ {k - 1}), v _ {k}),\n$$\n", + "text_format": "latex", + "bbox": [ + 197, + 742, + 795, + 761 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where the notation $t_i(w, v_1, \\ldots, v_{i-1})$ indicates that these are the only variables occurring in $t_i$ .", + "bbox": [ + 111, + 773, + 805, + 791 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Theorem 3.2 has a natural interpretation as an interactive game with finitely many rounds, which we revisit in Section 5.1.1 in the context of the provability of circuit upper bounds.", + "bbox": [ + 109, + 803, + 883, + 837 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "3The dWPHP axiom scheme is also referred to as the surjective Weak Pigeonhole Principle in some references.", + "bbox": [ + 132, + 845, + 794, + 861 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 898, + 503, + 909 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "A similar form of Theorem 3.2 holds under the provability of a $\\forall \\exists \\forall \\exists$ -sentence (see, e.g., $\\mathrm{[CKK^{+}24]}$ for a concrete application in the context of circuit lower bounds). In contrast, there is no straightforward analogue of the KPT Theorem for a larger number of quantifier alternations. 
In this case, more general formulations are needed, such as the ones considered in [Pud06, BKT14, LO23].", + "bbox": [ + 109, + 90, + 880, + 157 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "It is also possible to establish witnessing theorems for theories that are not universal. This can be done either by first transforming the theory into a universal theory through the inclusion of new function symbols and quantifier elimination, or via direct approaches (see, e.g., [Kra95, Section 7.3]). Another example is Buss's Theorem for $S_2^1$ , which can be used to show that every $\\forall \\Sigma_1^b$ -sentence provable in $S_2^1(\\mathcal{L}_{\\mathsf{PV}})$ is also provable in $\\mathsf{PV}_1$ . This has two implications. First, we can combine this result with Theorem 3.1, which yields polynomial-time algorithms from proofs of $\\forall \\Sigma_1^b$ -sentences in $S_2^1(\\mathcal{L}_{\\mathsf{PV}})$ . Second, this means that in some situations we can establish the provability of a sentence in $\\mathsf{PV}_1$ using the more convenient theory $S_2^1(\\mathcal{L}_{\\mathsf{PV}})$ (see Section 4.2 for an example).", + "bbox": [ + 109, + 160, + 883, + 297 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.2 Bounded Arithmetic and Propositional Proofs", + "text_level": 1, + "bbox": [ + 109, + 316, + 545, + 333 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In this section, we explain a connection between $\\mathsf{PV}_1$ and the extended Frege proof system discovered by [Coo75]. 
In short, it says that if a universal $\\mathcal{L}_{\\mathsf{PV}}$ -sentence $\\phi(x)$ is provable in $\\mathsf{PV}_1$ , then there is a translation of $\\phi(x)$ into a sequence $\\{G_n\\}_{n \\geq 1}$ of propositional formulas $G_n(p_1, \\ldots, p_n)$ such that each $G_n$ has an extended Frege proof $\\pi_n$ of size polynomial in $n$ .4", + "bbox": [ + 109, + 343, + 883, + 411 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "First, we review some concepts and fix notation, deferring the details to a standard textbook (e.g., [Kra19]). Recall that a propositional formula $G(p_{1},\\ldots ,p_{n})$ is formed using variables $p_1,\\dots ,p_n$ , constants 0 and 1, and logical connectives $\\land ,\\lor$ , and $\\neg$ . A Frege $(\\mathcal{F})$ proof system is a \"textbook\" style proof system for propositional logic. It can be formulated as a finite set of axiom schemes together with the modus ponens rule. $\\mathcal{F}$ is known to be sound and complete. The size of a Frege proof is the total number of symbols occurring in the proof. In the extended Frege $(e\\mathcal{F})$ proof system, we also allow repeated subformulas appearing in a proof to be abbreviated via new variables.", + "bbox": [ + 109, + 412, + 883, + 532 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Cook's Translation [Coo75]. Let $\\varphi$ be a universal $\\mathcal{L}_{\\mathrm{PV}}$ -sentence of the form $\\varphi \\triangleq \\forall x \\psi(x)$ , where $\\psi(x)$ is a quantifier-free formula. 
Cook [Coo75] established that if $\\varphi$ is provable in $\\mathrm{PV}_1$ , then there is a sequence $\\{G_n\\}_{n \\geq 1}$ of propositional tautologies such that", + "bbox": [ + 109, + 550, + 883, + 603 + ], + "page_idx": 6 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Each $G_{n}(p_{1},\\ldots ,p_{n})$ is a polynomial-size formula.", + "- $G_{n}$ encodes that $\\psi(x)$ is true whenever $|x| \\leq n$ , i.e., over all integers encoded as $n$ -bit strings.", + "- $G_{n}$ admits polynomial-size $e\\mathcal{F}$ -proofs.", + "- Moreover, the existence of polynomial-size $e\\mathcal{F}$ -proofs for each $G_{n}$ is provable in $\\mathrm{PV}_1$ . (We will need this additional property of the translation in Section 5.2.2.)" + ], + "bbox": [ + 137, + 609, + 879, + 728 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "For a formula $\\psi(x)$ as above, we often write $||\\psi||_n$ to denote the corresponding propositional formula over inputs of length $n$ .", + "bbox": [ + 109, + 737, + 880, + 771 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "For more information about the relation between proofs in bounded arithmetic and propositional proofs, including additional examples of propositional translations, we refer to [Bey09, Kra19].", + "bbox": [ + 109, + 787, + 880, + 821 + ], + "page_idx": 6 + }, + { + "type": "page_footnote", + "text": "Conceptually, this is analogous to the translation of a polynomial-time Turing machine $M$ into a sequence $\\{C_n\\}_{n\\geq 1}$ of polynomial-size Boolean circuits, one for each input length $n$ .", + "bbox": [ + 109, + 830, + 883, + 859 + ], + "page_idx": 6 + }, + { + "type": "page_footnote", + "text": "5We note that $G_{n}(p_{1},\\ldots ,p_{n})$ might contain auxiliary variables beyond $p_1,\\dots ,p_n$ .", + "bbox": [ + 133, + 859, + 630, + 875 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 898, + 503, + 909 + ], + "page_idx": 6 + }, + { + 
"type": "text", + "text": "3.3 Cuts of Models of Bounded Arithmetic", + "text_level": 1, + "bbox": [ + 112, + 90, + 486, + 107 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Many fundamental results in bounded arithmetic are established using model-theoretic techniques (see, e.g., the exposition of Parikh's Theorem in [Kra95]). We will provide an example in Section 5.2.2. In this section, we include the required background for the result. We assume basic familiarity with model theory.", + "bbox": [ + 111, + 118, + 883, + 167 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "While the definitions and results presented below can be adapted to other theories of bounded arithmetic, we focus on the theory $S_2^1$ for concreteness.", + "bbox": [ + 111, + 169, + 883, + 204 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Definition 3.3 (Cut in a Model of Arithmetic). A cut in a model $M$ of $\\mathsf{S}_2^1$ is a nonempty set $I \\subseteq M$ such that:", + "bbox": [ + 111, + 210, + 883, + 244 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. For every $a, b \\in M$ , if $b \\in I$ and $a < b$ then $a \\in I$ .", + "2. For every $a \\in M$ , if $a \\in I$ then $a + 1 \\in I$ ." + ], + "bbox": [ + 133, + 255, + 524, + 299 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this case, we write $I \\subseteq_{e} M$ .", + "bbox": [ + 112, + 308, + 336, + 325 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Note that a cut is not necessarily closed under operations such as addition and multiplication.", + "bbox": [ + 138, + 334, + 808, + 351 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Claim 3.4. Let $M$ be a model of $S_2^1$ , and let $I \\subseteq_e M$ . Moreover, assume that $I$ is closed under $+, \\cdot$ , and # operations. Let $\\varphi(a, \\vec{b})$ be a bounded formula with all free variables displayed. Let $\\vec{v}$ be elements of $I$ . 
Then for every $u \\in I$ ,", + "bbox": [ + 111, + 359, + 885, + 412 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\nI \\vDash \\varphi (u, \\vec {v}) \\quad \\Longleftrightarrow \\quad M \\vDash \\varphi (u, \\vec {v}).\n$$\n", + "text_format": "latex", + "bbox": [ + 359, + 412, + 635, + 431 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Claim 3.4 can be proved by induction on the complexity of $\\varphi$ . Using the claim, one can establish the following lemma.", + "bbox": [ + 111, + 439, + 883, + 472 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Lemma 3.5. Let $M$ be a model of $\\mathsf{S}_2^1$ , and let $I \\subseteq_{e} M$ . Moreover, assume that $I$ is closed under $+, \\cdot,$ and $\\#$ operations. Then $I$ is a model of $\\mathsf{S}_2^1$ .", + "bbox": [ + 111, + 481, + 883, + 517 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Since it is not hard to check that a cut $I$ as above satisfies the BASIC axioms of $S_2^1$ , the proof of Lemma 3.5 essentially amounts to verifying that $I$ satisfies the corresponding induction principle (see, e.g., [Kra95, Lemma 5.1.3] for a similar argument).", + "bbox": [ + 111, + 525, + 883, + 575 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "For a model $M$ , we say that $n \\in M$ is a length if there is $N \\in M$ such that $n = |N|$ .", + "bbox": [ + 138, + 575, + 750, + 594 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Lemma 3.6. Let $M_0$ be a nonstandard countable model of $\\mathsf{S}_2^1$ . Then there is a (countable) cut $M$ of $M_0$ that is a model of $\\mathsf{S}_2^1$ and a length $n \\in M$ , where $n = |e|$ for some nonstandard $e \\in M$ , for which the following holds. For every $b \\in M$ there is a standard number $k$ such that $M \\models |b| \\leq n^k$ .", + "bbox": [ + 111, + 602, + 883, + 654 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Proof. Let $e \\in M_0$ be nonstandard, and let $n \\triangleq |e|$ . 
Consider the set", + "bbox": [ + 111, + 662, + 607, + 679 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\nI _ {e} \\triangleq \\left\\{a \\in M _ {0} \\mid a \\leq t (e) \\text{ for some } \\mathcal{L}_{\\mathsf{B}}\\text{-term } t (x) \\right\\},\n$$\n", + "text_format": "latex", + "bbox": [ + 313, + 689, + 681, + 709 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "where we compare elements with respect to the interpretation of the relation symbol $\\leq$ in $M_0$ . Note that $I_e$ is a cut of $M_0$ and $e \\in I_e$ . Moreover, it is not hard to check that it is closed under addition, multiplication, and smash operations. By Lemma 3.5, $I_e$ is a model of $\\mathsf{S}_2^1$ . Finally, by construction, for every $b \\in I_e$ we have $b \\leq t(e)$ for some $\\mathcal{L}_{\\mathsf{B}}$ -term $t$ . A simple induction on the structure of $t$ shows the existence of a standard number $k$ such that $|b| \\leq n^k$ in $I_e$ .", + "bbox": [ + 111, + 718, + 883, + 804 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Finally, we will need the following definition.", + "bbox": [ + 138, + 815, + 470, + 830 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Definition 3.7 (Cofinal extension). We say that an extension $M'$ of a model $M$ is cofinal (or $M$ is cofinal in $M'$ ) if for every $a \\in M'$ there is $b \\in M$ such that $a \\leq b$ in $M'$ . 
If this is the case, we write $M' \\supseteq_{\\mathrm{cf}} M$ .", + "bbox": [ + 111, + 840, + 883, + 875 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 898, + 504, + 909 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4 The Strength of Bounded Arithmetic", + "text_level": 1, + "bbox": [ + 111, + 88, + 522, + 108 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In connection with the fundamental research goal mentioned in Section 1, research on the provability of complexity bounds has achieved significant progress on two complementary fronts: the formalization of several established results from algorithms and complexity within theories of bounded arithmetic, and the unprovability of complexity bounds in the same theories, often conditional on a computational assumption.", + "bbox": [ + 109, + 122, + 883, + 190 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In Section 4.1, we explore what it means to formalize results from algorithms and complexity theory within the framework of bounded arithmetic, highlighting some of the nuances involved. In Section 4.2, we present some concrete details of the formalization of a formula lower bound in $\\mathsf{PV}_1$ .", + "bbox": [ + 109, + 191, + 883, + 241 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.1 Formalization of Results from Algorithms and Complexity", + "text_level": 1, + "bbox": [ + 111, + 262, + 650, + 280 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Several central theorems from mathematics and computer science can be proved in bounded arithmetic. They include results from number theory [Woo81, PWW88], graph theory and extremal combinatorics [Oja04], randomized algorithms and probabilistic arguments [Jer05, LC11, Lé14], probabilistic checkable proofs [Pic15b], circuit lower bounds [MP20], expander graphs [BKKK20], linear algebra [TC21], Zhuk's CSP algorithm [Gay23, Gay24], etc. 
The reader can find numerous other examples in [CN10, Kra19, MP20] and references therein.", + "bbox": [ + 109, + 289, + 883, + 390 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In some cases, the formalization of an existing result in bounded arithmetic is straightforward, especially once an appropriate framework has been developed (e.g., the approximate counting framework of [Jeř07], which enables the use of tools from probability theory in $\\mathsf{APC}_1$ ). However, sometimes one needs to discover a new proof whose concepts can be defined in the theory and their associated properties established using the available inductive axioms (e.g., Razborov's formalization of the Switching Lemma [Raz95a]).", + "bbox": [ + 109, + 392, + 883, + 477 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We provide two instructive examples below. The first is a consequence of the formalization of the PCP Theorem in $\\mathsf{PV}_1$ , while the second concerns different ways of formulating a circuit lower bound statement in bounded arithmetic.", + "bbox": [ + 109, + 478, + 883, + 527 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "The PCP Theorem in $\\mathsf{PV}_1$ . Pich [Pic15b] proved the PCP Theorem in $\\mathsf{PV}_1$ by formalizing Dinur's proof [Din07]. Exploiting the standard connection between PCPs and hardness of approximation, Pich's result can be used to show that $\\mathsf{PV}_1$ establishes the NP-hardness of approximating the value of a $k$ -SAT instance. 
This means in particular that, for a suitable $\\mathcal{L}_{\\mathsf{PV}}$ -function symbol $f$ obtained from Dinur's argument, $\\mathsf{PV}_1$ proves that $f$ is a gap-inducing reduction from the Boolean Formula Satisfiability Problem to $k$ -SAT (for a sufficiently large $k$ ):", + "bbox": [ + 109, + 549, + 883, + 652 + ], + "page_idx": 8 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathrm {P V} _ {1} \\vdash \\forall \\varphi \\left(\\operatorname {F l a} (\\varphi) \\wedge \\exists y \\operatorname {S a t} (\\varphi , y) \\rightarrow k - C N F (f (\\varphi)) \\wedge \\exists z \\operatorname {S a t} (f (\\varphi), z)\\right) \\\\ \\mathrm {P V} _ {1} \\vdash \\forall \\varphi \\left(\\operatorname {F l a} (\\varphi) \\wedge \\forall y \\neg \\operatorname {S a t} (\\varphi , y) \\rightarrow k - \\operatorname {C N F} (f (\\varphi)) \\wedge \\forall z \\operatorname {V a l u e} _ {\\leq 1 - \\delta} (f (\\varphi), z)\\right) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 191, + 662, + 799, + 722 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "where all the expressions are quantifier-free $\\mathcal{L}_{\\mathrm{PV}}$ -formulas: $\\mathsf{Fla}(x)$ checks if $x$ is a valid description of a Boolean formula, $k$ -CNF(x) checks if $x$ is a valid description of a $k$ -CNF, $\\mathsf{Sat}(u,v)$ checks if $v$ is a satisfying assignment for $u$ , and $\\mathsf{Value}_{\\leq 1 - \\delta}(u,v)$ holds if $v$ satisfies at most a $(1 - \\delta)$ -fraction of the clauses in $u$ (with $\\delta > 0$ being a universal constant from the formalized Dinur's proof).", + "bbox": [ + 109, + 732, + 883, + 800 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In the formalization the key point is that $\\mathsf{PV}_1$ proves that the function symbol $f$ behaves as expected. 
In practice, in order to achieve this, a typical formalization is presented in a semi-formal way, and might claim on a few occasions that some algorithm $f_1$ constructed in a particular way from another algorithm $f_2$ can be defined in $\\mathsf{PV}_1$ . This means that $\\mathsf{PV}_1$ proves that $f_1$ behaves as described in the definition.", + "bbox": [ + 111, + 801, + 883, + 869 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 898, + 503, + 909 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "This is possible thanks to Cobham's characterization of FP and the axioms of $\\mathrm{PV}_1$ , which ensure that the theory \"understands\" how different algorithms are constructed from one another. In many cases, the verification that $\\mathrm{PV}_1$ proves the desired properties is straightforward but tedious, requiring some initial setup of basic capabilities of $\\mathrm{PV}_1$ (often referred to as \"bootstrapping\") which is part of the standard background in bounded arithmetic.", + "bbox": [ + 109, + 90, + 887, + 176 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Circuit Lower Bound Statements. We discuss two ways of formalizing a complexity lower bound. 
In this example, for a given size bound $s(n)$ (e.g., $s(n) = n^2$ ), we consider an $\\mathcal{L}_{\\mathrm{PV}}$ -sentence $\\mathsf{FLB}_s^\\oplus$ stating that Boolean formulas for the parity function on $n$ bits require at least $s(n)$ leaves:", + "bbox": [ + 109, + 196, + 883, + 250 + ], + "page_idx": 9 + }, + { + "type": "equation", + "text": "\n$$\n\\forall N \\forall n \\forall F (n = | N | \\wedge n \\geq 1 \\wedge \\mathsf {F l a} (F) \\wedge \\mathsf {S i z e} (F) < s (n) \\rightarrow \\exists x (| x | \\leq n \\wedge \\mathsf {E v a l} (F, x) \\neq \\oplus (x)),\n$$\n", + "text_format": "latex", + "bbox": [ + 142, + 261, + 849, + 280 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "where we identify $n$ -bit strings with natural numbers of length at most $n$ , and employ a well-behaved $\\mathcal{L}_{\\mathrm{PV}}$ -function symbol $\\oplus$ such that $\\mathrm{PV}_1$ proves the basic properties of the parity function, e.g., $\\mathrm{PV}_1 \\vdash \\oplus (x1) = 1 - \\oplus (x)$ .6", + "bbox": [ + 109, + 292, + 883, + 344 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Note that $\\mathsf{FLB}_s^\\oplus$ is a $\\forall \\Sigma_1^b$ -sentence. Consequently, if $\\mathsf{PV}_1 \\vdash \\mathsf{FLB}_s^\\oplus$ , we obtain via Herbrand's Theorem (Theorem 3.1) a polynomial-time algorithm $A$ that, when given $N$ of length $n$ and the description of an $n$ -bit formula $F$ of size $< s(n)$ , $A(N,F)$ outputs a string $x \\in \\{0,1\\}^n$ such that $F(x) \\neq \\oplus(x)$ . In other words, circuit lower bounds provable in $\\mathsf{PV}_1$ are constructive in the sense that they also provide an efficient refuter witnessing that $F$ does not compute parity (see [CJSW21] for more on this topic).", + "bbox": [ + 109, + 345, + 883, + 430 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "The aforementioned formalization is informally referred to as a \"Log\" formalization of circuit lower bounds. 
This is because the main parameter $n$ is the length of a variable $N$ and all objects quantified over are of length polynomial in $n$. It is also possible to consider a formalization where $n = ||N||$ ( $n$ is the length of the length of $N$ ), which is known as a \"LogLog\" formalization. This allows us to quantify over exponentially larger objects, e.g., under such a formalization the entire truth-table of a formula $F$ has length polynomial in the length of $N$ .", + "bbox": [ + 109, + 430, + 883, + 532 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Obtaining a Log formalization (e.g., [MP20]) is a stronger result than obtaining a LogLog formalization (e.g., [Raz95a]). In particular, in contrast to the discussion above, a witnessing theorem applied to a LogLog formalization provides a refuter with access to $N$ and thus running in time $\\mathrm{poly}(N) = \\mathrm{poly}(2^n)$ . Conversely, the unprovability of a LogLog circuit lower bound statement (e.g., [PS21, LO23]) is a stronger result than the unprovability of a Log statement. We refer to the introduction of [MP20] for a more extensive discussion on this matter.", + "bbox": [ + 109, + 532, + 883, + 633 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4.2 Concrete Example: Subbotovskaya's Formula Lower Bound in $\\mathsf{PV}_1$", + "text_level": 1, + "bbox": [ + 109, + 655, + 723, + 672 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "In this section, we explore some details of a formalization in $\\mathrm{PV}_1$ that the parity function $\\oplus$ on $n$ bits requires Boolean formulas of size $\\geq n^{3/2}$ [Sub61]. We follow the notation introduced in Section 4.1.", + "bbox": [ + 109, + 683, + 883, + 715 + ], + "page_idx": 9 + }, + { + "type": "equation", + "text": "\n$$\n\\text{Theorem 4.1 ([CKK}^{+}\\text{24]). Let } s(n) \\triangleq n^{3/2}. \\text{ Then } \\mathsf{PV}_{1} \\vdash \\mathsf{FLB}_{s}^{\\oplus}.\n$$\n", + "text_format": "latex", + "bbox": [ + 112, + 726, + 586, + 744 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "The formalization is an adaptation of the argument presented in [Juk12, Section 6.3], which proceeds as follows:", + "bbox": [ + 109, + 756, + 883, + 789 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "1. [Juk12, Lemma 6.8]: For any formula $F$ on $n$ -bit inputs, it is possible to fix one of its variables so that the resulting formula $F_{1}$ satisfies $\\mathrm{Size}(F_1) \\leq (1 - 1 / n)^{3 / 2} \\cdot \\mathrm{Size}(F)$ .", + "bbox": [ + 133, + 801, + 883, + 837 + ], + "page_idx": 9 + }, + { + "type": "page_footnote", + "text": "We often abuse notation and treat $x$ as a string in semi-formal discussions.", + "bbox": [ + 132, + 845, + 580, + 861 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 488, + 898, + 509, + 909 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "2. [Juk12, Theorem 6.10]: If we apply this result $\\ell \\triangleq n - k$ times, we obtain a formula $F_{\\ell}$ on $k$ -bit inputs such that", + "bbox": [ + 133, + 90, + 879, + 125 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {S i z e} (F _ {\\ell}) \\leq \\operatorname {S i z e} (F) \\cdot (1 - 1 / n) ^ {3 / 2} \\cdot (1 - 1 / (n - 1)) ^ {3 / 2} \\dots (1 - 1 / (k + 1)) ^ {3 / 2} = \\operatorname {S i z e} (F) \\cdot (k / n) ^ {3 / 2}.\n$$\n", + "text_format": "latex", + "bbox": [ + 155, + 136, + 880, + 157 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "3. 
[Juk12, Example 6.11]: Finally, if the initial formula $F$ computes the parity function, by setting $\\ell = n - 1$ we get $1 \\leq \\operatorname{Size}(F_{\\ell}) \\leq (1/n)^{3/2} \\cdot \\operatorname{Size}(F)$ , and consequently $\\operatorname{Size}(F) \\geq n^{3/2}$ .", + "bbox": [ + 133, + 176, + 879, + 210 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "We present the argument in a more constructive way when formalizing the result in $\\mathrm{PV}_1$ . In more detail, given a small formula $F$ , we recursively construct (and establish correctness by induction) an $n$ -bit input $y$ witnessing that $F$ does not compute the parity function.", + "bbox": [ + 109, + 220, + 880, + 272 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Proof. We follow closely the presentation from $\\left[\\mathrm{CKK}^{+}24\\right]$ . For brevity, we only discuss the formalization of the main inductive argument. More details can be found in $\\left[\\mathrm{CKK}^{+}24\\right]$ . Given $b \\in \\{0,1\\}$ , we introduce the function $\\oplus^b(x) \\triangleq \\oplus(x) + b \\pmod{2}$ . In order to prove $\\mathsf{FLB}_s^\\oplus$ in $\\mathsf{PV}_1$ , we explicitly consider a polynomial-time function $R(1^n, F, b)$ with the following property:", + "bbox": [ + 109, + 284, + 880, + 352 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "If $\\operatorname{Size}(F) < s(n)$ then $R(1^n, F, b)$ outputs an $n$ -bit string $y_n^b$ such that $\\operatorname{Eval}(F, y_n^b) \\neq \\oplus^b(y_n^b)$ .", + "bbox": [ + 155, + 359, + 834, + 378 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "In other words, $R(1^n,F,b)$ witnesses that the formula $F$ does not compute the function $\\oplus^b$ over $n$ -bit strings. 
Note that the correctness of $R$ is captured by a sentence $\\operatorname{Ref}_{R,s}$ described as follows:", + "bbox": [ + 109, + 383, + 879, + 417 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\n\\forall 1 ^ {n} \\forall F (\\mathsf {F l a} (F) \\wedge \\mathsf {S i z e} (F) < s (n) \\rightarrow | y _ {n} ^ {0} | _ {\\ell} = | y _ {n} ^ {1} | _ {\\ell} = n \\wedge F (y _ {n} ^ {0}) \\neq \\oplus^ {0} (y _ {n} ^ {0}) \\wedge F (y _ {n} ^ {1}) \\neq \\oplus^ {1} (y _ {n} ^ {1}))\n$$\n", + "text_format": "latex", + "bbox": [ + 140, + 430, + 849, + 450 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "where we employ the abbreviations $y_{n}^{0} \\triangleq R(1^{n}, F, 0)$ and $y_{n}^{1} \\triangleq R(1^{n}, F, 1)$ , and for convenience use $|z|_{\\ell}$ to denote the bitlength of $z$ . Our plan is to define $R$ and show that $\\mathsf{PV}_1 \\vdash \\mathsf{Ref}_{R,s}$ . Note that this implies $\\mathsf{FLB}_s^{\\oplus}$ in $\\mathsf{PV}_1$ by standard first-order logic reasoning.", + "bbox": [ + 109, + 462, + 879, + 513 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "The correctness of $R(1^n, F, b)$ will be established by polynomial induction on $N$ (equivalently, induction on $n = |N|$ ). Since $\\operatorname{Ref}_{R,s}$ is a universal sentence and $S_2^1(\\mathcal{L}_{\\mathsf{PV}})$ is $\\forall \\Sigma_1^b$ -conservative over $\\mathsf{PV}_1$ (i.e., provability of such a sentence in $S_2^1(\\mathcal{L}_{\\mathsf{PV}})$ implies its provability in $\\mathsf{PV}_1$ ), it is sufficient to describe a formalization in the more convenient theory $S_2^1(\\mathcal{L}_{\\mathsf{PV}})$ . For this reason, polynomial induction for NP and coNP predicates (admissible in $S_2^1(\\mathcal{L}_{\\mathsf{PV}})$ ; see, e.g., [Kra95, Section 5.2]) is available during the formalization. 
More details follow.", + "bbox": [ + 109, + 515, + 879, + 613 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "The procedure $R(1^n, F, b)$ makes use of a few polynomial-time sub-routines (briefly discussed in the comments in the pseudocode below) and is defined in the following way:", + "bbox": [ + 109, + 618, + 879, + 651 + ], + "page_idx": 10 + }, + { + "type": "page_footnote", + "text": "7 Actually, for technical reasons related to the induction step, we will simultaneously construct an $n$ -bit input $y_{n}^{0}$ witnessing that $F$ does not compute the parity function and an $n$ -bit input $y_{n}^{1}$ witnessing that $F$ does not compute the negation of the parity function.", + "bbox": [ + 111, + 674, + 879, + 714 + ], + "page_idx": 10 + }, + { + "type": "page_footnote", + "text": "For convenience, we often write $1^n$ instead of explicitly considering parameters $N$ and $n = |N|$ . In practice, it means that $R$ gets as input $N$ (together with other parameters) but with respect to $N$ it only depends on $n = |N|$ .", + "bbox": [ + 112, + 717, + 880, + 744 + ], + "page_idx": 10 + }, + { + "type": "page_footnote", + "text": "${}^{9}$ Similarly,the notation ${\\forall 1}^{n}$ denotes $\\forall N\\forall n$ but we add the condition that $n = \\left| N\\right|$ in the subsequent formula. We might also write just $F\\left( x\\right)$ instead of $\\operatorname{Eval}\\left( {F,x}\\right)$", + "bbox": [ + 114, + 746, + 880, + 773 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 488, + 897, + 506, + 907 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Input: $1^n$ for some $n \\geq 1$ , formula $F$ over $n$ -bit inputs, $b \\in \\{0,1\\}$ .", + "bbox": [ + 140, + 95, + 624, + 112 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1 Let $s(n) \\triangleq n^{3/2}$ . 
If $\\operatorname{Size}(F) \\geq s(n)$ or $\\neg \\mathsf{Fla}(F)$ return \"error\";", + "2 If $\\operatorname{Size}(F) = 0$ , $F$ computes a constant function $b_{F} \\in \\{0,1\\}$ . In this case, return the $n$ -bit string $y_{n}^{b} \\triangleq y_{1}^{b} 0^{n-1}$ such that $\\oplus^{b}(y_{1}^{b} 0^{n-1}) \\neq b_{F}$ ;", + "3 Let $\\widetilde{F} \\triangleq \\text{Normalize}(1^n, F)$ ; // $\\widetilde{F}$ satisfies the conditions in the proof of [Juk12, Claim 6.9], $\\text{Size}(\\widetilde{F}) \\leq \\text{Size}(F)$ , $\\forall x \\in \\{0, 1\\}^n F(x) = \\widetilde{F}(x)$ .", + "4 Let $\\rho \\triangleq \\text{Find-Restriction}(1^n, \\widetilde{F})$ , where $\\rho: [n] \\to \\{0, 1, \\star\\}$ and $|\\rho^{-1}(\\star)| = n - 1$ ; // $\\rho$ restricts a suitable variable $x_i$ to a bit $c_i$ , as in [Juk12, Lemma 6.8].", + "5 Let $F' \\triangleq \\text{Apply-Restriction}(1^n, \\widetilde{F}, \\rho)$ . Moreover, let $b' \\triangleq b \\oplus c_i$ and $n' \\triangleq n - 1$ ; // $F'$ is an $n'$ -bit formula; $\\forall z \\in \\{0, 1\\}^{\\rho^{-1}(\\star)} F'(z) = \\widetilde{F}(z \\cup x_i \\mapsto c_i)$ .", + "6 Let $y_{n'}^{b'} \\triangleq R(1^{n'}, F', b')$ and return the $n$ -bit string $y_n^b \\triangleq y_{n'}^{b'} \\cup y_i \\mapsto c_i$ ;" + ], + "bbox": [ + 125, + 112, + 839, + 339 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Algorithm 1: Refuter Algorithm $R(1^n, F, b)$ [CKK+24].", + "bbox": [ + 263, + 343, + 676, + 359 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "(The pseudocode presented above is only an informal specification of $R(1^n, F, b)$ . 
As mentioned in Section 4.1, a completely formal proof in $\\mathsf{PV}_1$ would employ Cobham's formalism and would specify how $R(1^n, F, b)$ can be defined from previously defined algorithms (e.g., Apply-Restriction) via the allowed operations.)", + "bbox": [ + 111, + 369, + 883, + 436 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "We note that $R(1^n, F, b)$ runs in time polynomial in $n + |F| + |b|$ and that it is definable in $\\mathsf{S}_2^1(\\mathcal{L}_{\\mathsf{PV}})$ . Next, as an instructive example, we establish the correctness $R(1^n, F, b)$ in $\\mathsf{S}_2^1(\\mathcal{L}_{\\mathsf{PV}})$ by polynomial induction (PIND) for $\\Pi_1^b$ -formulas, assuming that the subroutines appearing in the pseudocode of $R(1^n, F, b)$ satisfy the necessary properties (provably in $\\mathsf{S}_2^1(\\mathcal{L}_{\\mathsf{PV}})$ ).", + "bbox": [ + 111, + 436, + 883, + 506 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Lemma 4.2. Let $s(n) \\triangleq n^{3/2}$ . Then $\\mathsf{S}_2^1(\\mathcal{L}_{\\mathsf{PV}}) \\vdash \\mathsf{Ref}_{R,s}$ .", + "bbox": [ + 112, + 512, + 516, + 531 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Proof. 
We consider the formula $\\varphi(N)$ defined as", + "bbox": [ + 112, + 542, + 470, + 558 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\forall F \\forall n (n = | N | \\wedge n \\geq 1 \\wedge \\operatorname {F l a} (F) \\wedge \\operatorname {S i z e} (F) < s (n)) \\rightarrow \\\\ \\left(\\left| y _ {n} ^ {0} \\right| _ {\\ell} = \\left| y _ {n} ^ {1} \\right| _ {\\ell} = n \\wedge F \\left(y _ {n} ^ {0}\\right) \\neq \\oplus^ {0} \\left(y _ {n} ^ {0}\\right) \\wedge F \\left(y _ {n} ^ {1}\\right) \\neq \\oplus^ {1} \\left(y _ {n} ^ {1}\\right)\\right), \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 196, + 571, + 805, + 622 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "where as before we use $y_{n}^{0} \\triangleq R(1^{n}, F, 0)$ and $y_{n}^{1} \\triangleq R(1^{n}, F, 1)$ . Note that $\\varphi(N)$ is a $\\Pi_1^b$ -formula. Below, we argue that", + "bbox": [ + 111, + 628, + 882, + 662 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\n\\mathsf {S} _ {2} ^ {1} (\\mathcal {L} _ {\\mathsf {P V}}) \\vdash \\varphi (1) \\quad \\text {a n d} \\quad \\mathsf {S} _ {2} ^ {1} (\\mathcal {L} _ {\\mathsf {P V}}) \\vdash \\forall N \\varphi (\\lfloor N / 2 \\rfloor) \\rightarrow \\varphi (N).\n$$\n", + "text_format": "latex", + "bbox": [ + 271, + 662, + 720, + 681 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Then, by polynomial induction for $\\Pi_1^b$ -formulas (available in $\\mathsf{S}_2^1(\\mathcal{L}_{\\mathsf{PV}})$ ) and using that $\\varphi(0)$ trivially holds, it follows that $\\mathsf{S}_2^1(\\mathcal{L}_{\\mathsf{PV}}) \\vdash \\forall N \\varphi(N)$ . In turn, this yields $\\mathsf{S}_2^1(\\mathcal{L}_{\\mathsf{PV}}) \\vdash \\mathsf{Ref}_{R,s}$ .", + "bbox": [ + 111, + 688, + 883, + 724 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Base Case: $\\mathsf{S}_2^1 (\\mathcal{L}_{\\mathrm{PV}})\\vdash \\varphi (1)$ . 
In this case, for a given formula $F$ and length $n$ , the hypothesis of $\\varphi (1)$ is satisfied only if $n = 1$ , $F$ is a valid description of a formula, and $\\operatorname {Size}(F) = 0$ . Let $y_1^0\\triangleq R(1,F,0)$ and $y_{1}^{1}\\triangleq R(1,F,1)$ . We need to prove that", + "bbox": [ + 111, + 742, + 883, + 795 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\n\\left| y _ {1} ^ {0} \\right| _ {\\ell} = \\left| y _ {1} ^ {1} \\right| _ {\\ell} = 1 \\wedge F \\left(y _ {1} ^ {0}\\right) \\neq \\oplus^ {0} \\left(y _ {1} ^ {0}\\right) \\wedge F \\left(y _ {1} ^ {1}\\right) \\neq \\oplus^ {1} \\left(y _ {1} ^ {1}\\right).\n$$\n", + "text_format": "latex", + "bbox": [ + 287, + 805, + 705, + 825 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Since $n = 1$ and $\\mathrm{Size}(F) = 0$ , $F$ evaluates to a constant $b_{F}$ on every input bit. The statement above is implied by Line 2 in the definition of $R(n,F,b)$ .", + "bbox": [ + 111, + 838, + 883, + 873 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 488, + 898, + 508, + 909 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "(Polynomial) Induction Step: $\\mathsf{S}_2^1 (\\mathcal{L}_{\\mathsf{PV}})\\vdash \\forall N\\varphi (\\lfloor N / 2\\rfloor)\\to \\varphi (N)$ . Fix an arbitrary $N$ , let $n\\triangleq |N|$ , and assume that $\\varphi (\\lfloor N / 2\\rfloor)$ holds. 
By the induction hypothesis, for every valid formula $F^{\\prime}$ with $\\mathrm{Size}(F^{\\prime}) < n'^{3 / 2}$ , where $n^\\prime \\triangleq n - 1$ , we have", + "bbox": [ + 111, + 90, + 880, + 140 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\left| y _ {n ^ {\\prime}} ^ {0} \\right| _ {\\ell} = \\left| y _ {n ^ {\\prime}} ^ {1} \\right| _ {\\ell} = n ^ {\\prime} \\wedge F ^ {\\prime} \\left(y _ {n ^ {\\prime}} ^ {0}\\right) \\neq \\oplus^ {0} \\left(y _ {n ^ {\\prime}} ^ {0}\\right) \\wedge F ^ {\\prime} \\left(y _ {n ^ {\\prime}} ^ {1}\\right) \\neq \\oplus^ {1} \\left(y _ {n ^ {\\prime}} ^ {1}\\right), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 254, + 152, + 880, + 172 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "where $y_{n^{\\prime}}^{0}\\triangleq R(1^{n^{\\prime}},F^{\\prime},0)$ and $y_{n^{\\prime}}^{1}\\triangleq R(1^{n^{\\prime}},F^{\\prime},1)$", + "bbox": [ + 111, + 181, + 483, + 200 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Now let $n \\geq 2$ , and let $F$ be a valid description of a formula over $n$ -bit inputs with $\\mathrm{Size}(F) < n^{3/2}$ . By the size bound on $F$ , $R(1^n, F, b)$ ignores Line 1. If $\\mathrm{Size}(F) = 0$ , then similarly to the base case it is trivial to check that the conclusion of $\\varphi(N)$ holds. Therefore, we assume that $\\mathrm{Size}(F) \\geq 1$ and $R(1^n, F, b)$ does not stop at Line 2.", + "bbox": [ + 109, + 200, + 880, + 267 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Consider the following definitions:", + "bbox": [ + 138, + 268, + 392, + 284 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "1. $\\widetilde{F} \\triangleq \\mathrm{Normalize}(1^n, F)$ (Line 3),", + "bbox": [ + 135, + 297, + 395, + 316 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "5. 
$b' \\triangleq b \\oplus c_i$ (Line 5), where $\\rho$ restricts $x_i$ to $c_i$ ,", + "bbox": [ + 526, + 297, + 880, + 316 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "2. $\\rho \\triangleq$ Find-Restriction $(1^n,\\widetilde{F})$ (Line 4),", + "bbox": [ + 135, + 327, + 434, + 347 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "6. $y_{n^{\\prime}}^{b^{\\prime}}\\triangleq R(1^{n^{\\prime}},F^{\\prime},b^{\\prime})$ (Line 6),", + "bbox": [ + 526, + 325, + 761, + 347 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "3. $F^{\\prime}\\triangleq$ Apply-Restriction $(1^{n},\\widetilde{F},\\rho)$ (Line 5),", + "bbox": [ + 135, + 356, + 470, + 376 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "7. $y_{n}^{b}\\triangleq y_{n^{\\prime}}^{b^{\\prime}}\\cup y_{i}\\mapsto c_{i}$ (Line 6),", + "bbox": [ + 526, + 354, + 759, + 376 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "4. $n^{\\prime}\\triangleq n - 1$ (Line 5),", + "bbox": [ + 133, + 386, + 308, + 404 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "8. 
$s \\triangleq \\operatorname{Size}(F)$ , $\\widetilde{s} \\triangleq \\operatorname{Size}(\\widetilde{F})$ , and $s' \\triangleq \\operatorname{Size}(F')$ .", + "bbox": [ + 526, + 385, + 880, + 405 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We rely on the provability in $\\mathsf{S}_2^1 (\\mathcal{L}_{\\mathsf{PV}})$ of the following statements about the subroutines of $R(1^{n},F,b)$ (see [CKK+24]):", + "bbox": [ + 111, + 417, + 880, + 453 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "(i) $\\widetilde{s}\\leq s$", + "bbox": [ + 130, + 465, + 209, + 483 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "(iii) $\\forall x\\in \\{0,1\\} ^n\\widetilde{F} (x) = F(x)$", + "bbox": [ + 513, + 465, + 759, + 484 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "(ii) $s' \\leq \\widetilde{s} \\cdot (1 - 1/n)^{3/2}$ ,", + "bbox": [ + 127, + 494, + 321, + 513 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "(iv) $\\forall z\\in \\{0,1\\}^{\\rho^{-1}(\\star)}F'(z) = \\widetilde{F}\\big(z\\cup x_i\\mapsto c_i\\big).$", + "bbox": [ + 514, + 493, + 867, + 513 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "By Items (i) and (ii) together with the bound $s < n^{3/2}$ ,", + "bbox": [ + 112, + 526, + 506, + 544 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\mathsf {S} _ {2} ^ {1} \\left(\\mathcal {L} _ {\\mathsf {P V}}\\right) \\vdash s ^ {\\prime} \\leq \\widetilde {s} \\cdot (1 - 1 / n) ^ {3 / 2} \\leq s \\cdot (1 - 1 / n) ^ {3 / 2} < n ^ {3 / 2} \\cdot (1 - 1 / n) ^ {3 / 2} = (n - 1) ^ {3 / 2}.\n$$\n", + "text_format": "latex", + "bbox": [ + 169, + 553, + 821, + 574 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Thus $F'$ is a valid formula on $n'$ -bit inputs of size $< n'^{3/2}$ . By the first condition in the induction hypothesis (Equation (1)) and the definition of each $y_{n}^{b}$ , we have $|y_{n}^{0}|_{\\ell} = |y_{n}^{1}|_{\\ell} = n$ . 
Using the definitions listed above, the last two conditions in the induction hypothesis (Equation (1)), and Items (iii) and (iv), we derive in $S_{2}^{1}(\\mathcal{L}_{\\mathsf{PV}})$ the following statements for each $b \\in \\{0, 1\\}$ :", + "bbox": [ + 111, + 584, + 880, + 654 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\nF ^ {\\prime} \\left(y _ {n ^ {\\prime}} ^ {b ^ {\\prime}}\\right) \\neq \\oplus^ {b ^ {\\prime}} \\left(y _ {n ^ {\\prime}} ^ {b ^ {\\prime}}\\right),\n$$\n", + "text_format": "latex", + "bbox": [ + 408, + 662, + 583, + 685 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\nF (y _ {n} ^ {b}) = F ^ {\\prime} (y _ {n ^ {\\prime}} ^ {b ^ {\\prime}}),\n$$\n", + "text_format": "latex", + "bbox": [ + 419, + 686, + 576, + 705 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\nF (y _ {n} ^ {b}) \\neq \\oplus^ {b ^ {\\prime}} (y _ {n ^ {\\prime}} ^ {b ^ {\\prime}}).\n$$\n", + "text_format": "latex", + "bbox": [ + 419, + 705, + 581, + 726 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Therefore, using basic facts about the function symbols $\\oplus^0$ and $\\oplus^1$ ,", + "bbox": [ + 112, + 736, + 599, + 753 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\oplus^ {b ^ {\\prime}} \\left(y _ {n ^ {\\prime}} ^ {b ^ {\\prime}}\\right) = \\oplus^ {b \\oplus c _ {i}} \\left(y _ {n ^ {\\prime}} ^ {b ^ {\\prime}}\\right) = c _ {i} \\oplus \\left(\\oplus^ {b} \\left(y _ {n ^ {\\prime}} ^ {b ^ {\\prime}}\\right)\\right) = c _ {i} \\oplus \\left(\\oplus^ {b} \\left(y _ {n} ^ {b}\\right) \\oplus c _ {i}\\right) = \\oplus^ {b} \\left(y _ {n} ^ {b}\\right).\n$$\n", + "text_format": "latex", + "bbox": [ + 223, + 763, + 766, + 785 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "These statements imply that, for each $b \\in \\{0,1\\}$ , $F(y_{n}^{b}) \\neq \\oplus^{b}(y_{n}^{b})$ . In other words, the conclusion of $\\varphi(N)$ holds. 
This completes the proof of the induction step.", + "bbox": [ + 111, + 795, + 880, + 829 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "As explained above, the provability of $\\operatorname{Ref}_{R,s}$ in $\\mathsf{S}_2^1 (\\mathcal{L}_{\\mathsf{PV}})$ implies its provability in $\\mathsf{PV}_1$ . Since $\\mathsf{PV}_1 \\vdash \\operatorname{Ref}_{R,s} \\to \\mathsf{FLB}_s^\\oplus$ , this completes the proof of Theorem 4.1.", + "bbox": [ + 111, + 840, + 880, + 875 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 488, + 898, + 506, + 909 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We have seen that a non-trivial formula size lower bound can be established in $\\mathsf{PV}_1$ . More advanced circuit lower bounds are known to be provable assuming additional axioms extending $\\mathsf{PV}_1$ (e.g., [Kra95, Section 15.2] and [MP20]), but their provability in $\\mathsf{PV}_1$ (or equivalently, in $\\mathsf{S}_2^1(\\mathcal{L}_{\\mathsf{PV}})$ ) is less clear.", + "bbox": [ + 109, + 90, + 885, + 146 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Open Problem 4.3. For each $d \\geq 1$ and $\\ell \\geq 1$ , can $\\mathsf{PV}_1$ prove that the parity function on $n$ bits cannot be computed by depth- $d$ circuits of size $n^\\ell$ ?", + "bbox": [ + 109, + 154, + 885, + 189 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Open Problem 4.4. 
For each $\\ell \\geq 1$ , is there a constant $k = k(\\ell)$ such that $\\mathsf{PV}_1$ proves that every monotone circuit for the $k$ -clique problem on $n$ -vertex graphs must be of size at least $n^\\ell$ ?", + "bbox": [ + 109, + 199, + 885, + 233 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "5 Unprovability of Complexity Bounds", + "text_level": 1, + "bbox": [ + 109, + 258, + 524, + 280 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "The investigation of the unprovability of complexity bounds within theories of bounded arithmetic has a long and rich history. Much of the early work took place in the nineties, with significant results obtained by Razborov [Raz95a, Raz95b], Krajicek [Kra97], and other researchers. Since then, and in particular over the last decade, there has been renewed interest and progress in establishing unprovability results (see, e.g., [CK07, PS21, CKKO21, LO23, ABM23] and references therein).", + "bbox": [ + 109, + 292, + 883, + 377 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "In Section 5.1, we consider the unprovability of complexity upper bounds. The unprovability of an inclusion such as $\\mathsf{NP} \\subseteq \\mathsf{SIZE}[n^k]$ is equivalent to the consistency of NP $\\not\\subseteq \\mathsf{SIZE}[n^k]$ with the corresponding theory. Such a consistency result establishes that, while we cannot confirm the separation is true in the standard model of natural numbers, we know it holds in a non-standard model of a theory so strong that complexity theory appears almost indistinguishable from the standard one. We stress that establishing the consistency of a lower bound is a necessary step towards showing that the lower bound is true. 
For this reason, the unprovability of upper bounds can be formally seen as progress towards showing unconditional complexity lower bounds.", + "bbox": [ + 109, + 378, + 883, + 513 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "In Section 5.2, we turn our attention to the unprovability of complexity lower bounds. This direction is partly driven by the desire to formally understand why proving complexity lower bounds is challenging, and to explore the possibility of a more fundamental underlying reason for this difficulty. Moreover, it might provide examples of hard sentences for logical theories and of hard propositional tautologies for proof systems. The investigation of the meta-mathematics of lower bounds has also found unexpected applications in algorithms and complexity (e.g., [CIKK16]).", + "bbox": [ + 109, + 515, + 883, + 617 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Finally, in Section 5.3 we connect the two directions and explain how the unprovability of circuit lower bounds in $\\mathsf{PV}_1$ yields the unprovability of $\\mathsf{P} = \\mathsf{NP}$ in $\\mathsf{PV}_1$ . The latter can be seen as a weakening of the $\\mathsf{P}$ versus NP problem that considers the existence of feasible proofs that $\\mathsf{P} = \\mathsf{NP}$ . This further motivates the investigation of the unprovability of lower bounds.", + "bbox": [ + 109, + 618, + 883, + 686 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "5.1 Unprovability of Upper Bounds", + "text_level": 1, + "bbox": [ + 109, + 705, + 428, + 724 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "5.1.1 LEARN-Uniform Circuits and Unprovability", + "text_level": 1, + "bbox": [ + 109, + 733, + 516, + 750 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Cook and Krajicek [CK07] considered the provability of NP $\\subseteq$ SIZE[poly] in bounded arithmetic and obtained a number of conditional negative results. 
[KO17], building on techniques from [CK07], showed that for no integer $k\\geq 1$ the theory $\\mathsf{PV}_1$ proves that $\\mathsf{P}\\subseteq \\mathsf{SIZE}[n^k ]$ . Note that this is an unconditional result. Thus, for a natural theory capable of formalizing advanced results from complexity theory, such as the PCP Theorem, we can unconditionally rule out the provability of $\\mathsf{P}\\subseteq \\mathsf{SIZE}[n^{k}]$ . A slightly stronger model-theoretic formulation of the result of [KO17] appears in [BM20].", + "bbox": [ + 109, + 758, + 883, + 863 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 488, + 898, + 509, + 910 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "[BKO20] obtained results for stronger theories and ruled out the provability of infinitely often inclusions. In more detail, for an $\\mathcal{L}_{\\mathrm{PV}}$ -function symbol $h$ , consider the sentence", + "bbox": [ + 111, + 90, + 880, + 125 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\left. \\cup B _ {k} ^ {i. o.} [ h ] \\triangleq \\forall 1 ^ {m} \\exists 1 ^ {n} \\exists C _ {n} \\forall x \\left(n \\geq m \\wedge | C _ {n} | \\leq n ^ {k} \\wedge \\left(| x | \\leq n \\rightarrow \\psi (n, C _ {n}, x, h)\\right)\\right), \\right. ^ {1 0}\n$$\n", + "text_format": "latex", + "bbox": [ + 184, + 137, + 807, + 157 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "where $\\psi$ is a quantifier-free $\\mathcal{L}_{\\mathsf{PV}}$ -formula stating that $h(x) \\neq 0$ if and only if the evaluation of the circuit $C_n$ on $x$ (viewed as an $n$ -bit string) is 1. In other words, $\\mathsf{UB}_k^{i.o.}[h]$ states that the language defined by $h$ (which is in $\\mathsf{P}$ ) admits circuits of size at most $n^k$ on infinitely many input lengths $n$ . 
[BKO20] showed that for each $k \\geq 1$ , there is an $\\mathcal{L}_{\\mathsf{PV}}$ -function symbol $h$ such that $\\mathsf{PV}_1$ does not prove $\\mathsf{UB}_k^{i.o.}[h]$ . Similarly, they established that $\\mathsf{S}_2^1 \\not\\vdash \\mathsf{NP} \\subseteq \\text{i.o.SIZE}[n^k]$ and $\\mathsf{T}_2^1 \\not\\vdash \\mathsf{P}^{\\mathsf{NP}} \\subseteq \\text{i.o.SIZE}[n^k]$ .", + "bbox": [ + 109, + 170, + 883, + 255 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Building on these results, [CKKO21] introduced a modular framework to establish the unprovability of circuit upper bounds in bounded arithmetic using a learning-theoretic perspective. Next, we describe how their approach can be used to show a slightly weaker form of the result from [BKO20] described above. For an $\\mathcal{L}_{\\mathrm{PV}}$ -function symbol $h$ , we consider a sentence $\\mathsf{UB}_{c,k}[h]$ stating that $L_{h} \\in \\mathsf{SIZE}[c \\cdot n^{k}]$ , where $x \\in L_{h}$ if and only if $h(x) \\neq 0$ , i.e.,", + "bbox": [ + 109, + 256, + 883, + 340 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\bigcup \\mathrm {B} _ {c, k} [ h ] \\triangleq \\forall 1 ^ {n} \\exists C _ {n} \\forall x \\left(\\left| C _ {n} \\right| \\leq c \\cdot n ^ {k} \\wedge \\left(\\left| x \\right| \\leq n \\rightarrow (\\operatorname {E v a l} \\left(C _ {n}, x, n\\right) = 1 \\leftrightarrow h (x) \\neq 0)\\right)\\right), \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 166, + 353, + 883, + 373 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "where $\\operatorname{Eval}(C_n, x, n)$ is an $\\mathcal{L}_{\\mathrm{PV}}$ -function that evaluates the circuit $C_n$ on the $n$ -bit string described by $x$ . Our goal is to show that for every $k \\geq 1$ there is a function symbol $h$ such that, for no choice of $c \\geq 1$ , $\\mathrm{PV}_1$ proves $\\mathrm{UB}_{c,k}[h]$ . 
(Note that in all results discussed in this section, we consider Log formalizations, as explained in Section 4.1.)", + "bbox": [ + 109, + 386, + 883, + 454 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Overview of the Approach. Note that $\\mathsf{UB}_{c,k}[h]$ claims the existence of circuits for $L_{h}$ , i.e., it states a non-uniform upper bound. We explore the constructive aspect of $\\mathsf{PV}_1$ proofs, by extracting computational information from a $\\mathsf{PV}_1$ -proof that such circuits exist. The argument has a logical component, where we extract from a proof of $\\mathsf{UB}_{c,k}[h]$ a \"LEARN-uniform\" construction of a sequence $\\{C_n\\}_n$ of circuits for $L_{h}$ and a complexity-theoretic component, where we unconditionally establish that for each $k$ LEARN-uniform circuits of this form do not exist for some $h$ . Altogether, we get that for some $h$ theory $\\mathsf{PV}_1$ does not prove $\\mathsf{UB}_{c,k}[h]$ (no matter the choice of $c$ ).", + "bbox": [ + 109, + 470, + 883, + 592 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "LEARN-uniform circuits. We will be interested in languages that can be efficiently learned with a bounded number of equivalence queries, in the following sense. For functions $s, q \\colon \\mathbb{N} \\to \\mathbb{N}$ , we say that a language $L \\subseteq \\{0,1\\}^*$ is in LEARN-uniform $^{\\mathsf{EQ}[q]}$ SIZE[s] if there is a polynomial-time algorithm $A^{\\mathsf{EQ}(L_n)}(1^n)$ that outputs a circuit of size at most $s(n)$ for $L_n$ after making at most $q(n)$ equivalence queries to $L_n$ , where $L_n = L \\cap \\{0,1\\}^n$ . 
The equivalence query oracle, given the description of an $n$ -bit circuit $D$ of size a most $s(n)$ , replies \"yes\" if $D$ computes $L_n$ , or provides some counter-example $w$ such that $D(w) \\neq L_n(w)$ .", + "bbox": [ + 111, + 608, + 883, + 712 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Extracting LEARN-uniform circuits from $\\mathsf{PV}_1$ proofs. For convenience, write $\\mathsf{UB}_{c,k}[h] = \\forall 1^n \\exists C_n \\forall x \\phi(1^n, C_n, x)$ in Equation (2), where $\\phi(1^n, C_n, x)$ is a quantifier-free formula. Since $\\mathsf{PV}_1$ is a universal theory, under the assumption that $\\mathsf{PV}_1 \\vdash \\mathsf{UB}_{c,k}[h]$ , we can apply Theorem 3.2 (KPT Witnessing Theorem) to obtain the provability in $\\mathsf{PV}_1$ of the disjunction", + "bbox": [ + 109, + 728, + 883, + 797 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\forall 1 ^ {n} \\forall x _ {1} \\dots \\forall x _ {k} (\\phi (1 ^ {n}, t _ {1} (1 ^ {n}), x _ {1}) \\vee \\phi (1 ^ {n}, t _ {2} (1 ^ {n}, x _ {1}), x _ {2}) \\vee \\dots \\vee \\phi (1 ^ {n}, t _ {k} (1 ^ {n}, x _ {1}, \\dots , x _ {k - 1}), x _ {k})) \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 119, + 808, + 883, + 837 + ], + "page_idx": 14 + }, + { + "type": "page_footnote", + "text": "10Recall that $1^n$ is simply a convenient notation to refer to a variable $n$ that is set to $|N|$ for some variable $N$ .", + "bbox": [ + 127, + 845, + 772, + 861 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 488, + 898, + 506, + 909 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "where $t_1, \\ldots, t_k$ are $\\mathcal{L}_{\\mathsf{PV}}$ -terms and $k = O(1)$ . Most importantly, due to the soundness of $\\mathsf{PV}_1$ , this statement is true over the standard model $\\mathbb{N}$ . Additionally, the terms in $\\mathsf{PV}_1$ correspond to polynomial-time algorithms. 
Next, we will discuss how to interpret Equation (3) over $\\mathbb{N}$ as an interactive protocol and how this perspective leads to a LEARN-uniform construction.", + "bbox": [ + 109, + 90, + 883, + 157 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "The KPT Witnessing Theorem can be intuitively understood as follows [KPS90]. Consider a search problem $Q(1^n)$ , where given the input $1^n$ , we need to find $D$ such that $\\forall x \\phi(1^n, D, x)$ . The problem $Q(1^n)$ can be solved using a $k$ -round Student-Teacher protocol. In the first round, the student proposes $D_1 = t_1(1^n)$ as a solution to the search problem $Q(1^n)$ . This solution is either correct, or there exists a counterexample $w_1$ such that $\\neg \\phi(1^n, t_1(1^n), w_1)$ . The teacher then provides this counterexample value $w_1$ , and the protocol moves to the next round. In each subsequent round $1 \\leq i < k$ , the student computes $D_i = t_i(1^n, w_1, \\ldots, w_{i-1})$ based on the counterexamples $w_1, \\ldots, w_{i-1}$ received in the previous rounds. This $D_i$ is either a correct solution for $Q(1^n)$ , in which case the problem is solved, or there is another counterexample $w_i$ provided by the teacher such that $\\neg \\phi(1^n, t_i(1^n, w_1, \\ldots, w_{i-1}), w_i)$ . If the latter is the case, the protocol continues to the next round $i + 1$ . The theorem guarantees that for every input $1^n$ , the student will successfully solve the search problem $Q(1^n)$ within some round $1 \\leq i \\leq k$ .", + "bbox": [ + 109, + 160, + 883, + 347 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "From a $\\mathrm{PV}_1$ proof of a circuit upper bound for a language $L_h$ , we can derive a Student-Teacher protocol for the search problem $Q(1^n)$ corresponding to Equation (3). In this protocol, the student proposes a candidate circuit $D$ , and the teacher provides a counterexample $w$ to $D$ (an input $w$ such that $D(w) \\neq L_h(w)$ ) if one exists. 
(Note that $\\phi(1^n, D, x)$ might not be true for other reasons, e.g., if $|D| > c \\cdot n^k$ , but in such cases there is no need to invoke the equivalence query oracle and we can proceed in the Student-Teacher protocol with, say, $w = 0^n$ .) The student is guaranteed to succeed after at most $k$ queries, regardless of the counterexamples provided by the teacher. Finally, for every input $n$ , the student computes according to a constant number of fixed $\\mathrm{PV}_1$ terms $t_1, \\ldots, t_k$ . Since a $\\mathrm{PV}_1$ term is merely a composition of a finite number of $\\mathrm{PV}_1$ function symbols (polynomial-time algorithms), the student's computation runs in polynomial time. Therefore, from the provability in $\\mathrm{PV}_1$ of a non-uniform circuit upper bound for a language $L \\in \\mathsf{P}$ , we can extract a LEARN-uniform family of circuits for $L$ .", + "bbox": [ + 109, + 348, + 883, + 537 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Unconditional lower bound against LEARN-uniform circuits. The argument described above reduces the unprovability of upper bounds to a complexity-theoretic question with no reference to logic. To complete the proof, it is enough to show that for each $k$ there is a language $L \\in \\mathbb{P}$ such that $L \\notin \\mathrm{LEARN-uniform}^{\\mathrm{EQ}[O(1)]} \\mathrm{SIZE}[O(n^{k})]$ . This unconditional lower bound against LEARN-uniform circuits is established in [CKKO21] by generalizing a lower bound from [SW14] against P-uniform circuits, which can be interpreted as LEARN-uniform constructions with $q = 0$ queries. 
Roughly speaking, [CKKO21] shows that one can eliminate each equivalence query using a small amount of non-uniform advice, and that the base case where no queries are present (as in [SW14]) can be extended to a lower bound against a bounded amount of advice.", + "bbox": [ + 109, + 553, + 883, + 707 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "This completes the sketch of the argument. The approach is fairly general and can be adapted to other theories. The strength of the theory affects the learning model against which one needs to obtain lower bounds (e.g., by increasing the number of queries or allowing randomized learners).", + "bbox": [ + 109, + 724, + 883, + 776 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Open Problem 5.1. Show that $\\mathsf{S}_2^1$ does not prove that $\\mathsf{P} \\subseteq \\mathsf{SIZE}[n^k]$ .", + "bbox": [ + 109, + 782, + 609, + 800 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "In order to solve Open Problem 5.1, using the connection from [CKKO21] it is sufficient to show that $\\mathsf{P} \\not\\subset \\mathsf{LEARN}\\text{-uniform}^{\\mathsf{EQ}[q]} \\mathsf{SIZE}[O(n^{k})]$ for $q = \\mathrm{poly}(n)$ . In other words, this amounts to understanding the class of languages that admit circuits that can be produced with a polynomial number of equivalence queries.", + "bbox": [ + 109, + 806, + 883, + 875 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 488, + 898, + 508, + 909 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Open Problem 5.2. 
Show that $\\mathsf{T}_2^1$ does not prove that $\\mathsf{NP} \\subseteq \\mathsf{SIZE}[n^k]$ .", + "bbox": [ + 111, + 90, + 625, + 109 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "5.1.2 $\\mathsf{P} = \\mathsf{NP}$ and Propositional Proof Complexity", + "text_level": 1, + "bbox": [ + 111, + 128, + 511, + 147 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Suppose that $\\mathsf{P}$ is actually equal to NP. In this scenario, there exists a polynomial-time algorithm $g$ (i.e., a $\\mathrm{PV}_1$ function symbol) that can find a satisfying assignment for any given satisfiable formula. In other words, if $\\operatorname{Formula}(F, 1^n)$ denotes an $\\mathcal{L}_{\\mathsf{PV}}$ -formula that checks if $F$ is a valid description of a formula over $n$ input bits, and $\\operatorname{Sat}(F, x)$ is an $\\mathcal{L}_{\\mathsf{PV}}$ -formula that checks if $x$ satisfies the formula encoded by $F$ , the sentence", + "bbox": [ + 111, + 155, + 885, + 224 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\varphi_ {\\mathrm {P} = \\mathrm {N P}} [ g ] \\triangleq \\forall 1 ^ {n} \\forall F \\forall x \\left(\\left(\\operatorname {F o r m u l a} (F, 1 ^ {n}) \\wedge \\operatorname {S a t} (F, x)\\right)\\rightarrow \\operatorname {S a t} (F, g (F))\\right) \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 223, + 234, + 883, + 255 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "is true in the standard model $\\mathbb{N}$ .", + "bbox": [ + 111, + 265, + 343, + 280 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Open Problem 5.3. 
Show that for no polynomial-time function symbol $g$ theory $\\mathrm{PV}_1$ proves the sentence $\\varphi_{\\mathrm{P} = \\mathrm{NP}}[g]$ .", + "bbox": [ + 111, + 292, + 885, + 328 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Equivalently, Open Problem 5.3 states that $\\mathsf{PV}_1$ (and by standard conservation results $S_2^1$ ) is consistent with $\\mathsf{P} \\neq \\mathsf{NP}$ . This means that either $\\mathsf{P} \\neq \\mathsf{NP}$ , as is commonly assumed, making the conjecture trivially true, or $\\mathsf{P} = \\mathsf{NP}$ , but this cannot be proven using only polynomial-time concepts and reasoning. Therefore, Open Problem 5.3 represents a formal weakening of the conjecture that $\\mathsf{P} \\neq \\mathsf{NP}$ . The statement is known to follow from the purely combinatorial conjecture that the extended Frege propositional proof system $e\\mathcal{F}$ (see Section 3.2) is not polynomially bounded, which is a major open problem in proof complexity.", + "bbox": [ + 111, + 335, + 883, + 441 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Theorem 5.4 ([Coo75]). Suppose that there is a sequence $\\{F_n\\}_{n\\geq 1}$ of propositional tautologies of size polynomial in $n$ that require eF proofs of size $n^{\\omega (1)}$ . Then there is no function symbol $g$ such that $\\mathsf{PV}_1$ proves $\\varphi_{\\mathsf{P} = \\mathsf{NP}}[g]$ .", + "bbox": [ + 109, + 449, + 883, + 502 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Proof. Here we only provide a sketch of the proof. More details and extensions of the result can be found in the textbooks [Kra95, Kra19]. We establish that if $\\mathsf{PV}_1 \\vdash \\varphi_{\\mathsf{P} = \\mathsf{NP}}[g]$ for some $g$ , then every tautology has a polynomial size $e\\mathcal{F}$ proof.", + "bbox": [ + 111, + 510, + 883, + 561 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Recall the definitions and results from Section 3.2. 
For a propositional proof system $P$ (described by an $\\mathcal{L}_{\\mathrm{PV}}$ function symbol), we consider an $\\mathcal{L}_{\\mathrm{PV}}$ -sentence stating the soundness of $P$ :", + "bbox": [ + 112, + 561, + 883, + 595 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\mathsf {S o u n d} _ {P} \\triangleq \\forall 1 ^ {n} \\forall F \\forall \\pi (\\mathsf {F o r m u l a} (F, 1 ^ {n}) \\land \\mathsf {P r o o f} _ {P} (F, \\pi)) \\to \\forall x (| x | \\leq n \\to \\mathsf {S a t} (F, x)),\n$$\n", + "text_format": "latex", + "bbox": [ + 176, + 607, + 818, + 627 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "where $\\operatorname{Proof}_P(F, \\pi)$ states that $\\pi$ is a valid $P$ -proof of $F$ .", + "bbox": [ + 111, + 637, + 529, + 654 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Note that if $F$ is not a tautology then $g(\\neg F)$ outputs a satisfying assignment of $\\neg F$ , while if $F$ is a tautology then $\\neg F$ admits no satisfying assignment. We consider a proof system $P_g$ defined as follows: Given a valid description of an $n$ -bit propositional formula $F$ and a candidate proof $\\widetilde{\\pi}$ , $P_g$ accepts $\\widetilde{\\pi}$ as a proof of $F$ if and only if", + "bbox": [ + 111, + 655, + 885, + 722 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\ng (\\neg F) = \\widetilde {\\pi} \\quad \\text {a n d} \\quad \\neg \\operatorname {S a t} (\\neg F, \\widetilde {\\pi}) ,\n$$\n", + "text_format": "latex", + "bbox": [ + 370, + 723, + 624, + 742 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "where $\\neg F$ represents the negation of $F$ . 
Observe that for any tautology $F$ , $\\pi_F \\triangleq g(\\neg F)$ is a valid $P_g$ -proof of $F$ .", + "bbox": [ + 111, + 747, + 885, + 780 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Note that $\\mathsf{PV}_1\\vdash \\mathsf{Sound}_{P_g}$ , which follows from the provability of Equation (4) and the definition of $P_{g}$ using $g$ . Now consider the quantifier-free $\\mathcal{L}_{\\mathsf{PV}}$ -formula", + "bbox": [ + 111, + 782, + 883, + 816 + ], + "page_idx": 16 + }, + { + "type": "equation", + "text": "\n$$\n\\psi \\triangleq \\neg \\operatorname {F o r m u l a} (F, 1 ^ {n}) \\vee \\neg \\operatorname {P r o o f} _ {P _ {g}} (F, \\pi) \\vee | x | > n \\vee \\operatorname {S a t} (F, x).\n$$\n", + "text_format": "latex", + "bbox": [ + 261, + 827, + 733, + 847 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "The provability of $\\forall 1^n\\forall F\\forall \\pi \\psi$ in $\\mathsf{PV}_1$ follows from the provability of $\\mathsf{Sound}_{P_g}$", + "bbox": [ + 111, + 857, + 697, + 876 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 488, + 898, + 509, + 910 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Using Cook's translation (Section 3.2), the sequence of propositional formulas $||\\psi||_m$ admits $e\\mathcal{F}$ -proofs of polynomial size. Moreover, given an actual $n$ -bit propositional formula $F$ of polynomial size and the corresponding $P_g$ -proof $\\pi_F$ (represented by fixed strings $\\langle F\\rangle$ and $\\langle \\pi_F\\rangle$ ), one can show that there are polynomial size $e\\mathcal{F}$ proofs of both $||\\mathrm{Formula}(\\langle F\\rangle,1^n)||_{\\mathrm{poly}(n)}$ and $||\\mathrm{Proof}_{P_g}(\\langle F\\rangle,\\langle \\pi_F\\rangle)||_{\\mathrm{poly}(n)}$ . (Intuitively, this follows by an evaluation of the expressions on these fixed inputs.) 
Since $e\\mathcal{F}$ is closed under substitution, we can derive in $e\\mathcal{F}$ with a polynomial size proof the formula $||\\mathrm{Sat}(\\langle F\\rangle,x)||_{\\mathrm{poly}(n)}$ .", + "bbox": [ + 109, + 90, + 885, + 195 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Finally, for every propositional formula $F(x)$ on $n$ -bit inputs, it is possible to efficiently prove in $e\\mathcal{F}$ the propositional formula $||\\mathrm{Sat}(\\langle F\\rangle ,x)||_{\\mathrm{poly}(n)}\\to F(x)$ . (This can be established by a slightly more general structural induction on formulas $F$ using information about $||\\cdot||$ and $\\langle \\cdot \\rangle$ .) Overall, since $e\\mathcal{F}$ is closed under implication, it follows from these derivations that there is a polynomial size $e\\mathcal{F}$ proof of $F$ . This completes the sketch of the proof of the result.", + "bbox": [ + 109, + 196, + 885, + 280 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Open Problem 5.3 would also follow from a proof that Buss's hierarchy of theories $\\mathsf{T}_2^i$ does not collapse [KPT91], another central problem in bounded arithmetic. More precisely, it is enough to obtain the following separation.", + "bbox": [ + 109, + 290, + 885, + 340 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Open Problem 5.5. Show that for some $i > j \\geq 1$ we have $\\mathsf{T}_2^i \\neq \\mathsf{T}_2^j$ .", + "bbox": [ + 109, + 352, + 617, + 372 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "It is known that $\\mathrm{PV}_1$ proves that $\\mathsf{P} = \\mathsf{NP}$ if and only if it proves that $\\mathsf{NP} = \\mathsf{coNP}$ . 
Consequently, a super-polynomial lower bound on the length of $e\\mathcal{F}$ proofs also yields the consistency of $\\mathsf{NP} \\neq \\mathsf{coNP}$ with $\\mathrm{PV}_1$ .", + "bbox": [ + 109, + 381, + 883, + 431 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Finally, we remark that the use of witnessing theorems alone (as done in Section 5.1.1) is probably not sufficient to settle Open Problem 5.3. This is because these theorems typically also hold when we extend the theory with all true universal statements. Thus an unprovability argument that only employs the witnessing theorem would establish unconditionally that each sentence $\\varphi_{\\mathsf{P} = \\mathsf{NP}}[g]$ is false and therefore $\\mathsf{P}\\neq \\mathsf{NP}$ . Some researchers interpret this as evidence that the investigation of propositional proof complexity might be unavoidable. Another approach to Open Problem 5.3 is discussed in Section 5.3.", + "bbox": [ + 109, + 433, + 883, + 537 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "5.2 Unprovability of Lower Bounds", + "text_level": 1, + "bbox": [ + 109, + 556, + 428, + 573 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "5.2.1 Average-Case Circuit Lower Bounds", + "text_level": 1, + "bbox": [ + 109, + 583, + 450, + 599 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "In this section, we discuss the unprovability of strong average-case lower bounds in $\\mathrm{PV}_1$ . We focus on an unprovability result from [PS21], stated and proved in a slightly stronger form in [LO23]. The proof is based on a technique introduced by [Kra11] and further explored in [Pic15a].", + "bbox": [ + 109, + 609, + 883, + 660 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "We consider an average-case separation of co-nondeterministic circuits against non-deterministic circuits of subexponential size. 
In more detail, we investigate the provability of a sentence $\\mathsf{LB}^1 (s_1,s_2,m,n_0)$ stating that, for every input length $n\\geq n_0$ , there is a co-nondeterministic circuit $C$ of size $\\leq s_{1}(n)$ such that, for every nondeterministic circuit $D$ of size $\\leq s_2(n)$ , we have", + "bbox": [ + 109, + 661, + 883, + 729 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname * {P r} _ {x \\sim \\{0, 1 \\} ^ {n}} \\Big [ C (x) = D (x) \\Big ] \\leq 1 - \\frac {m (n)}{2 ^ {n}}.\n$$\n", + "text_format": "latex", + "bbox": [ + 346, + 741, + 648, + 777 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Let $\\mathrm{coNSIZE}[s(n)]$ and $\\mathrm{NSIZE}[s(n)]$ refer to co-nondeterministic circuits and nondeterministic circuits of size $s(n)$ , respectively. More formally, $\\mathrm{LB}^1(s_1, s_2, m, n_0)$ is an $\\mathcal{L}_{\\mathrm{PV}}$ -sentence capturing the following lower", + "bbox": [ + 109, + 789, + 885, + 824 + ], + "page_idx": 17 + }, + { + "type": "page_footnote", + "text": "11Due to space constraints, we do not elaborate on the formalization of NP = coNP.", + "bbox": [ + 127, + 833, + 630, + 849 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 898, + 508, + 909 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "bound statement:", + "bbox": [ + 112, + 92, + 240, + 104 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\forall n \\in \\operatorname {L o g L o g} \\text {w i t h} n \\geq n _ {0} \\exists C \\in \\operatorname {c o N S I Z E} \\left[ s _ {1} (n) \\right] \\forall D \\in \\operatorname {N S I Z E} \\left[ s _ {2} (n) \\right]\n$$\n", + "text_format": "latex", + "bbox": [ + 204, + 121, + 720, + 140 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\exists m = m (n) \\text {d i s t i n c t} x ^ {1}, \\dots , x ^ {m} \\text {s . t . 
E r r o r} (C, D, x ^ {i}) \\text {f o r a l l} i \\in [ m ],\n$$\n", + "text_format": "latex", + "bbox": [ + 204, + 143, + 787, + 162 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "where $\\operatorname{Error}(C, D, x)$ means that the circuits $C$ and $D$ disagree on the input $x$ . This statement can be seen as an average-case form of the coNP $\\nsubseteq \\mathsf{NP} / \\mathsf{poly}$ conjecture if we let $s_1(n) = n^{O(1)}$ , $s_2(n) = n^{\\omega(1)}$ , and $m(n) = 2^n / n$ . (Note that we consider in this section a LogLog formalization, according to the notation explained in Section 4.1.)", + "bbox": [ + 111, + 175, + 885, + 243 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Theorem 5.6 ([PS21, LO23]). Let $d \\geq 1$ , $\\delta > 0$ , and $n_0 \\geq 1$ be arbitrary parameters, and let $s_1(n) = n^d$ , $s_2(n) = 2^{n^\\delta}$ , and $m(n) = 2^n / n$ . Then $\\mathsf{PV}_1$ does not prove the sentence $\\mathsf{LB}^1(s_1, s_2, m, n_0)$ .", + "bbox": [ + 111, + 253, + 883, + 292 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "In the remainder of this section, we provide some intuition about the proof of this result.", + "bbox": [ + 138, + 301, + 772, + 318 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Overview of the Argument. Suppose, towards a contradiction, that $\\mathsf{PV}_1\\vdash \\mathsf{LB}^1 (s_1,s_2,m,n_0)$ with parameters as above. The central idea of the argument is that establishing a strong complexity lower bound within bounded arithmetic leads to a corresponding complexity upper bound. These lower and upper bounds contradict each other. Consequently, this contradiction implies the unprovability of the lower bound statement. 
In a bit more detail, the argument proceeds as follows:", + "bbox": [ + 111, + 335, + 883, + 421 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(i) The provability of the average-case lower bound sentence $\\mathsf{LB}^1 (s_1,s_2,m,n_0)$ implies the provability of a worst-case lower bound for coNSIZE $[n^d]$ against NSIZE $[2^{n^\\delta}]$ . We formalize the latter by a sentence $\\mathsf{LB}_{\\mathrm{wst}}^1 (s_1,s_2,n_0)$ .", + "(ii) Given any proof of $\\mathsf{LB}_{\\mathsf{wst}}^{1}(s_{1}, s_{2}, n_{0})$ in $\\mathsf{PV}_1$ , we extract a complexity upper bound for an arbitrary co-nondeterministic circuit $E_{m}(x)$ over an input $x$ of length $m$ and of size at most $\\mathrm{poly}(m)$ . More precisely, we show that there is a deterministic circuit $B_{m}$ of size $\\leq 2^{m^{o(1)}}$ such that $\\operatorname{Pr}_{x \\sim \\{0,1\\}^m}[E_m(x) = B_m(x)] \\geq 1/2 + 2^{-m^{o(1)}}$ .", + "(iii) We invoke an existing hardness amplification result to conclude that, on any large enough input length $n$ , every co-nondeterministic circuit $C_n$ of size $\\leq n^d$ agrees with some nondeterministic circuit $D_n$ of size $\\leq 2^{n^\\delta}$ on more than a $1 - 1/n$ fraction of the inputs." + ], + "bbox": [ + 120, + 431, + 885, + 631 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Since $\\mathsf{PV}_1$ is a sound theory, i.e., every theorem of $\\mathsf{PV}_1$ is a true sentence, Item (iii) is in contradiction with the complexity lower bound stated in $\\mathsf{LB}^1(s_1, s_2, m, n_0)$ . Consequently, $\\mathsf{PV}_1$ does not prove this sentence.", + "bbox": [ + 111, + 641, + 883, + 676 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "The most interesting step of the argument is the proof of Item (ii). 
The key point is that the proof of a lower bound in $\\mathrm{PV}_1$ must be somewhat constructive, in the sense that it not only shows that every small circuit $D$ fails to solve the problem but also produces a string $w$ witnessing this fact. Below we give a simple example of its usefulness, showing a setting where a constructive lower bound yields an upper bound. Note that the application of a witnessing theorem to a LogLog formalization provides algorithms running in time poly $(2^n)$ . The example provided next shows that this is still useful.", + "bbox": [ + 111, + 693, + 885, + 796 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Lemma 5.7 ([CLO24a]). Let $L \\in \\mathsf{NP}$ . Suppose that there is a uniform algorithm $R(1^n, D)$ such that, for every co-nondeterministic circuit $D$ on $n$ input variables and of size at most $n^{\\log n}$ , $R(1^n, D)$ runs in time $2^{O(n)}$ and outputs a string $w \\in \\{0, 1\\}^n$ such that $D(w) \\neq L(w)$ . Then, for every language $L' \\in \\mathsf{NP}$ and for every constant $\\varepsilon > 0$ , we have $L' \\in \\mathsf{DTIME}[2^{n^\\varepsilon}]$ .", + "bbox": [ + 109, + 806, + 885, + 876 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 488, + 898, + 509, + 909 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Proof. Suppose that $L \\in \\mathsf{NTIME}[n^d]$ for some $d \\in \\mathbb{N}$ . Let $M'$ be a nondeterministic machine that decides $L'$ and runs in time at most $n^{c'}$ , where $c' \\in \\mathbb{N}$ . Let $\\varepsilon > 0$ be an arbitrary constant. Let $\\gamma = \\gamma(d, \\varepsilon) > 0$ be a small enough constant to be defined later. Finally, let $R$ be the algorithm provided by the hypothesis of the lemma. 
We show that the following deterministic algorithm $B^{\\gamma}(x)$ decides $L'$ in time $O(2^{n^{\\varepsilon}})$ :", + "bbox": [ + 111, + 90, + 887, + 161 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Input: $x \\in \\{0,1\\}^n$ for some $n \\geq 1$ .", + "bbox": [ + 140, + 178, + 403, + 195 + ], + "page_idx": 19 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1 Compute the description of a co-nondeterministic circuit $E'$ of size at most $n^{2c'}$ that decides the complement of $L'$ ;", + "// In other words, $E'(u) = 1 - L'(u)$ for every string $u \\in \\{0,1\\}^n$ .", + "2 Produce the description of a co-nondeterministic circuit $D_{x}(y)$ , where $y \\in \\{0,1\\}^{n^{\\gamma}}$ , such that $D_{x}(y)$ ignores its input $y$ and computes according to $E'(x)$ ;", + "// While the length of $y$ is smaller than the length of $u$ , $D_x$ and $E'$ share the same nondeterministic input string, and $E'$ sets $u$ to be the fixed string $x$ .", + "3 Compute $w = R(1^{n^{\\gamma}}, D_x) \\in \\{0, 1\\}^{n^{\\gamma}}$ ;", + "4 Determine the bit $b = L(w)$ by a brute force computation, then return $b$ ;" + ], + "bbox": [ + 125, + 196, + 834, + 367 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Algorithm 2: Algorithm $B^{\\gamma}(x)$ for deciding language $L'$ .", + "bbox": [ + 259, + 372, + 679, + 388 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "First, we argue that $B^{\\gamma}$ decides $L'$ . Since $D_x$ is a co-nondeterministic circuit over inputs of length $m \\triangleq n^{\\gamma}$ and has size at most $n^{2c'} = m^{2c'/\\gamma} \\leq m^{\\log m}$ (for a large enough $m$ ), $R(1^{n^{\\gamma}}, D_x)$ outputs a string $w \\in \\{0, 1\\}^{n^{\\gamma}}$ such that $L(w) = 1 - D_x(w)$ . 
Consequently,", + "bbox": [ + 111, + 398, + 885, + 450 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\nb = L (w) = 1 - D _ {x} (w) = 1 - E ^ {\\prime} (x) = 1 - \\left(1 - L ^ {\\prime} (x)\\right) = L ^ {\\prime} (x),\n$$\n", + "text_format": "latex", + "bbox": [ + 250, + 462, + 743, + 479 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "i.e., the output bit of $B^{\\gamma}(x)$ is correct.", + "bbox": [ + 111, + 494, + 388, + 508 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Next, we argue that $B^{\\gamma}$ runs in time at most $O(2^{n^{\\varepsilon}})$ . Clearly, Steps 1-2 run in $\\mathrm{poly}(n)$ time. Moreover, Step 3 runs in time $2^{O(n^{\\gamma})}$ under the assumption on the running time of $R(1^{n^{\\gamma}}, D_x)$ . This is at most $2^{n^{\\varepsilon}}$ if we set $\\gamma \\leq \\varepsilon / 2$ . Finally, since $L \\in \\mathsf{NTIME}[n^d]$ , the brute force computation in Step 4 can be performed in deterministic time $2^{O(\\ell^d)}$ over an input of length $\\ell$ . Since $\\ell = n^{\\gamma} = |w|$ in our case, if $\\gamma \\leq \\varepsilon / 2d$ we get that Step 4 runs in time at most $2^{n^{\\varepsilon}}$ . Overall, if we set $\\gamma \\triangleq \\varepsilon / 2d$ , it follows that $B^{\\gamma}$ runs in time at most $O(2^{n^{\\varepsilon}})$ . This completes the proof that $L' \\in \\mathsf{DTIME}[2^{n^{\\varepsilon}}]$ .", + "bbox": [ + 111, + 511, + 887, + 614 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "The proof of Item (ii) is significantly more sophisticated, since one does not get an algorithm $R$ as above from a $\\mathrm{PV}_1$ proof of the lower bound sentence $\\mathsf{LB}^1(s_1, s_2, m, n_0)$ . 
The argument combines a witnessing theorem for sentences with more than four quantifier alternations and an ingenious technique from [Kra11] that relies on ideas from the theory of computational pseudorandomness.", + "bbox": [ + 111, + 627, + 887, + 696 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Open Problem 5.8. Strengthen the unprovability result from Theorem 5.6 in the following directions:", + "bbox": [ + 111, + 705, + 844, + 723 + ], + "page_idx": 19 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(a) show that it holds in the polynomial size regime, i.e., with $s_1(n) = n^a$ and for some $s_2(n) = n^b$ ;", + "(b) establish the unprovability of worst-case lower bounds against nondeterministic circuits;", + "(c) show the unprovability of average-case lower bounds against deterministic circuits;", + "(d) establish the same result with respect to a stronger theory." + ], + "bbox": [ + 125, + 734, + 846, + 835 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "We refer to [LO23, CLO24a, CLO24b] for some related results and partial progress.", + "bbox": [ + 111, + 848, + 717, + 864 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 898, + 508, + 909 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "5.2.2 Extended Frege Lower Bounds", + "text_level": 1, + "bbox": [ + 112, + 90, + 408, + 107 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "This section covers a result on the unprovability of super-polynomial size extended Frege $(e\\mathcal{F})$ lower bounds in $\\mathrm{PV}_1$ [KP89] (see also [CU93, Bus90]). We refer to Section 3.2 for the necessary background. We will also need the definitions and results from Section 3.3.", + "bbox": [ + 109, + 118, + 883, + 167 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "We adapt the presentation from [Kra19]. Consider the theory $\\mathsf{PV}_1$ and its language $\\mathcal{L}_{\\mathsf{PV}}$ . 
We shall use the following $\\mathcal{L}_{\\mathsf{PV}}$ formulas:", + "bbox": [ + 109, + 169, + 883, + 203 + ], + "page_idx": 20 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- $\\operatorname{Sat}(x, y)$ : a quantifier-free formula formalizing that $y$ is a satisfying assignment of the Boolean formula $x$ ;", + "- $\\operatorname{Taut}(x) \\triangleq \\forall y \\leq x \\operatorname{Sat}(x, y)$ ;", + "- $\\operatorname{Proof}_P(x, z)$ : a quantifier-free formula formalizing that $z$ is a $P$ -proof of $x$" + ], + "bbox": [ + 140, + 213, + 879, + 301 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "The following lemma is central to the unprovability result.", + "bbox": [ + 138, + 313, + 560, + 330 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Lemma 5.9. Let $M \\models \\mathsf{PV}_1$ , and assume that $\\phi \\in M$ is a propositional formula. The following statements are equivalent:", + "bbox": [ + 109, + 340, + 883, + 375 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "(i) There is no eF-proof of $\\phi$ in $M$ :", + "bbox": [ + 130, + 385, + 393, + 401 + ], + "page_idx": 20 + }, + { + "type": "equation", + "text": "\n$$\nM \\models \\forall z \\neg \\operatorname {P r o o f} _ {e \\mathcal {F}} (\\phi , z).\n$$\n", + "text_format": "latex", + "bbox": [ + 421, + 414, + 616, + 431 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "(ii) There is an extension $M^{\\prime}\\supseteq M$ (also a model of $\\mathsf{PV}_1$ ) in which $\\phi$ is falsified:", + "bbox": [ + 125, + 448, + 709, + 465 + ], + "page_idx": 20 + }, + { + "type": "equation", + "text": "\n$$\nM ^ {\\prime} \\vDash \\exists y \\operatorname {S a t} (\\neg \\phi , y).\n$$\n", + "text_format": "latex", + "bbox": [ + 437, + 477, + 601, + 496 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "The proof of Lemma 5.9 proceeds by compactness and uses that the correctness of the propositional translation from $\\mathsf{PV}_1$ to $e\\mathcal{F}$ 
(Section 3.2) is also provable in $\\mathsf{PV}_1$ .", + "bbox": [ + 109, + 513, + 883, + 547 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Lemma 5.10. Let $M$ be a nonstandard countable model of $\\mathsf{PV}_1$ . Then it has a cofinal extension $M' \\supseteq_{\\mathrm{cf}} M$ (also a model of $\\mathsf{PV}_1$ ) such that every tautology in $M'$ has an eF-proof in $M'$ .", + "bbox": [ + 109, + 556, + 883, + 592 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "The proof of Lemma 5.10 iterates Lemma 5.9 while taking cuts to ensure that the limit extension $M' = \\bigcup_{i} M_i$ (where $M_0 = M$ ) is cofinal in $M$ . Since each $M_i \\models \\mathsf{PV}_1$ and $\\mathsf{PV}_1$ is universal, we also have $M' \\models \\mathsf{PV}_1$ .", + "bbox": [ + 109, + 601, + 883, + 651 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "We will need the following analogue of Lemma 3.6 for $\\mathsf{PV}_1$ .", + "bbox": [ + 140, + 652, + 576, + 669 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Fact 5.11. Let $M_0$ be a nonstandard countable model of $\\mathsf{PV}_1$ . Then there is a (countable) cut $M$ of $M_0$ that is a (nonstandard) model of $\\mathsf{PV}_1$ and a length $n \\in M$ , where $n = |a|$ for some nonstandard $a \\in M$ , such that for every $b \\in M$ we have $M \\models |b| \\leq n^k$ for some standard number $k$ .", + "bbox": [ + 109, + 680, + 883, + 731 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "The next result is a consequence of the existence of nonstandard countable models, Fact 5.11, and Lemma 5.10.", + "bbox": [ + 109, + 742, + 883, + 773 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Lemma 5.12. 
There is a model $M^{*}$ of $\\mathsf{PV}_1$ such that the following properties hold:", + "bbox": [ + 109, + 785, + 712, + 801 + ], + "page_idx": 20 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(i) Any tautology in $M^{*}$ has an eF-proof in $M^{*}$", + "(ii) There is a nonstandard element $a \\in M^*$ of length $n \\triangleq |a|$ such that for any element $b \\in M^*$ there is a standard number $k$ such that $M^* \\models |b| \\leq n^k$ ." + ], + "bbox": [ + 125, + 813, + 880, + 875 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 898, + 506, + 909 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Theorem 5.13 (Unprovability of super-polynomial size $e\\mathcal{F}$ lower bounds in $\\mathrm{PV}_1$ [KP89]). Consider the sentence", + "bbox": [ + 111, + 90, + 887, + 125 + ], + "page_idx": 21 + }, + { + "type": "equation", + "text": "\n$$\n\\Psi_ {e \\mathcal {F}} \\triangleq \\forall x \\exists \\phi \\geq x [ \\operatorname {T a u t} (\\phi) \\wedge \\forall \\pi (| \\pi | \\leq | \\phi | \\# | \\phi | \\rightarrow \\neg \\operatorname {P r o o f} _ {e \\mathcal {F}} (\\phi , \\pi)) ]. ^ {1 2}\n$$\n", + "text_format": "latex", + "bbox": [ + 228, + 125, + 771, + 143 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "The sentence $\\Psi_{e\\mathcal{F}}$ is not provable in $\\mathsf{PV}_1$ .", + "bbox": [ + 112, + 151, + 416, + 167 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Proof. Suppose $\\mathsf{PV}_1 \\vdash \\Psi_{e\\mathcal{F}}$ . Let $M^*$ , $a$ , and $n \\triangleq |a|$ be as in Lemma 5.12. Since $\\Psi_{e\\mathcal{F}}$ holds in $M^*$ , there is a tautology $\\phi \\in M^*$ with $\\phi \\geq a$ and consequently $|\\phi| \\geq n$ such that $\\phi$ does not have an $e\\mathcal{F}$ -proof of size $|\\phi|\\# |\\phi|$ in $M^*$ . 
On the other hand, by the two properties of $M^*$ given by Lemma 5.12, the formula $\\phi$ has an $e\\mathcal{F}$ -proof of size at most $n^k$ for some standard number $k$ . Finally, since the element $a$ is nonstandard, we have $n^k \\leq n\\# n \\leq |\\phi|\\# |\\phi|$ in $M^\\star$ . This contradiction implies that $\\mathsf{PV}_1$ does not prove $\\Psi_{e\\mathcal{F}}$ .", + "bbox": [ + 111, + 178, + 885, + 266 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Open Problem 5.14. Show that $\\mathsf{PV}_1$ cannot prove fixed-polynomial size lower bounds on the length of $e\\mathcal{F}$ proofs.", + "bbox": [ + 109, + 275, + 885, + 310 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Open Problem 5.15. Establish the unprovability of the sentence $\\Psi_{e\\mathcal{F}}$ in theory $\\mathsf{S}_2^1$ .", + "bbox": [ + 111, + 320, + 712, + 339 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "5.3 Connection Between Upper Bounds and Lower Bounds", + "text_level": 1, + "bbox": [ + 111, + 357, + 622, + 376 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "In this section, we explain a result from [BKO20] showing that the unprovability of $\\mathsf{P} = \\mathsf{NP}$ (Open Problem 5.3) is related to the unprovability of circuit lower bounds. For a $\\mathsf{PV}_1$ function symbol $h$ and a circuit size parameter $k\\in \\mathbb{N}$ , consider the sentence", + "bbox": [ + 111, + 385, + 885, + 436 + ], + "page_idx": 21 + }, + { + "type": "equation", + "text": "\n$$\n\\mathsf {L B} _ {k} ^ {a. e.} (h) \\triangleq \\neg \\mathsf {U B} _ {k} ^ {i. o.} [ h ],\n$$\n", + "text_format": "latex", + "bbox": [ + 401, + 446, + 596, + 468 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "where $\\mathsf{UB}_k^{i.o.}[h]$ is the sentence defined in Section 5.1.1. 
The sentence $\\mathsf{LB}_k^{a.e.}(h)$ states that the language defined by $h$ is hard on input length $n$ for circuits of size $n^k$ whenever $n$ is sufficiently large.", + "bbox": [ + 111, + 479, + 883, + 513 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Theorem 5.16 (Unprovability of $\\mathsf{P} = \\mathsf{NP}$ in $\\mathsf{PV}_1$ from the unprovability of lower bounds in $\\mathsf{PV}_1$ [BKO20]). If there exists $k\\in \\mathbb{N}$ such that for no function symbol $h$ theory $\\mathsf{PV}_1$ proves the sentence $\\mathsf{LB}_k^{a.e.}(h)$ , then for no function symbol $f$ theory $\\mathsf{PV}_1$ proves the sentence $\\varphi_{\\mathsf{P} = \\mathsf{NP}}(f)$ .", + "bbox": [ + 111, + 525, + 885, + 578 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Theorem 5.16 shows that if $\\mathrm{PV}_1$ does not prove $n^k$ -size lower bounds for a language in $\\mathsf{P}$ , then $\\mathsf{P} \\neq \\mathsf{NP}$ is consistent with $\\mathrm{PV}_1$ . Note that the hypothesis of Theorem 5.16 is weaker than the assumption that $\\mathrm{PV}_1$ does not prove that $\\mathsf{NP} \\not\\subsetneq \\mathsf{SIZE}[n^k]$ for some $k$ .", + "bbox": [ + 111, + 587, + 883, + 640 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Sketch of the proof of Theorem 5.16. We proceed in the contrapositive. We formalize in $\\mathsf{PV}_1$ the result that if $\\mathsf{P} = \\mathsf{NP}$ , then for any parameter $k$ , $\\mathsf{P} \\not\\subsetneq$ i.o. $\\mathsf{SIZE}[n^k]$ (see, e.g., [Lip94, Theorem 3]). This result combines the collapse of $\\mathsf{PH}$ to $\\mathsf{P}$ with Kannan's argument [Kan82] that $\\mathsf{PH}$ can define languages that are almost-everywhere hard against circuits of fixed-polynomial size. Typically, proving this claim requires showing the existence of a truth table of size $2^n$ that is hard against circuits of size $n^k$ . 
However, this result might not be provable in $\\mathsf{PV}_1$ .", + "bbox": [ + 111, + 648, + 885, + 751 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "We address this issue as follows. From the provability in $\\mathsf{PV}_1$ that $\\mathsf{P} = \\mathsf{NP}$ , it follows that for each $i\\geq 1$ theory $\\mathsf{T}_2^i$ collapses to $\\mathsf{PV}_1$ [KPT91]. Recall that the dual weak pigeonhole principle (dWPHP) for $\\mathcal{L}_{\\mathsf{PV}}$ -functions is provable in $\\mathsf{T}_2^2$ . Define a $\\mathsf{PV}_1$ function symbol $g$ that takes as input a circuit $C$ of size $n^k$ and outputs the lexicographic first $n^{k + 1}$ bits of the truth table computed by $C$ . From dWPHP $(g)$ , we now", + "bbox": [ + 111, + 752, + 885, + 821 + ], + "page_idx": 21 + }, + { + "type": "page_footnote", + "text": "12 Recall from Section 2.1 that $x \\# y \\triangleq 2^{|x| \\cdot |y|}$ . Consequently, if we let $n = |\\phi|$ , then the bound $|\\pi| \\leq |\\phi| \\# |\\phi|$ translates to $|\\pi| \\leq n \\# n$ , where $n \\# n = 2^{|n| \\cdot |n|}$ is of order $n^{\\log n}$ . The proof of Theorem 5.13 works with any reasonable formalization that refers to a super-polynomial size bound.", + "bbox": [ + 111, + 830, + 885, + 875 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 898, + 509, + 909 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "derive in $\\mathsf{PV}_1$ that the prefix of some truth table is not computable by circuits of size $n^k$ , if $n$ is sufficiently large. 
We can implicitly extend this truth table prefix with zeroes and use the resulting truth table to define a $\\mathsf{PV}_1$ -formula $\\varphi(x)$ with a constant number of bounded quantifiers that defines a language $L$ that is hard against circuits of size $n^k$ , where the hardness is provable in $\\mathsf{PV}_1$ .", + "bbox": [ + 109, + 90, + 883, + 157 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Given that the provability in $\\mathsf{PV}_1$ that $\\mathsf{P} = \\mathsf{NP}$ implies the provability in $\\mathsf{PV}_1$ that $\\mathsf{PH}$ collapses to $\\mathsf{P}$ , it follows that $\\varphi(x)$ is equivalent in $\\mathsf{PV}_1$ to the language defined by some $\\mathcal{L}_{\\mathsf{PV}}$ -function $h$ . In other words, $\\mathsf{PV}_1 \\vdash \\mathsf{LB}_k^{a.e.}(h)$ , which completes the proof of Theorem 5.16.", + "bbox": [ + 109, + 159, + 883, + 210 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "[CLO24b] shows an example of a simple lower bound that is not provable in $\\mathrm{PV}_1$ , under a plausible cryptographic assumption. This indicates that Theorem 5.16 might offer a viable approach towards a solution to Open Problem 5.3.", + "bbox": [ + 109, + 222, + 883, + 272 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "6 Additional Recent Developments", + "text_level": 1, + "bbox": [ + 109, + 297, + 480, + 319 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "The provability of the dual Weak Pigeonhole Principle (dWPHP) for polynomial-time functions is closely related to the provability of exponential circuit lower bounds for a language in deterministic exponential time [Jef07]. [Kra21] showed that dWPHP cannot be proved in $\\mathsf{PV}_1$ under the assumption that $\\mathsf{P} \\subseteq \\mathsf{SIZE}[n^k]$ for some constant $k$ . 
[ILW23] established the same unprovability result assuming subexponentially secure indistinguishability obfuscation and coNP $\\not\\subset$ i.o.AM.", + "bbox": [ + 109, + 332, + 883, + 416 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "[ABM23] established the unprovability of NEXP $\\subseteq$ SIZE[poly] in the theory of bounded arithmetic $V_2^0$ (not covered in this survey). Interestingly, their approach does not employ a witnessing theorem. It proceeds instead by simulating a comprehension axiom scheme assuming the provability of the upper bound sentence, eventually relying on an existing lower bound on the provability of the pigeonhole principle.", + "bbox": [ + 109, + 417, + 883, + 486 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "[CLO24b] systematically investigates the reverse mathematics of complexity lower bounds. They demonstrated that various lower bound statements in communication complexity, error-correcting codes, and for Turing machines are equivalent to well-studied combinatorial principles, such as the weak pigeon-hole principle for polynomial-time functions and its variants. Consequently, complexity lower bounds can be regarded as fundamental axioms with significant implications. They use these equivalences to derive conditional results on the unprovability of simple lower bounds in $\\mathsf{APC}_1$ .", + "bbox": [ + 109, + 486, + 883, + 588 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "$\\left[\\mathrm{CKK}^{+}24\\right]$ investigates the provability of the circuit size hierarchy in bounded arithmetic, captured by a sentence CSH stating that for each $n \\geq n_0$ , there is a circuit of size $n^a$ that does not admit an equivalent circuit of size $n^b$ , where $a > b > 1$ and $n_0$ are fixed. 
They showed that CSH is provable in $\\mathsf{T}_2^2$ , while its provability in $\\mathsf{T}_2^1$ implies that $\\mathsf{P}^{\\mathsf{NP}} \\not\\subsetneq \\mathsf{SIZE}[n^{1 + \\varepsilon}]$ for some $\\varepsilon > 0$ . Thus a better proof complexity upper bound for the circuit size hierarchy yields new circuit lower bounds.", + "bbox": [ + 109, + 588, + 883, + 672 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "[CRT24] establishes the unprovability of NP $\\neq$ PSPACE in $\\mathsf{APC}_1$ (with a LogLog formalization) under a strong average-case hardness assumption.", + "bbox": [ + 109, + 674, + 883, + 708 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "[Kra24] offers a comprehensive reference on proof complexity generators, whose investigation is closely related to dWPHP and its provability in bounded arithmetic. The theory of proof complexity generators offers tautologies that serve as potential candidates for demonstrating super-polynomial extended Frege lower bounds and consequently the unprovability of $\\mathsf{P} = \\mathsf{NP}$ in $\\mathsf{PV}_1$ .", + "bbox": [ + 109, + 709, + 883, + 776 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "We have not covered a number of results connected to the meta-mathematics of complexity lower bounds developed in the context of propositional proof complexity (see, e.g., [Raz15, Kra19, AR23, Kra24] and references therein). 
It is worth noting that results on the non-automatability of weak proof systems such as [AM20, dRGN $^{+}$ 21] were made possible thanks to the investigation of the meta-mathematics of proof complexity.", + "bbox": [ + 109, + 777, + 883, + 863 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 488, + 898, + 508, + 909 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Finally, several other recent papers have investigated directions connected to bounded arithmetic and the meta-mathematics of complexity theory, e.g., [PS22, Kha22, PS23, AKPS24, LLR24]. Due to space constraints, we are not able to cover all recent developments in this survey.", + "bbox": [ + 109, + 90, + 887, + 143 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Acknowledgements. I would like to thank Noel Arteche, Jinqiao Hu, Jan Krajicek, Moritz Müller, Mykyta Narusevych, Ján Pich, and Dimitrios Tsintsilidas for their valuable comments and feedback on an earlier version of this survey. This work received support from the Royal Society University Research Fellowship URF\\R1\\191059; the UKRI Frontier Research Guarantee EP/Y007999/1; and the Centre for Discrete Mathematics and its Applications (DIMAP) at the University of Warwick.", + "bbox": [ + 109, + 156, + 883, + 233 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 114, + 255, + 225, + 273 + ], + "page_idx": 23 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[AB09] Sanjeev Arora and Boaz Barak. Computational Complexity - A Modern Approach. Cambridge University Press, 2009.", + "[ABM23] Albert Atserias, Samuel R. Buss, and Moritz Müller. On the consistency of circuit lower bounds for non-deterministic time. In Symposium on Theory of Computing (STOC), pages 1257-1270, 2023.", + "[AKPS24] Noel Arteche, Erfan Khaniki, Jan Pich, and Rahul Santhanam. 
From proof complexity to circuit complexity via interactive protocols. In International Colloquium on Automata, Languages, and Programming (ICALP), 2024.", + "[AM20] Albert Atserias and Moritz Müller. Automating resolution is NP-hard. J. ACM, 67(5):31:1-31:17, 2020.", + "[AR23] Per Austrin and Kilian Risse. Sum-of-squares lower bounds for the minimum circuit size problem. In Computational Complexity Conference (CCC), pages 31:1-31:21, 2023.", + "[AW09] Scott Aaronson and Avi Wigderson. Algebraization: A new barrier in complexity theory. Transactions on Computation Theory (TOCT), 1(1), 2009.", + "[Bey09] Olaf Beyersdorff. On the correspondence between arithmetic theories and propositional proof systems – a survey. Mathematical Logic Quarterly, 55(2):116–137, 2009.", + "[BGS75] Theodore P. Baker, John Gill, and Robert Solovay. Relativizatons of the $\\mathsf{P} = ?$ NP Question. SIAM J. Comput., 4(4):431-442, 1975.", + "[BKKK20] Sam R. Buss, Valentine Kabanets, Antonina Kolokolova, and Michal Koucký. Expander construction in VNC1. Annals of Pure and Applied Logic, 171(7):102796, 2020.", + "[BKO20] Jan Bydzovsky, Jan Krajíček, and Igor C. Oliveira. Consistency of circuit lower bounds with bounded theories. *Logical Methods in Computer Science*, 16(2), 2020.", + "[BKT14] Samuel R. Buss, Leszek A. Kołodziejczyk, and Neil Thapen. Fragments of approximate counting. Journal of Symbolic Logic, 79(2):496-525, 2014.", + "[BM20] Jan Bydzovsky and Moritz Müller. Polynomial time ultrapowers and the consistency of circuit lower bounds. Arch. Math. Log., 59(1-2):127-147, 2020.", + "[Bus86] Samuel R. Buss. Bounded Arithmetic. Bibliopolis, 1986.", + "[Bus90] Samuel R. Buss. On model theory for intuitionistic bounded arithmetic with applications to independence results. In *Feasible Mathematics: A Mathematical Sciences Institute Workshop, Ithaca, New York, June 1989*, pages 27-47. Springer, 1990." 
+ ], + "bbox": [ + 120, + 286, + 883, + 858 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 488, + 898, + 508, + 909 + ], + "page_idx": 23 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[Bus94] Samuel R. Buss. On herbrand's theorem. In Selected Papers from the Logic and Computational Complexity International Workshop (LCC), pages 195-209, 1994.", + "[Bus97] Samuel R. Buss. Bounded arithmetic and propositional proof complexity. In Logic of Computation, pages 67-121. Springer Berlin Heidelberg, 1997.", + "$\\left[\\mathrm{CHO}^{+}22\\right]$ Lijie Chen, Shuichi Hirahara, Igor C. Oliveira, Jan Pich, Ninad Rajgopal, and Rahul Santhanam. Beyond natural proofs: Hardness magnification and locality. J. ACM, 69(4):25:1-25:49, 2022.", + "[CIKK16] Marco L. Carmosino, Russell Impagliazzo, Valentine Kabanets, and Antonina Kolokolova. Learning algorithms from natural proofs. In Conference on Computational Complexity (CCC), pages 10:1-10:24, 2016.", + "[CJsw21] Lijie Chen, Ce Jin, Rahul Santhanam, and Ryan Williams. Constructive separations and their consequences. In Symposium on Foundations of Computer Science (FOCS), 2021.", + "[CK07] Stephen A. Cook and Jan Krajček. Consequences of the provability of NP $\\subseteq$ P/poly. Journal of Symbolic Logic, 72(4):1353-1371, 2007.", + "$\\left[\\mathrm{CKK}^{+}24\\right]$ Marco Carmosino, Valentine Kabanets, Antonina Kolokolova, Igor C. Oliveira, and Dimitrios Tsintsili-das. Provability of the circuit size hierarchy and its consequences. Preprint, 2024.", + "[CKKO21] Marco Carmosino, Valentine Kabanets, Antonina Kolokolova, and Igor C. Oliveira. Learn-uniform circuit lower bounds and provability in bounded arithmetic. In Symposium on Foundations of Computer Science (FOCS), 2021.", + "[CLO24a] Lijie Chen, Jiatu Li, and Igor C. Oliveira. On the unprovability of circuit size bounds in intuitionistic $S_2^1$ . 
Preprint: arXiv:2404.11841, 2024.", + "[CLO24b] Lijie Chen, Jiatu Li, and Igor C. Oliveira. Reverse mathematics of complexity lower bounds. In Symposium on Foundations of Computer Science (FOCS), 2024.", + "[CN10] Stephen A. Cook and Phuong Nguyen. Logical Foundations of Proof Complexity. Cambridge University Press, 2010.", + "[Bcob65] Alan Cobham. The intrinsic computational difficulty of functions. Proc. Logic, Methodology and Philosophy of Science, pages 24-30, 1965.", + "[Co075] Stephen A. Cook. Feasibly constructive proofs and the propositional calculus (preliminary version). In Symposium on Theory of Computing (STOC), pages 83-97, 1975.", + "[CRT24] Lijie Chen, Ron D. Rothblum, and Roei Tell. Fiat-Shamir in the plain model from derandomization. Electron. Colloquium Comput. Complex., TR24-116, 2024.", + "[CU93] Stephen Cook and Alasdair Urquhart. Functional interpretations of feasibly constructive arithmetic. Annals of Pure and Applied Logic, 63(2):103-200, 1993.", + "[Din07] Irit Dinur. The PCP theorem by gap amplification. J. ACM, 54(3):12, 2007.", + "[dRGN+21] Susanna F. de Rezende, Mika Göös, Jakob Nordström, Toniann Pitassi, Robert Robere, and Dmitry Sokolov. Automating algebraic proof systems is NP-hard. In Symposium on Theory of Computing (STOC), pages 209-222, 2021.", + "[Gay23] Azza Gaysin. Proof complexity of CSP. ArXiv e-Print arXiv:2201.00913, 2023.", + "[Gay24] Azza Gaysin. Proof complexity of universal algebra in a CSP dichotomy proof. ArXiv e-Print arXiv:2403.06704, 2024." + ], + "bbox": [ + 122, + 90, + 883, + 875 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 488, + 898, + 508, + 909 + ], + "page_idx": 24 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[HP93] Petr Hajek and Pavel Pudlák. Metamathematics of first-order arithmetic. Springer-Verlag, 1993.", + "[ILW23] Rahul Ilango, Jiatu Li, and Ryan Williams. 
Indistinguishability obfuscation, range avoidance, and bounded arithmetic. In Symposium on Theory of Computing (STOC), pages 1076–1089. ACM, 2023.", + "[Jer04] Emil Jerabek. Dual weak pigeonhole principle, boolean complexity, and derandomization. Annals of Pure and Applied Logic, 129(1-3):1-37, 2004.", + "[Jef05] Emil Jerabek. Weak pigeonhole principle and randomized computation. PhD thesis, Charles University in Prague, 2005.", + "[Jer06] Emil Jerabek. The strength of sharply bounded induction. Mathematical Logic Quarterly, 52(6):613-624, 2006.", + "[Jer07] Emil Jerabek. Approximate counting in bounded arithmetic. Journal of Symbolic Logic, 72(3):959-993, 2007.", + "[Juk12] Stasys Jukna. Boolean Function Complexity: Advances and Frontiers. Springer, 2012.", + "[Kan82] Ravi Kannan. Circuit-size lower bounds and non-reducibility to sparse sets. Information and Control, 55(1-3):40-56, 1982.", + "[Kha22] Erfan Khaniki. Nisan-Wigderson generators in proof complexity: New lower bounds. In Computational Complexity Conference (CCC), pages 17:1-17:15, 2022.", + "[KO17] Jan Krajíček and Igor C. Oliveira. Unprovability of circuit upper bounds in Cook's theory PV. *Logical Methods in Computer Science*, 13(1), 2017.", + "[KP89] Jan Krajíček and Pavel Pudlák. Propositional provability and models of weak arithmetic. In CSL'89: Proceedings of the 3rd Workshop on Computer Science Logic, pages 193-210, 1989.", + "[KPS90] Jan Krajíček, Pavel Pudlák, and Jíří Sgall. Interactive computations of optimal solutions. In International Symposium on Mathematical Foundations of Computer Science (MFCS), volume 452, pages 48-60, 1990.", + "[KPT91] Jan Krajíček, Pavel Pudlák, and Gaisi Takeuti. Bounded arithmetic and the polynomial hierarchy. Annals of Pure and Applied Logic, 52(1-2):143-153, 1991.", + "[Kra95] Jan Krajíček. Bounded Arithmetic, Propositional Logic, and Complexity Theory. Encyclopedia of Mathematics and its Applications. 
Cambridge University Press, 1995.", + "[Kra97] Jan Krajicek. Interpolation theorems, lower bounds for proof systems, and independence results for bounded arithmetic. J. Symb. Log., 62(2):457-486, 1997.", + "[Kra11] Jan Krajicek. On the proof complexity of the Nisan-Wigderson generator based on a hard NP $\\cap$ coNP function. Journal of Mathematical Logic, 11(1), 2011.", + "[Kra19] Jan Krajíček. Proof Complexity. Encyclopedia of Mathematics and its Applications. Cambridge University Press, 2019.", + "[Kra21] Jan Krajíček. Small circuits and dual weak PHP in the universal theory of p-time algorithms. ACM Transactions on Computational Logic (TOCL), 22(2):1-4, 2021.", + "[Kra24] Jan Krajicek. Proof Complexity Generators. Monograph available at https://www.karlin.mff.cuni.cz/~krajicek/gdraft.html, 2024." + ], + "bbox": [ + 137, + 90, + 883, + 854 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 488, + 898, + 508, + 909 + ], + "page_idx": 25 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[Lê14] Dai Tri Man Lê. Bounded Arithmetic and Formalizing Probabilistic Proofs. PhD thesis, University of Toronto, 2014.", + "[LC11] Dai Tri Man Le and Stephen A. Cook. Formalizing randomized matching algorithms. Log. Methods Comput. Sci., 8(3), 2011.", + "[Lip94] Richard J. Lipton. Some consequences of our failure to prove non-linear lower bounds on explicit functions. In Structure in Complexity Theory Conference (CCC), pages 79-87, 1994.", + "[LLR24] Jiawei Li, Yuhao Li, and Hanlin Ren. Meta-mathematics of resolution lower bounds: A TFNP perspective. Preprint, 2024.", + "[LO23] Jiatu Li and Igor C. Oliveira. Unprovability of strong complexity lower bounds in bounded arithmetic. In Symposium on Theory of Computing (STOC), 2023.", + "[McK10] Richard McKinley. A sequent calculus demonstration of Herbrand's theorem. arXiv preprint arXiv:1007.3414, 2010.", + "[MP20] Moritz Müller and Ján Pich. 
Feasibly constructive proofs of succinct weak circuit lower bounds. Annals of Pure and Applied Logic, 171(2), 2020.", + "[MPW02] Alexis Maciel, Toniann Pitassi, and Alan R. Woods. A new proof of the weak pigeonhole principle. Journal of Computer and System Sciences, 64(4):843-872, 2002.", + "[Oja04] Kerry Ojakian. Combinatorics in Bounded Arithmetic. PhD thesis, Carnegie Mellon University, 2004.", + "[Par71] Rohit Parikh. Existence and feasibility in arithmetic. Journal of Symbolic Logic, 36(3):494-508, 1971.", + "[Pic15a] Jan Pich. Circuit lower bounds in bounded arithmetics. Annals of Pure and Applied Logic, 166(1):29-45, 2015.", + "[Pic15b] Jan Pich. Logical strength of complexity theory and a formalization of the PCP theorem in bounded arithmetic. *Logical Methods in Computer Science*, 11(2), 2015.", + "[PS21] Jan Pich and Rahul Santhanam. Strong co-nondeterministic lower bounds for NP cannot be proved feasibly. In Symposium on Theory of Computing (STOC), pages 223-233, 2021.", + "[PS22] Jan Pich and Rahul Santhanam. Learning algorithms versus automatability of Frege systems. In International Colloquium on Automata, Languages, and Programming (ICALP), pages 101:1-101:20, 2022.", + "[PS23] Ján Pich and Rahul Santhanam. Towards $\\mathrm{P} \\neq \\mathrm{NP}$ from extended Frege lower bounds. *Electron. Colloquium Comput. Complex.*, TR23-199, 2023.", + "[Pud06] Pavel Pudlák. Consistency and games - in search of new combinatorial principles. In V. Stoltenberg-Hansen and J. Väätänen, editors, Logic Colloquium '03, volume 24 of Lecture Notes in Logic, pages 244-281. ASL, 2006.", + "[PWW88] Jeff B. Paris, A. J. Wilkie, and Alan R. Woods. Provability of the pigeonhole principle and the existence of infinitely many primes. J. Symb. Log., 53(4):1235-1244, 1988.", + "[Raz95a] Alexander A. Razborov. Bounded arithmetic and lower bounds in boolean complexity. In P. Clote and J. Remmel, editors, Feasible Mathematics II, pages 344-386. 
Birkhäuser, 1995.", + "[Raz95b] Alexander A Razborov. Unprovability of lower bounds on circuit size in certain fragments of bounded arithmetic. Izvestiya: mathematics, 59(1):205, 1995." + ], + "bbox": [ + 129, + 90, + 883, + 854 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "27", + "bbox": [ + 488, + 898, + 508, + 909 + ], + "page_idx": 26 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[Raz15] Alexander A. Razborov. Pseudorandom generators hard for $k$ -DNF resolution and polynomial calculus resolution. Annals of Mathematics, pages 415-472, 2015.", + "[RR97] Alexander A. Razborov and Steven Rudich. Natural proofs. Journal of Computer and System Sciences, 55(1):24-35, 1997.", + "[Sub61] Bella A. Subbotovskaya. Realization of linear functions by formulas using $+, \\cdot, -$ . In Soviet Math. Dokl, 1961.", + "[SW14] Rahul Santhanam and Ryan Williams. On uniformity and circuit lower bounds. Computational Complexity, 23(2):177-205, 2014.", + "[TC21] Iddo Tzameret and Stephen A. Cook. Uniform, integral, and feasible proofs for the determinant identities. J. ACM, 68(2):12:1-12:80, 2021.", + "[Woo81] Alan R. Woods. Some problems in logic and number theory and their connections. PhD thesis, University of Manchester, 1981.", + "[WP87] Alex J. Wilkie and Jeff B. Paris. On the scheme of induction for bounded arithmetic formulas. Ann. Pure Appl. Log., 35:261-302, 1987." 
+ ], + "bbox": [ + 137, + 90, + 883, + 372 + ], + "page_idx": 27 + }, + { + "type": "page_number", + "text": "28", + "bbox": [ + 488, + 898, + 506, + 909 + ], + "page_idx": 27 + } +] \ No newline at end of file diff --git a/data/2025/2504_04xxx/2504.04416/4ab48d87-f77d-4021-9081-0dbea7a7ea19_model.json b/data/2025/2504_04xxx/2504.04416/4ab48d87-f77d-4021-9081-0dbea7a7ea19_model.json new file mode 100644 index 0000000000000000000000000000000000000000..cefa48ee46666428d5652eb4453c665b989d024c --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/4ab48d87-f77d-4021-9081-0dbea7a7ea19_model.json @@ -0,0 +1,5481 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.27, + 0.061, + 0.701 + ], + "angle": 270, + "content": "arXiv:2504.04416v1 [cs.CC] 6 Apr 2025" + }, + { + "type": "title", + "bbox": [ + 0.29, + 0.088, + 0.71, + 0.11 + ], + "angle": 0, + "content": "SIGACT News Complexity Theory Column" + }, + { + "type": "title", + "bbox": [ + 0.21, + 0.116, + 0.788, + 0.139 + ], + "angle": 0, + "content": "Meta-Mathematics of Computational Complexity Theory" + }, + { + "type": "text", + "bbox": [ + 0.418, + 0.149, + 0.578, + 0.17 + ], + "angle": 0, + "content": "Igor C. Oliveira1" + }, + { + "type": "image", + "bbox": [ + 0.443, + 0.179, + 0.555, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.466, + 0.296, + 0.533, + 0.31 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.156, + 0.32, + 0.841, + 0.382 + ], + "angle": 0, + "content": "We survey results on the formalization and independence of mathematical statements related to major open problems in computational complexity theory. Our primary focus is on recent findings concerning the (un)provability of complexity bounds within theories of bounded arithmetic. This includes the techniques employed and related open problems, such as the (non)existence of a feasible proof that \\( \\mathsf{P} = \\mathsf{NP} \\)." 
+ }, + { + "type": "title", + "bbox": [ + 0.114, + 0.394, + 0.21, + 0.412 + ], + "angle": 0, + "content": "Contents" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.426, + 0.885, + 0.44 + ], + "angle": 0, + "content": "1 Introduction 2" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.453, + 0.884, + 0.467 + ], + "angle": 0, + "content": "2 Preliminaries 3" + }, + { + "type": "list", + "bbox": [ + 0.115, + 0.426, + 0.885, + 0.467 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.139, + 0.469, + 0.884, + 0.484 + ], + "angle": 0, + "content": "2.1 Complexity Theory 3" + }, + { + "type": "text", + "bbox": [ + 0.139, + 0.485, + 0.885, + 0.498 + ], + "angle": 0, + "content": "2.2 Theories of Bounded Arithmetic 3" + }, + { + "type": "list", + "bbox": [ + 0.139, + 0.469, + 0.885, + 0.498 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.499, + 0.884, + 0.513 + ], + "angle": 0, + "content": "2.2.1 \\(\\mathrm{PV}_1\\) 4" + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.514, + 0.885, + 0.529 + ], + "angle": 0, + "content": "2.2.2 \\(\\mathbf{S}_2^1,\\mathbf{T}_2^1\\) , and Beyond 4" + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.529, + 0.885, + 0.543 + ], + "angle": 0, + "content": "2.2.3 \\(\\mathsf{APC}_1\\) 6" + }, + { + "type": "list", + "bbox": [ + 0.176, + 0.499, + 0.885, + 0.543 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.557, + 0.884, + 0.571 + ], + "angle": 0, + "content": "3 Auxiliary Definitions and Results 6" + }, + { + "type": "text", + "bbox": [ + 0.139, + 0.573, + 0.884, + 0.587 + ], + "angle": 0, + "content": "3.1 Witnessing Theorems 6" + }, + { + "type": "text", + "bbox": [ + 0.139, + 0.588, + 0.885, + 0.601 + ], + "angle": 0, + "content": "3.2 Bounded Arithmetic and Propositional Proofs 7" + }, + { + "type": "text", + "bbox": [ + 0.139, + 0.602, + 0.885, + 0.616 + ], + "angle": 0, + "content": "3.3 Cuts of Models of Bounded 
Arithmetic 8" + }, + { + "type": "list", + "bbox": [ + 0.139, + 0.573, + 0.885, + 0.616 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.63, + 0.884, + 0.644 + ], + "angle": 0, + "content": "4 The Strength of Bounded Arithmetic 9" + }, + { + "type": "text", + "bbox": [ + 0.139, + 0.646, + 0.884, + 0.659 + ], + "angle": 0, + "content": "4.1 Formalization of Results from Algorithms and Complexity 9" + }, + { + "type": "text", + "bbox": [ + 0.139, + 0.66, + 0.885, + 0.675 + ], + "angle": 0, + "content": "4.2 Concrete Example: Subbotovskaya's Formula Lower Bound in \\(\\mathsf{PV}_1\\) 10" + }, + { + "type": "list", + "bbox": [ + 0.139, + 0.646, + 0.885, + 0.675 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.687, + 0.884, + 0.702 + ], + "angle": 0, + "content": "5 Unprovability of Complexity Bounds 14" + }, + { + "type": "text", + "bbox": [ + 0.139, + 0.704, + 0.884, + 0.717 + ], + "angle": 0, + "content": "5.1 Unprovability of Upper Bounds 14" + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.719, + 0.885, + 0.732 + ], + "angle": 0, + "content": "5.1.1 LEARN-Uniform Circuits and Unprovability 14" + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.733, + 0.885, + 0.748 + ], + "angle": 0, + "content": "5.1.2 \\(\\mathsf{P} = \\mathsf{NP}\\) and Propositional Proof Complexity 17" + }, + { + "type": "list", + "bbox": [ + 0.176, + 0.719, + 0.885, + 0.748 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.139, + 0.749, + 0.884, + 0.762 + ], + "angle": 0, + "content": "5.2 Unprovability of Lower Bounds 18" + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.764, + 0.884, + 0.777 + ], + "angle": 0, + "content": "5.2.1 Average-Case Circuit Lower Bounds 18" + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.778, + 0.885, + 0.792 + ], + "angle": 0, + "content": "5.2.2 Extended Frege Lower Bounds 21" + }, + { + "type": "list", + "bbox": [ + 0.176, + 0.764, + 0.885, + 
0.792 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.139, + 0.794, + 0.884, + 0.808 + ], + "angle": 0, + "content": "5.3 Connection Between Upper Bounds and Lower Bounds 22" + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.821, + 0.884, + 0.836 + ], + "angle": 0, + "content": "6 Additional Recent Developments 23" + }, + { + "type": "page_footnote", + "bbox": [ + 0.136, + 0.846, + 0.802, + 0.861 + ], + "angle": 0, + "content": "1Department of Computer Science, University of Warwick, UK. Email: igor.oliveira@warwick.ac.uk." + }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.899, + 0.504, + 0.91 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.114, + 0.089, + 0.283, + 0.109 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.123, + 0.884, + 0.226 + ], + "angle": 0, + "content": "The investigation of the inherent complexity of computational tasks is a central research direction in theoretical computer science. While unconditional results are known in a variety of restricted contexts (i.e., with respect to weak models of computation), despite significant efforts, several central questions of the field remain wide open. Prominent examples include the relation between complexity classes P and NP, understanding the power of non-uniform Boolean circuits, and bounding the length of proofs in propositional proof systems such as Frege and extended Frege." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.226, + 0.885, + 0.363 + ], + "angle": 0, + "content": "The investigation of the difficulty of settling these problems has long been an important and influential area of research by itself (e.g., barrier results such as [BGS75, RR97, AW09, \\(\\mathrm{CHO}^{+}22\\)]). Unfortunately, these results tend to be ad-hoc and do not consider a standard and robust notion of proof. 
In order to build a general theory, several works have considered provability in the usual sense of mathematical logic. Most importantly, this enables a deeper investigation of complexity theory that considers not only the running time of a program or the size of a circuit but also the feasibility of proving their existence and correctness. In particular, we can explore the fundamental question of what can and cannot be feasibly computed, along with the meta-question of what lower and upper bounds can and cannot be feasibly proven." + }, + { + "type": "text", + "bbox": [ + 0.141, + 0.364, + 0.437, + 0.379 + ], + "angle": 0, + "content": "A fundamental goal of this research is to" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.39, + 0.884, + 0.442 + ], + "angle": 0, + "content": "\\((\\star)\\) identify a suitable logical theory capable of formalizing most, if not all, known results in algorithms and complexity, and determine whether the major open problems mentioned above are provable or unprovable within this theory.2" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.452, + 0.885, + 0.674 + ], + "angle": 0, + "content": "Although we are still far from reaching this goal, progress has been made in understanding the (un)provability of statements concerning the complexity of computations within certain fragments of Peano Arithmetic, collectively known as Bounded Arithmetic. These theories are designed to capture proofs that manipulate and reason with concepts from a specified complexity class. For instance, a proof by induction whose inductive hypothesis can be expressed as an NP predicate is one such example. The earliest theory of this kind was \\(\\mathsf{I}\\Delta_0\\), introduced by Parikh [Par71], who explored the intuitive concept of feasibility in arithmetic and addressed the infeasibility of exponentiation. 
The relationship between Parikh's theory and computational complexity was fully recognized and advanced by Paris and Wilkie in a series of influential papers during the 1980s (see [WP87]). Other significant theories include Cook's theory \\(\\mathsf{PV}_1\\) [Coo75], which formalizes polynomial-time reasoning; Jerabek's theory \\(\\mathsf{APC}_1\\) [Jer04, Jer05, Jer07], which extends \\(\\mathsf{PV}_1\\) by incorporating the dual weak pigeonhole principle for polynomial-time functions and formalizes probabilistic polynomial-time reasoning; and Buss's theories \\(\\mathsf{S}_2^i\\) and \\(\\mathsf{T}_2^i\\) [Bus86], which include induction principles corresponding to various levels of the polynomial-time hierarchy." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.673, + 0.884, + 0.759 + ], + "angle": 0, + "content": "These theories are capable of formalizing advanced results. For instance, it is known that \\(\\mathrm{PV}_1\\) can prove the PCP Theorem [Pic15b], while \\(\\mathrm{APC}_1\\) can establish several significant circuit lower bounds [MP20], including monotone circuit lower bounds for \\(k\\)-Clique and bounded-depth circuit lower bounds for the Parity function. Further examples include the explicit construction of expander graphs [BKKK20] and the correctness of randomized polynomial-time matching algorithms [LC11], among many others." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.759, + 0.884, + 0.829 + ], + "angle": 0, + "content": "Given the expressive power of these theories, even if we are not yet able to establish a breakthrough result of the magnitude of \\((\\star)\\), determining the (un)provability of complexity bounds of interest in theories of bounded arithmetic still represents significant progress towards our understanding of the power and limits of feasible computations and proofs. 
This survey aims to provide an introduction to some of these results," + }, + { + "type": "page_footnote", + "bbox": [ + 0.111, + 0.837, + 0.887, + 0.867 + ], + "angle": 0, + "content": "As we elaborate in Section 5, the unprovability of a statement is equivalent to the consistency of its negation, which can be at least as important." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.899, + 0.506, + 0.91 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.092, + 0.881, + 0.158 + ], + "angle": 0, + "content": "the underlying techniques, and related open problems. While our primary focus is on recent developments, in order to provide a broader perspective we also cover some classical results. Due to space limitations, the survey is not exhaustive, and several references had to be omitted (although some recent developments are mentioned in Section 6)." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.185, + 0.29, + 0.203 + ], + "angle": 0, + "content": "2 Preliminaries" + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.219, + 0.325, + 0.236 + ], + "angle": 0, + "content": "2.1 Complexity Theory" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.246, + 0.885, + 0.313 + ], + "angle": 0, + "content": "We will rely on a few additional standard definitions from complexity theory, such as basic complexity classes, Boolean circuits and formulas, and propositional proof systems. These can be found in textbooks such as [AB09] and [Kra19]. Below we only establish notation and review a classical result that offers a convenient way to talk about polynomial-time computations in some logical theories." + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.314, + 0.783, + 0.331 + ], + "angle": 0, + "content": "We use \\(\\mathsf{SIZE}[s]\\) to denote the set of languages computed by Boolean circuits of size \\(s(n)\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.111, + 0.332, + 0.884, + 0.485 + ], + "angle": 0, + "content": "In theoretical computer science, one typically considers functions and predicates that operate over binary strings. This is equivalent to operations on integers, by identifying each non-negative integer with its binary representation. Let \\(\\mathbb{N}\\) denote the set of non-negative integers. For \\(a\\in \\mathbb{N}\\), we let \\(|a|\\triangleq \\lceil \\log_2(a + 1)\\rceil\\) denote the length of the binary representation of \\(a\\). For a constant \\(k\\geq 1\\), we say that a function \\(f\\colon \\mathbb{N}^k\\to \\mathbb{N}\\) is computable in polynomial time if \\(f(x_{1},\\ldots ,x_{k})\\) can be computed in time polynomial in \\(|x_{1}|,\\ldots ,|x_{k}|\\). (For convenience, we might write \\(|\\vec{x} |\\triangleq |x_1|,\\dots ,|x_k|.\\)) Recall that FP denotes the set of polynomial time functions. While the definition of polynomial time refers to a machine model, FP can also be introduced in a machine independent way as the closure of a set of base functions under composition and limited recursion on notation. In more detail, we can consider the following class \\(\\mathcal{F}\\) of base functions:" + }, + { + "type": "equation", + "bbox": [ + 0.13, + 0.497, + 0.867, + 0.572 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} c (x) \\triangleq 0, \\quad s (x) \\triangleq x + 1, \\quad a (x) \\triangleq \\lfloor x / 2 \\rfloor , \\quad d (x) \\triangleq 2 \\cdot x, \\quad \\pi_ {\\ell} ^ {i} (x _ {1}, \\ldots , x _ {\\ell}) \\triangleq x _ {i}, \\quad x \\# y \\triangleq 2 ^ {| x | \\cdot | y |}, \\\\ x \\leq y \\triangleq \\left\\{ \\begin{array}{l l} 1 & \\text {i f} x \\leq y \\\\ 0 & \\text {o t h e r w i s e ,} \\end{array} \\right. \\quad \\text {C h o i c e} (x, y, z) \\triangleq \\left\\{ \\begin{array}{l l} y & \\text {i f} x > 0 \\\\ z & \\text {o t h e r w i s e .} \\end{array} \\right. 
\\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.584, + 0.881, + 0.616 + ], + "angle": 0, + "content": "We say that a function \\( f(\\vec{x},y) \\) is defined from functions \\( g(\\vec{x}) \\), \\( h(\\vec{x},y,z) \\), and \\( k(\\vec{x},y) \\) by limited recursion on notation if" + }, + { + "type": "equation", + "bbox": [ + 0.377, + 0.632, + 0.51, + 0.648 + ], + "angle": 0, + "content": "\\[\nf (\\vec {x}, 0) = g (\\vec {x})\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.377, + 0.653, + 0.621, + 0.67 + ], + "angle": 0, + "content": "\\[\nf (\\vec {x}, y) = h (\\vec {x}, y, f (\\vec {x}, \\lfloor y / 2 \\rfloor))\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.377, + 0.674, + 0.527, + 0.69 + ], + "angle": 0, + "content": "\\[\nf (\\vec {x}, y) \\leq k (\\vec {x}, y)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.704, + 0.881, + 0.737 + ], + "angle": 0, + "content": "for every sequence \\((\\vec{x},y)\\) of natural numbers. Cobham [Cob65] proved that FP is the least class of functions that contains \\(\\mathcal{F}\\) and is closed under composition and limited recursion on notation." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.758, + 0.432, + 0.774 + ], + "angle": 0, + "content": "2.2 Theories of Bounded Arithmetic" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.786, + 0.881, + 0.87 + ], + "angle": 0, + "content": "Bounded arithmetic has a long and rich history (see [Bus97] for an introduction, and [HP93, Kra95, CN10] for a detailed treatment). The correspondence between the theories and complexity classes manifests in multiple ways. For instance, witnessing results show that every provably total function in a given theory \\(\\mathsf{T}_{\\mathcal{C}}\\) (i.e., when \\(\\forall x \\exists!y \\psi(x,y)\\) is provable, for certain formulas \\(\\psi\\)) is computable within the corresponding complexity class \\(\\mathcal{C}\\) (i.e., the function \\(y = f(x)\\) is in \\(\\mathcal{C}\\)). 
There is also a close connection between" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.899, + 0.504, + 0.91 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.092, + 0.888, + 0.198 + ], + "angle": 0, + "content": "theories of bounded arithmetic and propositional proof systems, e.g., propositional translations between proofs of certain sentences in \\(\\mathsf{PV}_1\\) or \\(\\mathsf{S}_2^1\\) and polynomial-size proofs in the extended Frege proof system of the corresponding propositional formulas. We review some related results in Section 3.1 and Section 3.2, respectively. In this section, we provide an overview of some widely investigated theories of bounded arithmetic: \\(\\mathsf{PV}_1\\), \\(\\mathsf{S}_2^1\\), \\(\\mathsf{T}_2^1\\), and \\(\\mathsf{APC}_1\\). We assume basic familiarity with first-order logic. Results claimed below without reference can be found in [Kra95]." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.215, + 0.204, + 0.232 + ], + "angle": 0, + "content": "2.2.1 PV" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.241, + 0.885, + 0.326 + ], + "angle": 0, + "content": "\\(\\mathsf{PV}_1\\) [Coo75] (see also [KPT91]) is a first-order theory whose intended model is the set \\(\\mathbb{N}\\) of natural numbers, together with the standard interpretation for constants and functions symbols such as \\(0, +, \\times, \\text{etc.}\\). The vocabulary (language) of \\(\\mathsf{PV}_1\\), denoted \\(\\mathcal{L}_{\\mathsf{PV}_1}\\), contains a function symbol for each polynomial-time algorithm \\(f: \\mathbb{N}^k \\to \\mathbb{N}\\) (where \\(k\\) is any constant). These function symbols, and the axioms defining them, are obtained through Cobham's characterization of polynomial-time functions discussed in Section 2.1." 
+ }, + { + "type": "text", + "bbox": [ + 0.111, + 0.327, + 0.884, + 0.377 + ], + "angle": 0, + "content": "\\(\\mathrm{PV}_1\\) also postulates an induction axiom scheme that simulates binary search, and one can show that it admits induction over quantifier-free formulas (i.e., polynomial-time predicates). We discuss induction axioms in more detail in Section 2.2.2." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.378, + 0.884, + 0.412 + ], + "angle": 0, + "content": "We will use later in the text that \\(\\mathsf{PV}_1\\) admits a formulation where all axioms are universal formulas (i.e., \\(\\forall \\vec{x}\\phi (\\vec{x})\\), where \\(\\phi\\) is a quantifier-free formula). In other words, \\(\\mathsf{PV}_1\\) is a universal theory." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.412, + 0.884, + 0.464 + ], + "angle": 0, + "content": "While the details of the definition of \\(\\mathrm{PV}_1\\) are fairly technical (see, e.g., the longer overview in [CLO24b] or the exposition in [Kra95]), such details are often not needed. In particular, \\(\\mathrm{PV}_1\\) has an equivalent formalization that does not require Cobham's result [Jef06]." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.482, + 0.319, + 0.501 + ], + "angle": 0, + "content": "2.2.2 \\(\\mathsf{S}_2^1,\\mathsf{T}_2^1\\) , and Beyond" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.51, + 0.884, + 0.56 + ], + "angle": 0, + "content": "While \\(\\mathrm{PV}_1\\) can be related to polynomial-time computations and feasible proofs, Buss [Bus86] introduced a hierarchy of theories with close ties to the different levels of the polynomial hierarchy. To specify the theories, we will need a few definitions." 
+ }, + { + "type": "text", + "bbox": [ + 0.112, + 0.562, + 0.884, + 0.613 + ], + "angle": 0, + "content": "The language \\(\\mathcal{L}_{\\mathsf{B}}\\) of these theories contains the predicate symbols \\(=\\) and \\(\\leq\\), the constant symbols 0 and 1, and function symbols \\(S\\) (successor), \\(+\\), \\(\\cdot\\), \\(\\lfloor x / 2 \\rfloor\\), \\(|x|\\) (interpreted as the length of \\(x\\) as in Section 2.1), and \\(\\#\\) (\"smash\"; interpreted as \\(x \\# y = 2^{|x| \\cdot |y|}\\))." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.613, + 0.884, + 0.664 + ], + "angle": 0, + "content": "A bounded quantifier is a quantifier of the form \\( Qy \\leq t \\), where \\( Q \\in \\{\\exists, \\forall\\} \\) and \\( t \\) is a term not involving \\( y \\). Similarly, a sharply bounded quantifier is one of the form \\( Qy \\leq |t| \\). Formally, such quantifiers are simply abbreviations. For instance," + }, + { + "type": "equation", + "bbox": [ + 0.294, + 0.677, + 0.701, + 0.696 + ], + "angle": 0, + "content": "\\[\n\\forall y \\leq t (\\vec {x}) \\varphi (\\vec {x}, y) \\triangleq \\forall y (y \\leq t (\\vec {x}) \\rightarrow \\varphi (\\vec {x}, y)), a n d\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.297, + 0.698, + 0.66, + 0.716 + ], + "angle": 0, + "content": "\\[\n\\exists y \\leq t (\\vec {x}) \\varphi (\\vec {x}, y) \\triangleq \\exists y (y \\leq t (\\vec {x}) \\wedge \\varphi (\\vec {x}, y)).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.73, + 0.885, + 0.798 + ], + "angle": 0, + "content": "A formula where each quantifier appears bounded (resp., sharply bounded) is said to be a bounded (resp., sharply bounded) formula. It is not hard to show that every sharply bounded formula defines a polynomial-time predicate over the standard model \\(\\mathbb{N}\\) under its usual operations. On the other hand, bounded quantifiers allow us to define predicates in NP, coNP, and beyond." 
+ }, + { + "type": "text", + "bbox": [ + 0.112, + 0.798, + 0.884, + 0.869 + ], + "angle": 0, + "content": "We can introduce a hierarchy of formulas by counting alternations of bounded quantifiers. The class \\(\\Pi_0^b = \\Sigma_0^b\\) contains the sharply bounded formulas. We then recursively define, for each \\(i\\geq 1\\), the classes \\(\\Sigma_i^b\\) and \\(\\Pi_{i}^{b}\\) according to the quantifier structure of the sentence, ignoring the appearance of sharply bounded quantifiers. For instance, if \\(\\varphi \\in \\Sigma_0^b\\) and \\(\\psi \\triangleq \\exists y\\leq t(\\vec{x})\\varphi (y,\\vec{x})\\), then \\(\\psi \\in \\Sigma_1^b\\) (see, e.g., [Kra95] for the" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.899, + 0.505, + 0.91 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.092, + 0.881, + 0.14 + ], + "angle": 0, + "content": "technical details in the general case). As alluded to above, it is known that, for each \\( i \\geq 1 \\), a predicate \\( P(\\vec{x}) \\) is in \\( \\Sigma_i^p \\) (the \\( i \\)-th level of the polynomial hierarchy) if and only if there is a \\( \\Sigma_i^b \\)-formula that agrees with it over \\( \\mathbb{N} \\)." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.143, + 0.881, + 0.211 + ], + "angle": 0, + "content": "The theories introduced by Buss share a common set BASIC of finitely many axioms postulating the expected arithmetic behavior of the constants, predicates, and function symbols, e.g., \\( x + y = y + x \\) and \\( |1| = 1 \\) (see, e.g., [Kra95, Page 68] for the complete list). The only difference among the theories is the kind of induction axiom scheme that each of them postulates." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.231, + 0.806, + 0.248 + ], + "angle": 0, + "content": "Theory \\(\\mathsf{T}_2^1\\). 
This is a theory in the language \\(\\mathcal{L}_{\\mathsf{B}}\\) extending
+ }, + { + "type": "text", + "bbox": [ + 0.111, + 0.504, + 0.881, + 0.605 + ], + "angle": 0, + "content": "Intuitively, polynomial induction reduces the proof of \\(\\varphi(x)\\) to proving \\(\\varphi(\\lfloor x/2 \\rfloor)\\). Unlike the standard induction axiom, this approach allows us to reach the base case in just \\(\\mathrm{poly}(n)\\) steps when starting with an integer \\(x\\) represented by \\(\\mathrm{poly}(n)\\) bits. This has implications for the efficiency of translating certain proofs in \\(\\mathsf{S}_2^1\\) into sequences of propositional proofs and for the extraction of polynomial-time algorithms from proofs (see Section 3.1 and Section 3.2). Analogously to \\(\\mathsf{T}_2^i\\), we can define the theories \\(\\mathsf{S}_2^i\\) via polynomial induction for \\(\\Sigma_i^b\\)-formulas." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.607, + 0.88, + 0.64 + ], + "angle": 0, + "content": "It is known that \\(\\mathsf{PV}_1\\) is essentially equivalent to \\(\\mathsf{T}_2^0\\) under an appropriate vocabulary and axioms [Jer'06], and that \\(\\mathsf{S}_2^i \\subseteq \\mathsf{T}_2^i \\subseteq \\mathsf{S}_2^{i+1}\\) for every \\(i \\geq 1\\)." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.642, + 0.881, + 0.727 + ], + "angle": 0, + "content": "When stating and proving results in \\( \\mathsf{S}_2^1 \\), it is convenient to employ a more expressive vocabulary under which any polynomial-time function can be easily described. Moreover, it is possible to achieve this in a conservative way, i.e., without increasing the power of the theory. In more detail, let \\( \\Gamma \\) be a set of \\( \\mathcal{L}_{\\mathsf{B}} \\)-formulas. 
We say that a polynomial-time function \\( f\\colon \\mathbb{N}^k\\to \\mathbb{N} \\) is \\( \\Gamma \\)-definable in \\( \\mathsf{S}_2^1 \\) if there is a formula \\( \\psi (\\vec{x},y)\\in \\Gamma \\) for which the following conditions hold:" + }, + { + "type": "text", + "bbox": [ + 0.131, + 0.735, + 0.556, + 0.754 + ], + "angle": 0, + "content": "(i) For every \\(a \\in \\mathbb{N}^k\\), \\(f(\\vec{a}) = b\\) if and only if \\(\\mathbb{N} \\models \\varphi(\\vec{a}, b)\\)." + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.762, + 0.507, + 0.781 + ], + "angle": 0, + "content": "(ii) \\(\\mathsf{S}_2^1\\vdash \\forall \\vec{x}\\left(\\exists y\\left(\\varphi (\\vec{x},y)\\land \\forall z\\left(\\varphi (\\vec{x},z)\\to y = z\\right)\\right). \\right.\\)" + }, + { + "type": "list", + "bbox": [ + 0.127, + 0.735, + 0.556, + 0.781 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.789, + 0.881, + 0.876 + ], + "angle": 0, + "content": "Every function \\( f \\in \\mathsf{FP} \\) is \\( \\Sigma_1^b \\)-definable in \\( S_2^1 \\). By adding all functions in \\( \\mathsf{FP} \\) to the vocabulary of \\( S_2^1 \\) and by extending \\( S_2^1 \\) with their defining axioms (i.e., \\( \\forall x \\varphi(x, f(x)) \\)), we obtain a theory \\( S_2^1(\\mathcal{L}_{\\mathsf{PV}}) \\) that can refer to polynomial-time predicates using quantifier-free formulas. \\( S_2^1(\\mathcal{L}_{\\mathsf{PV}}) \\) proves the polynomial induction scheme for both \\( \\Sigma_1^b \\)-formulas and \\( \\Pi_1^b \\)-formulas in the extended vocabulary. \\( S_2^1(\\mathcal{L}_{\\mathsf{PV}}) \\) is conservative over \\( S_2^1 \\), in the sense that any \\( \\mathcal{L}_{\\mathsf{B}} \\)-sentence provable in \\( S_2^1(\\mathcal{L}_{\\mathsf{PV}}) \\) is also provable in \\( S_2^1 \\)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.899, + 0.504, + 0.91 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.091, + 0.881, + 0.144 + ], + "angle": 0, + "content": "A \\(\\forall \\Sigma_{i}^{b}\\)-sentence is simply a sentence \\(\\psi = \\forall \\vec{x} \\varphi(\\vec{x})\\) where \\(\\varphi \\in \\Sigma_{i}^{b}\\). Every \\(\\forall \\Sigma_{1}^{b}\\)-sentence provable in \\(S_{2}^{1}(\\mathcal{L}_{\\mathsf{PV}})\\) is also provable in \\(\\mathsf{PV}_1\\). In other words, \\(S_{2}^{1}(\\mathcal{L}_{\\mathsf{PV}})\\) is \\(\\forall \\Sigma_{1}^{b}\\)-conservative over \\(\\mathsf{PV}_1\\). On the other hand, it is known that if \\(S_{2}^{1}(\\mathcal{L}_{\\mathsf{PV}}) = \\mathsf{PV}_1\\), then the polynomial-time hierarchy collapses." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.163, + 0.214, + 0.179 + ], + "angle": 0, + "content": "2.2.3 APC" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.19, + 0.885, + 0.242 + ], + "angle": 0, + "content": "In order to formalize probabilistic methods and randomized algorithms, Jeřábek [Jeř04, Jeř05, Jeř07] formulated the theory \\(\\mathsf{APC}_1\\) (this terminology is from [BKT14]) by extending \\(\\mathsf{PV}_1\\) with the dual Weak Pigeonhole Principle (dWPHP) for \\(\\mathsf{PV}_1\\) functions:" + }, + { + "type": "equation", + "bbox": [ + 0.342, + 0.254, + 0.655, + 0.273 + ], + "angle": 0, + "content": "\\[\n\\mathsf {A P C} _ {1} \\triangleq \\mathsf {P V} _ {1} \\cup \\{\\mathsf {d W P H P} (f) \\mid f \\in \\mathcal {L} _ {\\mathsf {P V}} \\}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.286, + 0.884, + 0.319 + ], + "angle": 0, + "content": "Informally, each sentence \\(\\mathrm{dWPHP}(f)\\) postulates that, for every length \\(n = |N|\\), there is \\(y < (1 + 1/n) \\cdot N\\) such that \\(f(x) \\neq y\\) for every \\(x < N\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.112, + 0.32, + 0.884, + 0.356 + ], + "angle": 0, + "content": "It is known that the dual Weak Pigeonhole Principle for polynomial-time predicates can be proved in \\(\\mathsf{T}_2^2\\) [MPW02], and consequently \\(\\mathsf{APC}_1 \\subseteq \\mathsf{T}_2^2(\\mathcal{L}_{\\mathsf{PV}})\\)." + }, + { + "type": "title", + "bbox": [ + 0.112, + 0.379, + 0.486, + 0.399 + ], + "angle": 0, + "content": "3 Auxiliary Definitions and Results" + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.413, + 0.345, + 0.431 + ], + "angle": 0, + "content": "3.1 Witnessing Theorems" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.44, + 0.884, + 0.492 + ], + "angle": 0, + "content": "Suppose a sentence \\(\\psi\\) of a certain syntactic form admits a proof in a theory \\(T\\) over a vocabulary \\(\\mathcal{L}\\). A witnessing theorem allows us to extract computational information from any such proof, by showing that an existential quantifier in \\(\\psi\\) can be witnessed by \\(\\mathcal{L}\\)-terms. The simplest example of such a result is stated next." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.503, + 0.884, + 0.555 + ], + "angle": 0, + "content": "Theorem 3.1 (Herbrand's Theorem (see, e.g., [Bus94, McK10])). Let \\( T \\) be a universal theory over a vocabulary \\( \\mathcal{L} \\). Let \\( \\varphi(x,y) \\) be a quantifier-free \\( \\mathcal{L} \\)-formula, and suppose that \\( T \\vdash \\forall x \\exists y \\varphi(x,y) \\). 
There is a constant \\( k \\geq 1 \\) and \\( \\mathcal{L} \\)-terms \\( t_1(x),\\ldots ,t_k(x) \\) such that" + }, + { + "type": "equation", + "bbox": [ + 0.31, + 0.567, + 0.685, + 0.587 + ], + "angle": 0, + "content": "\\[\nT \\vdash \\varphi (x, t _ {1} (x)) \\lor \\varphi (x, t _ {2} (x)) \\lor \\dots \\lor \\varphi (x, t _ {k} (x)).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.599, + 0.884, + 0.65 + ], + "angle": 0, + "content": "As an immediate consequence, if we apply Theorem 3.1 to \\( T \\triangleq \\mathrm{PV}_1 \\), we obtain \\( \\mathcal{L}_{\\mathrm{PV}} \\)-terms (corresponding to polynomial-time functions over \\( \\mathbb{N} \\)) such that, given \\( a \\in \\mathbb{N} \\), at least one of them produces a witness \\( b \\in \\mathbb{N} \\) such that \\( \\mathbb{N} \\models \\varphi(a, b) \\)." + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.651, + 0.738, + 0.668 + ], + "angle": 0, + "content": "Next, we consider the provability of more complex sentences in a universal theory." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.679, + 0.884, + 0.73 + ], + "angle": 0, + "content": "Theorem 3.2 (KPT Theorem [KPT91]). Let \\( T \\) be a universal theory with vocabulary \\( \\mathcal{L} \\), \\( \\varphi(w, u, v) \\) be a quantifier-free \\( \\mathcal{L} \\)-formula, and suppose that \\( T \\vdash \\forall w \\exists u \\forall v \\varphi(w, u, v) \\). 
Then there exist a constant \\( k \\geq 1 \\) and \\( \\mathcal{L} \\)-terms \\( t_1, \\ldots, t_k \\) such that" + }, + { + "type": "equation", + "bbox": [ + 0.199, + 0.743, + 0.796, + 0.762 + ], + "angle": 0, + "content": "\\[\nT \\vdash \\varphi (w, t _ {1} (w), v _ {1}) \\vee \\varphi (w, t _ {2} (w, v _ {1}), v _ {2}) \\vee \\dots \\vee \\varphi (w, t _ {k} (w, v _ {1}, \\dots , v _ {k - 1}), v _ {k}),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.775, + 0.807, + 0.792 + ], + "angle": 0, + "content": "where the notation \\(t_i(w, v_1, \\ldots, v_{i-1})\\) indicates that these are the only variables occurring in \\(t_i\\)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.804, + 0.884, + 0.838 + ], + "angle": 0, + "content": "Theorem 3.2 has a natural interpretation as an interactive game with finitely many rounds, which we revisit in Section 5.1.1 in the context of the provability of circuit upper bounds." + }, + { + "type": "page_footnote", + "bbox": [ + 0.133, + 0.847, + 0.795, + 0.862 + ], + "angle": 0, + "content": "3The dWPHP axiom scheme is also referred to as the surjective Weak Pigeonhole Principle in some references." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.899, + 0.504, + 0.91 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.092, + 0.881, + 0.159 + ], + "angle": 0, + "content": "A similar form of Theorem 3.2 holds under the provability of a \\(\\forall \\exists \\forall \\exists\\)-sentence (see, e.g., \\(\\mathrm{[CKK^{+}24]}\\) for a concrete application in the context of circuit lower bounds). In contrast, there is no straightforward analogue of the KPT Theorem for a larger number of quantifier alternations. In this case, more general formulations are needed, such as the ones considered in [Pud06, BKT14, LO23]." 
+ }, + { + "type": "text", + "bbox": [ + 0.111, + 0.161, + 0.885, + 0.298 + ], + "angle": 0, + "content": "It is also possible to establish witnessing theorems for theories that are not universal. This can be done either by first transforming the theory into a universal theory through the inclusion of new function symbols and quantifier elimination, or via direct approaches (see, e.g., [Kra95, Section 7.3]). Another example is Buss's Theorem for \\( S_2^1 \\), which can be used to show that every \\( \\forall \\Sigma_1^b \\)-sentence provable in \\( S_2^1(\\mathcal{L}_{\\mathsf{PV}}) \\) is also provable in \\( \\mathsf{PV}_1 \\). This has two implications. First, we can combine this result with Theorem 3.1, which yields polynomial-time algorithms from proofs of \\( \\forall \\Sigma_1^b \\)-sentences in \\( S_2^1(\\mathcal{L}_{\\mathsf{PV}}) \\). Second, this means that in some situations we can establish the provability of a sentence in \\( \\mathsf{PV}_1 \\) using the more convenient theory \\( S_2^1(\\mathcal{L}_{\\mathsf{PV}}) \\) (see Section 4.2 for an example)." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.317, + 0.546, + 0.334 + ], + "angle": 0, + "content": "3.2 Bounded Arithmetic and Propositional Proofs" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.344, + 0.884, + 0.412 + ], + "angle": 0, + "content": "In this section, we explain a connection between \\(\\mathsf{PV}_1\\) and the extended Frege proof system discovered by [Coo75]. 
In short, it says that if a universal \\(\\mathcal{L}_{\\mathsf{PV}}\\)-sentence \\(\\phi(x)\\) is provable in \\(\\mathsf{PV}_1\\), then there is a translation of \\(\\phi(x)\\) into a sequence \\(\\{G_n\\}_{n \\geq 1}\\) of propositional formulas \\(G_n(p_1, \\ldots, p_n)\\) such that each \\(G_n\\) has an extended Frege proof \\(\\pi_n\\) of size polynomial in \\(n\\).4" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.413, + 0.884, + 0.533 + ], + "angle": 0, + "content": "First, we review some concepts and fix notation, deferring the details to a standard textbook (e.g., [Kra19]). Recall that a propositional formula \\( G(p_{1},\\ldots ,p_{n}) \\) is formed using variables \\( p_1,\\dots ,p_n \\), constants 0 and 1, and logical connectives \\( \\land ,\\lor \\), and \\( \\neg \\). A Frege \\( (\\mathcal{F}) \\) proof system is a \"textbook\" style proof system for propositional logic. It can be formulated as a finite set of axiom schemes together with the modus ponens rule. \\( \\mathcal{F} \\) is known to be sound and complete. The size of a Frege proof is the total number of symbols occurring in the proof. In the extended Frege \\( (e\\mathcal{F}) \\) proof system, we also allow repeated subformulas appearing in a proof to be abbreviated via new variables." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.551, + 0.884, + 0.604 + ], + "angle": 0, + "content": "Cook's Translation [Coo75]. Let \\(\\varphi\\) be a universal \\(\\mathcal{L}_{\\mathrm{PV}}\\)-sentence of the form \\(\\varphi \\triangleq \\forall x \\psi(x)\\), where \\(\\psi(x)\\) is a quantifier-free formula. Cook [Coo75] established that if \\(\\varphi\\) is provable in \\(\\mathrm{PV}_1\\), then there is a sequence \\(\\{G_n\\}_{n \\geq 1}\\) of propositional tautologies such that" + }, + { + "type": "text", + "bbox": [ + 0.139, + 0.611, + 0.534, + 0.628 + ], + "angle": 0, + "content": "- Each \\( G_{n}(p_{1},\\ldots ,p_{n}) \\) is a polynomial-size formula." 
+ }, + { + "type": "text", + "bbox": [ + 0.138, + 0.639, + 0.831, + 0.657 + ], + "angle": 0, + "content": "- \\( G_{n} \\) encodes that \\( \\psi(x) \\) is true whenever \\( |x| \\leq n \\), i.e., over all integers encoded as \\( n \\)-bit strings." + }, + { + "type": "text", + "bbox": [ + 0.139, + 0.667, + 0.44, + 0.683 + ], + "angle": 0, + "content": "- \\( G_{n} \\) admits polynomial-size \\( e\\mathcal{F} \\)-proofs." + }, + { + "type": "text", + "bbox": [ + 0.139, + 0.694, + 0.88, + 0.729 + ], + "angle": 0, + "content": "- Moreover, the existence of polynomial-size \\( e\\mathcal{F} \\)-proofs for each \\( G_{n} \\) is provable in \\( \\mathrm{PV}_1 \\). (We will need this additional property of the translation in Section 5.2.2.)" + }, + { + "type": "list", + "bbox": [ + 0.138, + 0.611, + 0.88, + 0.729 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.738, + 0.881, + 0.772 + ], + "angle": 0, + "content": "For a formula \\(\\psi(x)\\) as above, we often write \\(||\\psi||_n\\) to denote the corresponding propositional formula over inputs of length \\(n\\)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.789, + 0.881, + 0.823 + ], + "angle": 0, + "content": "For more information about the relation between proofs in bounded arithmetic and propositional proofs, including additional examples of propositional translations, we refer to [Bey09, Kra19]." + }, + { + "type": "page_footnote", + "bbox": [ + 0.111, + 0.832, + 0.884, + 0.86 + ], + "angle": 0, + "content": "Conceptually, this is analogous to the translation of a polynomial-time Turing machine \\(M\\) into a sequence \\(\\{C_n\\}_{n\\geq 1}\\) of polynomial-size Boolean circuits, one for each input length \\(n\\)." + }, + { + "type": "page_footnote", + "bbox": [ + 0.134, + 0.86, + 0.631, + 0.875 + ], + "angle": 0, + "content": "5We note that \\( G_{n}(p_{1},\\ldots ,p_{n}) \\) might contain auxiliary variables beyond \\( p_1,\\dots ,p_n \\)." 
+ }, + { + "type": "list", + "bbox": [ + 0.111, + 0.832, + 0.884, + 0.875 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.899, + 0.504, + 0.91 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.113, + 0.091, + 0.487, + 0.108 + ], + "angle": 0, + "content": "3.3 Cuts of Models of Bounded Arithmetic" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.119, + 0.884, + 0.169 + ], + "angle": 0, + "content": "Many fundamental results in bounded arithmetic are established using model-theoretic techniques (see, e.g., the exposition of Parikh's Theorem in [Kra95]). We will provide an example in Section 5.2.2. In this section, we include the required background for the result. We assume basic familiarity with model theory." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.17, + 0.884, + 0.205 + ], + "angle": 0, + "content": "While the definitions and results presented below can be adapted to other theories of bounded arithmetic, we focus on the theory \\( S_2^1 \\) for concreteness." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.212, + 0.884, + 0.246 + ], + "angle": 0, + "content": "Definition 3.3 (Cut in a Model of Arithmetic). A cut in a model \\( M \\) of \\( \\mathsf{S}_2^1 \\) is a nonempty set \\( I \\subseteq M \\) such that:" + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.256, + 0.525, + 0.273 + ], + "angle": 0, + "content": "1. For every \\(a, b \\in M\\), if \\(b \\in I\\) and \\(a < b\\) then \\(a \\in I\\)." + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.283, + 0.468, + 0.3 + ], + "angle": 0, + "content": "2. For every \\(a \\in M\\), if \\(a \\in I\\) then \\(a + 1 \\in I\\)." + }, + { + "type": "list", + "bbox": [ + 0.135, + 0.256, + 0.525, + 0.3 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.309, + 0.338, + 0.326 + ], + "angle": 0, + "content": "In this case, we write \\( I \\subseteq_{e} M \\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.14, + 0.335, + 0.81, + 0.352 + ], + "angle": 0, + "content": "Note that a cut is not necessarily closed under operations such as addition and multiplication." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.361, + 0.886, + 0.413 + ], + "angle": 0, + "content": "Claim 3.4. Let \\( M \\) be a model of \\( S_2^1 \\), and let \\( I \\subseteq_e M \\). Moreover, assume that \\( I \\) is closed under \\( +, \\cdot \\), and # operations. Let \\( \\varphi(a, \\vec{b}) \\) be a bounded formula with all free variables displayed. Let \\( \\vec{v} \\) be elements of \\( I \\). Then for every \\( u \\in I \\)," + }, + { + "type": "equation", + "bbox": [ + 0.36, + 0.414, + 0.637, + 0.432 + ], + "angle": 0, + "content": "\\[\nI \\vDash \\varphi (u, \\vec {v}) \\quad \\Longleftrightarrow \\quad M \\vDash \\varphi (u, \\vec {v}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.44, + 0.884, + 0.473 + ], + "angle": 0, + "content": "Claim 3.4 can be proved by induction on the complexity of \\(\\varphi\\). Using the claim, one can establish the following lemma." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.482, + 0.884, + 0.518 + ], + "angle": 0, + "content": "Lemma 3.5. Let \\( M \\) be a model of \\( \\mathsf{S}_2^1 \\), and let \\( I \\subseteq_{e} M \\). Moreover, assume that \\( I \\) is closed under \\( +, \\cdot, \\) and \\( \\# \\) operations. Then \\( I \\) is a model of \\( \\mathsf{S}_2^1 \\)." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.526, + 0.884, + 0.576 + ], + "angle": 0, + "content": "Since it is not hard to check that a cut \\( I \\) as above satisfies the BASIC axioms of \\( S_2^1 \\), the proof of Lemma 3.5 essentially amounts to verifying that \\( I \\) satisfies the corresponding induction principle (see, e.g., [Kra95, Lemma 5.1.3] for a similar argument)." 
+ }, + { + "type": "text", + "bbox": [ + 0.14, + 0.577, + 0.75, + 0.595 + ], + "angle": 0, + "content": "For a model \\(M\\), we say that \\(n \\in M\\) is a length if there is \\(N \\in M\\) such that \\(n = |N|\\)." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.603, + 0.884, + 0.655 + ], + "angle": 0, + "content": "Lemma 3.6. Let \\( M_0 \\) be a nonstandard countable model of \\( \\mathsf{S}_2^1 \\). Then there is a (countable) cut \\( M \\) of \\( M_0 \\) that is a model of \\( \\mathsf{S}_2^1 \\) and a length \\( n \\in M \\), where \\( n = |e| \\) for some nonstandard \\( e \\in M \\), for which the following holds. For every \\( b \\in M \\) there is a standard number \\( k \\) such that \\( M \\models |b| \\leq n^k \\)." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.663, + 0.609, + 0.68 + ], + "angle": 0, + "content": "Proof. Let \\( e \\in M_0 \\) be nonstandard, and let \\( n \\triangleq |e| \\). Consider the set" + }, + { + "type": "equation", + "bbox": [ + 0.314, + 0.69, + 0.682, + 0.71 + ], + "angle": 0, + "content": "\\[\nI _ {e} \\triangleq \\left\\{a \\in M _ {0} \\mid a \\leq t (e) \\text {f o r s o m e} \\mathcal {L} _ {\\mathrm {B}} \\text {- t e r m} t (x) \\right\\},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.719, + 0.884, + 0.805 + ], + "angle": 0, + "content": "where we compare elements with respect to the interpretation of the relation symbol \\(\\leq\\) in \\(M_0\\). Note that \\(I_e\\) is a cut of \\(M_0\\) and \\(e \\in I_e\\). Moreover, it is not hard to check that it is closed under addition, multiplication, and smash operations. By Lemma 3.5, \\(I_e\\) is a model of \\(\\mathbb{S}_2^1\\). Finally, by construction, for every \\(b \\in I_e\\) we have \\(b \\leq t(e)\\) for some \\(\\mathcal{L}_{\\mathsf{B}}\\)-term \\(t\\). A simple induction on the structure of \\(t\\) shows the existence of a standard number \\(k\\) such that \\(|b| \\leq n^k\\) in \\(I_e\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.14, + 0.816, + 0.472, + 0.832 + ], + "angle": 0, + "content": "Finally, we will need the following definition." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.841, + 0.884, + 0.876 + ], + "angle": 0, + "content": "Definition 3.7 (Cofinal extension). We say that an extension \\( M' \\) of a model \\( M \\) is cofinal (or \\( M \\) is cofinal in \\( M' \\)) if for every \\( a \\in M' \\) there is \\( b \\in M \\) such that \\( a \\leq b \\) in \\( M' \\). If this is the case, we write \\( M' \\supseteq_{\\mathrm{cf}} M \\)." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.899, + 0.505, + 0.91 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.112, + 0.089, + 0.523, + 0.109 + ], + "angle": 0, + "content": "4 The Strength of Bounded Arithmetic" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.123, + 0.884, + 0.191 + ], + "angle": 0, + "content": "In connection with the fundamental research goal mentioned in Section 1, research on the provability of complexity bounds has achieved significant progress on two complementary fronts: the formalization of several established results from algorithms and complexity within theories of bounded arithmetic, and the unprovability of complexity bounds in the same theories, often conditional on a computational assumption." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.192, + 0.884, + 0.242 + ], + "angle": 0, + "content": "In Section 4.1, we explore what it means to formalize results from algorithms and complexity theory within the framework of bounded arithmetic, highlighting some of the nuances involved. In Section 4.2, we present some concrete details of the formalization of a formula lower bound in \\(\\mathsf{PV}_1\\)." 
+ }, + { + "type": "title", + "bbox": [ + 0.112, + 0.263, + 0.651, + 0.281 + ], + "angle": 0, + "content": "4.1 Formalization of Results from Algorithms and Complexity" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.29, + 0.884, + 0.391 + ], + "angle": 0, + "content": "Several central theorems from mathematics and computer science can be proved in bounded arithmetic. They include results from number theory [Woo81, PWW88], graph theory and extremal combinatorics [Oja04], randomized algorithms and probabilistic arguments [Jer05, LC11, Lé14], probabilistic checkable proofs [Pic15b], circuit lower bounds [MP20], expander graphs [BKKK20], linear algebra [TC21], Zhuk's CSP algorithm [Gay23, Gay24], etc. The reader can find numerous other examples in [CN10, Kra19, MP20] and references therein." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.393, + 0.884, + 0.478 + ], + "angle": 0, + "content": "In some cases, the formalization of an existing result in bounded arithmetic is straightforward, specially once an appropriate framework has been developed (e.g., the approximate counting framework of [Jér07], which enables the use of tools from probability theory in \\(\\mathsf{APC}_1\\)). However, sometimes one needs to discover a new proof whose concepts can be defined in the theory and their associated properties established using the available inductive axioms (e.g., Razborov's formalization of the Switching Lemma [Raz95a])." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.479, + 0.884, + 0.528 + ], + "angle": 0, + "content": "We provide two instructive examples below. The first is a consequence of the formalization of the PCP Theorem in \\(\\mathsf{PV}_1\\), while the second concerns different ways of formulating a circuit lower bound statement in bounded arithmetic." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.55, + 0.885, + 0.653 + ], + "angle": 0, + "content": "The PCP Theorem in \\(\\mathsf{PV}_1\\). 
Pich [Pic15b] proved the PCP Theorem in \\(\\mathsf{PV}_1\\) by formalizing Dinur's proof [Din07]. Exploiting the standard connection between PCPs and hardness of approximation, Pich's result can be used to show that \\(\\mathsf{PV}_1\\) establishes the NP-hardness of approximating the value of a \\(k\\)-SAT instance. This means in particular that, for a suitable \\(\\mathcal{L}_{\\mathsf{PV}}\\)-function symbol \\(f\\) obtained from Dinur's argument, \\(\\mathsf{PV}_1\\) proves that \\(f\\) is a gap-inducing reduction from the Boolean Formula Satisfiability Problem to \\(k\\)-SAT (for a sufficiently large \\(k\\)):" + }, + { + "type": "equation", + "bbox": [ + 0.192, + 0.664, + 0.8, + 0.723 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathrm {P V} _ {1} \\vdash \\forall \\varphi \\left(\\operatorname {F l a} (\\varphi) \\wedge \\exists y \\operatorname {S a t} (\\varphi , y) \\rightarrow k - C N F (f (\\varphi)) \\wedge \\exists z \\operatorname {S a t} (f (\\varphi), z)\\right) \\\\ \\mathrm {P V} _ {1} \\vdash \\forall \\varphi \\left(\\operatorname {F l a} (\\varphi) \\wedge \\forall y \\neg \\operatorname {S a t} (\\varphi , y) \\rightarrow k - \\operatorname {C N F} (f (\\varphi)) \\wedge \\forall z \\operatorname {V a l u e} _ {\\leq 1 - \\delta} (f (\\varphi), z)\\right) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.733, + 0.884, + 0.801 + ], + "angle": 0, + "content": "where all the expressions are quantifier-free \\(\\mathcal{L}_{\\mathrm{PV}}\\)-formulas: \\(\\mathsf{Fla}(x)\\) checks if \\(x\\) is a valid description of a Boolean formula, \\(k\\)-CNF(x) checks if \\(x\\) is a valid description of a \\(k\\)-CNF, \\(\\mathsf{Sat}(u,v)\\) checks if \\(v\\) is a satisfying assignment for \\(u\\), and \\(\\mathsf{Value}_{\\leq 1 - \\delta}(u,v)\\) holds if \\(v\\) satisfies at most a \\((1 - \\delta)\\)-fraction of the clauses in \\(u\\) (with \\(\\delta > 0\\) being a universal constant from the formalized Dinur's 
proof)." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.802, + 0.884, + 0.87 + ], + "angle": 0, + "content": "In the formalization the key point is that \\(\\mathsf{PV}_1\\) proves that the function symbol \\(f\\) behaves as expected. In practice, in order to achieve this, a typical formalization is presented in a semi-formal way, and might claim on a few occasions that some algorithm \\(f_1\\) constructed in a particular way from another algorithm \\(f_2\\) can be defined in \\(\\mathsf{PV}_1\\). This means that \\(\\mathsf{PV}_1\\) proves that \\(f_1\\) behaves as described in the definition." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.899, + 0.504, + 0.91 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.092, + 0.888, + 0.178 + ], + "angle": 0, + "content": "This is possible thanks to Cobham's characterization of FP and the axioms of \\(\\mathrm{PV}_1\\), which ensure that the theory \"understands\" how different algorithms are constructed from one another. In many cases, the verification that \\(\\mathrm{PV}_1\\) proves the desired properties is straightforward but tedious, requiring some initial setup of basic capabilities of \\(\\mathrm{PV}_1\\) (often referred to as \"bootstrapping\") which is part of the standard background in bounded arithmetic." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.198, + 0.885, + 0.25 + ], + "angle": 0, + "content": "Circuit Lower Bound Statements. We discuss two ways of formalizing a complexity lower bound. 
In this example, for a given size bound \\( s(n) \\) (e.g., \\( s(n) = n^2 \\)), we consider an \\( \\mathcal{L}_{\\mathrm{PV}} \\)-sentence \\( \\mathsf{FLB}_s^\\oplus \\) stating that Boolean formulas for the parity function on \\( n \\) bits require at least \\( s(n) \\) leaves:" + }, + { + "type": "equation", + "bbox": [ + 0.143, + 0.262, + 0.851, + 0.281 + ], + "angle": 0, + "content": "\\[\n\\forall N \\forall n \\forall F (n = | N | \\wedge n \\geq 1 \\wedge \\mathsf {F l a} (F) \\wedge \\mathsf {S i z e} (F) < s (n) \\rightarrow \\exists x (| x | \\leq n \\wedge \\mathsf {E v a l} (F, x) \\neq \\oplus (x)),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.294, + 0.884, + 0.345 + ], + "angle": 0, + "content": "where we identify \\( n \\)-bit strings with natural numbers of length at most \\( n \\), and employ a well-behaved \\( \\mathcal{L}_{\\mathrm{PV}} \\)-function symbol \\( \\oplus \\) such that \\( \\mathrm{PV}_1 \\) proves the basic properties of the parity function, e.g., \\( \\mathrm{PV}_1 \\vdash \\oplus (x1) = 1 - \\oplus (x) \\).6" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.346, + 0.884, + 0.431 + ], + "angle": 0, + "content": "Note that \\(\\mathsf{FLB}_s^\\oplus\\) is a \\(\\forall \\Sigma_1^b\\)-sentence. Consequently, if \\(\\mathsf{PV}_1 \\vdash \\mathsf{FLB}_s^\\oplus\\), we obtain via Herbrand's Theorem (Theorem 3.1) a polynomial-time algorithm \\(A\\) that, when given \\(N\\) of length \\(n\\) and the description of an \\(n\\)-bit formula \\(F\\) of size \\(< s(n)\\), \\(A(N,F)\\) outputs a string \\(x \\in \\{0,1\\}^n\\) such that \\(F(x) \\neq \\oplus(x)\\). In other words, circuit lower bounds provable in \\(\\mathsf{PV}_1\\) are constructive in the sense that they also provide an efficient refuter witnessing that \\(F\\) does not compute parity (see [CJSW21] for more on this topic)." 
+ }, + { + "type": "text", + "bbox": [ + 0.111, + 0.431, + 0.884, + 0.533 + ], + "angle": 0, + "content": "The aforementioned formalization is informally referred to as a \"Log\" formalization of circuit lower bounds. This is because the main parameter \\( n \\) is the length of a variable \\( N \\) and all objects quantified over are of length polynomial in \\( n \\). It is also possible to consider a formalization where \\( n = ||N|| \\) (\\( n \\) is the length of the length of \\( N \\)), which is known as a \"LogLog\" formalization. This allows us to quantify over exponentially larger objects, e.g., under such a formalization the entire truth-table of a formula \\( F \\) has length polynomial in the length of \\( N \\)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.534, + 0.884, + 0.634 + ], + "angle": 0, + "content": "Obtaining a Log formalization (e.g., [MP20]) is a stronger result than obtaining a LogLog formalization (e.g., [Raz95a]). In particular, in contrast to the discussion above, a witnessing theorem applied to a LogLog formalization provides a refuter with access to \\( N \\) and thus running in time \\( \\mathrm{poly}(N) = \\mathrm{poly}(2^n) \\). Conversely, the unprovability of a LogLog circuit lower bound statement (e.g., [PS21, LO23]) is a stronger result than the unprovability of a Log statement. We refer to the introduction of [MP20] for a more extensive discussion on this matter." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.656, + 0.724, + 0.674 + ], + "angle": 0, + "content": "4.2 Concrete Example: Subbotovskaya's Formula Lower Bound in \\(\\mathsf{PV}_1\\)" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.684, + 0.884, + 0.717 + ], + "angle": 0, + "content": "In this section, we explore some details of a formalization in \\(\\mathrm{PV}_1\\) that the parity function \\(\\oplus\\) on \\(n\\) bits requires Boolean formulas of size \\(\\geq n^{3/2}\\) [Sub61]. We follow the notation introduced in Section 4.1." 
+ }, + { + "type": "equation", + "bbox": [ + 0.113, + 0.727, + 0.588, + 0.746 + ], + "angle": 0, + "content": "\\[\n\\text {T h e o r m 4 . 1} \\left(\\left[ C K K ^ {+} 2 4 \\right]\\right). L e t s (n) \\triangleq n ^ {3 / 2}. T h e n P V _ {1} \\vdash F L B _ {s} ^ {\\oplus}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.757, + 0.884, + 0.79 + ], + "angle": 0, + "content": "The formalization is an adaptation of the argument presented in [Juk12, Section 6.3], which proceeds as follows:" + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.803, + 0.884, + 0.838 + ], + "angle": 0, + "content": "1. [Juk12, Lemma 6.8]: For any formula \\( F \\) on \\( n \\)-bit inputs, it is possible to fix one of its variables so that the resulting formula \\( F_{1} \\) satisfies \\( \\mathrm{Size}(F_1) \\leq (1 - 1 / n)^{3 / 2} \\cdot \\mathrm{Size}(F) \\)." + }, + { + "type": "page_footnote", + "bbox": [ + 0.133, + 0.847, + 0.581, + 0.862 + ], + "angle": 0, + "content": "We often abuse notation and treat \\( x \\) as a string in semi-formal discussions." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.899, + 0.51, + 0.91 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.134, + 0.091, + 0.88, + 0.125 + ], + "angle": 0, + "content": "2. [Juk12, Theorem 6.10]: If we apply this result \\(\\ell \\triangleq n - k\\) times, we obtain a formula \\(F_{\\ell}\\) on \\(k\\)-bit inputs such that" + }, + { + "type": "equation", + "bbox": [ + 0.156, + 0.137, + 0.882, + 0.159 + ], + "angle": 0, + "content": "\\[\n\\operatorname {S i z e} (F _ {\\ell}) \\leq \\operatorname {S i z e} (F) \\cdot (1 - 1 / n) ^ {3 / 2} \\cdot (1 - 1 / (n - 1)) ^ {3 / 2} \\dots (1 - 1 / (k + 1)) ^ {3 / 2} = \\operatorname {S i z e} (F) \\cdot (k / n) ^ {3 / 2}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.134, + 0.177, + 0.88, + 0.211 + ], + "angle": 0, + "content": "3. 
[Juk12, Example 6.11]: Finally, if the initial formula \\( F \\) computes the parity function, by setting \\( \\ell = n - 1 \\) we get \\( 1 \\leq \\operatorname{Size}(F_{\\ell}) \\leq (1/n)^{3/2} \\cdot \\operatorname{Size}(F) \\), and consequently \\( \\operatorname{Size}(F) \\geq n^{3/2} \\)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.222, + 0.881, + 0.273 + ], + "angle": 0, + "content": "We present the argument in a more constructive way when formalizing the result in \\(\\mathrm{PV}_1\\). In more detail, given a small formula \\(F\\), we recursively construct (and establish correctness by induction) an \\(n\\)-bit input \\(y\\) witnessing that \\(F\\) does not compute the parity function." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.285, + 0.881, + 0.353 + ], + "angle": 0, + "content": "Proof. We follow closely the presentation from \\(\\left[\\mathrm{CKK}^{+}24\\right]\\). For brevity, we only discuss the formalization of the main inductive argument. More details can be found in \\(\\left[\\mathrm{CKK}^{+}24\\right]\\). Given \\(b \\in \\{0,1\\}\\), we introduce the function \\(\\oplus^b(x) \\triangleq \\oplus(x) + b \\pmod{2}\\). In order to prove \\(\\mathsf{FLB}_s^\\oplus\\) in \\(\\mathsf{PV}_1\\), we explicitly consider a polynomial-time function \\(R(1^n, F, b)\\) with the following property:" + }, + { + "type": "text", + "bbox": [ + 0.156, + 0.36, + 0.836, + 0.379 + ], + "angle": 0, + "content": "If \\(\\operatorname{Size}(F) < s(n)\\) then \\(R(1^n, F, b)\\) outputs an \\(n\\)-bit string \\(y_n^b\\) such that \\(\\operatorname{Eval}(F, y_n^b) \\neq \\oplus^b(y_n^b)\\)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.385, + 0.88, + 0.419 + ], + "angle": 0, + "content": "In other words, \\( R(1^n,F,b) \\) witnesses that the formula \\( F \\) does not compute the function \\( \\oplus^b \\) over \\( n \\)-bit strings. 
Note that the correctness of \\( R \\) is captured by a sentence \\( \\operatorname{Ref}_{R,s} \\) described as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.141, + 0.431, + 0.851, + 0.452 + ], + "angle": 0, + "content": "\\[\n\\forall 1 ^ {n} \\forall F (\\mathsf {F l a} (F) \\wedge \\mathsf {S i z e} (F) < s (n) \\rightarrow | y _ {n} ^ {0} | _ {\\ell} = | y _ {n} ^ {1} | _ {\\ell} = n \\wedge F (y _ {n} ^ {0}) \\neq \\oplus^ {0} (y _ {n} ^ {0}) \\wedge F (y _ {n} ^ {1}) \\neq \\oplus^ {1} (y _ {n} ^ {1}))\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.463, + 0.88, + 0.514 + ], + "angle": 0, + "content": "where we employ the abbreviations \\( y_{n}^{0} \\triangleq R(1^{n}, F, 0) \\) and \\( y_{n}^{1} \\triangleq R(1^{n}, F, 1) \\), and for convenience use \\( |z|_{\\ell} \\) to denote the bitlength of \\( z \\). Our plan is to define \\( R \\) and show that \\( \\mathsf{PV}_1 \\vdash \\mathsf{Ref}_{R,s} \\). Note that this implies \\( \\mathsf{FLB}_s^{\\oplus} \\) in \\( \\mathsf{PV}_1 \\) by standard first-order logic reasoning." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.516, + 0.88, + 0.614 + ], + "angle": 0, + "content": "The correctness of \\( R(1^n, F, b) \\) will be established by polynomial induction on \\( N \\) (equivalently, induction on \\( n = |N| \\)). Since \\( \\operatorname{Ref}_{R,s} \\) is a universal sentence and \\( S_2^1(\\mathcal{L}_{\\mathsf{PV}}) \\) is \\( \\forall \\Sigma_1^b \\)-conservative over \\( \\mathsf{PV}_1 \\) (i.e., provability of such a sentence in \\( S_2^1(\\mathcal{L}_{\\mathsf{PV}}) \\) implies its provability in \\( \\mathsf{PV}_1 \\)), it is sufficient to describe a formalization in the more convenient theory \\( S_2^1(\\mathcal{L}_{\\mathsf{PV}}) \\). For this reason, polynomial induction for NP and coNP predicates (admissible in \\( S_2^1(\\mathcal{L}_{\\mathsf{PV}}) \\); see, e.g., [Kra95, Section 5.2]) is available during the formalization. More details follow." 
+ }, + { + "type": "text", + "bbox": [ + 0.111, + 0.619, + 0.88, + 0.652 + ], + "angle": 0, + "content": "The procedure \\( R(1^n, F, b) \\) makes use of a few polynomial-time sub-routines (briefly discussed in the comments in the pseudocode below) and is defined in the following way:" + }, + { + "type": "page_footnote", + "bbox": [ + 0.112, + 0.675, + 0.88, + 0.715 + ], + "angle": 0, + "content": "7 Actually, for technical reasons related to the induction step, we will simultaneously construct an \\(n\\)-bit input \\(y_{n}^{0}\\) witnessing that \\(F\\) does not compute the parity function and an \\(n\\)-bit input \\(y_{n}^{1}\\) witnessing that \\(F\\) does not compute the negation of the parity function." + }, + { + "type": "page_footnote", + "bbox": [ + 0.114, + 0.718, + 0.881, + 0.746 + ], + "angle": 0, + "content": "For convenience, we often write \\( 1^n \\) instead of explicitly considering parameters \\( N \\) and \\( n = |N| \\). In practice, it means that \\( R \\) gets as input \\( N \\) (together with other parameters) but with respect to \\( N \\) it only depends on \\( n = |N| \\)." + }, + { + "type": "page_footnote", + "bbox": [ + 0.115, + 0.747, + 0.881, + 0.774 + ], + "angle": 0, + "content": "\\( {}^{9} \\) Similarly,the notation \\( {\\forall 1}^{n} \\) denotes \\( \\forall N\\forall n \\) but we add the condition that \\( n = \\left| N\\right| \\) in the subsequent formula. We might also write just \\( F\\left( x\\right) \\) instead of \\( \\operatorname{Eval}\\left( {F,x}\\right) \\)" + }, + { + "type": "list", + "bbox": [ + 0.112, + 0.675, + 0.881, + 0.774 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.898, + 0.507, + 0.909 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.142, + 0.096, + 0.625, + 0.113 + ], + "angle": 0, + "content": "Input: \\(1^n\\) for some \\(n \\geq 1\\), formula \\(F\\) over \\(n\\)-bit inputs, \\(b \\in \\{0,1\\}\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.127, + 0.113, + 0.61, + 0.129 + ], + "angle": 0, + "content": "1 Let \\( s(n) \\triangleq n^{3/2} \\). If \\( \\operatorname{Size}(F) \\geq s(n) \\) or \\( \\neg \\mathsf{Fla}(F) \\) return \"error\";" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.131, + 0.837, + 0.167 + ], + "angle": 0, + "content": "2 If \\(\\operatorname{Size}(F) = 0\\), \\(F\\) computes a constant function \\(b_{F} \\in \\{0,1\\}\\). In this case, return the \\(n\\)-bit string \\(y_{n}^{b} \\triangleq y_{1}^{b} 0^{n-1}\\) such that \\(\\oplus^{b}(y_{1}^{b} 0^{n-1}) \\neq b_{F}\\);" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.168, + 0.841, + 0.226 + ], + "angle": 0, + "content": "3 Let \\(\\widetilde{F} \\triangleq \\text{Normalize}(1^n, F)\\); // \\(\\widetilde{F}\\) satisfies the conditions in the proof of [Juk12, Claim 6.9], \\(\\text{Size}(\\widetilde{F}) \\leq \\text{Size}(F)\\), \\(\\forall x \\in \\{0, 1\\}^n F(x) = \\widetilde{F}(x)\\)." + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.228, + 0.812, + 0.279 + ], + "angle": 0, + "content": "4 Let \\(\\rho \\triangleq \\text{Find-Restriction}(1^n, \\widetilde{F})\\), where \\(\\rho: [n] \\to \\{0, 1, \\star\\}\\) and \\(|\\rho^{-1}(\\star)| = n - 1\\); // \\(\\rho\\) restricts a suitable variable \\(x_i\\) to a bit \\(c_i\\), as in [Juk12, Lemma 6.8]." + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.281, + 0.753, + 0.32 + ], + "angle": 0, + "content": "5 Let \\( F' \\triangleq \\text{Apply-Restriction}(1^n, \\widetilde{F}, \\rho) \\). Moreover, let \\( b' \\triangleq b \\oplus c_i \\) and \\( n' \\triangleq n - 1 \\); // \\( F' \\) is an \\( n' \\)-bit formula; \\( \\forall z \\in \\{0, 1\\}^{\\rho^{-1}(\\star)} F'(z) = \\widetilde{F}(z \\cup x_i \\mapsto c_i) \\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.126, + 0.32, + 0.662, + 0.34 + ], + "angle": 0, + "content": "6 Let \\( y_{n'}^{b'} \\triangleq R(1^{n'}, F', b') \\) and return the \\( n \\)-bit string \\( y_n^b \\triangleq y_{n'}^{b'} \\cup y_i \\mapsto c_i \\);" + }, + { + "type": "list", + "bbox": [ + 0.126, + 0.113, + 0.841, + 0.34 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.264, + 0.344, + 0.678, + 0.361 + ], + "angle": 0, + "content": "Algorithm 1: Refuter Algorithm \\( R(1^n, F, b) \\) [CKK+24]." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.37, + 0.885, + 0.437 + ], + "angle": 0, + "content": "(The pseudocode presented above is only an informal specification of \\( R(1^n, F, b) \\). As mentioned in Section 4.1, a completely formal proof in \\( \\mathsf{PV}_1 \\) would employ Cobham's formalism and would specify how \\( R(1^n, F, b) \\) can be defined from previously defined algorithms (e.g., Apply-Restriction) via the allowed operations.)" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.438, + 0.884, + 0.507 + ], + "angle": 0, + "content": "We note that \\( R(1^n, F, b) \\) runs in time polynomial in \\( n + |F| + |b| \\) and that it is definable in \\( \\mathsf{S}_2^1(\\mathcal{L}_{\\mathsf{PV}}) \\). Next, as an instructive example, we establish the correctness \\( R(1^n, F, b) \\) in \\( \\mathsf{S}_2^1(\\mathcal{L}_{\\mathsf{PV}}) \\) by polynomial induction (PIND) for \\( \\Pi_1^b \\)-formulas, assuming that the subroutines appearing in the pseudocode of \\( R(1^n, F, b) \\) satisfy the necessary properties (provably in \\( \\mathsf{S}_2^1(\\mathcal{L}_{\\mathsf{PV}}) \\))." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.513, + 0.517, + 0.532 + ], + "angle": 0, + "content": "Lemma 4.2. Let \\(s(n) \\triangleq n^{3/2}\\). Then \\(\\mathsf{S}_2^1(\\mathcal{L}_{\\mathsf{PV}}) \\vdash \\mathsf{Ref}_{R,s}\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.113, + 0.543, + 0.471, + 0.559 + ], + "angle": 0, + "content": "Proof. We consider the formula \\(\\varphi(N)\\) defined as" + }, + { + "type": "equation", + "bbox": [ + 0.197, + 0.573, + 0.806, + 0.623 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\forall F \\forall n (n = | N | \\wedge n \\geq 1 \\wedge \\operatorname {F l a} (F) \\wedge \\operatorname {S i z e} (F) < s (n)) \\rightarrow \\\\ \\left(\\left| y _ {n} ^ {0} \\right| _ {\\ell} = \\left| y _ {n} ^ {1} \\right| _ {\\ell} = n \\wedge F \\left(y _ {n} ^ {0}\\right) \\neq \\oplus^ {0} \\left(y _ {n} ^ {0}\\right) \\wedge F \\left(y _ {n} ^ {1}\\right) \\neq \\oplus^ {1} \\left(y _ {n} ^ {1}\\right)\\right), \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.629, + 0.883, + 0.663 + ], + "angle": 0, + "content": "where as before we use \\( y_{n}^{0} \\triangleq R(1^{n}, F, 0) \\) and \\( y_{n}^{1} \\triangleq R(1^{n}, F, 1) \\). Note that \\( \\varphi(N) \\) is a \\( \\Pi_1^b \\)-formula. Below, we argue that" + }, + { + "type": "equation", + "bbox": [ + 0.272, + 0.663, + 0.721, + 0.682 + ], + "angle": 0, + "content": "\\[\n\\mathsf {S} _ {2} ^ {1} (\\mathcal {L} _ {\\mathsf {P V}}) \\vdash \\varphi (1) \\quad \\text {a n d} \\quad \\mathsf {S} _ {2} ^ {1} (\\mathcal {L} _ {\\mathsf {P V}}) \\vdash \\forall N \\varphi (\\lfloor N / 2 \\rfloor) \\rightarrow \\varphi (N).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.689, + 0.884, + 0.725 + ], + "angle": 0, + "content": "Then, by polynomial induction for \\(\\Pi_1^b\\)-formulas (available in \\(\\mathsf{S}_2^1(\\mathcal{L}_{\\mathsf{PV}})\\)) and using that \\(\\varphi(0)\\) trivially holds, it follows that \\(\\mathsf{S}_2^1(\\mathcal{L}_{\\mathsf{PV}}) \\vdash \\forall N \\varphi(N)\\). In turn, this yields \\(\\mathsf{S}_2^1(\\mathcal{L}_{\\mathsf{PV}}) \\vdash \\mathsf{Ref}_{R,s}\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.112, + 0.743, + 0.884, + 0.796 + ], + "angle": 0, + "content": "Base Case: \\(\\mathsf{S}_2^1 (\\mathcal{L}_{\\mathrm{PV}})\\vdash \\varphi (1)\\). In this case, for a given formula \\(F\\) and length \\(n\\), the hypothesis of \\(\\varphi (1)\\) is satisfied only if \\(n = 1\\), \\(F\\) is a valid description of a formula, and \\(\\operatorname {Size}(F) = 0\\). Let \\(y_1^0\\triangleq R(1,F,0)\\) and \\(y_{1}^{1}\\triangleq R(1,F,1)\\). We need to prove that" + }, + { + "type": "equation", + "bbox": [ + 0.288, + 0.806, + 0.707, + 0.827 + ], + "angle": 0, + "content": "\\[\n\\left| y _ {1} ^ {0} \\right| _ {\\ell} = \\left| y _ {1} ^ {1} \\right| _ {\\ell} = 1 \\wedge F \\left(y _ {1} ^ {0}\\right) \\neq \\oplus^ {0} \\left(y _ {1} ^ {0}\\right) \\wedge F \\left(y _ {1} ^ {1}\\right) \\neq \\oplus^ {1} \\left(y _ {1} ^ {1}\\right).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.839, + 0.884, + 0.874 + ], + "angle": 0, + "content": "Since \\( n = 1 \\) and \\( \\mathrm{Size}(F) = 0 \\), \\( F \\) evaluates to a constant \\( b_{F} \\) on every input bit. The statement above is implied by Line 2 in the definition of \\( R(n,F,b) \\)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.899, + 0.509, + 0.91 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.112, + 0.091, + 0.881, + 0.141 + ], + "angle": 0, + "content": "(Polynomial) Induction Step: \\( \\mathsf{S}_2^1 (\\mathcal{L}_{\\mathsf{PV}})\\vdash \\forall N\\varphi (\\lfloor N / 2\\rfloor)\\to \\varphi (N) \\). Fix an arbitrary \\( N \\), let \\( n\\triangleq |N| \\), and assume that \\( \\varphi (\\lfloor N / 2\\rfloor) \\) holds. 
By the induction hypothesis, for every valid formula \\( F^{\\prime} \\) with \\( \\mathrm{Size}(F^{\\prime}) < n'^{3 / 2} \\), where \\( n^\\prime \\triangleq n - 1 \\), we have" + }, + { + "type": "equation", + "bbox": [ + 0.255, + 0.153, + 0.882, + 0.173 + ], + "angle": 0, + "content": "\\[\n\\left| y _ {n ^ {\\prime}} ^ {0} \\right| _ {\\ell} = \\left| y _ {n ^ {\\prime}} ^ {1} \\right| _ {\\ell} = n ^ {\\prime} \\wedge F ^ {\\prime} \\left(y _ {n ^ {\\prime}} ^ {0}\\right) \\neq \\oplus^ {0} \\left(y _ {n ^ {\\prime}} ^ {0}\\right) \\wedge F ^ {\\prime} \\left(y _ {n ^ {\\prime}} ^ {1}\\right) \\neq \\oplus^ {1} \\left(y _ {n ^ {\\prime}} ^ {1}\\right), \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.182, + 0.484, + 0.201 + ], + "angle": 0, + "content": "where \\(y_{n^{\\prime}}^{0}\\triangleq R(1^{n^{\\prime}},F^{\\prime},0)\\) and \\(y_{n^{\\prime}}^{1}\\triangleq R(1^{n^{\\prime}},F^{\\prime},1)\\)" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.201, + 0.881, + 0.268 + ], + "angle": 0, + "content": "Now let \\( n \\geq 2 \\), and let \\( F \\) be a valid description of a formula over \\( n \\)-bit inputs with \\( \\mathrm{Size}(F) < n^{3/2} \\). By the size bound on \\( F \\), \\( R(1^n, F, b) \\) ignores Line 1. If \\( \\mathrm{Size}(F) = 0 \\), then similarly to the base case it is trivial to check that the conclusion of \\( \\varphi(N) \\) holds. Therefore, we assume that \\( \\mathrm{Size}(F) \\geq 1 \\) and \\( R(1^n, F, b) \\) does not stop at Line 2." + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.27, + 0.393, + 0.285 + ], + "angle": 0, + "content": "Consider the following definitions:" + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.298, + 0.396, + 0.318 + ], + "angle": 0, + "content": "1. \\(\\widetilde{F} \\triangleq \\mathrm{Normalize}(1^n, F)\\) (Line 3)," + }, + { + "type": "text", + "bbox": [ + 0.527, + 0.299, + 0.882, + 0.318 + ], + "angle": 0, + "content": "5. 
\\( b' \\triangleq b \\oplus c_i \\) (Line 5), where \\( \\rho \\) restricts \\( x_i \\) to \\( c_i \\)," + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.328, + 0.436, + 0.348 + ], + "angle": 0, + "content": "2. \\(\\rho \\triangleq\\) Find-Restriction \\((1^n,\\widetilde{F})\\) (Line 4)," + }, + { + "type": "text", + "bbox": [ + 0.527, + 0.327, + 0.763, + 0.348 + ], + "angle": 0, + "content": "6. \\(y_{n^{\\prime}}^{b^{\\prime}}\\triangleq R(1^{n^{\\prime}},F^{\\prime},b^{\\prime})\\) (Line 6)," + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.357, + 0.471, + 0.377 + ], + "angle": 0, + "content": "3. \\(F^{\\prime}\\triangleq\\) Apply-Restriction \\((1^{n},\\widetilde{F},\\rho)\\) (Line 5)," + }, + { + "type": "text", + "bbox": [ + 0.527, + 0.356, + 0.76, + 0.377 + ], + "angle": 0, + "content": "7. \\(y_{n}^{b}\\triangleq y_{n^{\\prime}}^{b^{\\prime}}\\cup y_{i}\\mapsto c_{i}\\) (Line 6)," + }, + { + "type": "text", + "bbox": [ + 0.135, + 0.387, + 0.31, + 0.405 + ], + "angle": 0, + "content": "4. \\(n^{\\prime}\\triangleq n - 1\\) (Line 5)," + }, + { + "type": "text", + "bbox": [ + 0.527, + 0.386, + 0.882, + 0.406 + ], + "angle": 0, + "content": "8. \\(s \\triangleq \\operatorname{Size}(F)\\), \\(\\widetilde{s} \\triangleq \\operatorname{Size}(\\widetilde{F})\\), and \\(s' \\triangleq \\operatorname{Size}(F')\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.112, + 0.419, + 0.881, + 0.454 + ], + "angle": 0, + "content": "We rely on the provability in \\(\\mathsf{S}_2^1 (\\mathcal{L}_{\\mathsf{PV}})\\) of the following statements about the subroutines of \\(R(1^{n},F,b)\\) (see [CKK+24]):" + }, + { + "type": "text", + "bbox": [ + 0.131, + 0.467, + 0.21, + 0.484 + ], + "angle": 0, + "content": "(i) \\(\\widetilde{s}\\leq s\\)" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.466, + 0.76, + 0.485 + ], + "angle": 0, + "content": "(iii) \\(\\forall x\\in \\{0,1\\} ^n\\widetilde{F} (x) = F(x)\\)" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.495, + 0.323, + 0.514 + ], + "angle": 0, + "content": "(ii) \\(s' \\leq \\widetilde{s} \\cdot (1 - 1/n)^{3/2}\\)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.494, + 0.869, + 0.514 + ], + "angle": 0, + "content": "(iv) \\(\\forall z\\in \\{0,1\\}^{\\rho^{-1}(\\star)}F'(z) = \\widetilde{F}\\big(z\\cup x_i\\mapsto c_i\\big).\\)" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.527, + 0.508, + 0.545 + ], + "angle": 0, + "content": "By Items (i) and (ii) together with the bound \\( s < n^{3/2} \\)," + }, + { + "type": "equation", + "bbox": [ + 0.17, + 0.554, + 0.823, + 0.575 + ], + "angle": 0, + "content": "\\[\n\\mathsf {S} _ {2} ^ {1} \\left(\\mathcal {L} _ {\\mathsf {P V}}\\right) \\vdash s ^ {\\prime} \\leq \\widetilde {s} \\cdot (1 - 1 / n) ^ {3 / 2} \\leq s \\cdot (1 - 1 / n) ^ {3 / 2} < n ^ {3 / 2} \\cdot (1 - 1 / n) ^ {3 / 2} = (n - 1) ^ {3 / 2}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.585, + 0.881, + 0.655 + ], + "angle": 0, + "content": "Thus \\( F' \\) is a valid formula on \\( n' \\)-bit inputs of size \\( < n'^{3/2} \\). By the first condition in the induction hypothesis (Equation (1)) and the definition of each \\( y_{n}^{b} \\), we have \\( |y_{n}^{0}|_{\\ell} = |y_{n}^{1}|_{\\ell} = n \\). 
Using the definitions listed above, the last two conditions in the induction hypothesis (Equation (1)), and Items (iii) and (iv), we derive in \\( S_{2}^{1}(\\mathcal{L}_{\\mathsf{PV}}) \\) the following statements for each \\( b \\in \\{0, 1\\} \\):" + }, + { + "type": "equation", + "bbox": [ + 0.409, + 0.664, + 0.584, + 0.686 + ], + "angle": 0, + "content": "\\[\nF ^ {\\prime} \\left(y _ {n ^ {\\prime}} ^ {b ^ {\\prime}}\\right) \\neq \\oplus^ {b ^ {\\prime}} \\left(y _ {n ^ {\\prime}} ^ {b ^ {\\prime}}\\right),\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.42, + 0.687, + 0.578, + 0.706 + ], + "angle": 0, + "content": "\\[\nF (y _ {n} ^ {b}) = F ^ {\\prime} (y _ {n ^ {\\prime}} ^ {b ^ {\\prime}}),\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.42, + 0.707, + 0.583, + 0.727 + ], + "angle": 0, + "content": "\\[\nF (y _ {n} ^ {b}) \\neq \\oplus^ {b ^ {\\prime}} (y _ {n ^ {\\prime}} ^ {b ^ {\\prime}}).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.737, + 0.6, + 0.755 + ], + "angle": 0, + "content": "Therefore, using basic facts about the function symbols \\(\\oplus^0\\) and \\(\\oplus^1\\)," + }, + { + "type": "equation", + "bbox": [ + 0.225, + 0.764, + 0.767, + 0.786 + ], + "angle": 0, + "content": "\\[\n\\oplus^ {b ^ {\\prime}} \\left(y _ {n ^ {\\prime}} ^ {b ^ {\\prime}}\\right) = \\oplus^ {b \\oplus c _ {i}} \\left(y _ {n ^ {\\prime}} ^ {b ^ {\\prime}}\\right) = c _ {i} \\oplus \\left(\\oplus^ {b} \\left(y _ {n ^ {\\prime}} ^ {b ^ {\\prime}}\\right)\\right) = c _ {i} \\oplus \\left(\\oplus^ {b} \\left(y _ {n} ^ {b}\\right) \\oplus c _ {i}\\right) = \\oplus^ {b} \\left(y _ {n} ^ {b}\\right).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.796, + 0.881, + 0.83 + ], + "angle": 0, + "content": "These statements imply that, for each \\( b \\in \\{0,1\\} \\), \\( F(y_{n}^{b}) \\neq \\oplus^{b}(y_{n}^{b}) \\). In other words, the conclusion of \\( \\varphi(N) \\) holds. This completes the proof of the induction step." 
+ }, + { + "type": "text", + "bbox": [ + 0.112, + 0.841, + 0.881, + 0.876 + ], + "angle": 0, + "content": "As explained above, the provability of \\(\\operatorname{Ref}_{R,s}\\) in \\(\\mathsf{S}_2^1 (\\mathcal{L}_{\\mathsf{PV}})\\) implies its provability in \\(\\mathsf{PV}_1\\). Since \\(\\mathsf{PV}_1 \\vdash \\operatorname{Ref}_{R,s} \\to \\mathsf{FLB}_s^\\oplus\\), this completes the proof of Theorem 4.1." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.899, + 0.508, + 0.91 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.092, + 0.887, + 0.147 + ], + "angle": 0, + "content": "We have seen that a non-trivial formula size lower bound can be established in \\(\\mathsf{PV}_1\\). More advanced circuit lower bounds are known to be provable assuming additional axioms extending \\(\\mathsf{PV}_1\\) (e.g., [Kra95, Section 15.2] and [MP20]), but their provability in \\(\\mathsf{PV}_1\\) (or equivalently, in \\(\\mathsf{S}_2^1(\\mathcal{L}_{\\mathsf{PV}})\\)) is less clear." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.155, + 0.887, + 0.19 + ], + "angle": 0, + "content": "Open Problem 4.3. For each \\( d \\geq 1 \\) and \\( \\ell \\geq 1 \\), can \\( \\mathsf{PV}_1 \\) prove that the parity function on \\( n \\) bits cannot be computed by depth- \\( d \\) circuits of size \\( n^\\ell \\)?" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.2, + 0.887, + 0.234 + ], + "angle": 0, + "content": "Open Problem 4.4. For each \\(\\ell \\geq 1\\), is there a constant \\(k = k(\\ell)\\) such that \\(\\mathsf{PV}_1\\) proves that every monotone circuit for the \\(k\\)-clique problem on \\(n\\)-vertex graphs must be of size at least \\(n^\\ell\\)?" 
+ }, + { + "type": "title", + "bbox": [ + 0.111, + 0.259, + 0.526, + 0.281 + ], + "angle": 0, + "content": "5 Unprovability of Complexity Bounds" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.293, + 0.885, + 0.378 + ], + "angle": 0, + "content": "The investigation of the unprovability of complexity bounds within theories of bounded arithmetic has a long and rich history. Much of the early work took place in the nineties, with significant results obtained by Razborov [Raz95a, Raz95b], Krajicek [Kra97], and other researchers. Since then, and in particular over the last decade, there has been renewed interest and progress in establishing unprovability results (see, e.g., [CK07, PS21, CKKO21, LO23, ABM23] and references therein)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.379, + 0.884, + 0.514 + ], + "angle": 0, + "content": "In Section 5.1, we consider the unprovability of complexity upper bounds. The unprovability of an inclusion such as \\(\\mathsf{NP} \\subseteq \\mathsf{SIZE}[n^k]\\) is equivalent to the consistency of NP \\(\\not\\subseteq \\mathsf{SIZE}[n^k]\\) with the corresponding theory. Such a consistency result establishes that, while we cannot confirm the separation is true in the standard model of natural numbers, we know it holds in a non-standard model of a theory so strong that complexity theory appears almost indistinguishable from the standard one. We stress that establishing the consistency of a lower bound is a necessary step towards showing that the lower bound is true. For this reason, the unprovability of upper bounds can be formally seen as progress towards showing unconditional complexity lower bounds." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.516, + 0.884, + 0.618 + ], + "angle": 0, + "content": "In Section 5.2, we turn our attention to the unprovability of complexity lower bounds. 
This direction is partly driven by the desire to formally understand why proving complexity lower bounds is challenging, and to explore the possibility of a more fundamental underlying reason for this difficulty. Moreover, it might provide examples of hard sentences for logical theories and of hard propositional tautologies for proof systems. The investigation of the meta-mathematics of lower bounds has also found unexpected applications in algorithms and complexity (e.g., [CIKK16])." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.619, + 0.884, + 0.687 + ], + "angle": 0, + "content": "Finally, in Section 5.3 we connect the two directions and explain how the unprovability of circuit lower bounds in \\(\\mathsf{PV}_1\\) yields the unprovability of \\(\\mathsf{P} = \\mathsf{NP}\\) in \\(\\mathsf{PV}_1\\). The latter can be seen as a weakening of the \\(\\mathsf{P}\\) versus NP problem that considers the existence of feasible proofs that \\(\\mathsf{P} = \\mathsf{NP}\\). This further motivates the investigation of the unprovability of lower bounds." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.707, + 0.429, + 0.725 + ], + "angle": 0, + "content": "5.1 Unprovability of Upper Bounds" + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.734, + 0.517, + 0.751 + ], + "angle": 0, + "content": "5.1.1 LEARN-Uniform Circuits and Unprovability" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.76, + 0.885, + 0.864 + ], + "angle": 0, + "content": "Cook and Krajicek [CK07] considered the provability of NP \\(\\subseteq\\) SIZE[poly] in bounded arithmetic and obtained a number of conditional negative results. [KO17], building on techniques from [CK07], showed that for no integer \\(k\\geq 1\\) the theory \\(\\mathsf{PV}_1\\) proves that \\(\\mathsf{P}\\subseteq \\mathsf{SIZE}[n^k ]\\) . Note that this is an unconditional result. 
Thus, for a natural theory capable of formalizing advanced results from complexity theory, such as the PCP Theorem, we can unconditionally rule out the provability of \\(\\mathsf{P}\\subseteq \\mathsf{SIZE}[n^{k}]\\) . A slightly stronger model-theoretic formulation of the result of [KO17] appears in [BM20]." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.899, + 0.511, + 0.911 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.112, + 0.092, + 0.881, + 0.125 + ], + "angle": 0, + "content": "[BKO20] obtained results for stronger theories and ruled out the provability of infinitely often inclusions. In more detail, for an \\(\\mathcal{L}_{\\mathrm{PV}}\\)-function symbol \\(h\\), consider the sentence" + }, + { + "type": "equation", + "bbox": [ + 0.186, + 0.138, + 0.808, + 0.159 + ], + "angle": 0, + "content": "\\[\n\\left. \\mathsf {U B} _ {k} ^ {i. o.} [ h ] \\triangleq \\forall 1 ^ {m} \\exists 1 ^ {n} \\exists C _ {n} \\forall x \\left(n \\geq m \\wedge | C _ {n} | \\leq n ^ {k} \\wedge \\left(| x | \\leq n \\rightarrow \\psi (n, C _ {n}, x, h)\\right)\\right), \\right. ^ {1 0}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.171, + 0.884, + 0.256 + ], + "angle": 0, + "content": "where \\(\\psi\\) is a quantifier-free \\(\\mathcal{L}_{\\mathsf{PV}}\\)-formula stating that \\(h(x) \\neq 0\\) if and only if the evaluation of the circuit \\(C_n\\) on \\(x\\) (viewed as an \\(n\\)-bit string) is 1. In other words, \\(\\mathsf{UB}_k^{i.o.}[h]\\) states that the language defined by \\(h\\) (which is in \\(\\mathsf{P}\\)) admits circuits of size at most \\(n^k\\) on infinitely many input lengths \\(n\\). [BKO20] showed that for each \\(k \\geq 1\\), there is an \\(\\mathcal{L}_{\\mathsf{PV}}\\)-function symbol \\(h\\) such that \\(\\mathsf{PV}_1\\) does not prove \\(\\mathsf{UB}_k^{i.o.}[h]\\). 
Similarly, they established that \\(\\mathsf{S}_2^1 \\not\\vdash \\mathsf{NP} \\subseteq \\text{i.o.SIZE}[n^k]\\) and \\(\\mathsf{T}_2^1 \\not\\vdash \\mathsf{P}^{\\mathsf{NP}} \\subseteq \\text{i.o.SIZE}[n^k]\\)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.257, + 0.884, + 0.342 + ], + "angle": 0, + "content": "Building on these results, [CKKO21] introduced a modular framework to establish the unprovability of circuit upper bounds in bounded arithmetic using a learning-theoretic perspective. Next, we describe how their approach can be used to show a slightly weaker form of the result from [BKO20] described above. For an \\(\\mathcal{L}_{\\mathrm{PV}}\\)-function symbol \\(h\\), we consider a sentence \\(\\mathsf{UB}_{c,k}[h]\\) stating that \\(L_{h} \\in \\mathsf{SIZE}[c \\cdot n^{k}]\\), where \\(x \\in L_{h}\\) if and only if \\(h(x) \\neq 0\\), i.e.," + }, + { + "type": "equation", + "bbox": [ + 0.167, + 0.354, + 0.884, + 0.374 + ], + "angle": 0, + "content": "\\[\n\\mathsf {U B} _ {c, k} [ h ] \\triangleq \\forall 1 ^ {n} \\exists C _ {n} \\forall x \\left(\\left| C _ {n} \\right| \\leq c \\cdot n ^ {k} \\wedge \\left(\\left| x \\right| \\leq n \\rightarrow (\\operatorname {E v a l} \\left(C _ {n}, x, n\\right) = 1 \\leftrightarrow h (x) \\neq 0)\\right)\\right), \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.387, + 0.884, + 0.455 + ], + "angle": 0, + "content": "where \\( \\operatorname{Eval}(C_n, x, n) \\) is an \\( \\mathcal{L}_{\\mathrm{PV}} \\)-function that evaluates the circuit \\( C_n \\) on the \\( n \\)-bit string described by \\( x \\). Our goal is to show that for every \\( k \\geq 1 \\) there is a function symbol \\( h \\) such that, for no choice of \\( c \\geq 1 \\), \\( \\mathrm{PV}_1 \\) proves \\( \\mathrm{UB}_{c,k}[h] \\). 
(Note that in all results discussed in this section, we consider Log formalizations, as explained in Section 4.1.)" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.472, + 0.884, + 0.593 + ], + "angle": 0, + "content": "Overview of the Approach. Note that \\(\\mathsf{UB}_{c,k}[h]\\) claims the existence of circuits for \\(L_{h}\\), i.e., it states a non-uniform upper bound. We explore the constructive aspect of \\(\\mathsf{PV}_1\\) proofs, by extracting computational information from a \\(\\mathsf{PV}_1\\)-proof that such circuits exist. The argument has a logical component, where we extract from a proof of \\(\\mathsf{UB}_{c,k}[h]\\) a \"LEARN-uniform\" construction of a sequence \\(\\{C_n\\}_n\\) of circuits for \\(L_{h}\\) and a complexity-theoretic component, where we unconditionally establish that for each \\(k\\) LEARN-uniform circuits of this form do not exist for some \\(h\\). Altogether, we get that for some \\(h\\) theory \\(\\mathsf{PV}_1\\) does not prove \\(\\mathsf{UB}_{c,k}[h]\\) (no matter the choice of \\(c\\))." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.609, + 0.884, + 0.713 + ], + "angle": 0, + "content": "LEARN-uniform circuits. We will be interested in languages that can be efficiently learned with a bounded number of equivalence queries, in the following sense. For functions \\( s, q \\colon \\mathbb{N} \\to \\mathbb{N} \\), we say that a language \\( L \\subseteq \\{0,1\\}^* \\) is in LEARN-uniform \\( ^{\\mathsf{EQ}[q]} \\) SIZE[s] if there is a polynomial-time algorithm \\( A^{\\mathsf{EQ}(L_n)}(1^n) \\) that outputs a circuit of size at most \\( s(n) \\) for \\( L_n \\) after making at most \\( q(n) \\) equivalence queries to \\( L_n \\), where \\( L_n = L \\cap \\{0,1\\}^n \\). The equivalence query oracle, given the description of an \\( n \\)-bit circuit \\( D \\) of size a most \\( s(n) \\), replies \"yes\" if \\( D \\) computes \\( L_n \\), or provides some counter-example \\( w \\) such that \\( D(w) \\neq L_n(w) \\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.111, + 0.729, + 0.884, + 0.798 + ], + "angle": 0, + "content": "Extracting LEARN-uniform circuits from \\(\\mathsf{PV}_1\\) proofs. For convenience, write \\(\\mathsf{UB}_{c,k}[h] = \\forall 1^n \\exists C_n \\forall x \\phi(1^n, C_n, x)\\) in Equation (2), where \\(\\phi(1^n, C_n, x)\\) is a quantifier-free formula. Since \\(\\mathsf{PV}_1\\) is a universal theory, under the assumption that \\(\\mathsf{PV}_1 \\vdash \\mathsf{UB}_{c,k}[h]\\), we can apply Theorem 3.2 (KPT Witnessing Theorem) to obtain the provability in \\(\\mathsf{PV}_1\\) of the disjunction" + }, + { + "type": "equation", + "bbox": [ + 0.12, + 0.809, + 0.884, + 0.838 + ], + "angle": 0, + "content": "\\[\n\\forall 1 ^ {n} \\forall x _ {1} \\dots \\forall x _ {k} (\\phi (1 ^ {n}, t _ {1} (1 ^ {n}), x _ {1}) \\vee \\phi (1 ^ {n}, t _ {2} (1 ^ {n}, x _ {1}), x _ {2}) \\vee \\dots \\vee \\phi (1 ^ {n}, t _ {k} (1 ^ {n}, x _ {1}, \\dots , x _ {k - 1}), x _ {k})) \\tag {3}\n\\]" + }, + { + "type": "page_footnote", + "bbox": [ + 0.128, + 0.847, + 0.774, + 0.862 + ], + "angle": 0, + "content": "10Recall that \\( 1^n \\) is simply a convenient notation to refer to a variable \\( n \\) that is set to \\( |N| \\) for some variable \\( N \\)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.899, + 0.508, + 0.91 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.092, + 0.884, + 0.159 + ], + "angle": 0, + "content": "where \\( t_1, \\ldots, t_k \\) are \\( \\mathcal{L}_{\\mathsf{PV}} \\)-terms and \\( k = O(1) \\). Most importantly, due to the soundness of \\( \\mathsf{PV}_1 \\), this statement is true over the standard model \\( \\mathbb{N} \\). Additionally, the terms in \\( \\mathsf{PV}_1 \\) correspond to polynomial-time algorithms. Next, we will discuss how to interpret Equation (3) over \\( \\mathbb{N} \\) as an interactive protocol and how this perspective leads to a LEARN-uniform construction." 
+ }, + { + "type": "text", + "bbox": [ + 0.111, + 0.161, + 0.885, + 0.348 + ], + "angle": 0, + "content": "The KPT Witnessing Theorem can be intuitively understood as follows [KPS90]. Consider a search problem \\( Q(1^n) \\), where given the input \\( 1^n \\), we need to find \\( D \\) such that \\( \\forall x \\phi(1^n, D, x) \\). The problem \\( Q(1^n) \\) can be solved using a \\( k \\)-round Student-Teacher protocol. In the first round, the student proposes \\( D_1 = t_1(1^n) \\) as a solution to the search problem \\( Q(1^n) \\). This solution is either correct, or there exists a counterexample \\( w_1 \\) such that \\( \\neg \\phi(1^n, t_1(1^n), w_1) \\). The teacher then provides this counterexample value \\( w_1 \\), and the protocol moves to the next round. In each subsequent round \\( 1 \\leq i < k \\), the student computes \\( D_i = t_i(1^n, w_1, \\ldots, w_{i-1}) \\) based on the counterexamples \\( w_1, \\ldots, w_{i-1} \\) received in the previous rounds. This \\( D_i \\) is either a correct solution for \\( Q(1^n) \\), in which case the problem is solved, or there is another counterexample \\( w_i \\) provided by the teacher such that \\( \\neg \\phi(1^n, t_i(1^n, w_1, \\ldots, w_{i-1}), w_i) \\). If the latter is the case, the protocol continues to the next round \\( i + 1 \\). The theorem guarantees that for every input \\( 1^n \\), the student will successfully solve the search problem \\( Q(1^n) \\) within some round \\( 1 \\leq i \\leq k \\)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.349, + 0.884, + 0.538 + ], + "angle": 0, + "content": "From a \\(\\mathrm{PV}_1\\) proof of a circuit upper bound for a language \\(L_h\\), we can derive a Student-Teacher protocol for the search problem \\(Q(1^n)\\) corresponding to Equation (3). In this protocol, the student proposes a candidate circuit \\(D\\), and the teacher provides a counterexample \\(w\\) to \\(D\\) (an input \\(w\\) such that \\(D(w) \\neq L_h(w)\\)) if one exists. 
(Note that \\(\\phi(1^n, D, x)\\) might not be true for other reasons, e.g., if \\(|D| > c \\cdot n^k\\), but in such cases there is no need to invoke the equivalence query oracle and we can proceed in the Student-Teacher protocol with, say, \\(w = 0^n\\).) The student is guaranteed to succeed after at most \\(k\\) queries, regardless of the counterexamples provided by the teacher. Finally, for every input \\(n\\), the student computes according to a constant number of fixed \\(\\mathrm{PV}_1\\) terms \\(t_1, \\ldots, t_k\\). Since a \\(\\mathrm{PV}_1\\) term is merely a composition of a finite number of \\(\\mathrm{PV}_1\\) function symbols (polynomial-time algorithms), the student's computation runs in polynomial time. Therefore, from the provability in \\(\\mathrm{PV}_1\\) of a non-uniform circuit upper bound for a language \\(L \\in \\mathsf{P}\\), we can extract a LEARN-uniform family of circuits for \\(L\\)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.554, + 0.884, + 0.708 + ], + "angle": 0, + "content": "Unconditional lower bound against LEARN-uniform circuits. The argument described above reduces the unprovability of upper bounds to a complexity-theoretic question with no reference to logic. To complete the proof, it is enough to show that for each \\( k \\) there is a language \\( L \\in \\mathbb{P} \\) such that \\( L \\notin \\mathrm{LEARN-uniform}^{\\mathrm{EQ}[O(1)]} \\mathrm{SIZE}[O(n^{k})] \\). This unconditional lower bound against LEARN-uniform circuits is established in [CKKO21] by generalizing a lower bound from [SW14] against P-uniform circuits, which can be interpreted as LEARN-uniform constructions with \\( q = 0 \\) queries. Roughly speaking, [CKKO21] shows that one can eliminate each equivalence query using a small amount of non-uniform advice, and that the base case where no queries are present (as in [SW14]) can be extended to a lower bound against a bounded amount of advice." 
+ }, + { + "type": "text", + "bbox": [ + 0.111, + 0.726, + 0.884, + 0.777 + ], + "angle": 0, + "content": "This completes the sketch of the argument. The approach is fairly general and can be adapted to other theories. The strength of the theory affects the learning model against which one needs to obtain lower bounds (e.g., by increasing the number of queries or allowing randomized learners)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.783, + 0.611, + 0.801 + ], + "angle": 0, + "content": "Open Problem 5.1. Show that \\(\\mathsf{S}_2^1\\) does not prove that \\(\\mathsf{P} \\subseteq \\mathsf{SIZE}[n^k]\\)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.808, + 0.885, + 0.876 + ], + "angle": 0, + "content": "In order to solve Open Problem 5.1, using the connection from [CKKO21] it is sufficient to show that \\(\\mathsf{P} \\not\\subset \\mathsf{LEARN}\\text{-uniform}^{\\mathsf{EQ}[q]} \\mathsf{SIZE}[O(n^{k})]\\) for \\(q = \\mathrm{poly}(n)\\). In other words, this amounts to understanding the class of languages that admit circuits that can be produced with a polynomial number of equivalence queries." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.899, + 0.509, + 0.91 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.112, + 0.091, + 0.627, + 0.11 + ], + "angle": 0, + "content": "Open Problem 5.2. Show that \\(\\mathsf{T}_2^1\\) does not prove that \\(\\mathsf{NP} \\subseteq \\mathsf{SIZE}[n^k]\\)." + }, + { + "type": "title", + "bbox": [ + 0.112, + 0.129, + 0.512, + 0.148 + ], + "angle": 0, + "content": "5.1.2 \\(\\mathsf{P} = \\mathsf{NP}\\) and Propositional Proof Complexity" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.156, + 0.886, + 0.225 + ], + "angle": 0, + "content": "Suppose that \\(\\mathsf{P}\\) is actually equal to NP. 
In this scenario, there exists a polynomial-time algorithm \\(g\\) (i.e., a \\(\\mathrm{PV}_1\\) function symbol) that can find a satisfying assignment for any given satisfiable formula. In other words, if \\(\\operatorname{Formula}(F, 1^n)\\) denotes an \\(\\mathcal{L}_{\\mathsf{PV}}\\)-formula that checks if \\(F\\) is a valid description of a formula over \\(n\\) input bits, and \\(\\operatorname{Sat}(F, x)\\) is an \\(\\mathcal{L}_{\\mathsf{PV}}\\)-formula that checks if \\(x\\) satisfies the formula encoded by \\(F\\), the sentence" + }, + { + "type": "equation", + "bbox": [ + 0.225, + 0.235, + 0.884, + 0.256 + ], + "angle": 0, + "content": "\\[\n\\varphi_ {\\mathrm {P} = \\mathrm {N P}} [ g ] \\triangleq \\forall 1 ^ {n} \\forall F \\forall x \\left(\\left(\\operatorname {F o r m u l a} (F, 1 ^ {n}) \\wedge \\operatorname {S a t} (F, x)\\right)\\rightarrow \\operatorname {S a t} (F, g (F))\\right) \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.266, + 0.344, + 0.281 + ], + "angle": 0, + "content": "is true in the standard model \\(\\mathbb{N}\\)." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.293, + 0.886, + 0.329 + ], + "angle": 0, + "content": "Open Problem 5.3. Show that for no polynomial-time function symbol \\( g \\) theory \\( \\mathrm{PV}_1 \\) proves the sentence \\( \\varphi_{\\mathrm{P} = \\mathrm{NP}}[g] \\)." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.337, + 0.884, + 0.442 + ], + "angle": 0, + "content": "Equivalently, Open Problem 5.3 states that \\(\\mathsf{PV}_1\\) (and by standard conservation results \\(S_2^1\\)) is consistent with \\(\\mathsf{P} \\neq \\mathsf{NP}\\). This means that either \\(\\mathsf{P} \\neq \\mathsf{NP}\\), as is commonly assumed, making the conjecture trivially true, or \\(\\mathsf{P} = \\mathsf{NP}\\), but this cannot be proven using only polynomial-time concepts and reasoning. 
Therefore, Open Problem 5.3 represents a formal weakening of the conjecture that \\(\\mathsf{P} \\neq \\mathsf{NP}\\). The statement is known to follow from the purely combinatorial conjecture that the extended Frege propositional proof system \\(e\\mathcal{F}\\) (see Section 3.2) is not polynomially bounded, which is a major open problem in proof complexity." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.45, + 0.884, + 0.503 + ], + "angle": 0, + "content": "Theorem 5.4 ([Coo75]). Suppose that there is a sequence \\(\\{F_n\\}_{n\\geq 1}\\) of propositional tautologies of size polynomial in \\(n\\) that require eF proofs of size \\(n^{\\omega (1)}\\). Then there is no function symbol \\(g\\) such that \\(\\mathsf{PV}_1\\) proves \\(\\varphi_{\\mathsf{P} = \\mathsf{NP}}[g]\\)." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.511, + 0.884, + 0.562 + ], + "angle": 0, + "content": "Proof. Here we only provide a sketch of the proof. More details and extensions of the result can be found in the textbooks [Kra95, Kra19]. We establish that if \\(\\mathsf{PV}_1 \\vdash \\varphi_{\\mathsf{P} = \\mathsf{NP}}[g]\\) for some \\(g\\), then every tautology has a polynomial size \\(e\\mathcal{F}\\) proof." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.563, + 0.884, + 0.597 + ], + "angle": 0, + "content": "Recall the definitions and results from Section 3.2. 
For a propositional proof system \\( P \\) (described by an \\( \\mathcal{L}_{\\mathrm{PV}} \\) function symbol), we consider an \\( \\mathcal{L}_{\\mathrm{PV}} \\)-sentence stating the soundness of \\( P \\):" + }, + { + "type": "equation", + "bbox": [ + 0.178, + 0.608, + 0.819, + 0.628 + ], + "angle": 0, + "content": "\\[\n\\mathsf {S o u n d} _ {P} \\triangleq \\forall 1 ^ {n} \\forall F \\forall \\pi (\\mathsf {F o r m u l a} (F, 1 ^ {n}) \\land \\mathsf {P r o o f} _ {P} (F, \\pi)) \\to \\forall x (| x | \\leq n \\to \\mathsf {S a t} (F, x)),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.638, + 0.531, + 0.655 + ], + "angle": 0, + "content": "where \\(\\operatorname{Proof}_P(F, \\pi)\\) states that \\(\\pi\\) is a valid \\(P\\)-proof of \\(F\\)." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.656, + 0.886, + 0.723 + ], + "angle": 0, + "content": "Note that if \\( F \\) is not a tautology then \\( g(\\neg F) \\) outputs a satisfying assignment of \\( \\neg F \\), while if \\( F \\) is a tautology then \\( \\neg F \\) admits no satisfying assignment. We consider a proof system \\( P_g \\) defined as follows: Given a valid description of an \\( n \\)-bit propositional formula \\( F \\) and a candidate proof \\( \\widetilde{\\pi} \\), \\( P_g \\) accepts \\( \\widetilde{\\pi} \\) as a proof of \\( F \\) if and only if" + }, + { + "type": "equation", + "bbox": [ + 0.371, + 0.724, + 0.625, + 0.743 + ], + "angle": 0, + "content": "\\[\ng (\\neg F) = \\widetilde {\\pi} \\quad \\text {a n d} \\quad \\neg \\operatorname {S a t} (\\neg F, \\widetilde {\\pi}) ,\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.748, + 0.886, + 0.781 + ], + "angle": 0, + "content": "where \\(\\neg F\\) represents the negation of \\(F\\). Observe that for any tautology \\(F\\), \\(\\pi_F \\triangleq g(\\neg F)\\) is a valid \\(P_g\\)-proof of \\(F\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.112, + 0.783, + 0.884, + 0.817 + ], + "angle": 0, + "content": "Note that \\(\\mathsf{PV}_1\\vdash \\mathsf{Sound}_{P_g}\\), which follows from the provability of Equation (4) and the definition of \\(P_{g}\\) using \\(g\\). Now consider the quantifier-free \\(\\mathcal{L}_{\\mathsf{PV}}\\)-formula" + }, + { + "type": "equation", + "bbox": [ + 0.262, + 0.828, + 0.734, + 0.848 + ], + "angle": 0, + "content": "\\[\n\\psi \\triangleq \\neg \\operatorname {F o r m u l a} (F, 1 ^ {n}) \\vee \\neg \\operatorname {P r o o f} _ {P _ {g}} (F, \\pi) \\vee | x | > n \\vee \\operatorname {S a t} (F, x).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.858, + 0.698, + 0.877 + ], + "angle": 0, + "content": "The provability of \\(\\forall 1^n\\forall F\\forall \\pi \\psi\\) in \\(\\mathsf{PV}_1\\) follows from the provability of \\(\\mathsf{Sound}_{P_g}\\)" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.899, + 0.51, + 0.911 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.092, + 0.887, + 0.196 + ], + "angle": 0, + "content": "Using Cook's translation (Section 3.2), the sequence of propositional formulas \\( ||\\psi||_m \\) admits \\( e\\mathcal{F} \\)-proofs of polynomial size. Moreover, given an actual \\( n \\)-bit propositional formula \\( F \\) of polynomial size and the corresponding \\( P_g \\)-proof \\( \\pi_F \\) (represented by fixed strings \\( \\langle F\\rangle \\) and \\( \\langle \\pi_F\\rangle \\)), one can show that there are polynomial size \\( e\\mathcal{F} \\) proofs of both \\( ||\\mathrm{Formula}(\\langle F\\rangle,1^n)||_{\\mathrm{poly}(n)} \\) and \\( ||\\mathrm{Proof}_{P_g}(\\langle F\\rangle,\\langle \\pi_F\\rangle)||_{\\mathrm{poly}(n)} \\). (Intuitively, this follows by an evaluation of the expressions on these fixed inputs.) 
Since \\( e\\mathcal{F} \\) is closed under substitution, we can derive in \\( e\\mathcal{F} \\) with a polynomial size proof the formula \\( ||\\mathrm{Sat}(\\langle F\\rangle,x)||_{\\mathrm{poly}(n)} \\)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.197, + 0.887, + 0.281 + ], + "angle": 0, + "content": "Finally, for every propositional formula \\( F(x) \\) on \\( n \\)-bit inputs, it is possible to efficiently prove in \\( e\\mathcal{F} \\) the propositional formula \\( ||\\mathrm{Sat}(\\langle F\\rangle ,x)||_{\\mathrm{poly}(n)}\\to F(x) \\). (This can be established by a slightly more general structural induction on formulas \\( F \\) using information about \\( ||\\cdot|| \\) and \\( \\langle \\cdot \\rangle \\).) Overall, since \\( e\\mathcal{F} \\) is closed under implication, it follows from these derivations that there is a polynomial size \\( e\\mathcal{F} \\) proof of \\( F \\). This completes the sketch of the proof of the result." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.291, + 0.887, + 0.342 + ], + "angle": 0, + "content": "Open Problem 5.3 would also follow from a proof that Buss's hierarchy of theories \\(\\mathsf{T}_2^i\\) does not collapse [KPT91], another central problem in bounded arithmetic. More precisely, it is enough to obtain the following separation." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.353, + 0.618, + 0.373 + ], + "angle": 0, + "content": "Open Problem 5.5. Show that for some \\( i > j \\geq 1 \\) we have \\( \\mathsf{T}_2^i \\neq \\mathsf{T}_2^j \\)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.382, + 0.884, + 0.433 + ], + "angle": 0, + "content": "It is known that \\(\\mathrm{PV}_1\\) proves that \\(\\mathsf{P} = \\mathsf{NP}\\) if and only if it proves that \\(\\mathsf{NP} = \\mathsf{coNP}\\). Consequently, a super-polynomial lower bound on the length of \\(e\\mathcal{F}\\) proofs also yields the consistency of \\(\\mathsf{NP} \\neq \\mathsf{coNP}\\) with \\(\\mathrm{PV}_1\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.111, + 0.434, + 0.884, + 0.538 + ], + "angle": 0, + "content": "Finally, we remark that the use of witnessing theorems alone (as done in Section 5.1.1) is probably not sufficient to settle Open Problem 5.3. This is because these theorems typically also hold when we extend the theory with all true universal statements. Thus an unprovability argument that only employs the witnessing theorem would establish unconditionally that each sentence \\(\\varphi_{\\mathsf{P} = \\mathsf{NP}}[g]\\) is false and therefore \\(\\mathsf{P}\\neq \\mathsf{NP}\\). Some researchers interpret this as evidence that the investigation of propositional proof complexity might be unavoidable. Another approach to Open Problem 5.3 is discussed in Section 5.3." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.557, + 0.429, + 0.574 + ], + "angle": 0, + "content": "5.2 Unprovability of Lower Bounds" + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.584, + 0.452, + 0.601 + ], + "angle": 0, + "content": "5.2.1 Average-Case Circuit Lower Bounds" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.61, + 0.885, + 0.661 + ], + "angle": 0, + "content": "In this section, we discuss the unprovability of strong average-case lower bounds in \\(\\mathrm{PV}_1\\). We focus on an unprovability result from [PS21], stated and proved in a slightly stronger form in [LO23]. The proof is based on a technique introduced by [Kra11] and further explored in [Pic15a]." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.662, + 0.885, + 0.73 + ], + "angle": 0, + "content": "We consider an average-case separation of co-nondeterministic circuits against non-deterministic circuits of subexponential size. 
In more detail, we investigate the provability of a sentence \\(\\mathsf{LB}^1 (s_1,s_2,m,n_0)\\) stating that, for every input length \\(n\\geq n_0\\), there is a co-nondeterministic circuit \\(C\\) of size \\(\\leq s_{1}(n)\\) such that, for every nondeterministic circuit \\(D\\) of size \\(\\leq s_2(n)\\), we have" + }, + { + "type": "equation", + "bbox": [ + 0.348, + 0.742, + 0.65, + 0.779 + ], + "angle": 0, + "content": "\\[\n\\operatorname * {P r} _ {x \\sim \\{0, 1 \\} ^ {n}} \\Big [ C (x) = D (x) \\Big ] \\leq 1 - \\frac {m (n)}{2 ^ {n}}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.79, + 0.887, + 0.825 + ], + "angle": 0, + "content": "Let \\(\\mathrm{coNSIZE}[s(n)]\\) and \\(\\mathrm{NSIZE}[s(n)]\\) refer to co-nondeterministic circuits and nondeterministic circuits of size \\(s(n)\\), respectively. More formally, \\(\\mathrm{LB}^1(s_1, s_2, m, n_0)\\) is an \\(\\mathcal{L}_{\\mathrm{PV}}\\)-sentence capturing the following lower" + }, + { + "type": "page_footnote", + "bbox": [ + 0.128, + 0.834, + 0.632, + 0.85 + ], + "angle": 0, + "content": "11Due to space constraints, we do not elaborate on the formalization of NP = coNP." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.899, + 0.509, + 0.91 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.113, + 0.093, + 0.241, + 0.106 + ], + "angle": 0, + "content": "bound statement:" + }, + { + "type": "equation", + "bbox": [ + 0.205, + 0.122, + 0.722, + 0.141 + ], + "angle": 0, + "content": "\\[\n\\forall n \\in \\operatorname {L o g L o g} \\text {w i t h} n \\geq n _ {0} \\exists C \\in \\operatorname {c o N S I Z E} \\left[ s _ {1} (n) \\right] \\forall D \\in \\operatorname {N S I Z E} \\left[ s _ {2} (n) \\right]\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.205, + 0.144, + 0.789, + 0.163 + ], + "angle": 0, + "content": "\\[\n\\exists m = m (n) \\text {d i s t i n c t} x ^ {1}, \\dots , x ^ {m} \\text {s . t . 
Error} (C, D, x ^ {i}) \\text{ for all } i \\in [ m ],\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.176, + 0.886, + 0.244 + ], + "angle": 0, + "content": "where \\( \\operatorname{Error}(C, D, x) \\) means that the circuits \\( C \\) and \\( D \\) disagree on the input \\( x \\). This statement can be seen as an average-case form of the coNP \\( \\nsubseteq \\mathsf{NP} / \\mathsf{poly} \\) conjecture if we let \\( s_1(n) = n^{O(1)} \\), \\( s_2(n) = n^{\\omega(1)} \\), and \\( m(n) = 2^n / n \\). (Note that we consider in this section a LogLog formalization, according to the notation explained in Section 4.1.)" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.255, + 0.884, + 0.293 + ], + "angle": 0, + "content": "Theorem 5.6 ([PS21, LO23]). Let \\( d \\geq 1 \\), \\( \\delta > 0 \\), and \\( n_0 \\geq 1 \\) be arbitrary parameters, and let \\( s_1(n) = n^d \\), \\( s_2(n) = 2^{n^\\delta} \\), and \\( m(n) = 2^n / n \\). Then \\( \\mathsf{PV}_1 \\) does not prove the sentence \\( \\mathsf{LB}^1(s_1, s_2, m, n_0) \\)." + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.303, + 0.774, + 0.319 + ], + "angle": 0, + "content": "In the remainder of this section, we provide some intuition about the proof of this result." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.336, + 0.884, + 0.422 + ], + "angle": 0, + "content": "Overview of the Argument. Suppose, towards a contradiction, that \\(\\mathsf{PV}_1\\vdash \\mathsf{LB}^1 (s_1,s_2,m,n_0)\\) with parameters as above. The central idea of the argument is that establishing a strong complexity lower bound within bounded arithmetic leads to a corresponding complexity upper bound. These lower and upper bounds contradict each other. Consequently, this contradiction implies the unprovability of the lower bound statement. 
In a bit more detail, the argument proceeds as follows:" + }, + { + "type": "text", + "bbox": [ + 0.131, + 0.433, + 0.884, + 0.488 + ], + "angle": 0, + "content": "(i) The provability of the average-case lower bound sentence \\(\\mathsf{LB}^1 (s_1,s_2,m,n_0)\\) implies the provability of a worst-case lower bound for coNSIZE\\([n^d]\\) against NSIZE\\([2^{n^\\delta}]\\). We formalize the latter by a sentence \\(\\mathsf{LB}_{\\mathrm{wst}}^1 (s_1,s_2,n_0)\\)." + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.497, + 0.886, + 0.571 + ], + "angle": 0, + "content": "(ii) Given any proof of \\(\\mathsf{LB}_{\\mathsf{wst}}^{1}(s_{1}, s_{2}, n_{0})\\) in \\(\\mathsf{PV}_1\\), we extract a complexity upper bound for an arbitrary co-nondeterministic circuit \\(E_{m}(x)\\) over an input \\(x\\) of length \\(m\\) and of size at most \\(\\mathrm{poly}(m)\\). More precisely, we show that there is a deterministic circuit \\(B_{m}\\) of size \\(\\leq 2^{m^{o(1)}}\\) such that \\(\\operatorname{Pr}_{x \\sim \\{0,1\\}^m}[E_m(x) = B_m(x)] \\geq 1/2 + 2^{-m^{o(1)}}\\)." + }, + { + "type": "text", + "bbox": [ + 0.122, + 0.58, + 0.886, + 0.632 + ], + "angle": 0, + "content": "(iii) We invoke an existing hardness amplification result to conclude that, on any large enough input length \\( n \\), every co-nondeterministic circuit \\( C_n \\) of size \\( \\leq n^d \\) agrees with some nondeterministic circuit \\( D_n \\) of size \\( \\leq 2^{n^\\delta} \\) on more than a \\( 1 - 1/n \\) fraction of the inputs." + }, + { + "type": "list", + "bbox": [ + 0.122, + 0.433, + 0.886, + 0.632 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.642, + 0.884, + 0.678 + ], + "angle": 0, + "content": "Since \\(\\mathsf{PV}_1\\) is a sound theory, i.e., every theorem of \\(\\mathsf{PV}_1\\) is a true sentence, Item (iii) is in contradiction with the complexity lower bound stated in \\(\\mathsf{LB}^1(s_1, s_2, m, n_0)\\). 
Consequently, \\(\\mathsf{PV}_1\\) does not prove this sentence." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.694, + 0.886, + 0.797 + ], + "angle": 0, + "content": "The most interesting step of the argument is the proof of Item (ii). The key point is that the proof of a lower bound in \\(\\mathrm{PV}_1\\) must be somewhat constructive, in the sense that it not only shows that every small circuit \\(D\\) fails to solve the problem but also produces a string \\(w\\) witnessing this fact. Below we give a simple example of its usefulness, showing a setting where a constructive lower bound yields an upper bound. Note that the application of a witnessing theorem to a LogLog formalization provides algorithms running in time poly\\((2^n)\\). The example provided next shows that this is still useful." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.807, + 0.886, + 0.877 + ], + "angle": 0, + "content": "Lemma 5.7 ([CLO24a]). Let \\( L \\in \\mathsf{NP} \\). Suppose that there is a uniform algorithm \\( R(1^n, D) \\) such that, for every co-nondeterministic circuit \\( D \\) on \\( n \\) input variables and of size at most \\( n^{\\log n} \\), \\( R(1^n, D) \\) runs in time \\( 2^{O(n)} \\) and outputs a string \\( w \\in \\{0, 1\\}^n \\) such that \\( D(w) \\neq L(w) \\). Then, for every language \\( L' \\in \\mathsf{NP} \\) and for every constant \\( \\varepsilon > 0 \\), we have \\( L' \\in \\mathsf{DTIME}[2^{n^\\varepsilon}] \\)." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.899, + 0.51, + 0.91 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.112, + 0.091, + 0.888, + 0.162 + ], + "angle": 0, + "content": "Proof. Suppose that \\( L \\in \\mathsf{NTIME}[n^d] \\) for some \\( d \\in \\mathbb{N} \\). Let \\( M' \\) be a nondeterministic machine that decides \\( L' \\) and runs in time at most \\( n^{c'} \\), where \\( c' \\in \\mathbb{N} \\). Let \\( \\varepsilon > 0 \\) be an arbitrary constant. 
Let \\( \\gamma = \\gamma(d, \\varepsilon) > 0 \\) be a small enough constant to be defined later. Finally, let \\( R \\) be the algorithm provided by the hypothesis of the lemma. We show that the following deterministic algorithm \\( B^{\\gamma}(x) \\) decides \\( L' \\) in time \\( O(2^{n^{\\varepsilon}}) \\):" + }, + { + "type": "text", + "bbox": [ + 0.142, + 0.179, + 0.404, + 0.196 + ], + "angle": 0, + "content": "Input: \\( x \\in \\{0,1\\}^n \\) for some \\( n \\geq 1 \\)." + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.197, + 0.83, + 0.228 + ], + "angle": 0, + "content": "1 Compute the description of a co-nondeterministic circuit \\( E' \\) of size at most \\( n^{2c'} \\) that decides the complement of \\( L' \\);" + }, + { + "type": "text", + "bbox": [ + 0.142, + 0.231, + 0.771, + 0.248 + ], + "angle": 0, + "content": "// In other words, \\( E'(u) = 1 - L'(u) \\) for every string \\( u \\in \\{0,1\\}^n \\)." + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.248, + 0.816, + 0.282 + ], + "angle": 0, + "content": "2 Produce the description of a co-nondeterministic circuit \\( D_{x}(y) \\), where \\( y \\in \\{0,1\\}^{n^{\\gamma}} \\), such that \\( D_{x}(y) \\) ignores its input \\( y \\) and computes according to \\( E'(x) \\);" + }, + { + "type": "text", + "bbox": [ + 0.142, + 0.283, + 0.836, + 0.332 + ], + "angle": 0, + "content": "// While the length of \\( y \\) is smaller than the length of \\( u \\), \\( D_x \\) and \\( E' \\) share the same nondeterministic input string, and \\( E' \\) sets \\( u \\) to be the fixed string \\( x \\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.126, + 0.333, + 0.425, + 0.351 + ], + "angle": 0, + "content": "3 Compute \\( w = R(1^{n^{\\gamma}}, D_x) \\in \\{0, 1\\}^{n^{\\gamma}} \\);" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.352, + 0.666, + 0.368 + ], + "angle": 0, + "content": "4 Determine the bit \\( b = L(w) \\) by a brute force computation, then return \\( b \\);" + }, + { + "type": "list", + "bbox": [ + 0.126, + 0.197, + 0.836, + 0.368 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.26, + 0.373, + 0.68, + 0.389 + ], + "angle": 0, + "content": "Algorithm 2: Algorithm \\( B^{\\gamma}(x) \\) for deciding language \\( L' \\)." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.399, + 0.886, + 0.451 + ], + "angle": 0, + "content": "First, we argue that \\( B^{\\gamma} \\) decides \\( L' \\). Since \\( D_x \\) is a co-nondeterministic circuit over inputs of length \\( m \\triangleq n^{\\gamma} \\) and has size at most \\( n^{2c'} = m^{2c'/\\gamma} \\leq m^{\\log m} \\) (for a large enough \\( m \\)), \\( R(1^{n^{\\gamma}}, D_x) \\) outputs a string \\( w \\in \\{0, 1\\}^{n^{\\gamma}} \\) such that \\( L(w) = 1 - D_x(w) \\). Consequently," + }, + { + "type": "equation", + "bbox": [ + 0.25, + 0.463, + 0.744, + 0.481 + ], + "angle": 0, + "content": "\\[\nb = L (w) = 1 - D _ {x} (w) = 1 - E ^ {\\prime} (x) = 1 - \\left(1 - L ^ {\\prime} (x)\\right) = L ^ {\\prime} (x),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.495, + 0.389, + 0.51 + ], + "angle": 0, + "content": "i.e., the output bit of \\( B^{\\gamma}(x) \\) is correct." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.512, + 0.888, + 0.616 + ], + "angle": 0, + "content": "Next, we argue that \\( B^{\\gamma} \\) runs in time at most \\( O(2^{n^{\\varepsilon}}) \\). Clearly, Steps 1-2 run in \\( \\mathrm{poly}(n) \\) time. Moreover, Step 3 runs in time \\( 2^{O(n^{\\gamma})} \\) under the assumption on the running time of \\( R(1^{n^{\\gamma}}, D_x) \\). 
This is at most \\( 2^{n^{\\varepsilon}} \\) if we set \\( \\gamma \\leq \\varepsilon / 2 \\). Finally, since \\( L \\in \\mathsf{NTIME}[n^d] \\), the brute force computation in Step 4 can be performed in deterministic time \\( 2^{O(\\ell^d)} \\) over an input of length \\( \\ell \\). Since \\( \\ell = n^{\\gamma} = |w| \\) in our case, if \\( \\gamma \\leq \\varepsilon / 2d \\) we get that Step 4 runs in time at most \\( 2^{n^{\\varepsilon}} \\). Overall, if we set \\( \\gamma \\triangleq \\varepsilon / 2d \\), it follows that \\( B^{\\gamma} \\) runs in time at most \\( O(2^{n^{\\varepsilon}}) \\). This completes the proof that \\( L' \\in \\mathsf{DTIME}[2^{n^{\\varepsilon}}] \\)." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.628, + 0.888, + 0.697 + ], + "angle": 0, + "content": "The proof of Item (ii) is significantly more sophisticated, since one does not get an algorithm \\( R \\) as above from a \\( \\mathrm{PV}_1 \\) proof of the lower bound sentence \\( \\mathsf{LB}^1(s_1, s_2, m, n_0) \\). The argument combines a witnessing theorem for sentences with more than four quantifier alternations and an ingenious technique from [Kra11] that relies on ideas from the theory of computational pseudorandomness." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.707, + 0.845, + 0.724 + ], + "angle": 0, + "content": "Open Problem 5.8. 
Strengthen the unprovability result from Theorem 5.6 in the following directions:" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.735, + 0.848, + 0.753 + ], + "angle": 0, + "content": "(a) show that it holds in the polynomial size regime, i.e., with \\( s_1(n) = n^a \\) and for some \\( s_2(n) = n^b \\);" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.764, + 0.797, + 0.781 + ], + "angle": 0, + "content": "(b) establish the unprovability of worst-case lower bounds against nondeterministic circuits;" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.792, + 0.761, + 0.809 + ], + "angle": 0, + "content": "(c) show the unprovability of average-case lower bounds against deterministic circuits;" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.821, + 0.577, + 0.837 + ], + "angle": 0, + "content": "(d) establish the same result with respect to a stronger theory." + }, + { + "type": "list", + "bbox": [ + 0.126, + 0.735, + 0.848, + 0.837 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.849, + 0.718, + 0.866 + ], + "angle": 0, + "content": "We refer to [LO23, CLO24a, CLO24b] for some related results and partial progress." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.899, + 0.509, + 0.91 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.113, + 0.092, + 0.409, + 0.108 + ], + "angle": 0, + "content": "5.2.2 Extended Frege Lower Bounds" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.119, + 0.884, + 0.168 + ], + "angle": 0, + "content": "This section covers a result on the unprovability of super-polynomial size extended Frege \\((e\\mathcal{F})\\) lower bounds in \\(\\mathrm{PV}_1\\) [KP89] (see also [CU93, Bus90]). We refer to Section 3.2 for the necessary background. We will also need the definitions and results from Section 3.3." 
+ }, + { + "type": "text", + "bbox": [ + 0.111, + 0.17, + 0.884, + 0.204 + ], + "angle": 0, + "content": "We adapt the presentation from [Kra19]. Consider the theory \\(\\mathsf{PV}_1\\) and its language \\(\\mathcal{L}_{\\mathsf{PV}}\\). We shall use the following \\(\\mathcal{L}_{\\mathsf{PV}}\\) formulas:" + }, + { + "type": "text", + "bbox": [ + 0.141, + 0.214, + 0.88, + 0.245 + ], + "angle": 0, + "content": "- \\(\\operatorname{Sat}(x, y)\\): a quantifier-free formula formalizing that \\(y\\) is a satisfying assignment of the Boolean formula \\(x\\);" + }, + { + "type": "text", + "bbox": [ + 0.142, + 0.258, + 0.373, + 0.275 + ], + "angle": 0, + "content": "- \\(\\operatorname{Taut}(x) \\triangleq \\forall y \\leq x \\operatorname{Sat}(x, y)\\);" + }, + { + "type": "text", + "bbox": [ + 0.141, + 0.287, + 0.703, + 0.303 + ], + "angle": 0, + "content": "- \\(\\operatorname{Proof}_P(x, z)\\): a quantifier-free formula formalizing that \\(z\\) is a \\(P\\)-proof of \\(x\\)." + }, + { + "type": "list", + "bbox": [ + 0.141, + 0.214, + 0.88, + 0.303 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.314, + 0.562, + 0.332 + ], + "angle": 0, + "content": "The following lemma is central to the unprovability result." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.341, + 0.884, + 0.375 + ], + "angle": 0, + "content": "Lemma 5.9. Let \\( M \\models \\mathsf{PV}_1 \\), and assume that \\( \\phi \\in M \\) is a propositional formula. 
The following statements are equivalent:" + }, + { + "type": "text", + "bbox": [ + 0.131, + 0.386, + 0.395, + 0.402 + ], + "angle": 0, + "content": "(i) There is no eF-proof of \\(\\phi\\) in \\(M\\):" + }, + { + "type": "equation", + "bbox": [ + 0.423, + 0.415, + 0.617, + 0.432 + ], + "angle": 0, + "content": "\\[\nM \\models \\forall z \\neg \\operatorname {P r o o f} _ {e \\mathcal {F}} (\\phi , z).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.449, + 0.71, + 0.466 + ], + "angle": 0, + "content": "(ii) There is an extension \\(M^{\\prime}\\supseteq M\\) (also a model of \\(\\mathsf{PV}_1\\)) in which \\(\\phi\\) is falsified:" + }, + { + "type": "equation", + "bbox": [ + 0.439, + 0.478, + 0.602, + 0.497 + ], + "angle": 0, + "content": "\\[\nM ^ {\\prime} \\vDash \\exists y \\operatorname {S a t} (\\neg \\phi , y).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.514, + 0.884, + 0.548 + ], + "angle": 0, + "content": "The proof of Lemma 5.9 proceeds by compactness and uses that the correctness of the propositional translation from \\(\\mathsf{PV}_1\\) to \\(e\\mathcal{F}\\) (Section 3.2) is also provable in \\(\\mathsf{PV}_1\\)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.558, + 0.884, + 0.593 + ], + "angle": 0, + "content": "Lemma 5.10. Let \\( M \\) be a nonstandard countable model of \\( \\mathsf{PV}_1 \\). Then it has a cofinal extension \\( M' \\supseteq_{\\mathrm{cf}} M \\) (also a model of \\( \\mathsf{PV}_1 \\)) such that every tautology in \\( M' \\) has an eF-proof in \\( M' \\)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.602, + 0.884, + 0.652 + ], + "angle": 0, + "content": "The proof of Lemma 5.10 iterates Lemma 5.9 while taking cuts to ensure that the limit extension \\( M' = \\bigcup_{i} M_i \\) (where \\( M_0 = M \\)) is cofinal in \\( M \\). Since each \\( M_i \\models \\mathsf{PV}_1 \\) and \\( \\mathsf{PV}_1 \\) is universal, we also have \\( M' \\models \\mathsf{PV}_1 \\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.141, + 0.654, + 0.578, + 0.67 + ], + "angle": 0, + "content": "We will need the following analogue of Lemma 3.6 for \\(\\mathsf{PV}_1\\)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.681, + 0.885, + 0.732 + ], + "angle": 0, + "content": "Fact 5.11. Let \\( M_0 \\) be a nonstandard countable model of \\( \\mathsf{PV}_1 \\). Then there is a (countable) cut \\( M \\) of \\( M_0 \\) that is a (nonstandard) model of \\( \\mathsf{PV}_1 \\) and a length \\( n \\in M \\), where \\( n = |a| \\) for some nonstandard \\( a \\in M \\), such that for every \\( b \\in M \\) we have \\( M \\models |b| \\leq n^k \\) for some standard number \\( k \\)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.743, + 0.884, + 0.775 + ], + "angle": 0, + "content": "The next result is a consequence of the existence of nonstandard countable models, Fact 5.11, and Lemma 5.10." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.786, + 0.714, + 0.803 + ], + "angle": 0, + "content": "Lemma 5.12. There is a model \\(M^{*}\\) of \\(\\mathsf{PV}_1\\) such that the following properties hold:" + }, + { + "type": "text", + "bbox": [ + 0.131, + 0.814, + 0.488, + 0.83 + ], + "angle": 0, + "content": "(i) Any tautology in \\(M^{*}\\) has an eF-proof in \\(M^{*}\\)" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.841, + 0.882, + 0.875 + ], + "angle": 0, + "content": "(ii) There is a nonstandard element \\(a \\in M^*\\) of length \\(n \\triangleq |a|\\) such that for any element \\(b \\in M^*\\) there is a standard number \\(k\\) such that \\(M^* \\models |b| \\leq n^k\\)." 
+ }, + { + "type": "list", + "bbox": [ + 0.126, + 0.814, + 0.882, + 0.875 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.899, + 0.508, + 0.91 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.112, + 0.092, + 0.888, + 0.125 + ], + "angle": 0, + "content": "Theorem 5.13 (Unprovability of super-polynomial size \\( e\\mathcal{F} \\) lower bounds in \\( \\mathrm{PV}_1 \\) [KP89]). Consider the sentence" + }, + { + "type": "equation", + "bbox": [ + 0.23, + 0.125, + 0.772, + 0.145 + ], + "angle": 0, + "content": "\\[\n\\Psi_ {e \\mathcal {F}} \\triangleq \\forall x \\exists \\phi \\geq x [ \\operatorname {T a u t} (\\phi) \\wedge \\forall \\pi (| \\pi | \\leq | \\phi | \\# | \\phi | \\rightarrow \\neg \\operatorname {P r o o f} _ {e \\mathcal {F}} (\\phi , \\pi)) ]. ^ {1 2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.152, + 0.418, + 0.168 + ], + "angle": 0, + "content": "The sentence \\(\\Psi_{e\\mathcal{F}}\\) is not provable in \\(\\mathsf{PV}_1\\)." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.179, + 0.886, + 0.267 + ], + "angle": 0, + "content": "Proof. Suppose \\(\\mathsf{PV}_1 \\vdash \\Psi_{e\\mathcal{F}}\\). Let \\(M^*\\), \\(a\\), and \\(n \\triangleq |a|\\) be as in Lemma 5.12. Since \\(\\Psi_{e\\mathcal{F}}\\) holds in \\(M^*\\), there is a tautology \\(\\phi \\in M^*\\) with \\(\\phi \\geq a\\) and consequently \\(|\\phi| \\geq n\\) such that \\(\\phi\\) does not have an \\(e\\mathcal{F}\\)-proof of size \\(|\\phi|\\# |\\phi|\\) in \\(M^*\\). On the other hand, by the two properties of \\(M^*\\) given by Lemma 5.12, the formula \\(\\phi\\) has an \\(e\\mathcal{F}\\)-proof of size at most \\(n^k\\) for some standard number \\(k\\). Finally, since the element \\(a\\) is nonstandard, we have \\(n^k \\leq n\\# n \\leq |\\phi|\\# |\\phi|\\) in \\(M^\\star\\). This contradiction implies that \\(\\mathsf{PV}_1\\) does not prove \\(\\Psi_{e\\mathcal{F}}\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.11, + 0.276, + 0.886, + 0.311 + ], + "angle": 0, + "content": "Open Problem 5.14. Show that \\(\\mathsf{PV}_1\\) cannot prove fixed-polynomial size lower bounds on the length of \\(e\\mathcal{F}\\) proofs." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.321, + 0.714, + 0.34 + ], + "angle": 0, + "content": "Open Problem 5.15. Establish the unprovability of the sentence \\(\\Psi_{e\\mathcal{F}}\\) in theory \\(\\mathsf{S}_2^1\\)." + }, + { + "type": "title", + "bbox": [ + 0.112, + 0.358, + 0.624, + 0.377 + ], + "angle": 0, + "content": "5.3 Connection Between Upper Bounds and Lower Bounds" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.386, + 0.886, + 0.437 + ], + "angle": 0, + "content": "In this section, we explain a result from [BKO20] showing that the unprovability of \\(\\mathsf{P} = \\mathsf{NP}\\) (Open Problem 5.3) is related to the unprovability of circuit lower bounds. For a \\(\\mathsf{PV}_1\\) function symbol \\(h\\) and a circuit size parameter \\(k\\in \\mathbb{N}\\), consider the sentence" + }, + { + "type": "equation", + "bbox": [ + 0.402, + 0.448, + 0.597, + 0.469 + ], + "angle": 0, + "content": "\\[\n\\mathsf {L B} _ {k} ^ {a. e.} (h) \\triangleq \\neg \\mathsf {U B} _ {k} ^ {i. o.} [ h ],\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.48, + 0.884, + 0.515 + ], + "angle": 0, + "content": "where \\(\\mathsf{UB}_k^{i.o.}[h]\\) is the sentence defined in Section 5.1.1. The sentence \\(\\mathsf{LB}_k^{a.e.}(h)\\) states that the language defined by \\(h\\) is hard on input length \\(n\\) for circuits of size \\(n^k\\) whenever \\(n\\) is sufficiently large." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.526, + 0.886, + 0.579 + ], + "angle": 0, + "content": "Theorem 5.16 (Unprovability of \\(\\mathsf{P} = \\mathsf{NP}\\) in \\(\\mathsf{PV}_1\\) from the unprovability of lower bounds in \\(\\mathsf{PV}_1\\) [BKO20]). 
If there exists \\(k\\in \\mathbb{N}\\) such that for no function symbol \\(h\\) theory \\(\\mathsf{PV}_1\\) proves the sentence \\(\\mathsf{LB}_k^{a.e.}(h)\\), then for no function symbol \\(f\\) theory \\(\\mathsf{PV}_1\\) proves the sentence \\(\\varphi_{\\mathsf{P} = \\mathsf{NP}}(f)\\)." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.588, + 0.884, + 0.641 + ], + "angle": 0, + "content": "Theorem 5.16 shows that if \\(\\mathrm{PV}_1\\) does not prove \\(n^k\\)-size lower bounds for a language in \\(\\mathsf{P}\\), then \\(\\mathsf{P} \\neq \\mathsf{NP}\\) is consistent with \\(\\mathrm{PV}_1\\). Note that the hypothesis of Theorem 5.16 is weaker than the assumption that \\(\\mathrm{PV}_1\\) does not prove that \\(\\mathsf{NP} \\not\\subseteq \\mathsf{SIZE}[n^k]\\) for some \\(k\\)." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.65, + 0.886, + 0.752 + ], + "angle": 0, + "content": "Sketch of the proof of Theorem 5.16. We proceed in the contrapositive. We formalize in \\(\\mathsf{PV}_1\\) the result that if \\(\\mathsf{P} = \\mathsf{NP}\\), then for any parameter \\(k\\), \\(\\mathsf{P} \\not\\subseteq\\) i.o. \\(\\mathsf{SIZE}[n^k]\\) (see, e.g., [Lip94, Theorem 3]). This result combines the collapse of \\(\\mathsf{PH}\\) to \\(\\mathsf{P}\\) with Kannan's argument [Kan82] that \\(\\mathsf{PH}\\) can define languages that are almost-everywhere hard against circuits of fixed-polynomial size. Typically, proving this claim requires showing the existence of a truth table of size \\(2^n\\) that is hard against circuits of size \\(n^k\\). However, this result might not be provable in \\(\\mathsf{PV}_1\\)." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.753, + 0.886, + 0.823 + ], + "angle": 0, + "content": "We address this issue as follows. From the provability in \\(\\mathsf{PV}_1\\) that \\(\\mathsf{P} = \\mathsf{NP}\\), it follows that for each \\(i\\geq 1\\) theory \\(\\mathsf{T}_2^i\\) collapses to \\(\\mathsf{PV}_1\\) [KPT91]. 
Recall that the dual weak pigeonhole principle (dWPHP) for \\(\\mathcal{L}_{\\mathsf{PV}}\\)-functions is provable in \\(\\mathsf{T}_2^2\\). Define a \\(\\mathsf{PV}_1\\) function symbol \\(g\\) that takes as input a circuit \\(C\\) of size \\(n^k\\) and outputs the lexicographic first \\(n^{k + 1}\\) bits of the truth table computed by \\(C\\). From dWPHP\\((g)\\), we now" + }, + { + "type": "page_footnote", + "bbox": [ + 0.112, + 0.831, + 0.886, + 0.876 + ], + "angle": 0, + "content": "12 Recall from Section 2.1 that \\( x \\# y \\triangleq 2^{|x| \\cdot |y|} \\). Consequently, if we let \\( n = |\\phi| \\), then the bound \\( |\\pi| \\leq |\\phi| \\# |\\phi| \\) translates to \\( |\\pi| \\leq n \\# n \\), where \\( n \\# n = 2^{|n| \\cdot |n|} \\) is of order \\( n^{\\log n} \\). The proof of Theorem 5.13 works with any reasonable formalization that refers to a super-polynomial size bound." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.899, + 0.51, + 0.91 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.091, + 0.884, + 0.159 + ], + "angle": 0, + "content": "derive in \\(\\mathsf{PV}_1\\) that the prefix of some truth table is not computable by circuits of size \\(n^k\\), if \\(n\\) is sufficiently large. We can implicitly extend this truth table prefix with zeroes and use the resulting truth table to define a \\(\\mathsf{PV}_1\\)-formula \\(\\varphi(x)\\) with a constant number of bounded quantifiers that defines a language \\(L\\) that is hard against circuits of size \\(n^k\\), where the hardness is provable in \\(\\mathsf{PV}_1\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.111, + 0.16, + 0.884, + 0.212 + ], + "angle": 0, + "content": "Given that the provability in \\(\\mathsf{PV}_1\\) that \\(\\mathsf{P} = \\mathsf{NP}\\) implies the provability in \\(\\mathsf{PV}_1\\) that \\(\\mathsf{PH}\\) collapses to \\(\\mathsf{P}\\), it follows that \\(\\varphi(x)\\) is equivalent in \\(\\mathsf{PV}_1\\) to the language defined by some \\(\\mathcal{L}_{\\mathsf{PV}}\\)-function \\(h\\). In other words, \\(\\mathsf{PV}_1 \\vdash \\mathsf{LB}_k^{a.e.}(h)\\), which completes the proof of Theorem 5.16." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.223, + 0.884, + 0.273 + ], + "angle": 0, + "content": "[CLO24b] shows an example of a simple lower bound that is not provable in \\(\\mathrm{PV}_1\\), under a plausible cryptographic assumption. This indicates that Theorem 5.16 might offer a viable approach towards a solution to Open Problem 5.3." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.299, + 0.482, + 0.32 + ], + "angle": 0, + "content": "6 Additional Recent Developments" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.333, + 0.884, + 0.417 + ], + "angle": 0, + "content": "The provability of the dual Weak Pigeonhole Principle (dWPHP) for polynomial-time functions is closely related to the provability of exponential circuit lower bounds for a language in deterministic exponential time [Jef07]. [Kra21] showed that dWPHP cannot be proved in \\(\\mathsf{PV}_1\\) under the assumption that \\(\\mathsf{P} \\subseteq \\mathsf{SIZE}[n^k]\\) for some constant \\(k\\). [ILW23] established the same unprovability result assuming subexponentially secure indistinguishability obfuscation and coNP \\(\\not\\subset\\) i.o.AM." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.418, + 0.884, + 0.487 + ], + "angle": 0, + "content": "[ABM23] established the unprovability of NEXP \\(\\subseteq\\) SIZE[poly] in the theory of bounded arithmetic \\(V_2^0\\) (not covered in this survey). 
Interestingly, their approach does not employ a witnessing theorem. It proceeds instead by simulating a comprehension axiom scheme assuming the provability of the upper bound sentence, eventually relying on an existing lower bound on the provability of the pigeonhole principle." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.487, + 0.884, + 0.589 + ], + "angle": 0, + "content": "[CLO24b] systematically investigates the reverse mathematics of complexity lower bounds. They demonstrated that various lower bound statements in communication complexity, error-correcting codes, and for Turing machines are equivalent to well-studied combinatorial principles, such as the weak pigeon-hole principle for polynomial-time functions and its variants. Consequently, complexity lower bounds can be regarded as fundamental axioms with significant implications. They use these equivalences to derive conditional results on the unprovability of simple lower bounds in \\(\\mathsf{APC}_1\\)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.589, + 0.884, + 0.674 + ], + "angle": 0, + "content": "\\(\\left[\\mathrm{CKK}^{+}24\\right]\\) investigates the provability of the circuit size hierarchy in bounded arithmetic, captured by a sentence CSH stating that for each \\(n \\geq n_0\\), there is a circuit of size \\(n^a\\) that does not admit an equivalent circuit of size \\(n^b\\), where \\(a > b > 1\\) and \\(n_0\\) are fixed. They showed that CSH is provable in \\(\\mathsf{T}_2^2\\), while its provability in \\(\\mathsf{T}_2^1\\) implies that \\(\\mathsf{P}^{\\mathsf{NP}} \\not\\subsetneq \\mathsf{SIZE}[n^{1 + \\varepsilon}]\\) for some \\(\\varepsilon > 0\\). Thus a better proof complexity upper bound for the circuit size hierarchy yields new circuit lower bounds." 
+ }, + { + "type": "text", + "bbox": [ + 0.111, + 0.675, + 0.884, + 0.709 + ], + "angle": 0, + "content": "[CRT24] establishes the unprovability of NP \\(\\neq\\) PSPACE in \\(\\mathsf{APC}_1\\) (with a LogLog formalization) under a strong average-case hardness assumption." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.71, + 0.884, + 0.777 + ], + "angle": 0, + "content": "[Kra24] offers a comprehensive reference on proof complexity generators, whose investigation is closely related to dWPHP and its provability in bounded arithmetic. The theory of proof complexity generators offers tautologies that serve as potential candidates for demonstrating super-polynomial extended Frege lower bounds and consequently the unprovability of \\( \\mathsf{P} = \\mathsf{NP} \\) in \\( \\mathsf{PV}_1 \\)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.778, + 0.884, + 0.864 + ], + "angle": 0, + "content": "We have not covered a number of results connected to the meta-mathematics of complexity lower bounds developed in the context of propositional proof complexity (see, e.g., [Raz15, Kra19, AR23, Kra24] and references therein). It is worth noting that results on the non-automatability of weak proof systems such as [AM20, dRGN\\(^{+}\\)21] were made possible thanks to the investigation of the meta-mathematics of proof complexity." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.899, + 0.509, + 0.91 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.092, + 0.888, + 0.144 + ], + "angle": 0, + "content": "Finally, several other recent papers have investigated directions connected to bounded arithmetic and the meta-mathematics of complexity theory, e.g., [PS22, Kha22, PS23, AKPS24, LLR24]. Due to space constraints, we are not able to cover all recent developments in this survey." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.157, + 0.885, + 0.234 + ], + "angle": 0, + "content": "Acknowledgements. 
I would like to thank Noel Arteche, Jinqiao Hu, Jan Krajicek, Moritz Müller, Mykyta Narusevych, Ján Pich, and Dimitrios Tsintsilidas for their valuable comments and feedback on an earlier version of this survey. This work received support from the Royal Society University Research Fellowship URF\\R1\\191059; the UKRI Frontier Research Guarantee EP/Y007999/1; and the Centre for Discrete Mathematics and its Applications (DIMAP) at the University of Warwick." + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.256, + 0.227, + 0.274 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.145, + 0.287, + 0.884, + 0.318 + ], + "angle": 0, + "content": "[AB09] Sanjeev Arora and Boaz Barak. Computational Complexity - A Modern Approach. Cambridge University Press, 2009." + }, + { + "type": "ref_text", + "bbox": [ + 0.13, + 0.328, + 0.885, + 0.361 + ], + "angle": 0, + "content": "[ABM23] Albert Atserias, Samuel R. Buss, and Moritz Müller. On the consistency of circuit lower bounds for non-deterministic time. In Symposium on Theory of Computing (STOC), pages 1257-1270, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.127, + 0.37, + 0.883, + 0.416 + ], + "angle": 0, + "content": "[AKPS24] Noel Arteche, Erfan Khaniki, Jan Pich, and Rahul Santhanam. From proof complexity to circuit complexity via interactive protocols. In International Colloquium on Automata, Languages, and Programming (ICALP), 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.142, + 0.426, + 0.884, + 0.445 + ], + "angle": 0, + "content": "[AM20] Albert Atserias and Moritz Müller. Automating resolution is NP-hard. J. ACM, 67(5):31:1-31:17, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.145, + 0.454, + 0.885, + 0.486 + ], + "angle": 0, + "content": "[AR23] Per Austrin and Kilian Risse. Sum-of-squares lower bounds for the minimum circuit size problem. In Computational Complexity Conference (CCC), pages 31:1-31:21, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.142, + 0.495, + 0.885, + 0.527 + ], + "angle": 0, + "content": "[AW09] Scott Aaronson and Avi Wigderson. Algebraization: A new barrier in complexity theory. Transactions on Computation Theory (TOCT), 1(1), 2009." + }, + { + "type": "ref_text", + "bbox": [ + 0.142, + 0.537, + 0.885, + 0.569 + ], + "angle": 0, + "content": "[Bey09] Olaf Beyersdorff. On the correspondence between arithmetic theories and propositional proof systems – a survey. Mathematical Logic Quarterly, 55(2):116–137, 2009." + }, + { + "type": "ref_text", + "bbox": [ + 0.136, + 0.578, + 0.885, + 0.609 + ], + "angle": 0, + "content": "[BGS75] Theodore P. Baker, John Gill, and Robert Solovay. Relativizatons of the \\( \\mathsf{P} = ? \\) NP Question. SIAM J. Comput., 4(4):431-442, 1975." + }, + { + "type": "ref_text", + "bbox": [ + 0.122, + 0.619, + 0.885, + 0.651 + ], + "angle": 0, + "content": "[BKKK20] Sam R. Buss, Valentine Kabanets, Antonina Kolokolova, and Michal Koucký. Expander construction in VNC1. Annals of Pure and Applied Logic, 171(7):102796, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.134, + 0.661, + 0.885, + 0.693 + ], + "angle": 0, + "content": "[BKO20] Jan Bydzovsky, Jan Krajíček, and Igor C. Oliveira. Consistency of circuit lower bounds with bounded theories. *Logical Methods in Computer Science*, 16(2), 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.135, + 0.702, + 0.885, + 0.734 + ], + "angle": 0, + "content": "[BKT14] Samuel R. Buss, Leszek A. Kołodziejczyk, and Neil Thapen. Fragments of approximate counting. Journal of Symbolic Logic, 79(2):496-525, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.142, + 0.744, + 0.885, + 0.775 + ], + "angle": 0, + "content": "[BM20] Jan Bydzovsky and Moritz Müller. Polynomial time ultrapowers and the consistency of circuit lower bounds. Arch. Math. Log., 59(1-2):127-147, 2020." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.143, + 0.785, + 0.581, + 0.803 + ], + "angle": 0, + "content": "[Bus86] Samuel R. Buss. Bounded Arithmetic. Bibliopolis, 1986." + }, + { + "type": "ref_text", + "bbox": [ + 0.143, + 0.812, + 0.885, + 0.859 + ], + "angle": 0, + "content": "[Bus90] Samuel R. Buss. On model theory for intuitionistic bounded arithmetic with applications to independence results. In *Feasible Mathematics: A Mathematical Sciences Institute Workshop, Ithaca, New York, June 1989*, pages 27-47. Springer, 1990." + }, + { + "type": "list", + "bbox": [ + 0.122, + 0.287, + 0.885, + 0.859 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.899, + 0.509, + 0.91 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.141, + 0.092, + 0.885, + 0.124 + ], + "angle": 0, + "content": "[Bus94] Samuel R. Buss. On herbrand's theorem. In Selected Papers from the Logic and Computational Complexity International Workshop (LCC), pages 195-209, 1994." + }, + { + "type": "ref_text", + "bbox": [ + 0.143, + 0.134, + 0.883, + 0.166 + ], + "angle": 0, + "content": "[Bus97] Samuel R. Buss. Bounded arithmetic and propositional proof complexity. In Logic of Computation, pages 67-121. Springer Berlin Heidelberg, 1997." + }, + { + "type": "ref_text", + "bbox": [ + 0.125, + 0.174, + 0.883, + 0.207 + ], + "angle": 0, + "content": "\\(\\left[\\mathrm{CHO}^{+}22\\right]\\) Lijie Chen, Shuichi Hirahara, Igor C. Oliveira, Jan Pich, Ninad Rajgopal, and Rahul Santhanam. Beyond natural proofs: Hardness magnification and locality. J. ACM, 69(4):25:1-25:49, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.128, + 0.215, + 0.883, + 0.261 + ], + "angle": 0, + "content": "[CIKK16] Marco L. Carmosino, Russell Impagliazzo, Valentine Kabanets, and Antonina Kolokolova. Learning algorithms from natural proofs. In Conference on Computational Complexity (CCC), pages 10:1-10:24, 2016." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.127, + 0.271, + 0.882, + 0.305 + ], + "angle": 0, + "content": "[CJsw21] Lijie Chen, Ce Jin, Rahul Santhanam, and Ryan Williams. Constructive separations and their consequences. In Symposium on Foundations of Computer Science (FOCS), 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.146, + 0.311, + 0.885, + 0.344 + ], + "angle": 0, + "content": "[CK07] Stephen A. Cook and Jan Krajček. Consequences of the provability of NP \\(\\subseteq\\) P/poly. Journal of Symbolic Logic, 72(4):1353-1371, 2007." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.353, + 0.883, + 0.385 + ], + "angle": 0, + "content": "\\(\\left[\\mathrm{CKK}^{+}24\\right]\\) Marco Carmosino, Valentine Kabanets, Antonina Kolokolova, Igor C. Oliveira, and Dimitrios Tsintsili-das. Provability of the circuit size hierarchy and its consequences. Preprint, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.393, + 0.884, + 0.441 + ], + "angle": 0, + "content": "[CKKO21] Marco Carmosino, Valentine Kabanets, Antonina Kolokolova, and Igor C. Oliveira. Learn-uniform circuit lower bounds and provability in bounded arithmetic. In Symposium on Foundations of Computer Science (FOCS), 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.129, + 0.449, + 0.882, + 0.482 + ], + "angle": 0, + "content": "[CLO24a] Lijie Chen, Jiatu Li, and Igor C. Oliveira. On the unprovability of circuit size bounds in intuitionistic \\( S_2^1 \\). Preprint: arXiv:2404.11841, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.129, + 0.491, + 0.882, + 0.523 + ], + "angle": 0, + "content": "[CLO24b] Lijie Chen, Jiatu Li, and Igor C. Oliveira. Reverse mathematics of complexity lower bounds. In Symposium on Foundations of Computer Science (FOCS), 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.146, + 0.531, + 0.883, + 0.563 + ], + "angle": 0, + "content": "[CN10] Stephen A. Cook and Phuong Nguyen. Logical Foundations of Proof Complexity. 
Cambridge University Press, 2010." + }, + { + "type": "ref_text", + "bbox": [ + 0.142, + 0.573, + 0.882, + 0.605 + ], + "angle": 0, + "content": "[Bcob65] Alan Cobham. The intrinsic computational difficulty of functions. Proc. Logic, Methodology and Philosophy of Science, pages 24-30, 1965." + }, + { + "type": "ref_text", + "bbox": [ + 0.142, + 0.614, + 0.882, + 0.646 + ], + "angle": 0, + "content": "[Co075] Stephen A. Cook. Feasibly constructive proofs and the propositional calculus (preliminary version). In Symposium on Theory of Computing (STOC), pages 83-97, 1975." + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.655, + 0.882, + 0.687 + ], + "angle": 0, + "content": "[CRT24] Lijie Chen, Ron D. Rothblum, and Roei Tell. Fiat-Shamir in the plain model from derandomization. Electron. Colloquium Comput. Complex., TR24-116, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.146, + 0.695, + 0.882, + 0.728 + ], + "angle": 0, + "content": "[CU93] Stephen Cook and Alasdair Urquhart. Functional interpretations of feasibly constructive arithmetic. Annals of Pure and Applied Logic, 63(2):103-200, 1993." + }, + { + "type": "ref_text", + "bbox": [ + 0.145, + 0.736, + 0.7, + 0.754 + ], + "angle": 0, + "content": "[Din07] Irit Dinur. The PCP theorem by gap amplification. J. ACM, 54(3):12, 2007." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.762, + 0.884, + 0.809 + ], + "angle": 0, + "content": "[dRGN+21] Susanna F. de Rezende, Mika Göös, Jakob Nordström, Toniann Pitassi, Robert Robere, and Dmitry Sokolov. Automating algebraic proof systems is NP-hard. In Symposium on Theory of Computing (STOC), pages 209-222, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.142, + 0.818, + 0.733, + 0.835 + ], + "angle": 0, + "content": "[Gay23] Azza Gaysin. Proof complexity of CSP. ArXiv e-Print arXiv:2201.00913, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.142, + 0.844, + 0.884, + 0.875 + ], + "angle": 0, + "content": "[Gay24] Azza Gaysin. 
Proof complexity of universal algebra in a CSP dichotomy proof. ArXiv e-Print arXiv:2403.06704, 2024." + }, + { + "type": "list", + "bbox": [ + 0.124, + 0.092, + 0.885, + 0.875 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.899, + 0.509, + 0.91 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.144, + 0.091, + 0.837, + 0.11 + ], + "angle": 0, + "content": "[HP93] Petr Hajek and Pavel Pudlák. Metamathematics of first-order arithmetic. Springer-Verlag, 1993." + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.118, + 0.885, + 0.15 + ], + "angle": 0, + "content": "[ILW23] Rahul Ilango, Jiatu Li, and Ryan Williams. Indistinguishability obfuscation, range avoidance, and bounded arithmetic. In Symposium on Theory of Computing (STOC), pages 1076–1089. ACM, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.149, + 0.16, + 0.885, + 0.192 + ], + "angle": 0, + "content": "[Jer04] Emil Jerabek. Dual weak pigeonhole principle, boolean complexity, and derandomization. Annals of Pure and Applied Logic, 129(1-3):1-37, 2004." + }, + { + "type": "ref_text", + "bbox": [ + 0.149, + 0.201, + 0.885, + 0.234 + ], + "angle": 0, + "content": "[Jef05] Emil Jerabek. Weak pigeonhole principle and randomized computation. PhD thesis, Charles University in Prague, 2005." + }, + { + "type": "ref_text", + "bbox": [ + 0.15, + 0.244, + 0.885, + 0.274 + ], + "angle": 0, + "content": "[Jer06] Emil Jerabek. The strength of sharply bounded induction. Mathematical Logic Quarterly, 52(6):613-624, 2006." + }, + { + "type": "ref_text", + "bbox": [ + 0.149, + 0.285, + 0.885, + 0.316 + ], + "angle": 0, + "content": "[Jer07] Emil Jerabek. Approximate counting in bounded arithmetic. Journal of Symbolic Logic, 72(3):959-993, 2007." + }, + { + "type": "ref_text", + "bbox": [ + 0.146, + 0.326, + 0.773, + 0.344 + ], + "angle": 0, + "content": "[Juk12] Stasys Jukna. Boolean Function Complexity: Advances and Frontiers. 
Springer, 2012." + }, + { + "type": "ref_text", + "bbox": [ + 0.142, + 0.353, + 0.885, + 0.384 + ], + "angle": 0, + "content": "[Kan82] Ravi Kannan. Circuit-size lower bounds and non-reducibility to sparse sets. Information and Control, 55(1-3):40-56, 1982." + }, + { + "type": "ref_text", + "bbox": [ + 0.142, + 0.394, + 0.885, + 0.426 + ], + "angle": 0, + "content": "[Kha22] Erfan Khaniki. Nisan-Wigderson generators in proof complexity: New lower bounds. In Computational Complexity Conference (CCC), pages 17:1-17:15, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.145, + 0.436, + 0.885, + 0.468 + ], + "angle": 0, + "content": "[KO17] Jan Krajíček and Igor C. Oliveira. Unprovability of circuit upper bounds in Cook's theory PV. *Logical Methods in Computer Science*, 13(1), 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.148, + 0.477, + 0.885, + 0.51 + ], + "angle": 0, + "content": "[KP89] Jan Krajíček and Pavel Pudlák. Propositional provability and models of weak arithmetic. In CSL'89: Proceedings of the 3rd Workshop on Computer Science Logic, pages 193-210, 1989." + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.519, + 0.885, + 0.564 + ], + "angle": 0, + "content": "[KPS90] Jan Krajíček, Pavel Pudlák, and Jíří Sgall. Interactive computations of optimal solutions. In International Symposium on Mathematical Foundations of Computer Science (MFCS), volume 452, pages 48-60, 1990." + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.575, + 0.885, + 0.607 + ], + "angle": 0, + "content": "[KPT91] Jan Krajíček, Pavel Pudlák, and Gaisi Takeuti. Bounded arithmetic and the polynomial hierarchy. Annals of Pure and Applied Logic, 52(1-2):143-153, 1991." + }, + { + "type": "ref_text", + "bbox": [ + 0.142, + 0.617, + 0.885, + 0.649 + ], + "angle": 0, + "content": "[Kra95] Jan Krajíček. Bounded Arithmetic, Propositional Logic, and Complexity Theory. Encyclopedia of Mathematics and its Applications. Cambridge University Press, 1995." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.142, + 0.659, + 0.885, + 0.691 + ], + "angle": 0, + "content": "[Kra97] Jan Krajicek. Interpolation theorems, lower bounds for proof systems, and independence results for bounded arithmetic. J. Symb. Log., 62(2):457-486, 1997." + }, + { + "type": "ref_text", + "bbox": [ + 0.142, + 0.7, + 0.885, + 0.732 + ], + "angle": 0, + "content": "[Kra11] Jan Krajicek. On the proof complexity of the Nisan-Wigderson generator based on a hard NP \\(\\cap\\) coNP function. Journal of Mathematical Logic, 11(1), 2011." + }, + { + "type": "ref_text", + "bbox": [ + 0.142, + 0.741, + 0.885, + 0.773 + ], + "angle": 0, + "content": "[Kra19] Jan Krajíček. Proof Complexity. Encyclopedia of Mathematics and its Applications. Cambridge University Press, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.142, + 0.783, + 0.885, + 0.815 + ], + "angle": 0, + "content": "[Kra21] Jan Krajíček. Small circuits and dual weak PHP in the universal theory of p-time algorithms. ACM Transactions on Computational Logic (TOCL), 22(2):1-4, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.142, + 0.824, + 0.885, + 0.856 + ], + "angle": 0, + "content": "[Kra24] Jan Krajicek. Proof Complexity Generators. Monograph available at https://www.karlin.mff.cuni.cz/~krajicek/gdraft.html, 2024." + }, + { + "type": "list", + "bbox": [ + 0.138, + 0.091, + 0.885, + 0.856 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.899, + 0.509, + 0.91 + ], + "angle": 0, + "content": "26" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.149, + 0.091, + 0.885, + 0.123 + ], + "angle": 0, + "content": "[Lê14] Dai Tri Man Lê. Bounded Arithmetic and Formalizing Probabilistic Proofs. PhD thesis, University of Toronto, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.146, + 0.134, + 0.883, + 0.166 + ], + "angle": 0, + "content": "[LC11] Dai Tri Man Le and Stephen A. Cook. Formalizing randomized matching algorithms. Log. 
Methods Comput. Sci., 8(3), 2011." + }, + { + "type": "ref_text", + "bbox": [ + 0.146, + 0.175, + 0.885, + 0.207 + ], + "angle": 0, + "content": "[Lip94] Richard J. Lipton. Some consequences of our failure to prove non-linear lower bounds on explicit functions. In Structure in Complexity Theory Conference (CCC), pages 79-87, 1994." + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.217, + 0.883, + 0.248 + ], + "angle": 0, + "content": "[LLR24] Jiawei Li, Yuhao Li, and Hanlin Ren. Meta-mathematics of resolution lower bounds: A TFNP perspective. Preprint, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.259, + 0.882, + 0.29 + ], + "angle": 0, + "content": "[LO23] Jiatu Li and Igor C. Oliveira. Unprovability of strong complexity lower bounds in bounded arithmetic. In Symposium on Theory of Computing (STOC), 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.135, + 0.3, + 0.884, + 0.33 + ], + "angle": 0, + "content": "[McK10] Richard McKinley. A sequent calculus demonstration of Herbrand's theorem. arXiv preprint arXiv:1007.3414, 2010." + }, + { + "type": "ref_text", + "bbox": [ + 0.145, + 0.341, + 0.884, + 0.373 + ], + "angle": 0, + "content": "[MP20] Moritz Müller and Ján Pich. Feasibly constructive proofs of succinct weak circuit lower bounds. Annals of Pure and Applied Logic, 171(2), 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.13, + 0.383, + 0.882, + 0.414 + ], + "angle": 0, + "content": "[MPW02] Alexis Maciel, Toniann Pitassi, and Alan R. Woods. A new proof of the weak pigeonhole principle. Journal of Computer and System Sciences, 64(4):843-872, 2002." + }, + { + "type": "ref_text", + "bbox": [ + 0.145, + 0.424, + 0.875, + 0.441 + ], + "angle": 0, + "content": "[Oja04] Kerry Ojakian. Combinatorics in Bounded Arithmetic. PhD thesis, Carnegie Mellon University, 2004." + }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.451, + 0.879, + 0.467 + ], + "angle": 0, + "content": "[Par71] Rohit Parikh. 
Existence and feasibility in arithmetic. Journal of Symbolic Logic, 36(3):494-508, 1971." + }, + { + "type": "ref_text", + "bbox": [ + 0.141, + 0.478, + 0.882, + 0.507 + ], + "angle": 0, + "content": "[Pic15a] Jan Pich. Circuit lower bounds in bounded arithmetics. Annals of Pure and Applied Logic, 166(1):29-45, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.139, + 0.519, + 0.883, + 0.55 + ], + "angle": 0, + "content": "[Pic15b] Jan Pich. Logical strength of complexity theory and a formalization of the PCP theorem in bounded arithmetic. *Logical Methods in Computer Science*, 11(2), 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.15, + 0.56, + 0.884, + 0.592 + ], + "angle": 0, + "content": "[PS21] Jan Pich and Rahul Santhanam. Strong co-nondeterministic lower bounds for NP cannot be proved feasibly. In Symposium on Theory of Computing (STOC), pages 223-233, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.151, + 0.602, + 0.885, + 0.634 + ], + "angle": 0, + "content": "[PS22] Jan Pich and Rahul Santhanam. Learning algorithms versus automatability of Frege systems. In International Colloquium on Automata, Languages, and Programming (ICALP), pages 101:1-101:20, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.151, + 0.644, + 0.884, + 0.675 + ], + "angle": 0, + "content": "[PS23] Ján Pich and Rahul Santhanam. Towards \\(\\mathrm{P} \\neq \\mathrm{NP}\\) from extended Frege lower bounds. *Electron. Colloquium Comput. Complex.*, TR23-199, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.144, + 0.685, + 0.885, + 0.73 + ], + "angle": 0, + "content": "[Pud06] Pavel Pudlák. Consistency and games - in search of new combinatorial principles. In V. Stoltenberg-Hansen and J. Väätänen, editors, Logic Colloquium '03, volume 24 of Lecture Notes in Logic, pages 244-281. ASL, 2006." + }, + { + "type": "ref_text", + "bbox": [ + 0.13, + 0.741, + 0.883, + 0.773 + ], + "angle": 0, + "content": "[PWW88] Jeff B. Paris, A. J. Wilkie, and Alan R. Woods. 
Provability of the pigeonhole principle and the existence of infinitely many primes. J. Symb. Log., 53(4):1235-1244, 1988." + }, + { + "type": "ref_text", + "bbox": [ + 0.136, + 0.783, + 0.884, + 0.814 + ], + "angle": 0, + "content": "[Raz95a] Alexander A. Razborov. Bounded arithmetic and lower bounds in boolean complexity. In P. Clote and J. Remmel, editors, Feasible Mathematics II, pages 344-386. Birkhäuser, 1995." + }, + { + "type": "ref_text", + "bbox": [ + 0.135, + 0.825, + 0.884, + 0.855 + ], + "angle": 0, + "content": "[Raz95b] Alexander A Razborov. Unprovability of lower bounds on circuit size in certain fragments of bounded arithmetic. Izvestiya: mathematics, 59(1):205, 1995." + }, + { + "type": "list", + "bbox": [ + 0.13, + 0.091, + 0.885, + 0.855 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.899, + 0.509, + 0.91 + ], + "angle": 0, + "content": "27" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.141, + 0.091, + 0.884, + 0.124 + ], + "angle": 0, + "content": "[Raz15] Alexander A. Razborov. Pseudorandom generators hard for \\( k \\)-DNF resolution and polynomial calculus resolution. Annals of Mathematics, pages 415-472, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.145, + 0.134, + 0.885, + 0.166 + ], + "angle": 0, + "content": "[RR97] Alexander A. Razborov and Steven Rudich. Natural proofs. Journal of Computer and System Sciences, 55(1):24-35, 1997." + }, + { + "type": "ref_text", + "bbox": [ + 0.143, + 0.175, + 0.882, + 0.207 + ], + "angle": 0, + "content": "[Sub61] Bella A. Subbotovskaya. Realization of linear functions by formulas using \\(+, \\cdot, -\\). In Soviet Math. Dokl, 1961." + }, + { + "type": "ref_text", + "bbox": [ + 0.144, + 0.217, + 0.883, + 0.248 + ], + "angle": 0, + "content": "[SW14] Rahul Santhanam and Ryan Williams. On uniformity and circuit lower bounds. Computational Complexity, 23(2):177-205, 2014." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.147, + 0.259, + 0.884, + 0.289 + ], + "angle": 0, + "content": "[TC21] Iddo Tzameret and Stephen A. Cook. Uniform, integral, and feasible proofs for the determinant identities. J. ACM, 68(2):12:1-12:80, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.138, + 0.3, + 0.884, + 0.331 + ], + "angle": 0, + "content": "[Woo81] Alan R. Woods. Some problems in logic and number theory and their connections. PhD thesis, University of Manchester, 1981." + }, + { + "type": "ref_text", + "bbox": [ + 0.144, + 0.341, + 0.884, + 0.373 + ], + "angle": 0, + "content": "[WP87] Alex J. Wilkie and Jeff B. Paris. On the scheme of induction for bounded arithmetic formulas. Ann. Pure Appl. Log., 35:261-302, 1987." + }, + { + "type": "list", + "bbox": [ + 0.138, + 0.091, + 0.885, + 0.373 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.899, + 0.508, + 0.91 + ], + "angle": 0, + "content": "28" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_04xxx/2504.04416/4ab48d87-f77d-4021-9081-0dbea7a7ea19_origin.pdf b/data/2025/2504_04xxx/2504.04416/4ab48d87-f77d-4021-9081-0dbea7a7ea19_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..880ce7bc12c4018f3d4a60f2433efbcc84e52677 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/4ab48d87-f77d-4021-9081-0dbea7a7ea19_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:851016c794f07664b7459734d810b3b5777f5abd0eaad32c3cfa7e3fbad686bd +size 550032 diff --git a/data/2025/2504_04xxx/2504.04416/full.md b/data/2025/2504_04xxx/2504.04416/full.md new file mode 100644 index 0000000000000000000000000000000000000000..7ff66d968f4e07bdfc7ccab4c73cde47f80c88d0 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/full.md @@ -0,0 +1,836 @@ +# SIGACT News Complexity Theory Column + +# Meta-Mathematics of Computational Complexity Theory + +Igor C. 
Oliveira1 + +![](images/9070a982ca734a985fe9029753fd5664fa385b28312c9e502177b89de18d5c68.jpg) + +# Abstract + +We survey results on the formalization and independence of mathematical statements related to major open problems in computational complexity theory. Our primary focus is on recent findings concerning the (un)provability of complexity bounds within theories of bounded arithmetic. This includes the techniques employed and related open problems, such as the (non)existence of a feasible proof that $\mathsf{P} = \mathsf{NP}$ . + +# Contents + +1 Introduction 2 +2 Preliminaries 3 + +2.1 Complexity Theory 3 +2.2 Theories of Bounded Arithmetic 3 + +2.2.1 $\mathrm{PV}_1$ 4 +2.2.2 $\mathbf{S}_2^1,\mathbf{T}_2^1$ , and Beyond 4 +2.2.3 $\mathsf{APC}_1$ 6 + +3 Auxiliary Definitions and Results 6 + +3.1 Witnessing Theorems 6 +3.2 Bounded Arithmetic and Propositional Proofs 7 +3.3 Cuts of Models of Bounded Arithmetic 8 + +4 The Strength of Bounded Arithmetic 9 + +4.1 Formalization of Results from Algorithms and Complexity 9 +4.2 Concrete Example: Subbotovskaya's Formula Lower Bound in $\mathsf{PV}_1$ 10 + +5 Unprovability of Complexity Bounds 14 + +5.1 Unprovability of Upper Bounds 14 + +5.1.1 LEARN-Uniform Circuits and Unprovability 14 +5.1.2 $\mathsf{P} = \mathsf{NP}$ and Propositional Proof Complexity 17 + +5.2 Unprovability of Lower Bounds 18 + +5.2.1 Average-Case Circuit Lower Bounds 18 +5.2.2 Extended Frege Lower Bounds 21 + +5.3 Connection Between Upper Bounds and Lower Bounds 22 + +6 Additional Recent Developments 23 + +# 1 Introduction + +The investigation of the inherent complexity of computational tasks is a central research direction in theoretical computer science. While unconditional results are known in a variety of restricted contexts (i.e., with respect to weak models of computation), despite significant efforts, several central questions of the field remain wide open. 
Prominent examples include the relation between complexity classes P and NP, understanding the power of non-uniform Boolean circuits, and bounding the length of proofs in propositional proof systems such as Frege and extended Frege. + +The investigation of the difficulty of settling these problems has long been an important and influential area of research by itself (e.g., barrier results such as [BGS75, RR97, AW09, $\mathrm{CHO}^{+}22$ ]). Unfortunately, these results tend to be ad-hoc and do not consider a standard and robust notion of proof. In order to build a general theory, several works have considered provability in the usual sense of mathematical logic. Most importantly, this enables a deeper investigation of complexity theory that considers not only the running time of a program or the size of a circuit but also the feasibility of proving their existence and correctness. In particular, we can explore the fundamental question of what can and cannot be feasibly computed, along with the meta-question of what lower and upper bounds can and cannot be feasibly proven. + +A fundamental goal of this research is to + +$(\star)$ identify a suitable logical theory capable of formalizing most, if not all, known results in algorithms and complexity, and determine whether the major open problems mentioned above are provable or unprovable within this theory.2 + +Although we are still far from reaching this goal, progress has been made in understanding the (un)provability of statements concerning the complexity of computations within certain fragments of Peano Arithmetic, collectively known as Bounded Arithmetic. These theories are designed to capture proofs that manipulate and reason with concepts from a specified complexity class. For instance, a proof by induction whose inductive hypothesis can be expressed as an NP predicate is one such example. 
The earliest theory of this kind was $\mathsf{I}\Delta_0$ , introduced by Parikh [Par71], who explored the intuitive concept of feasibility in arithmetic and addressed the infeasibility of exponentiation. The relationship between Parikh's theory and computational complexity was fully recognized and advanced by Paris and Wilkie in a series of influential papers during the 1980s (see [WP87]). Other significant theories include Cook's theory $\mathsf{PV}_1$ [Coo75], which formalizes polynomial-time reasoning; Jerabek's theory $\mathsf{APC}_1$ [Jer04, Jer05, Jer07], which extends $\mathsf{PV}_1$ by incorporating the dual weak pigeonhole principle for polynomial-time functions and formalizes probabilistic polynomial-time reasoning; and Buss's theories $\mathsf{S}_2^i$ and $\mathsf{T}_2^i$ [Bus86], which include induction principles corresponding to various levels of the polynomial-time hierarchy. + +These theories are capable of formalizing advanced results. For instance, it is known that $\mathrm{PV}_1$ can prove the PCP Theorem [Pic15b], while $\mathrm{APC}_1$ can establish several significant circuit lower bounds [MP20], including monotone circuit lower bounds for $k$ -Clique and bounded-depth circuit lower bounds for the Parity function. Further examples include the explicit construction of expander graphs [BKKK20] and the correctness of randomized polynomial-time matching algorithms [LC11], among many others. + +Given the expressive power of these theories, even if we are not yet able to establish a breakthrough result of the magnitude of $(\star)$ , determining the (un)provability of complexity bounds of interest in theories of bounded arithmetic still represents significant progress towards our understanding of the power and limits of feasible computations and proofs. This survey aims to provide an introduction to some of these results, + +the underlying techniques, and related open problems. 
While our primary focus is on recent developments, in order to provide a broader perspective we also cover some classical results. Due to space limitations, the survey is not exhaustive, and several references had to be omitted (although some recent developments are mentioned in Section 6). + +# 2 Preliminaries + +# 2.1 Complexity Theory + +We will rely on a few additional standard definitions from complexity theory, such as basic complexity classes, Boolean circuits and formulas, and propositional proof systems. These can be found in textbooks such as [AB09] and [Kra19]. Below we only establish notation and review a classical result that offers a convenient way to talk about polynomial-time computations in some logical theories. + +We use $\mathsf{SIZE}[s]$ to denote the set of languages computed by Boolean circuits of size $s(n)$ . + +In theoretical computer science, one typically considers functions and predicates that operate over binary strings. This is equivalent to operations on integers, by identifying each non-negative integer with its binary representation. Let $\mathbb{N}$ denote the set of non-negative integers. For $a\in \mathbb{N}$ , we let $|a|\triangleq \lceil \log_2(a + 1)\rceil$ denote the length of the binary representation of $a$ . For a constant $k\geq 1$ , we say that a function $f\colon \mathbb{N}^k\to \mathbb{N}$ is computable in polynomial time if $f(x_{1},\ldots ,x_{k})$ can be computed in time polynomial in $|x_{1}|,\ldots ,|x_{k}|$ . (For convenience, we might write $|\vec{x} |\triangleq |x_1|,\dots ,|x_k|.$ ) Recall that FP denotes the set of polynomial time functions. While the definition of polynomial time refers to a machine model, FP can also be introduced in a machine independent way as the closure of a set of base functions under composition and limited recursion on notation. 
\begin{array}{l} c(x) \triangleq 0, \quad s(x) \triangleq x + 1, \quad a(x) \triangleq \lfloor x/2 \rfloor, \quad d(x) \triangleq 2 \cdot x, \quad \pi_{\ell}^{i}(x_1, \ldots, x_{\ell}) \triangleq x_i, \quad x \# y \triangleq 2^{|x| \cdot |y|}, \\ x \leq y \triangleq \begin{cases} 1 & \text{if } x \leq y \\ 0 & \text{otherwise,} \end{cases} \quad \mathrm{Choice}(x, y, z) \triangleq \begin{cases} y & \text{if } x > 0 \\ z & \text{otherwise.} \end{cases} \end{array}
together with the standard interpretation for constants and function symbols
\forall y \leq t(\vec{x})\, \varphi(\vec{x}, y) \triangleq \forall y\, (y \leq t(\vec{x}) \rightarrow \varphi(\vec{x}, y)), \quad \text{and}
This is a theory in the language $\mathcal{L}_{\mathsf{B}}$ extending BASIC by the induction axiom IND
if there is a formula $\varphi(\vec{x},y)\in \Gamma$ for which the following conditions hold:
(ii) $\mathsf{S}_2^1\vdash \forall \vec{x}\,\exists y\left(\varphi (\vec{x},y)\land \forall z\left(\varphi (\vec{x},z)\to y = z\right)\right)$.
+$$ + +Informally, each sentence $\mathrm{dWPHP}(f)$ postulates that, for every length $n = |N|$ , there is $y < (1 + 1/n) \cdot N$ such that $f(x) \neq y$ for every $x < N$ . + +It is known that the dual Weak Pigeonhole Principle for polynomial-time predicates can be proved in $\mathsf{T}_2^2$ [MPW02], and consequently $\mathsf{APC}_1 \subseteq \mathsf{T}_2^2(\mathcal{L}_{\mathsf{PV}})$ . + +# 3 Auxiliary Definitions and Results + +# 3.1 Witnessing Theorems + +Suppose a sentence $\psi$ of a certain syntactic form admits a proof in a theory $T$ over a vocabulary $\mathcal{L}$ . A witnessing theorem allows us to extract computational information from any such proof, by showing that an existential quantifier in $\psi$ can be witnessed by $\mathcal{L}$ -terms. The simplest example of such a result is stated next. + +Theorem 3.1 (Herbrand's Theorem (see, e.g., [Bus94, McK10])). Let $T$ be a universal theory over a vocabulary $\mathcal{L}$ . Let $\varphi(x,y)$ be a quantifier-free $\mathcal{L}$ -formula, and suppose that $T \vdash \forall x \exists y \varphi(x,y)$ . There is a constant $k \geq 1$ and $\mathcal{L}$ -terms $t_1(x),\ldots ,t_k(x)$ such that + +$$ +T \vdash \varphi (x, t _ {1} (x)) \lor \varphi (x, t _ {2} (x)) \lor \dots \lor \varphi (x, t _ {k} (x)). +$$ + +As an immediate consequence, if we apply Theorem 3.1 to $T \triangleq \mathrm{PV}_1$ , we obtain $\mathcal{L}_{\mathrm{PV}}$ -terms (corresponding to polynomial-time functions over $\mathbb{N}$ ) such that, given $a \in \mathbb{N}$ , at least one of them produces a witness $b \in \mathbb{N}$ such that $\mathbb{N} \models \varphi(a, b)$ . + +Next, we consider the provability of more complex sentences in a universal theory. + +Theorem 3.2 (KPT Theorem [KPT91]). Let $T$ be a universal theory with vocabulary $\mathcal{L}$ , $\varphi(w, u, v)$ be a quantifier-free $\mathcal{L}$ -formula, and suppose that $T \vdash \forall w \exists u \forall v \varphi(w, u, v)$ . 
Then there exist a constant $k \geq 1$ and $\mathcal{L}$ -terms $t_1, \ldots, t_k$ such that + +$$ +T \vdash \varphi (w, t _ {1} (w), v _ {1}) \vee \varphi (w, t _ {2} (w, v _ {1}), v _ {2}) \vee \dots \vee \varphi (w, t _ {k} (w, v _ {1}, \dots , v _ {k - 1}), v _ {k}), +$$ + +where the notation $t_i(w, v_1, \ldots, v_{i-1})$ indicates that these are the only variables occurring in $t_i$ . + +Theorem 3.2 has a natural interpretation as an interactive game with finitely many rounds, which we revisit in Section 5.1.1 in the context of the provability of circuit upper bounds. + +A similar form of Theorem 3.2 holds under the provability of a $\forall \exists \forall \exists$ -sentence (see, e.g., $\mathrm{[CKK^{+}24]}$ for a concrete application in the context of circuit lower bounds). In contrast, there is no straightforward analogue of the KPT Theorem for a larger number of quantifier alternations. In this case, more general formulations are needed, such as the ones considered in [Pud06, BKT14, LO23]. + +It is also possible to establish witnessing theorems for theories that are not universal. This can be done either by first transforming the theory into a universal theory through the inclusion of new function symbols and quantifier elimination, or via direct approaches (see, e.g., [Kra95, Section 7.3]). Another example is Buss's Theorem for $S_2^1$ , which can be used to show that every $\forall \Sigma_1^b$ -sentence provable in $S_2^1(\mathcal{L}_{\mathsf{PV}})$ is also provable in $\mathsf{PV}_1$ . This has two implications. First, we can combine this result with Theorem 3.1, which yields polynomial-time algorithms from proofs of $\forall \Sigma_1^b$ -sentences in $S_2^1(\mathcal{L}_{\mathsf{PV}})$ . Second, this means that in some situations we can establish the provability of a sentence in $\mathsf{PV}_1$ using the more convenient theory $S_2^1(\mathcal{L}_{\mathsf{PV}})$ (see Section 4.2 for an example). 
+ +# 3.2 Bounded Arithmetic and Propositional Proofs + +In this section, we explain a connection between $\mathsf{PV}_1$ and the extended Frege proof system discovered by [Coo75]. In short, it says that if a universal $\mathcal{L}_{\mathsf{PV}}$ -sentence $\phi(x)$ is provable in $\mathsf{PV}_1$ , then there is a translation of $\phi(x)$ into a sequence $\{G_n\}_{n \geq 1}$ of propositional formulas $G_n(p_1, \ldots, p_n)$ such that each $G_n$ has an extended Frege proof $\pi_n$ of size polynomial in $n$ .4 + +First, we review some concepts and fix notation, deferring the details to a standard textbook (e.g., [Kra19]). Recall that a propositional formula $G(p_{1},\ldots ,p_{n})$ is formed using variables $p_1,\dots ,p_n$ , constants 0 and 1, and logical connectives $\land ,\lor$ , and $\neg$ . A Frege $(\mathcal{F})$ proof system is a "textbook" style proof system for propositional logic. It can be formulated as a finite set of axiom schemes together with the modus ponens rule. $\mathcal{F}$ is known to be sound and complete. The size of a Frege proof is the total number of symbols occurring in the proof. In the extended Frege $(e\mathcal{F})$ proof system, we also allow repeated subformulas appearing in a proof to be abbreviated via new variables. + +Cook's Translation [Coo75]. Let $\varphi$ be a universal $\mathcal{L}_{\mathrm{PV}}$ -sentence of the form $\varphi \triangleq \forall x \psi(x)$ , where $\psi(x)$ is a quantifier-free formula. Cook [Coo75] established that if $\varphi$ is provable in $\mathrm{PV}_1$ , then there is a sequence $\{G_n\}_{n \geq 1}$ of propositional tautologies such that + +- Each $G_{n}(p_{1},\ldots ,p_{n})$ is a polynomial-size formula. +- $G_{n}$ encodes that $\psi(x)$ is true whenever $|x| \leq n$ , i.e., over all integers encoded as $n$ -bit strings. +- $G_{n}$ admits polynomial-size $e\mathcal{F}$ -proofs. +- Moreover, the existence of polynomial-size $e\mathcal{F}$ -proofs for each $G_{n}$ is provable in $\mathrm{PV}_1$ . 
(We will need this additional property of the translation in Section 5.2.2.) + +For a formula $\psi(x)$ as above, we often write $||\psi||_n$ to denote the corresponding propositional formula over inputs of length $n$ . + +For more information about the relation between proofs in bounded arithmetic and propositional proofs, including additional examples of propositional translations, we refer to [Bey09, Kra19]. + +# 3.3 Cuts of Models of Bounded Arithmetic + +Many fundamental results in bounded arithmetic are established using model-theoretic techniques (see, e.g., the exposition of Parikh's Theorem in [Kra95]). We will provide an example in Section 5.2.2. In this section, we include the required background for the result. We assume basic familiarity with model theory. + +While the definitions and results presented below can be adapted to other theories of bounded arithmetic, we focus on the theory $S_2^1$ for concreteness. + +Definition 3.3 (Cut in a Model of Arithmetic). A cut in a model $M$ of $\mathsf{S}_2^1$ is a nonempty set $I \subseteq M$ such that: + +1. For every $a, b \in M$ , if $b \in I$ and $a < b$ then $a \in I$ . +2. For every $a \in M$ , if $a \in I$ then $a + 1 \in I$ . + +In this case, we write $I \subseteq_{e} M$ . + +Note that a cut is not necessarily closed under operations such as addition and multiplication. + +Claim 3.4. Let $M$ be a model of $S_2^1$ , and let $I \subseteq_e M$ . Moreover, assume that $I$ is closed under $+, \cdot$ , and # operations. Let $\varphi(a, \vec{b})$ be a bounded formula with all free variables displayed. Let $\vec{v}$ be elements of $I$ . Then for every $u \in I$ , + +$$ +I \vDash \varphi (u, \vec {v}) \quad \Longleftrightarrow \quad M \vDash \varphi (u, \vec {v}). +$$ + +Claim 3.4 can be proved by induction on the complexity of $\varphi$ . Using the claim, one can establish the following lemma. + +Lemma 3.5. Let $M$ be a model of $\mathsf{S}_2^1$ , and let $I \subseteq_{e} M$ . 
I_e \triangleq \left\{ a \in M_0 \mid a \leq t(e) \text{ for some } \mathcal{L}_{\mathsf{B}}\text{-term } t(x) \right\},
straightforward, especially once an appropriate framework has been developed
\begin{array}{l} \mathsf{PV}_1 \vdash \forall \varphi \left( \mathsf{Fla}(\varphi) \wedge \exists y\, \mathsf{Sat}(\varphi, y) \rightarrow k\text{-}\mathsf{CNF}(f(\varphi)) \wedge \exists z\, \mathsf{Sat}(f(\varphi), z) \right) \\ \mathsf{PV}_1 \vdash \forall \varphi \left( \mathsf{Fla}(\varphi) \wedge \forall y\, \neg \mathsf{Sat}(\varphi, y) \rightarrow k\text{-}\mathsf{CNF}(f(\varphi)) \wedge \forall z\, \mathsf{Value}_{\leq 1-\delta}(f(\varphi), z) \right) \end{array}
\forall N\, \forall n\, \forall F\, \big( n = |N| \wedge n \geq 1 \wedge \mathsf{Fla}(F) \wedge \mathsf{Size}(F) < s(n) \rightarrow \exists x\, (|x| \leq n \wedge \mathsf{Eval}(F, x) \neq \oplus(x)) \big),
Consequently, if $\mathsf{PV}_1 \vdash \mathsf{FLB}_s^\oplus$ , we obtain via Herbrand's Theorem (Theorem 3.1) a polynomial-time algorithm $A$ that, when given $N$ of length $n$ and the description of an $n$ -bit formula $F$ of size $< s(n)$ , $A(N,F)$ outputs a string $x \in \{0,1\}^n$ such that $F(x) \neq \oplus(x)$ . In other words, circuit lower bounds provable in $\mathsf{PV}_1$ are constructive in the sense that they also provide an efficient refuter witnessing that $F$ does not compute parity (see [CJSW21] for more on this topic). + +The aforementioned formalization is informally referred to as a "Log" formalization of circuit lower bounds. This is because the main parameter $n$ is the length of a variable $N$ and all objects quantified over are of length polynomial in $n$ . It is also possible to consider a formalization where $n = ||N||$ ( $n$ is the length of the length of $N$ ), which is known as a "LogLog" formalization. This allows us to quantify over exponentially larger objects, e.g., under such a formalization the entire truth-table of a formula $F$ has length polynomial in the length of $N$ . + +Obtaining a Log formalization (e.g., [MP20]) is a stronger result than obtaining a LogLog formalization (e.g., [Raz95a]). In particular, in contrast to the discussion above, a witnessing theorem applied to a LogLog formalization provides a refuter with access to $N$ and thus running in time $\mathrm{poly}(N) = \mathrm{poly}(2^n)$ . Conversely, the unprovability of a LogLog circuit lower bound statement (e.g., [PS21, LO23]) is a stronger result than the unprovability of a Log statement. We refer to the introduction of [MP20] for a more extensive discussion on this matter. + +# 4.2 Concrete Example: Subbotovskaya's Formula Lower Bound in $\mathsf{PV}_1$ + +In this section, we explore some details of a formalization in $\mathrm{PV}_1$ that the parity function $\oplus$ on $n$ bits requires Boolean formulas of size $\geq n^{3/2}$ [Sub61]. 
\textbf{Theorem 4.1} \left( \left[ \mathrm{CKK}^{+}24 \right] \right) \textbf{.} \textit{ Let } s(n) \triangleq n^{3/2}. \textit{ Then } \mathsf{PV}_1 \vdash \mathsf{FLB}_{s}^{\oplus}.
In order to prove $\mathsf{FLB}_s^\oplus$ in $\mathsf{PV}_1$ , we explicitly consider a polynomial-time function $R(1^n, F, b)$ with the following property: + +If $\operatorname{Size}(F) < s(n)$ then $R(1^n, F, b)$ outputs an $n$ -bit string $y_n^b$ such that $\operatorname{Eval}(F, y_n^b) \neq \oplus^b(y_n^b)$ . + +In other words, $R(1^n,F,b)$ witnesses that the formula $F$ does not compute the function $\oplus^b$ over $n$ -bit strings. Note that the correctness of $R$ is captured by a sentence $\operatorname{Ref}_{R,s}$ described as follows: + +$$ +\forall 1 ^ {n} \forall F (\mathsf {F l a} (F) \wedge \mathsf {S i z e} (F) < s (n) \rightarrow | y _ {n} ^ {0} | _ {\ell} = | y _ {n} ^ {1} | _ {\ell} = n \wedge F (y _ {n} ^ {0}) \neq \oplus^ {0} (y _ {n} ^ {0}) \wedge F (y _ {n} ^ {1}) \neq \oplus^ {1} (y _ {n} ^ {1})) +$$ + +where we employ the abbreviations $y_{n}^{0} \triangleq R(1^{n}, F, 0)$ and $y_{n}^{1} \triangleq R(1^{n}, F, 1)$ , and for convenience use $|z|_{\ell}$ to denote the bitlength of $z$ . Our plan is to define $R$ and show that $\mathsf{PV}_1 \vdash \mathsf{Ref}_{R,s}$ . Note that this implies $\mathsf{FLB}_s^{\oplus}$ in $\mathsf{PV}_1$ by standard first-order logic reasoning. + +The correctness of $R(1^n, F, b)$ will be established by polynomial induction on $N$ (equivalently, induction on $n = |N|$ ). Since $\operatorname{Ref}_{R,s}$ is a universal sentence and $S_2^1(\mathcal{L}_{\mathsf{PV}})$ is $\forall \Sigma_1^b$ -conservative over $\mathsf{PV}_1$ (i.e., provability of such a sentence in $S_2^1(\mathcal{L}_{\mathsf{PV}})$ implies its provability in $\mathsf{PV}_1$ ), it is sufficient to describe a formalization in the more convenient theory $S_2^1(\mathcal{L}_{\mathsf{PV}})$ . For this reason, polynomial induction for NP and coNP predicates (admissible in $S_2^1(\mathcal{L}_{\mathsf{PV}})$ ; see, e.g., [Kra95, Section 5.2]) is available during the formalization. More details follow. 
+ +The procedure $R(1^n, F, b)$ makes use of a few polynomial-time sub-routines (briefly discussed in the comments in the pseudocode below) and is defined in the following way: + +Input: $1^n$ for some $n \geq 1$ , formula $F$ over $n$ -bit inputs, $b \in \{0,1\}$ . + +1 Let $s(n) \triangleq n^{3/2}$ . If $\operatorname{Size}(F) \geq s(n)$ or $\neg \mathsf{Fla}(F)$ return "error"; +2 If $\operatorname{Size}(F) = 0$ , $F$ computes a constant function $b_{F} \in \{0,1\}$ . In this case, return the $n$ -bit string $y_{n}^{b} \triangleq y_{1}^{b} 0^{n-1}$ such that $\oplus^{b}(y_{1}^{b} 0^{n-1}) \neq b_{F}$ ; +3 Let $\widetilde{F} \triangleq \text{Normalize}(1^n, F)$ ; // $\widetilde{F}$ satisfies the conditions in the proof of [Juk12, Claim 6.9], $\text{Size}(\widetilde{F}) \leq \text{Size}(F)$ , $\forall x \in \{0, 1\}^n F(x) = \widetilde{F}(x)$ . +4 Let $\rho \triangleq \text{Find-Restriction}(1^n, \widetilde{F})$ , where $\rho: [n] \to \{0, 1, \star\}$ and $|\rho^{-1}(\star)| = n - 1$ ; // $\rho$ restricts a suitable variable $x_i$ to a bit $c_i$ , as in [Juk12, Lemma 6.8]. +5 Let $F' \triangleq \text{Apply-Restriction}(1^n, \widetilde{F}, \rho)$ . Moreover, let $b' \triangleq b \oplus c_i$ and $n' \triangleq n - 1$ ; // $F'$ is an $n'$ -bit formula; $\forall z \in \{0, 1\}^{\rho^{-1}(\star)} F'(z) = \widetilde{F}(z \cup x_i \mapsto c_i)$ . +6 Let $y_{n'}^{b'} \triangleq R(1^{n'}, F', b')$ and return the $n$ -bit string $y_n^b \triangleq y_{n'}^{b'} \cup y_i \mapsto c_i$ ; + +Algorithm 1: Refuter Algorithm $R(1^n, F, b)$ [CKK+24]. + +(The pseudocode presented above is only an informal specification of $R(1^n, F, b)$ . As mentioned in Section 4.1, a completely formal proof in $\mathsf{PV}_1$ would employ Cobham's formalism and would specify how $R(1^n, F, b)$ can be defined from previously defined algorithms (e.g., Apply-Restriction) via the allowed operations.) 
We note that $R(1^n, F, b)$ runs in time polynomial in $n + |F| + |b|$ and that it is definable in $\mathsf{S}_2^1(\mathcal{L}_{\mathsf{PV}})$ . Next, as an instructive example, we establish the correctness of $R(1^n, F, b)$ in $\mathsf{S}_2^1(\mathcal{L}_{\mathsf{PV}})$ by polynomial induction (PIND) for $\Pi_1^b$ -formulas, assuming that the subroutines appearing in the pseudocode of $R(1^n, F, b)$ satisfy the necessary properties (provably in $\mathsf{S}_2^1(\mathcal{L}_{\mathsf{PV}})$ ). + 

Lemma 4.2. Let $s(n) \triangleq n^{3/2}$ . Then $\mathsf{S}_2^1(\mathcal{L}_{\mathsf{PV}}) \vdash \mathsf{Ref}_{R,s}$ . + 

Proof. We consider the formula $\varphi(N)$ defined as + 

$$
\begin{array}{l} \forall F \forall n \, (n = |N| \wedge n \geq 1 \wedge \operatorname{Fla}(F) \wedge \operatorname{Size}(F) < s(n)) \rightarrow \\ \left(\left|y_{n}^{0}\right|_{\ell} = \left|y_{n}^{1}\right|_{\ell} = n \wedge F\left(y_{n}^{0}\right) \neq \oplus^{0}\left(y_{n}^{0}\right) \wedge F\left(y_{n}^{1}\right) \neq \oplus^{1}\left(y_{n}^{1}\right)\right), \end{array}
$$

where as before we use $y_{n}^{0} \triangleq R(1^{n}, F, 0)$ and $y_{n}^{1} \triangleq R(1^{n}, F, 1)$ . Note that $\varphi(N)$ is a $\Pi_1^b$ -formula. Below, we argue that + 

$$
\mathsf{S}_{2}^{1}(\mathcal{L}_{\mathsf{PV}}) \vdash \varphi(1) \quad \text{and} \quad \mathsf{S}_{2}^{1}(\mathcal{L}_{\mathsf{PV}}) \vdash \forall N \, \varphi(\lfloor N/2 \rfloor) \rightarrow \varphi(N).
$$

Then, by polynomial induction for $\Pi_1^b$ -formulas (available in $\mathsf{S}_2^1(\mathcal{L}_{\mathsf{PV}})$ ) and using that $\varphi(0)$ trivially holds, it follows that $\mathsf{S}_2^1(\mathcal{L}_{\mathsf{PV}}) \vdash \forall N \varphi(N)$ . In turn, this yields $\mathsf{S}_2^1(\mathcal{L}_{\mathsf{PV}}) \vdash \mathsf{Ref}_{R,s}$ . + 

Base Case: $\mathsf{S}_2^1 (\mathcal{L}_{\mathrm{PV}})\vdash \varphi (1)$ . 
In this case, for a given formula $F$ and length $n$ , the hypothesis of $\varphi (1)$ is satisfied only if $n = 1$ , $F$ is a valid description of a formula, and $\operatorname {Size}(F) = 0$ . Let $y_1^0\triangleq R(1,F,0)$ and $y_{1}^{1}\triangleq R(1,F,1)$ . We need to prove that + +$$ +\left| y _ {1} ^ {0} \right| _ {\ell} = \left| y _ {1} ^ {1} \right| _ {\ell} = 1 \wedge F \left(y _ {1} ^ {0}\right) \neq \oplus^ {0} \left(y _ {1} ^ {0}\right) \wedge F \left(y _ {1} ^ {1}\right) \neq \oplus^ {1} \left(y _ {1} ^ {1}\right). +$$ + +Since $n = 1$ and $\mathrm{Size}(F) = 0$ , $F$ evaluates to a constant $b_{F}$ on every input bit. The statement above is implied by Line 2 in the definition of $R(n,F,b)$ . + +(Polynomial) Induction Step: $\mathsf{S}_2^1 (\mathcal{L}_{\mathsf{PV}})\vdash \forall N\varphi (\lfloor N / 2\rfloor)\to \varphi (N)$ . Fix an arbitrary $N$ , let $n\triangleq |N|$ , and assume that $\varphi (\lfloor N / 2\rfloor)$ holds. By the induction hypothesis, for every valid formula $F^{\prime}$ with $\mathrm{Size}(F^{\prime}) < n'^{3 / 2}$ , where $n^\prime \triangleq n - 1$ , we have + +$$ +\left| y _ {n ^ {\prime}} ^ {0} \right| _ {\ell} = \left| y _ {n ^ {\prime}} ^ {1} \right| _ {\ell} = n ^ {\prime} \wedge F ^ {\prime} \left(y _ {n ^ {\prime}} ^ {0}\right) \neq \oplus^ {0} \left(y _ {n ^ {\prime}} ^ {0}\right) \wedge F ^ {\prime} \left(y _ {n ^ {\prime}} ^ {1}\right) \neq \oplus^ {1} \left(y _ {n ^ {\prime}} ^ {1}\right), \tag {1} +$$ + +where $y_{n^{\prime}}^{0}\triangleq R(1^{n^{\prime}},F^{\prime},0)$ and $y_{n^{\prime}}^{1}\triangleq R(1^{n^{\prime}},F^{\prime},1)$ + +Now let $n \geq 2$ , and let $F$ be a valid description of a formula over $n$ -bit inputs with $\mathrm{Size}(F) < n^{3/2}$ . By the size bound on $F$ , $R(1^n, F, b)$ ignores Line 1. If $\mathrm{Size}(F) = 0$ , then similarly to the base case it is trivial to check that the conclusion of $\varphi(N)$ holds. 
Therefore, we assume that $\mathrm{Size}(F) \geq 1$ and $R(1^n, F, b)$ does not stop at Line 2. + +Consider the following definitions: + +1. $\widetilde{F} \triangleq \mathrm{Normalize}(1^n, F)$ (Line 3), + +5. $b' \triangleq b \oplus c_i$ (Line 5), where $\rho$ restricts $x_i$ to $c_i$ , + +2. $\rho \triangleq$ Find-Restriction $(1^n,\widetilde{F})$ (Line 4), + +6. $y_{n^{\prime}}^{b^{\prime}}\triangleq R(1^{n^{\prime}},F^{\prime},b^{\prime})$ (Line 6), + +3. $F^{\prime}\triangleq$ Apply-Restriction $(1^{n},\widetilde{F},\rho)$ (Line 5), + +7. $y_{n}^{b}\triangleq y_{n^{\prime}}^{b^{\prime}}\cup y_{i}\mapsto c_{i}$ (Line 6), + +4. $n^{\prime}\triangleq n - 1$ (Line 5), + +8. $s \triangleq \operatorname{Size}(F)$ , $\widetilde{s} \triangleq \operatorname{Size}(\widetilde{F})$ , and $s' \triangleq \operatorname{Size}(F')$ . + +We rely on the provability in $\mathsf{S}_2^1 (\mathcal{L}_{\mathsf{PV}})$ of the following statements about the subroutines of $R(1^{n},F,b)$ (see [CKK+24]): + +(i) $\widetilde{s}\leq s$ + +(iii) $\forall x\in \{0,1\} ^n\widetilde{F} (x) = F(x)$ + +(ii) $s' \leq \widetilde{s} \cdot (1 - 1/n)^{3/2}$ , + +(iv) $\forall z\in \{0,1\}^{\rho^{-1}(\star)}F'(z) = \widetilde{F}\big(z\cup x_i\mapsto c_i\big).$ + +By Items (i) and (ii) together with the bound $s < n^{3/2}$ , + +$$ +\mathsf {S} _ {2} ^ {1} \left(\mathcal {L} _ {\mathsf {P V}}\right) \vdash s ^ {\prime} \leq \widetilde {s} \cdot (1 - 1 / n) ^ {3 / 2} \leq s \cdot (1 - 1 / n) ^ {3 / 2} < n ^ {3 / 2} \cdot (1 - 1 / n) ^ {3 / 2} = (n - 1) ^ {3 / 2}. +$$ + +Thus $F'$ is a valid formula on $n'$ -bit inputs of size $< n'^{3/2}$ . By the first condition in the induction hypothesis (Equation (1)) and the definition of each $y_{n}^{b}$ , we have $|y_{n}^{0}|_{\ell} = |y_{n}^{1}|_{\ell} = n$ . 
Using the definitions listed above, the last two conditions in the induction hypothesis (Equation (1)), and Items (iii) and (iv), we derive in $S_{2}^{1}(\mathcal{L}_{\mathsf{PV}})$ the following statements for each $b \in \{0, 1\}$ : + +$$ +F ^ {\prime} \left(y _ {n ^ {\prime}} ^ {b ^ {\prime}}\right) \neq \oplus^ {b ^ {\prime}} \left(y _ {n ^ {\prime}} ^ {b ^ {\prime}}\right), +$$ + +$$ +F (y _ {n} ^ {b}) = F ^ {\prime} (y _ {n ^ {\prime}} ^ {b ^ {\prime}}), +$$ + +$$ +F (y _ {n} ^ {b}) \neq \oplus^ {b ^ {\prime}} (y _ {n ^ {\prime}} ^ {b ^ {\prime}}). +$$ + +Therefore, using basic facts about the function symbols $\oplus^0$ and $\oplus^1$ , + +$$ +\oplus^ {b ^ {\prime}} \left(y _ {n ^ {\prime}} ^ {b ^ {\prime}}\right) = \oplus^ {b \oplus c _ {i}} \left(y _ {n ^ {\prime}} ^ {b ^ {\prime}}\right) = c _ {i} \oplus \left(\oplus^ {b} \left(y _ {n ^ {\prime}} ^ {b ^ {\prime}}\right)\right) = c _ {i} \oplus \left(\oplus^ {b} \left(y _ {n} ^ {b}\right) \oplus c _ {i}\right) = \oplus^ {b} \left(y _ {n} ^ {b}\right). +$$ + +These statements imply that, for each $b \in \{0,1\}$ , $F(y_{n}^{b}) \neq \oplus^{b}(y_{n}^{b})$ . In other words, the conclusion of $\varphi(N)$ holds. This completes the proof of the induction step. + +As explained above, the provability of $\operatorname{Ref}_{R,s}$ in $\mathsf{S}_2^1 (\mathcal{L}_{\mathsf{PV}})$ implies its provability in $\mathsf{PV}_1$ . Since $\mathsf{PV}_1 \vdash \operatorname{Ref}_{R,s} \to \mathsf{FLB}_s^\oplus$ , this completes the proof of Theorem 4.1. + +We have seen that a non-trivial formula size lower bound can be established in $\mathsf{PV}_1$ . More advanced circuit lower bounds are known to be provable assuming additional axioms extending $\mathsf{PV}_1$ (e.g., [Kra95, Section 15.2] and [MP20]), but their provability in $\mathsf{PV}_1$ (or equivalently, in $\mathsf{S}_2^1(\mathcal{L}_{\mathsf{PV}})$ ) is less clear. + +Open Problem 4.3. 
For each $d \geq 1$ and $\ell \geq 1$ , can $\mathsf{PV}_1$ prove that the parity function on $n$ bits cannot be computed by depth- $d$ circuits of size $n^\ell$ ? + +Open Problem 4.4. For each $\ell \geq 1$ , is there a constant $k = k(\ell)$ such that $\mathsf{PV}_1$ proves that every monotone circuit for the $k$ -clique problem on $n$ -vertex graphs must be of size at least $n^\ell$ ? + +# 5 Unprovability of Complexity Bounds + +The investigation of the unprovability of complexity bounds within theories of bounded arithmetic has a long and rich history. Much of the early work took place in the nineties, with significant results obtained by Razborov [Raz95a, Raz95b], Krajicek [Kra97], and other researchers. Since then, and in particular over the last decade, there has been renewed interest and progress in establishing unprovability results (see, e.g., [CK07, PS21, CKKO21, LO23, ABM23] and references therein). + +In Section 5.1, we consider the unprovability of complexity upper bounds. The unprovability of an inclusion such as $\mathsf{NP} \subseteq \mathsf{SIZE}[n^k]$ is equivalent to the consistency of NP $\not\subseteq \mathsf{SIZE}[n^k]$ with the corresponding theory. Such a consistency result establishes that, while we cannot confirm the separation is true in the standard model of natural numbers, we know it holds in a non-standard model of a theory so strong that complexity theory appears almost indistinguishable from the standard one. We stress that establishing the consistency of a lower bound is a necessary step towards showing that the lower bound is true. For this reason, the unprovability of upper bounds can be formally seen as progress towards showing unconditional complexity lower bounds. + +In Section 5.2, we turn our attention to the unprovability of complexity lower bounds. 
This direction is partly driven by the desire to formally understand why proving complexity lower bounds is challenging, and to explore the possibility of a more fundamental underlying reason for this difficulty. Moreover, it might provide examples of hard sentences for logical theories and of hard propositional tautologies for proof systems. The investigation of the meta-mathematics of lower bounds has also found unexpected applications in algorithms and complexity (e.g., [CIKK16]). + 

Finally, in Section 5.3 we connect the two directions and explain how the unprovability of circuit lower bounds in $\mathsf{PV}_1$ yields the unprovability of $\mathsf{P} = \mathsf{NP}$ in $\mathsf{PV}_1$ . The latter can be seen as a weakening of the $\mathsf{P}$ versus NP problem that considers the existence of feasible proofs that $\mathsf{P} = \mathsf{NP}$ . This further motivates the investigation of the unprovability of lower bounds. + 

# 5.1 Unprovability of Upper Bounds + 

# 5.1.1 LEARN-Uniform Circuits and Unprovability + 

Cook and Krajicek [CK07] considered the provability of NP $\subseteq$ SIZE[poly] in bounded arithmetic and obtained a number of conditional negative results. [KO17], building on techniques from [CK07], showed that for no integer $k\geq 1$ the theory $\mathsf{PV}_1$ proves that $\mathsf{P}\subseteq \mathsf{SIZE}[n^k ]$ . Note that this is an unconditional result. Thus, for a natural theory capable of formalizing advanced results from complexity theory, such as the PCP Theorem, we can unconditionally rule out the provability of $\mathsf{P}\subseteq \mathsf{SIZE}[n^{k}]$ . A slightly stronger model-theoretic formulation of the result of [KO17] appears in [BM20]. + 

[BKO20] obtained results for stronger theories and ruled out the provability of infinitely often inclusions. In more detail, for an $\mathcal{L}_{\mathrm{PV}}$ -function symbol $h$ , consider the sentence + 

$$
\mathsf{UB}_{k}^{i.o.}[h] \triangleq \forall 1^{m} \, \exists 1^{n} \, \exists C_{n} \, \forall x \left(n \geq m \wedge |C_{n}| \leq n^{k} \wedge \left(|x| \leq n \rightarrow \psi(n, C_{n}, x, h)\right)\right),{}^{10}
$$

where $\psi$ is a quantifier-free $\mathcal{L}_{\mathsf{PV}}$ -formula stating that $h(x) \neq 0$ if and only if the evaluation of the circuit $C_n$ on $x$ (viewed as an $n$ -bit string) is 1. In other words, $\mathsf{UB}_k^{i.o.}[h]$ states that the language defined by $h$ (which is in $\mathsf{P}$ ) admits circuits of size at most $n^k$ on infinitely many input lengths $n$ . [BKO20] showed that for each $k \geq 1$ , there is an $\mathcal{L}_{\mathsf{PV}}$ -function symbol $h$ such that $\mathsf{PV}_1$ does not prove $\mathsf{UB}_k^{i.o.}[h]$ . Similarly, they established that $\mathsf{S}_2^1 \not\vdash \mathsf{NP} \subseteq \text{i.o.SIZE}[n^k]$ and $\mathsf{T}_2^1 \not\vdash \mathsf{P}^{\mathsf{NP}} \subseteq \text{i.o.SIZE}[n^k]$ . + 

Building on these results, [CKKO21] introduced a modular framework to establish the unprovability of circuit upper bounds in bounded arithmetic using a learning-theoretic perspective. Next, we describe how their approach can be used to show a slightly weaker form of the result from [BKO20] described above. For an $\mathcal{L}_{\mathrm{PV}}$ -function symbol $h$ , we consider a sentence $\mathsf{UB}_{c,k}[h]$ stating that $L_{h} \in \mathsf{SIZE}[c \cdot n^{k}]$ , where $x \in L_{h}$ if and only if $h(x) \neq 0$ , i.e., + 

$$
\mathsf{UB}_{c,k}[h] \triangleq \forall 1^{n} \, \exists C_{n} \, \forall x \left(\left|C_{n}\right| \leq c \cdot n^{k} \wedge \left(|x| \leq n \rightarrow (\operatorname{Eval}(C_{n}, x, n) = 1 \leftrightarrow h(x) \neq 0)\right)\right), \tag{2}
$$

where $\operatorname{Eval}(C_n, x, n)$ is an $\mathcal{L}_{\mathrm{PV}}$ -function that evaluates the circuit $C_n$ on the $n$ -bit string described by $x$ . 
Our goal is to show that for every $k \geq 1$ there is a function symbol $h$ such that, for no choice of $c \geq 1$ , $\mathrm{PV}_1$ proves $\mathrm{UB}_{c,k}[h]$ . (Note that in all results discussed in this section, we consider Log formalizations, as explained in Section 4.1.) + +Overview of the Approach. Note that $\mathsf{UB}_{c,k}[h]$ claims the existence of circuits for $L_{h}$ , i.e., it states a non-uniform upper bound. We explore the constructive aspect of $\mathsf{PV}_1$ proofs, by extracting computational information from a $\mathsf{PV}_1$ -proof that such circuits exist. The argument has a logical component, where we extract from a proof of $\mathsf{UB}_{c,k}[h]$ a "LEARN-uniform" construction of a sequence $\{C_n\}_n$ of circuits for $L_{h}$ and a complexity-theoretic component, where we unconditionally establish that for each $k$ LEARN-uniform circuits of this form do not exist for some $h$ . Altogether, we get that for some $h$ theory $\mathsf{PV}_1$ does not prove $\mathsf{UB}_{c,k}[h]$ (no matter the choice of $c$ ). + +LEARN-uniform circuits. We will be interested in languages that can be efficiently learned with a bounded number of equivalence queries, in the following sense. For functions $s, q \colon \mathbb{N} \to \mathbb{N}$ , we say that a language $L \subseteq \{0,1\}^*$ is in LEARN-uniform $^{\mathsf{EQ}[q]}$ SIZE[s] if there is a polynomial-time algorithm $A^{\mathsf{EQ}(L_n)}(1^n)$ that outputs a circuit of size at most $s(n)$ for $L_n$ after making at most $q(n)$ equivalence queries to $L_n$ , where $L_n = L \cap \{0,1\}^n$ . The equivalence query oracle, given the description of an $n$ -bit circuit $D$ of size a most $s(n)$ , replies "yes" if $D$ computes $L_n$ , or provides some counter-example $w$ such that $D(w) \neq L_n(w)$ . + +Extracting LEARN-uniform circuits from $\mathsf{PV}_1$ proofs. 
For convenience, write $\mathsf{UB}_{c,k}[h] = \forall 1^n \exists C_n \forall x \phi(1^n, C_n, x)$ in Equation (2), where $\phi(1^n, C_n, x)$ is a quantifier-free formula. Since $\mathsf{PV}_1$ is a universal theory, under the assumption that $\mathsf{PV}_1 \vdash \mathsf{UB}_{c,k}[h]$ , we can apply Theorem 3.2 (KPT Witnessing Theorem) to obtain the provability in $\mathsf{PV}_1$ of the disjunction + +$$ +\forall 1 ^ {n} \forall x _ {1} \dots \forall x _ {k} (\phi (1 ^ {n}, t _ {1} (1 ^ {n}), x _ {1}) \vee \phi (1 ^ {n}, t _ {2} (1 ^ {n}, x _ {1}), x _ {2}) \vee \dots \vee \phi (1 ^ {n}, t _ {k} (1 ^ {n}, x _ {1}, \dots , x _ {k - 1}), x _ {k})) \tag {3} +$$ + +where $t_1, \ldots, t_k$ are $\mathcal{L}_{\mathsf{PV}}$ -terms and $k = O(1)$ . Most importantly, due to the soundness of $\mathsf{PV}_1$ , this statement is true over the standard model $\mathbb{N}$ . Additionally, the terms in $\mathsf{PV}_1$ correspond to polynomial-time algorithms. Next, we will discuss how to interpret Equation (3) over $\mathbb{N}$ as an interactive protocol and how this perspective leads to a LEARN-uniform construction. + +The KPT Witnessing Theorem can be intuitively understood as follows [KPS90]. Consider a search problem $Q(1^n)$ , where given the input $1^n$ , we need to find $D$ such that $\forall x \phi(1^n, D, x)$ . The problem $Q(1^n)$ can be solved using a $k$ -round Student-Teacher protocol. In the first round, the student proposes $D_1 = t_1(1^n)$ as a solution to the search problem $Q(1^n)$ . This solution is either correct, or there exists a counterexample $w_1$ such that $\neg \phi(1^n, t_1(1^n), w_1)$ . The teacher then provides this counterexample value $w_1$ , and the protocol moves to the next round. In each subsequent round $1 \leq i < k$ , the student computes $D_i = t_i(1^n, w_1, \ldots, w_{i-1})$ based on the counterexamples $w_1, \ldots, w_{i-1}$ received in the previous rounds. 
This $D_i$ is either a correct solution for $Q(1^n)$ , in which case the problem is solved, or there is another counterexample $w_i$ provided by the teacher such that $\neg \phi(1^n, t_i(1^n, w_1, \ldots, w_{i-1}), w_i)$ . If the latter is the case, the protocol continues to the next round $i + 1$ . The theorem guarantees that for every input $1^n$ , the student will successfully solve the search problem $Q(1^n)$ within some round $1 \leq i \leq k$ . + +From a $\mathrm{PV}_1$ proof of a circuit upper bound for a language $L_h$ , we can derive a Student-Teacher protocol for the search problem $Q(1^n)$ corresponding to Equation (3). In this protocol, the student proposes a candidate circuit $D$ , and the teacher provides a counterexample $w$ to $D$ (an input $w$ such that $D(w) \neq L_h(w)$ ) if one exists. (Note that $\phi(1^n, D, x)$ might not be true for other reasons, e.g., if $|D| > c \cdot n^k$ , but in such cases there is no need to invoke the equivalence query oracle and we can proceed in the Student-Teacher protocol with, say, $w = 0^n$ .) The student is guaranteed to succeed after at most $k$ queries, regardless of the counterexamples provided by the teacher. Finally, for every input $n$ , the student computes according to a constant number of fixed $\mathrm{PV}_1$ terms $t_1, \ldots, t_k$ . Since a $\mathrm{PV}_1$ term is merely a composition of a finite number of $\mathrm{PV}_1$ function symbols (polynomial-time algorithms), the student's computation runs in polynomial time. Therefore, from the provability in $\mathrm{PV}_1$ of a non-uniform circuit upper bound for a language $L \in \mathsf{P}$ , we can extract a LEARN-uniform family of circuits for $L$ . + +Unconditional lower bound against LEARN-uniform circuits. The argument described above reduces the unprovability of upper bounds to a complexity-theoretic question with no reference to logic. 
To complete the proof, it is enough to show that for each $k$ there is a language $L \in \mathsf{P}$ such that $L \notin \mathsf{LEARN}\text{-uniform}^{\mathsf{EQ}[O(1)]} \mathsf{SIZE}[O(n^{k})]$ . This unconditional lower bound against LEARN-uniform circuits is established in [CKKO21] by generalizing a lower bound from [SW14] against P-uniform circuits, which can be interpreted as LEARN-uniform constructions with $q = 0$ queries. Roughly speaking, [CKKO21] shows that one can eliminate each equivalence query using a small amount of non-uniform advice, and that the base case where no queries are present (as in [SW14]) can be extended to a lower bound against a bounded amount of advice. + 

This completes the sketch of the argument. The approach is fairly general and can be adapted to other theories. The strength of the theory affects the learning model against which one needs to obtain lower bounds (e.g., by increasing the number of queries or allowing randomized learners). + 

Open Problem 5.1. Show that $\mathsf{S}_2^1$ does not prove that $\mathsf{P} \subseteq \mathsf{SIZE}[n^k]$ . + 

In order to solve Open Problem 5.1, using the connection from [CKKO21] it is sufficient to show that $\mathsf{P} \not\subseteq \mathsf{LEARN}\text{-uniform}^{\mathsf{EQ}[q]} \mathsf{SIZE}[O(n^{k})]$ for $q = \mathrm{poly}(n)$ . In other words, this amounts to understanding the class of languages that admit circuits that can be produced with a polynomial number of equivalence queries. + 

Open Problem 5.2. Show that $\mathsf{T}_2^1$ does not prove that $\mathsf{NP} \subseteq \mathsf{SIZE}[n^k]$ . + 

# 5.1.2 $\mathsf{P} = \mathsf{NP}$ and Propositional Proof Complexity + 

Suppose that $\mathsf{P}$ is actually equal to NP. In this scenario, there exists a polynomial-time algorithm $g$ (i.e., a $\mathrm{PV}_1$ function symbol) that can find a satisfying assignment for any given satisfiable formula. 
In other words, if $\operatorname{Formula}(F, 1^n)$ denotes an $\mathcal{L}_{\mathsf{PV}}$ -formula that checks if $F$ is a valid description of a formula over $n$ input bits, and $\operatorname{Sat}(F, x)$ is an $\mathcal{L}_{\mathsf{PV}}$ -formula that checks if $x$ satisfies the formula encoded by $F$ , the sentence + +$$ +\varphi_ {\mathrm {P} = \mathrm {N P}} [ g ] \triangleq \forall 1 ^ {n} \forall F \forall x \left(\left(\operatorname {F o r m u l a} (F, 1 ^ {n}) \wedge \operatorname {S a t} (F, x)\right)\rightarrow \operatorname {S a t} (F, g (F))\right) \tag {4} +$$ + +is true in the standard model $\mathbb{N}$ . + +Open Problem 5.3. Show that for no polynomial-time function symbol $g$ theory $\mathrm{PV}_1$ proves the sentence $\varphi_{\mathrm{P} = \mathrm{NP}}[g]$ . + +Equivalently, Open Problem 5.3 states that $\mathsf{PV}_1$ (and by standard conservation results $S_2^1$ ) is consistent with $\mathsf{P} \neq \mathsf{NP}$ . This means that either $\mathsf{P} \neq \mathsf{NP}$ , as is commonly assumed, making the conjecture trivially true, or $\mathsf{P} = \mathsf{NP}$ , but this cannot be proven using only polynomial-time concepts and reasoning. Therefore, Open Problem 5.3 represents a formal weakening of the conjecture that $\mathsf{P} \neq \mathsf{NP}$ . The statement is known to follow from the purely combinatorial conjecture that the extended Frege propositional proof system $e\mathcal{F}$ (see Section 3.2) is not polynomially bounded, which is a major open problem in proof complexity. + +Theorem 5.4 ([Coo75]). Suppose that there is a sequence $\{F_n\}_{n\geq 1}$ of propositional tautologies of size polynomial in $n$ that require eF proofs of size $n^{\omega (1)}$ . Then there is no function symbol $g$ such that $\mathsf{PV}_1$ proves $\varphi_{\mathsf{P} = \mathsf{NP}}[g]$ . + +Proof. Here we only provide a sketch of the proof. More details and extensions of the result can be found in the textbooks [Kra95, Kra19]. 
We establish that if $\mathsf{PV}_1 \vdash \varphi_{\mathsf{P} = \mathsf{NP}}[g]$ for some $g$ , then every tautology has a polynomial size $e\mathcal{F}$ proof. + +Recall the definitions and results from Section 3.2. For a propositional proof system $P$ (described by an $\mathcal{L}_{\mathrm{PV}}$ function symbol), we consider an $\mathcal{L}_{\mathrm{PV}}$ -sentence stating the soundness of $P$ : + +$$ +\mathsf {S o u n d} _ {P} \triangleq \forall 1 ^ {n} \forall F \forall \pi (\mathsf {F o r m u l a} (F, 1 ^ {n}) \land \mathsf {P r o o f} _ {P} (F, \pi)) \to \forall x (| x | \leq n \to \mathsf {S a t} (F, x)), +$$ + +where $\operatorname{Proof}_P(F, \pi)$ states that $\pi$ is a valid $P$ -proof of $F$ . + +Note that if $F$ is not a tautology then $g(\neg F)$ outputs a satisfying assignment of $\neg F$ , while if $F$ is a tautology then $\neg F$ admits no satisfying assignment. We consider a proof system $P_g$ defined as follows: Given a valid description of an $n$ -bit propositional formula $F$ and a candidate proof $\widetilde{\pi}$ , $P_g$ accepts $\widetilde{\pi}$ as a proof of $F$ if and only if + +$$ +g (\neg F) = \widetilde {\pi} \quad \text {a n d} \quad \neg \operatorname {S a t} (\neg F, \widetilde {\pi}) , +$$ + +where $\neg F$ represents the negation of $F$ . Observe that for any tautology $F$ , $\pi_F \triangleq g(\neg F)$ is a valid $P_g$ -proof of $F$ . + +Note that $\mathsf{PV}_1\vdash \mathsf{Sound}_{P_g}$ , which follows from the provability of Equation (4) and the definition of $P_{g}$ using $g$ . Now consider the quantifier-free $\mathcal{L}_{\mathsf{PV}}$ -formula + +$$ +\psi \triangleq \neg \operatorname {F o r m u l a} (F, 1 ^ {n}) \vee \neg \operatorname {P r o o f} _ {P _ {g}} (F, \pi) \vee | x | > n \vee \operatorname {S a t} (F, x). 
+$$ + +The provability of $\forall 1^n\forall F\forall \pi \psi$ in $\mathsf{PV}_1$ follows from the provability of $\mathsf{Sound}_{P_g}$ + +Using Cook's translation (Section 3.2), the sequence of propositional formulas $||\psi||_m$ admits $e\mathcal{F}$ -proofs of polynomial size. Moreover, given an actual $n$ -bit propositional formula $F$ of polynomial size and the corresponding $P_g$ -proof $\pi_F$ (represented by fixed strings $\langle F\rangle$ and $\langle \pi_F\rangle$ ), one can show that there are polynomial size $e\mathcal{F}$ proofs of both $||\mathrm{Formula}(\langle F\rangle,1^n)||_{\mathrm{poly}(n)}$ and $||\mathrm{Proof}_{P_g}(\langle F\rangle,\langle \pi_F\rangle)||_{\mathrm{poly}(n)}$ . (Intuitively, this follows by an evaluation of the expressions on these fixed inputs.) Since $e\mathcal{F}$ is closed under substitution, we can derive in $e\mathcal{F}$ with a polynomial size proof the formula $||\mathrm{Sat}(\langle F\rangle,x)||_{\mathrm{poly}(n)}$ . + +Finally, for every propositional formula $F(x)$ on $n$ -bit inputs, it is possible to efficiently prove in $e\mathcal{F}$ the propositional formula $||\mathrm{Sat}(\langle F\rangle ,x)||_{\mathrm{poly}(n)}\to F(x)$ . (This can be established by a slightly more general structural induction on formulas $F$ using information about $||\cdot||$ and $\langle \cdot \rangle$ .) Overall, since $e\mathcal{F}$ is closed under implication, it follows from these derivations that there is a polynomial size $e\mathcal{F}$ proof of $F$ . This completes the sketch of the proof of the result. + +Open Problem 5.3 would also follow from a proof that Buss's hierarchy of theories $\mathsf{T}_2^i$ does not collapse [KPT91], another central problem in bounded arithmetic. More precisely, it is enough to obtain the following separation. + +Open Problem 5.5. Show that for some $i > j \geq 1$ we have $\mathsf{T}_2^i \neq \mathsf{T}_2^j$ . 
+ +It is known that $\mathrm{PV}_1$ proves that $\mathsf{P} = \mathsf{NP}$ if and only if it proves that $\mathsf{NP} = \mathsf{coNP}$ . Consequently, a super-polynomial lower bound on the length of $e\mathcal{F}$ proofs also yields the consistency of $\mathsf{NP} \neq \mathsf{coNP}$ with $\mathrm{PV}_1$ . + +Finally, we remark that the use of witnessing theorems alone (as done in Section 5.1.1) is probably not sufficient to settle Open Problem 5.3. This is because these theorems typically also hold when we extend the theory with all true universal statements. Thus an unprovability argument that only employs the witnessing theorem would establish unconditionally that each sentence $\varphi_{\mathsf{P} = \mathsf{NP}}[g]$ is false and therefore $\mathsf{P}\neq \mathsf{NP}$ . Some researchers interpret this as evidence that the investigation of propositional proof complexity might be unavoidable. Another approach to Open Problem 5.3 is discussed in Section 5.3. + +# 5.2 Unprovability of Lower Bounds + +# 5.2.1 Average-Case Circuit Lower Bounds + +In this section, we discuss the unprovability of strong average-case lower bounds in $\mathrm{PV}_1$ . We focus on an unprovability result from [PS21], stated and proved in a slightly stronger form in [LO23]. The proof is based on a technique introduced by [Kra11] and further explored in [Pic15a]. + +We consider an average-case separation of co-nondeterministic circuits against non-deterministic circuits of subexponential size. In more detail, we investigate the provability of a sentence $\mathsf{LB}^1 (s_1,s_2,m,n_0)$ stating that, for every input length $n\geq n_0$ , there is a co-nondeterministic circuit $C$ of size $\leq s_{1}(n)$ such that, for every nondeterministic circuit $D$ of size $\leq s_2(n)$ , we have + +$$ +\operatorname * {P r} _ {x \sim \{0, 1 \} ^ {n}} \Big [ C (x) = D (x) \Big ] \leq 1 - \frac {m (n)}{2 ^ {n}}. 
+$$

Let $\mathrm{coNSIZE}[s(n)]$ and $\mathrm{NSIZE}[s(n)]$ refer to co-nondeterministic circuits and nondeterministic circuits of size $s(n)$ , respectively. More formally, $\mathrm{LB}^1(s_1, s_2, m, n_0)$ is an $\mathcal{L}_{\mathrm{PV}}$ -sentence capturing the following lower

bound statement:

$$
\forall n \in \operatorname{LogLog} \text{ with } n \geq n_0 \; \exists C \in \operatorname{coNSIZE}\left[s_1(n)\right] \; \forall D \in \operatorname{NSIZE}\left[s_2(n)\right]
$$

$$
\exists m = m(n) \text{ distinct } x^1, \dots, x^m \text{ s.t. } \operatorname{Error}(C, D, x^i) \text{ for all } i \in [m],
$$

where $\operatorname{Error}(C, D, x)$ means that the circuits $C$ and $D$ disagree on the input $x$ . This statement can be seen as an average-case form of the coNP $\nsubseteq \mathsf{NP} / \mathsf{poly}$ conjecture if we let $s_1(n) = n^{O(1)}$ , $s_2(n) = n^{\omega(1)}$ , and $m(n) = 2^n / n$ . (Note that we consider in this section a LogLog formalization, according to the notation explained in Section 4.1.)

Theorem 5.6 ([PS21, LO23]). Let $d \geq 1$ , $\delta > 0$ , and $n_0 \geq 1$ be arbitrary parameters, and let $s_1(n) = n^d$ , $s_2(n) = 2^{n^\delta}$ , and $m(n) = 2^n / n$ . Then $\mathsf{PV}_1$ does not prove the sentence $\mathsf{LB}^1(s_1, s_2, m, n_0)$ .

In the remainder of this section, we provide some intuition about the proof of this result.

Overview of the Argument. Suppose, towards a contradiction, that $\mathsf{PV}_1\vdash \mathsf{LB}^1 (s_1,s_2,m,n_0)$ with parameters as above. The central idea of the argument is that establishing a strong complexity lower bound within bounded arithmetic leads to a corresponding complexity upper bound. These lower and upper bounds contradict each other. Consequently, this contradiction implies the unprovability of the lower bound statement.
In a bit more detail, the argument proceeds as follows: + +(i) The provability of the average-case lower bound sentence $\mathsf{LB}^1 (s_1,s_2,m,n_0)$ implies the provability of a worst-case lower bound for coNSIZE $[n^d]$ against NSIZE $[2^{n^\delta}]$ . We formalize the latter by a sentence $\mathsf{LB}_{\mathrm{wst}}^1 (s_1,s_2,n_0)$ . +(ii) Given any proof of $\mathsf{LB}_{\mathsf{wst}}^{1}(s_{1}, s_{2}, n_{0})$ in $\mathsf{PV}_1$ , we extract a complexity upper bound for an arbitrary co-nondeterministic circuit $E_{m}(x)$ over an input $x$ of length $m$ and of size at most $\mathrm{poly}(m)$ . More precisely, we show that there is a deterministic circuit $B_{m}$ of size $\leq 2^{m^{o(1)}}$ such that $\operatorname{Pr}_{x \sim \{0,1\}^m}[E_m(x) = B_m(x)] \geq 1/2 + 2^{-m^{o(1)}}$ . +(iii) We invoke an existing hardness amplification result to conclude that, on any large enough input length $n$ , every co-nondeterministic circuit $C_n$ of size $\leq n^d$ agrees with some nondeterministic circuit $D_n$ of size $\leq 2^{n^\delta}$ on more than a $1 - 1/n$ fraction of the inputs. + +Since $\mathsf{PV}_1$ is a sound theory, i.e., every theorem of $\mathsf{PV}_1$ is a true sentence, Item (iii) is in contradiction with the complexity lower bound stated in $\mathsf{LB}^1(s_1, s_2, m, n_0)$ . Consequently, $\mathsf{PV}_1$ does not prove this sentence. + +The most interesting step of the argument is the proof of Item (ii). The key point is that the proof of a lower bound in $\mathrm{PV}_1$ must be somewhat constructive, in the sense that it not only shows that every small circuit $D$ fails to solve the problem but also produces a string $w$ witnessing this fact. Below we give a simple example of its usefulness, showing a setting where a constructive lower bound yields an upper bound. Note that the application of a witnessing theorem to a LogLog formalization provides algorithms running in time poly $(2^n)$ . The example provided next shows that this is still useful. 
+ +Lemma 5.7 ([CLO24a]). Let $L \in \mathsf{NP}$ . Suppose that there is a uniform algorithm $R(1^n, D)$ such that, for every co-nondeterministic circuit $D$ on $n$ input variables and of size at most $n^{\log n}$ , $R(1^n, D)$ runs in time $2^{O(n)}$ and outputs a string $w \in \{0, 1\}^n$ such that $D(w) \neq L(w)$ . Then, for every language $L' \in \mathsf{NP}$ and for every constant $\varepsilon > 0$ , we have $L' \in \mathsf{DTIME}[2^{n^\varepsilon}]$ . + +Proof. Suppose that $L \in \mathsf{NTIME}[n^d]$ for some $d \in \mathbb{N}$ . Let $M'$ be a nondeterministic machine that decides $L'$ and runs in time at most $n^{c'}$ , where $c' \in \mathbb{N}$ . Let $\varepsilon > 0$ be an arbitrary constant. Let $\gamma = \gamma(d, \varepsilon) > 0$ be a small enough constant to be defined later. Finally, let $R$ be the algorithm provided by the hypothesis of the lemma. We show that the following deterministic algorithm $B^{\gamma}(x)$ decides $L'$ in time $O(2^{n^{\varepsilon}})$ : + +Input: $x \in \{0,1\}^n$ for some $n \geq 1$ . + +1 Compute the description of a co-nondeterministic circuit $E'$ of size at most $n^{2c'}$ that decides the complement of $L'$ ; +// In other words, $E'(u) = 1 - L'(u)$ for every string $u \in \{0,1\}^n$ . +2 Produce the description of a co-nondeterministic circuit $D_{x}(y)$ , where $y \in \{0,1\}^{n^{\gamma}}$ , such that $D_{x}(y)$ ignores its input $y$ and computes according to $E'(x)$ ; +// While the length of $y$ is smaller than the length of $u$ , $D_x$ and $E'$ share the same nondeterministic input string, and $E'$ sets $u$ to be the fixed string $x$ . +3 Compute $w = R(1^{n^{\gamma}}, D_x) \in \{0, 1\}^{n^{\gamma}}$ ; +4 Determine the bit $b = L(w)$ by a brute force computation, then return $b$ ; + +Algorithm 2: Algorithm $B^{\gamma}(x)$ for deciding language $L'$ . + +First, we argue that $B^{\gamma}$ decides $L'$ . 
Since $D_x$ is a co-nondeterministic circuit over inputs of length $m \triangleq n^{\gamma}$ and has size at most $n^{2c'} = m^{2c'/\gamma} \leq m^{\log m}$ (for a large enough $m$ ), $R(1^{n^{\gamma}}, D_x)$ outputs a string $w \in \{0, 1\}^{n^{\gamma}}$ such that $L(w) = 1 - D_x(w)$ . Consequently, + +$$ +b = L (w) = 1 - D _ {x} (w) = 1 - E ^ {\prime} (x) = 1 - \left(1 - L ^ {\prime} (x)\right) = L ^ {\prime} (x), +$$ + +i.e., the output bit of $B^{\gamma}(x)$ is correct. + +Next, we argue that $B^{\gamma}$ runs in time at most $O(2^{n^{\varepsilon}})$ . Clearly, Steps 1-2 run in $\mathrm{poly}(n)$ time. Moreover, Step 3 runs in time $2^{O(n^{\gamma})}$ under the assumption on the running time of $R(1^{n^{\gamma}}, D_x)$ . This is at most $2^{n^{\varepsilon}}$ if we set $\gamma \leq \varepsilon / 2$ . Finally, since $L \in \mathsf{NTIME}[n^d]$ , the brute force computation in Step 4 can be performed in deterministic time $2^{O(\ell^d)}$ over an input of length $\ell$ . Since $\ell = n^{\gamma} = |w|$ in our case, if $\gamma \leq \varepsilon / 2d$ we get that Step 4 runs in time at most $2^{n^{\varepsilon}}$ . Overall, if we set $\gamma \triangleq \varepsilon / 2d$ , it follows that $B^{\gamma}$ runs in time at most $O(2^{n^{\varepsilon}})$ . This completes the proof that $L' \in \mathsf{DTIME}[2^{n^{\varepsilon}}]$ . + +The proof of Item (ii) is significantly more sophisticated, since one does not get an algorithm $R$ as above from a $\mathrm{PV}_1$ proof of the lower bound sentence $\mathsf{LB}^1(s_1, s_2, m, n_0)$ . The argument combines a witnessing theorem for sentences with more than four quantifier alternations and an ingenious technique from [Kra11] that relies on ideas from the theory of computational pseudorandomness. + +Open Problem 5.8. 
Strengthen the unprovability result from Theorem 5.6 in the following directions:

(a) show that it holds in the polynomial size regime, i.e., with $s_1(n) = n^a$ and for some $s_2(n) = n^b$ ;
(b) establish the unprovability of worst-case lower bounds against nondeterministic circuits;
(c) show the unprovability of average-case lower bounds against deterministic circuits;
(d) establish the same result with respect to a stronger theory.

We refer to [LO23, CLO24a, CLO24b] for some related results and partial progress.

# 5.2.2 Extended Frege Lower Bounds

This section covers a result on the unprovability of super-polynomial size extended Frege $(e\mathcal{F})$ lower bounds in $\mathrm{PV}_1$ [KP89] (see also [CU93, Bus90]). We refer to Section 3.2 for the necessary background. We will also need the definitions and results from Section 3.3.

We adapt the presentation from [Kra19]. Consider the theory $\mathsf{PV}_1$ and its language $\mathcal{L}_{\mathsf{PV}}$ . We shall use the following $\mathcal{L}_{\mathsf{PV}}$ formulas:

- $\operatorname{Sat}(x, y)$ : a quantifier-free formula formalizing that $y$ is a satisfying assignment of the Boolean formula $x$ ;
- $\operatorname{Taut}(x) \triangleq \forall y \leq x \operatorname{Sat}(x, y)$ ;
- $\operatorname{Proof}_P(x, z)$ : a quantifier-free formula formalizing that $z$ is a $P$ -proof of $x$ .

The following lemma is central to the unprovability result.

Lemma 5.9. Let $M \models \mathsf{PV}_1$ , and assume that $\phi \in M$ is a propositional formula. The following statements are equivalent:

(i) There is no eF-proof of $\phi$ in $M$ :

$$
M \models \forall z \, \neg \operatorname{Proof}_{e\mathcal{F}}(\phi, z).
$$

(ii) There is an extension $M^{\prime}\supseteq M$ (also a model of $\mathsf{PV}_1$ ) in which $\phi$ is falsified:

$$
M^{\prime} \vDash \exists y \operatorname{Sat}(\neg\phi, y).
+$$

The proof of Lemma 5.9 proceeds by compactness and uses that the correctness of the propositional translation from $\mathsf{PV}_1$ to $e\mathcal{F}$ (Section 3.2) is also provable in $\mathsf{PV}_1$ .

Lemma 5.10. Let $M$ be a nonstandard countable model of $\mathsf{PV}_1$ . Then it has a cofinal extension $M' \supseteq_{\mathrm{cf}} M$ (also a model of $\mathsf{PV}_1$ ) such that every tautology in $M'$ has an eF-proof in $M'$ .

The proof of Lemma 5.10 iterates Lemma 5.9 while taking cuts to ensure that the limit extension $M' = \bigcup_{i} M_i$ (where $M_0 = M$ ) is cofinal in $M$ . Since each $M_i \models \mathsf{PV}_1$ and $\mathsf{PV}_1$ is universal, we also have $M' \models \mathsf{PV}_1$ .

We will need the following analogue of Lemma 3.6 for $\mathsf{PV}_1$ .

Fact 5.11. Let $M_0$ be a nonstandard countable model of $\mathsf{PV}_1$ . Then there is a (countable) cut $M$ of $M_0$ that is a (nonstandard) model of $\mathsf{PV}_1$ and a length $n \in M$ , where $n = |a|$ for some nonstandard $a \in M$ , such that for every $b \in M$ we have $M \models |b| \leq n^k$ for some standard number $k$ .

The next result is a consequence of the existence of nonstandard countable models, Fact 5.11, and Lemma 5.10.

Lemma 5.12. There is a model $M^{*}$ of $\mathsf{PV}_1$ such that the following properties hold:

(i) Any tautology in $M^{*}$ has an eF-proof in $M^{*}$ ;
(ii) There is a nonstandard element $a \in M^*$ of length $n \triangleq |a|$ such that for any element $b \in M^*$ there is a standard number $k$ such that $M^* \models |b| \leq n^k$ .

Theorem 5.13 (Unprovability of super-polynomial size $e\mathcal{F}$ lower bounds in $\mathrm{PV}_1$ [KP89]). Consider the sentence

$$
\Psi_{e\mathcal{F}} \triangleq \forall x \, \exists \phi \geq x \, [\operatorname{Taut}(\phi) \wedge \forall \pi (|\pi| \leq |\phi| \# |\phi| \rightarrow \neg \operatorname{Proof}_{e\mathcal{F}}(\phi, \pi))].
$$

The sentence $\Psi_{e\mathcal{F}}$ is not provable in $\mathsf{PV}_1$ .

Proof. Suppose $\mathsf{PV}_1 \vdash \Psi_{e\mathcal{F}}$ . Let $M^*$ , $a$ , and $n \triangleq |a|$ be as in Lemma 5.12. Since $\Psi_{e\mathcal{F}}$ holds in $M^*$ , there is a tautology $\phi \in M^*$ with $\phi \geq a$ and consequently $|\phi| \geq n$ such that $\phi$ does not have an $e\mathcal{F}$ -proof of size $|\phi|\# |\phi|$ in $M^*$ . On the other hand, by the two properties of $M^*$ given by Lemma 5.12, the formula $\phi$ has an $e\mathcal{F}$ -proof of size at most $n^k$ for some standard number $k$ . Finally, since the element $a$ is nonstandard, we have $n^k \leq n\# n \leq |\phi|\# |\phi|$ in $M^*$ . This contradiction implies that $\mathsf{PV}_1$ does not prove $\Psi_{e\mathcal{F}}$ .

Open Problem 5.14. Show that $\mathsf{PV}_1$ cannot prove fixed-polynomial size lower bounds on the length of $e\mathcal{F}$ proofs.

Open Problem 5.15. Establish the unprovability of the sentence $\Psi_{e\mathcal{F}}$ in theory $\mathsf{S}_2^1$ .

# 5.3 Connection Between Upper Bounds and Lower Bounds

In this section, we explain a result from [BKO20] showing that the unprovability of $\mathsf{P} = \mathsf{NP}$ (Open Problem 5.3) is related to the unprovability of circuit lower bounds. For a $\mathsf{PV}_1$ function symbol $h$ and a circuit size parameter $k\in \mathbb{N}$ , consider the sentence

$$
\mathsf{LB}_k^{a.e.}(h) \triangleq \neg \mathsf{UB}_k^{i.o.}[h],
$$

where $\mathsf{UB}_k^{i.o.}[h]$ is the sentence defined in Section 5.1.1. The sentence $\mathsf{LB}_k^{a.e.}(h)$ states that the language defined by $h$ is hard on input length $n$ for circuits of size $n^k$ whenever $n$ is sufficiently large.

Theorem 5.16 (Unprovability of $\mathsf{P} = \mathsf{NP}$ in $\mathsf{PV}_1$ from the unprovability of lower bounds in $\mathsf{PV}_1$ [BKO20]).
If there exists $k\in \mathbb{N}$ such that for no function symbol $h$ theory $\mathsf{PV}_1$ proves the sentence $\mathsf{LB}_k^{a.e.}(h)$ , then for no function symbol $f$ theory $\mathsf{PV}_1$ proves the sentence $\varphi_{\mathsf{P} = \mathsf{NP}}(f)$ .

Theorem 5.16 shows that if $\mathrm{PV}_1$ does not prove $n^k$ -size lower bounds for a language in $\mathsf{P}$ , then $\mathsf{P} \neq \mathsf{NP}$ is consistent with $\mathrm{PV}_1$ . Note that the hypothesis of Theorem 5.16 is weaker than the assumption that $\mathrm{PV}_1$ does not prove that $\mathsf{NP} \not\subseteq \mathsf{SIZE}[n^k]$ for some $k$ .

Sketch of the proof of Theorem 5.16. We proceed in the contrapositive. We formalize in $\mathsf{PV}_1$ the result that if $\mathsf{P} = \mathsf{NP}$ , then for any parameter $k$ , $\mathsf{P} \not\subseteq$ i.o. $\mathsf{SIZE}[n^k]$ (see, e.g., [Lip94, Theorem 3]). This result combines the collapse of $\mathsf{PH}$ to $\mathsf{P}$ with Kannan's argument [Kan82] that $\mathsf{PH}$ can define languages that are almost-everywhere hard against circuits of fixed-polynomial size. Typically, proving this claim requires showing the existence of a truth table of size $2^n$ that is hard against circuits of size $n^k$ . However, this result might not be provable in $\mathsf{PV}_1$ .

We address this issue as follows. From the provability in $\mathsf{PV}_1$ that $\mathsf{P} = \mathsf{NP}$ , it follows that for each $i\geq 1$ theory $\mathsf{T}_2^i$ collapses to $\mathsf{PV}_1$ [KPT91]. Recall that the dual weak pigeonhole principle (dWPHP) for $\mathcal{L}_{\mathsf{PV}}$ -functions is provable in $\mathsf{T}_2^2$ . Define a $\mathsf{PV}_1$ function symbol $g$ that takes as input a circuit $C$ of size $n^k$ and outputs the lexicographically first $n^{k + 1}$ bits of the truth table computed by $C$ . From dWPHP $(g)$ , we now

derive in $\mathsf{PV}_1$ that the prefix of some truth table is not computable by circuits of size $n^k$ , if $n$ is sufficiently large.
We can implicitly extend this truth table prefix with zeroes and use the resulting truth table to define a $\mathsf{PV}_1$ -formula $\varphi(x)$ with a constant number of bounded quantifiers that defines a language $L$ that is hard against circuits of size $n^k$ , where the hardness is provable in $\mathsf{PV}_1$ . + +Given that the provability in $\mathsf{PV}_1$ that $\mathsf{P} = \mathsf{NP}$ implies the provability in $\mathsf{PV}_1$ that $\mathsf{PH}$ collapses to $\mathsf{P}$ , it follows that $\varphi(x)$ is equivalent in $\mathsf{PV}_1$ to the language defined by some $\mathcal{L}_{\mathsf{PV}}$ -function $h$ . In other words, $\mathsf{PV}_1 \vdash \mathsf{LB}_k^{a.e.}(h)$ , which completes the proof of Theorem 5.16. + +[CLO24b] shows an example of a simple lower bound that is not provable in $\mathrm{PV}_1$ , under a plausible cryptographic assumption. This indicates that Theorem 5.16 might offer a viable approach towards a solution to Open Problem 5.3. + +# 6 Additional Recent Developments + +The provability of the dual Weak Pigeonhole Principle (dWPHP) for polynomial-time functions is closely related to the provability of exponential circuit lower bounds for a language in deterministic exponential time [Jef07]. [Kra21] showed that dWPHP cannot be proved in $\mathsf{PV}_1$ under the assumption that $\mathsf{P} \subseteq \mathsf{SIZE}[n^k]$ for some constant $k$ . [ILW23] established the same unprovability result assuming subexponentially secure indistinguishability obfuscation and coNP $\not\subset$ i.o.AM. + +[ABM23] established the unprovability of NEXP $\subseteq$ SIZE[poly] in the theory of bounded arithmetic $V_2^0$ (not covered in this survey). Interestingly, their approach does not employ a witnessing theorem. It proceeds instead by simulating a comprehension axiom scheme assuming the provability of the upper bound sentence, eventually relying on an existing lower bound on the provability of the pigeonhole principle. 
+

[CLO24b] systematically investigates the reverse mathematics of complexity lower bounds. They demonstrated that various lower bound statements in communication complexity, error-correcting codes, and for Turing machines are equivalent to well-studied combinatorial principles, such as the weak pigeonhole principle for polynomial-time functions and its variants. Consequently, complexity lower bounds can be regarded as fundamental axioms with significant implications. They use these equivalences to derive conditional results on the unprovability of simple lower bounds in $\mathsf{APC}_1$ .

$\left[\mathrm{CKK}^{+}24\right]$ investigates the provability of the circuit size hierarchy in bounded arithmetic, captured by a sentence CSH stating that for each $n \geq n_0$ , there is a circuit of size $n^a$ that does not admit an equivalent circuit of size $n^b$ , where $a > b > 1$ and $n_0$ are fixed. They showed that CSH is provable in $\mathsf{T}_2^2$ , while its provability in $\mathsf{T}_2^1$ implies that $\mathsf{P}^{\mathsf{NP}} \not\subseteq \mathsf{SIZE}[n^{1 + \varepsilon}]$ for some $\varepsilon > 0$ . Thus a better proof complexity upper bound for the circuit size hierarchy yields new circuit lower bounds.

[CRT24] establishes the unprovability of NP $\neq$ PSPACE in $\mathsf{APC}_1$ (with a LogLog formalization) under a strong average-case hardness assumption.

[Kra24] offers a comprehensive reference on proof complexity generators, whose investigation is closely related to dWPHP and its provability in bounded arithmetic. The theory of proof complexity generators offers tautologies that serve as potential candidates for demonstrating super-polynomial extended Frege lower bounds and consequently the unprovability of $\mathsf{P} = \mathsf{NP}$ in $\mathsf{PV}_1$ .
+ +We have not covered a number of results connected to the meta-mathematics of complexity lower bounds developed in the context of propositional proof complexity (see, e.g., [Raz15, Kra19, AR23, Kra24] and references therein). It is worth noting that results on the non-automatability of weak proof systems such as [AM20, dRGN $^{+}$ 21] were made possible thanks to the investigation of the meta-mathematics of proof complexity. + +Finally, several other recent papers have investigated directions connected to bounded arithmetic and the meta-mathematics of complexity theory, e.g., [PS22, Kha22, PS23, AKPS24, LLR24]. Due to space constraints, we are not able to cover all recent developments in this survey. + +Acknowledgements. I would like to thank Noel Arteche, Jinqiao Hu, Jan Krajicek, Moritz Müller, Mykyta Narusevych, Ján Pich, and Dimitrios Tsintsilidas for their valuable comments and feedback on an earlier version of this survey. This work received support from the Royal Society University Research Fellowship URF\R1\191059; the UKRI Frontier Research Guarantee EP/Y007999/1; and the Centre for Discrete Mathematics and its Applications (DIMAP) at the University of Warwick. + +# References + +[AB09] Sanjeev Arora and Boaz Barak. Computational Complexity - A Modern Approach. Cambridge University Press, 2009. +[ABM23] Albert Atserias, Samuel R. Buss, and Moritz Müller. On the consistency of circuit lower bounds for non-deterministic time. In Symposium on Theory of Computing (STOC), pages 1257-1270, 2023. +[AKPS24] Noel Arteche, Erfan Khaniki, Jan Pich, and Rahul Santhanam. From proof complexity to circuit complexity via interactive protocols. In International Colloquium on Automata, Languages, and Programming (ICALP), 2024. +[AM20] Albert Atserias and Moritz Müller. Automating resolution is NP-hard. J. ACM, 67(5):31:1-31:17, 2020. +[AR23] Per Austrin and Kilian Risse. Sum-of-squares lower bounds for the minimum circuit size problem. 
In Computational Complexity Conference (CCC), pages 31:1-31:21, 2023. +[AW09] Scott Aaronson and Avi Wigderson. Algebraization: A new barrier in complexity theory. Transactions on Computation Theory (TOCT), 1(1), 2009. +[Bey09] Olaf Beyersdorff. On the correspondence between arithmetic theories and propositional proof systems – a survey. Mathematical Logic Quarterly, 55(2):116–137, 2009. +[BGS75] Theodore P. Baker, John Gill, and Robert Solovay. Relativizatons of the $\mathsf{P} = ?$ NP Question. SIAM J. Comput., 4(4):431-442, 1975. +[BKKK20] Sam R. Buss, Valentine Kabanets, Antonina Kolokolova, and Michal Koucký. Expander construction in VNC1. Annals of Pure and Applied Logic, 171(7):102796, 2020. +[BKO20] Jan Bydzovsky, Jan Krajíček, and Igor C. Oliveira. Consistency of circuit lower bounds with bounded theories. *Logical Methods in Computer Science*, 16(2), 2020. +[BKT14] Samuel R. Buss, Leszek A. Kołodziejczyk, and Neil Thapen. Fragments of approximate counting. Journal of Symbolic Logic, 79(2):496-525, 2014. +[BM20] Jan Bydzovsky and Moritz Müller. Polynomial time ultrapowers and the consistency of circuit lower bounds. Arch. Math. Log., 59(1-2):127-147, 2020. +[Bus86] Samuel R. Buss. Bounded Arithmetic. Bibliopolis, 1986. +[Bus90] Samuel R. Buss. On model theory for intuitionistic bounded arithmetic with applications to independence results. In *Feasible Mathematics: A Mathematical Sciences Institute Workshop, Ithaca, New York, June 1989*, pages 27-47. Springer, 1990. + +[Bus94] Samuel R. Buss. On herbrand's theorem. In Selected Papers from the Logic and Computational Complexity International Workshop (LCC), pages 195-209, 1994. +[Bus97] Samuel R. Buss. Bounded arithmetic and propositional proof complexity. In Logic of Computation, pages 67-121. Springer Berlin Heidelberg, 1997. +$\left[\mathrm{CHO}^{+}22\right]$ Lijie Chen, Shuichi Hirahara, Igor C. Oliveira, Jan Pich, Ninad Rajgopal, and Rahul Santhanam. 
Beyond natural proofs: Hardness magnification and locality. J. ACM, 69(4):25:1-25:49, 2022. +[CIKK16] Marco L. Carmosino, Russell Impagliazzo, Valentine Kabanets, and Antonina Kolokolova. Learning algorithms from natural proofs. In Conference on Computational Complexity (CCC), pages 10:1-10:24, 2016. +[CJsw21] Lijie Chen, Ce Jin, Rahul Santhanam, and Ryan Williams. Constructive separations and their consequences. In Symposium on Foundations of Computer Science (FOCS), 2021. +[CK07] Stephen A. Cook and Jan Krajček. Consequences of the provability of NP $\subseteq$ P/poly. Journal of Symbolic Logic, 72(4):1353-1371, 2007. +$\left[\mathrm{CKK}^{+}24\right]$ Marco Carmosino, Valentine Kabanets, Antonina Kolokolova, Igor C. Oliveira, and Dimitrios Tsintsili-das. Provability of the circuit size hierarchy and its consequences. Preprint, 2024. +[CKKO21] Marco Carmosino, Valentine Kabanets, Antonina Kolokolova, and Igor C. Oliveira. Learn-uniform circuit lower bounds and provability in bounded arithmetic. In Symposium on Foundations of Computer Science (FOCS), 2021. +[CLO24a] Lijie Chen, Jiatu Li, and Igor C. Oliveira. On the unprovability of circuit size bounds in intuitionistic $S_2^1$ . Preprint: arXiv:2404.11841, 2024. +[CLO24b] Lijie Chen, Jiatu Li, and Igor C. Oliveira. Reverse mathematics of complexity lower bounds. In Symposium on Foundations of Computer Science (FOCS), 2024. +[CN10] Stephen A. Cook and Phuong Nguyen. Logical Foundations of Proof Complexity. Cambridge University Press, 2010. +[Bcob65] Alan Cobham. The intrinsic computational difficulty of functions. Proc. Logic, Methodology and Philosophy of Science, pages 24-30, 1965. +[Co075] Stephen A. Cook. Feasibly constructive proofs and the propositional calculus (preliminary version). In Symposium on Theory of Computing (STOC), pages 83-97, 1975. +[CRT24] Lijie Chen, Ron D. Rothblum, and Roei Tell. Fiat-Shamir in the plain model from derandomization. Electron. Colloquium Comput. Complex., TR24-116, 2024. 
+[CU93] Stephen Cook and Alasdair Urquhart. Functional interpretations of feasibly constructive arithmetic. Annals of Pure and Applied Logic, 63(2):103-200, 1993. +[Din07] Irit Dinur. The PCP theorem by gap amplification. J. ACM, 54(3):12, 2007. +[dRGN+21] Susanna F. de Rezende, Mika Göös, Jakob Nordström, Toniann Pitassi, Robert Robere, and Dmitry Sokolov. Automating algebraic proof systems is NP-hard. In Symposium on Theory of Computing (STOC), pages 209-222, 2021. +[Gay23] Azza Gaysin. Proof complexity of CSP. ArXiv e-Print arXiv:2201.00913, 2023. +[Gay24] Azza Gaysin. Proof complexity of universal algebra in a CSP dichotomy proof. ArXiv e-Print arXiv:2403.06704, 2024. + +[HP93] Petr Hajek and Pavel Pudlák. Metamathematics of first-order arithmetic. Springer-Verlag, 1993. +[ILW23] Rahul Ilango, Jiatu Li, and Ryan Williams. Indistinguishability obfuscation, range avoidance, and bounded arithmetic. In Symposium on Theory of Computing (STOC), pages 1076–1089. ACM, 2023. +[Jer04] Emil Jerabek. Dual weak pigeonhole principle, boolean complexity, and derandomization. Annals of Pure and Applied Logic, 129(1-3):1-37, 2004. +[Jef05] Emil Jerabek. Weak pigeonhole principle and randomized computation. PhD thesis, Charles University in Prague, 2005. +[Jer06] Emil Jerabek. The strength of sharply bounded induction. Mathematical Logic Quarterly, 52(6):613-624, 2006. +[Jer07] Emil Jerabek. Approximate counting in bounded arithmetic. Journal of Symbolic Logic, 72(3):959-993, 2007. +[Juk12] Stasys Jukna. Boolean Function Complexity: Advances and Frontiers. Springer, 2012. +[Kan82] Ravi Kannan. Circuit-size lower bounds and non-reducibility to sparse sets. Information and Control, 55(1-3):40-56, 1982. +[Kha22] Erfan Khaniki. Nisan-Wigderson generators in proof complexity: New lower bounds. In Computational Complexity Conference (CCC), pages 17:1-17:15, 2022. +[KO17] Jan Krajíček and Igor C. Oliveira. Unprovability of circuit upper bounds in Cook's theory PV. 
*Logical Methods in Computer Science*, 13(1), 2017. +[KP89] Jan Krajíček and Pavel Pudlák. Propositional provability and models of weak arithmetic. In CSL'89: Proceedings of the 3rd Workshop on Computer Science Logic, pages 193-210, 1989. +[KPS90] Jan Krajíček, Pavel Pudlák, and Jíří Sgall. Interactive computations of optimal solutions. In International Symposium on Mathematical Foundations of Computer Science (MFCS), volume 452, pages 48-60, 1990. +[KPT91] Jan Krajíček, Pavel Pudlák, and Gaisi Takeuti. Bounded arithmetic and the polynomial hierarchy. Annals of Pure and Applied Logic, 52(1-2):143-153, 1991. +[Kra95] Jan Krajíček. Bounded Arithmetic, Propositional Logic, and Complexity Theory. Encyclopedia of Mathematics and its Applications. Cambridge University Press, 1995. +[Kra97] Jan Krajicek. Interpolation theorems, lower bounds for proof systems, and independence results for bounded arithmetic. J. Symb. Log., 62(2):457-486, 1997. +[Kra11] Jan Krajicek. On the proof complexity of the Nisan-Wigderson generator based on a hard NP $\cap$ coNP function. Journal of Mathematical Logic, 11(1), 2011. +[Kra19] Jan Krajíček. Proof Complexity. Encyclopedia of Mathematics and its Applications. Cambridge University Press, 2019. +[Kra21] Jan Krajíček. Small circuits and dual weak PHP in the universal theory of p-time algorithms. ACM Transactions on Computational Logic (TOCL), 22(2):1-4, 2021. +[Kra24] Jan Krajicek. Proof Complexity Generators. Monograph available at https://www.karlin.mff.cuni.cz/~krajicek/gdraft.html, 2024. + +[Lê14] Dai Tri Man Lê. Bounded Arithmetic and Formalizing Probabilistic Proofs. PhD thesis, University of Toronto, 2014. +[LC11] Dai Tri Man Le and Stephen A. Cook. Formalizing randomized matching algorithms. Log. Methods Comput. Sci., 8(3), 2011. +[Lip94] Richard J. Lipton. Some consequences of our failure to prove non-linear lower bounds on explicit functions. In Structure in Complexity Theory Conference (CCC), pages 79-87, 1994. 
+[LLR24] Jiawei Li, Yuhao Li, and Hanlin Ren. Meta-mathematics of resolution lower bounds: A TFNP perspective. Preprint, 2024. +[LO23] Jiatu Li and Igor C. Oliveira. Unprovability of strong complexity lower bounds in bounded arithmetic. In Symposium on Theory of Computing (STOC), 2023. +[McK10] Richard McKinley. A sequent calculus demonstration of Herbrand's theorem. arXiv preprint arXiv:1007.3414, 2010. +[MP20] Moritz Müller and Ján Pich. Feasibly constructive proofs of succinct weak circuit lower bounds. Annals of Pure and Applied Logic, 171(2), 2020. +[MPW02] Alexis Maciel, Toniann Pitassi, and Alan R. Woods. A new proof of the weak pigeonhole principle. Journal of Computer and System Sciences, 64(4):843-872, 2002. +[Oja04] Kerry Ojakian. Combinatorics in Bounded Arithmetic. PhD thesis, Carnegie Mellon University, 2004. +[Par71] Rohit Parikh. Existence and feasibility in arithmetic. Journal of Symbolic Logic, 36(3):494-508, 1971. +[Pic15a] Jan Pich. Circuit lower bounds in bounded arithmetics. Annals of Pure and Applied Logic, 166(1):29-45, 2015. +[Pic15b] Jan Pich. Logical strength of complexity theory and a formalization of the PCP theorem in bounded arithmetic. *Logical Methods in Computer Science*, 11(2), 2015. +[PS21] Jan Pich and Rahul Santhanam. Strong co-nondeterministic lower bounds for NP cannot be proved feasibly. In Symposium on Theory of Computing (STOC), pages 223-233, 2021. +[PS22] Jan Pich and Rahul Santhanam. Learning algorithms versus automatability of Frege systems. In International Colloquium on Automata, Languages, and Programming (ICALP), pages 101:1-101:20, 2022. +[PS23] Ján Pich and Rahul Santhanam. Towards $\mathrm{P} \neq \mathrm{NP}$ from extended Frege lower bounds. *Electron. Colloquium Comput. Complex.*, TR23-199, 2023. +[Pud06] Pavel Pudlák. Consistency and games - in search of new combinatorial principles. In V. Stoltenberg-Hansen and J. Väätänen, editors, Logic Colloquium '03, volume 24 of Lecture Notes in Logic, pages 244-281. 
ASL, 2006. +[PWW88] Jeff B. Paris, A. J. Wilkie, and Alan R. Woods. Provability of the pigeonhole principle and the existence of infinitely many primes. J. Symb. Log., 53(4):1235-1244, 1988. +[Raz95a] Alexander A. Razborov. Bounded arithmetic and lower bounds in boolean complexity. In P. Clote and J. Remmel, editors, Feasible Mathematics II, pages 344-386. Birkhäuser, 1995. +[Raz95b] Alexander A Razborov. Unprovability of lower bounds on circuit size in certain fragments of bounded arithmetic. Izvestiya: mathematics, 59(1):205, 1995. + +[Raz15] Alexander A. Razborov. Pseudorandom generators hard for $k$ -DNF resolution and polynomial calculus resolution. Annals of Mathematics, pages 415-472, 2015. +[RR97] Alexander A. Razborov and Steven Rudich. Natural proofs. Journal of Computer and System Sciences, 55(1):24-35, 1997. +[Sub61] Bella A. Subbotovskaya. Realization of linear functions by formulas using $+, \cdot, -$ . In Soviet Math. Dokl, 1961. +[SW14] Rahul Santhanam and Ryan Williams. On uniformity and circuit lower bounds. Computational Complexity, 23(2):177-205, 2014. +[TC21] Iddo Tzameret and Stephen A. Cook. Uniform, integral, and feasible proofs for the determinant identities. J. ACM, 68(2):12:1-12:80, 2021. +[Woo81] Alan R. Woods. Some problems in logic and number theory and their connections. PhD thesis, University of Manchester, 1981. +[WP87] Alex J. Wilkie and Jeff B. Paris. On the scheme of induction for bounded arithmetic formulas. Ann. Pure Appl. Log., 35:261-302, 1987. 
\ No newline at end of file diff --git a/data/2025/2504_04xxx/2504.04416/images/0b7cc6ae545f43e15552a3e812577062636b59aaf9178ecc7da4e43b7e5c6e22.jpg b/data/2025/2504_04xxx/2504.04416/images/0b7cc6ae545f43e15552a3e812577062636b59aaf9178ecc7da4e43b7e5c6e22.jpg new file mode 100644 index 0000000000000000000000000000000000000000..826a5b75bf60e237ef189e6c97036b1537c0b196 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/images/0b7cc6ae545f43e15552a3e812577062636b59aaf9178ecc7da4e43b7e5c6e22.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e12569a4f77d1b0ca97232e38a1dee2b743967e4f6dd40ee909c81d2645c69e2 +size 8959 diff --git a/data/2025/2504_04xxx/2504.04416/images/1722095f7b04eddf62a8433353d1f5dce7d563215069760618d773acd4b5bfae.jpg b/data/2025/2504_04xxx/2504.04416/images/1722095f7b04eddf62a8433353d1f5dce7d563215069760618d773acd4b5bfae.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c1c02b8dcd67cd3391c57993b2fee91fc6129096 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/images/1722095f7b04eddf62a8433353d1f5dce7d563215069760618d773acd4b5bfae.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:be7f6b3f276ea3d7bce73ff85d12b95bf4c972570ad94454c7d3aee9cdc9afb8 +size 3613 diff --git a/data/2025/2504_04xxx/2504.04416/images/19428c7bc08f88c4b13a686b11ea2e157a026aa70c539069b4339f8f9f77ff5c.jpg b/data/2025/2504_04xxx/2504.04416/images/19428c7bc08f88c4b13a686b11ea2e157a026aa70c539069b4339f8f9f77ff5c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..03b33f7cf5d8ecdea4f3b9d5ae1180a4c6965cca --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/images/19428c7bc08f88c4b13a686b11ea2e157a026aa70c539069b4339f8f9f77ff5c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:64c0b374f346b60e447c1a7cc29ae45816f972699b5631ab17b0113ab615928c +size 10105 diff --git 
a/data/2025/2504_04xxx/2504.04416/images/1e719365c9b8487be9ebe15f82e9d7c05674d60514e56e0d115615219d9d1d93.jpg b/data/2025/2504_04xxx/2504.04416/images/1e719365c9b8487be9ebe15f82e9d7c05674d60514e56e0d115615219d9d1d93.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6ed90930f4b0eff6582b3b0e5d606be4dafd8e07 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/images/1e719365c9b8487be9ebe15f82e9d7c05674d60514e56e0d115615219d9d1d93.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ebe70df97fb400c4a67aea93b32857e76990829737700022ff15ebbfc3e040ae +size 9215 diff --git a/data/2025/2504_04xxx/2504.04416/images/1ea9e01081d47172c398dfa6f9930dc757af6069b9c122853a9410bf9e9309a1.jpg b/data/2025/2504_04xxx/2504.04416/images/1ea9e01081d47172c398dfa6f9930dc757af6069b9c122853a9410bf9e9309a1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..df11ef81999c7ded92248337925e3f7c9e9cdcc9 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/images/1ea9e01081d47172c398dfa6f9930dc757af6069b9c122853a9410bf9e9309a1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2052a16b99cfd6bf78d6514c1785bded27febdff452b8ff2865b4b796bbd84f8 +size 11138 diff --git a/data/2025/2504_04xxx/2504.04416/images/1eaa6e0d10a6bef8d8ba21407da2a82fb6e533d4ab45bfd8bf39b85cb2fc26aa.jpg b/data/2025/2504_04xxx/2504.04416/images/1eaa6e0d10a6bef8d8ba21407da2a82fb6e533d4ab45bfd8bf39b85cb2fc26aa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8a5788409e0d73222712e0a78eb674a25967993f --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/images/1eaa6e0d10a6bef8d8ba21407da2a82fb6e533d4ab45bfd8bf39b85cb2fc26aa.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c74ee4c83c27727c61f9dc77a40e455292f90622ad78b31ace0cee7c4acb3abc +size 3715 diff --git a/data/2025/2504_04xxx/2504.04416/images/235d6586c0ce6341c4f21d8c4228405d2b57882e2c57d5575f5f71bd8ed9efa9.jpg 
b/data/2025/2504_04xxx/2504.04416/images/235d6586c0ce6341c4f21d8c4228405d2b57882e2c57d5575f5f71bd8ed9efa9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3d0dfc0e7b9a3e241418e7b5bce025567a17606f --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/images/235d6586c0ce6341c4f21d8c4228405d2b57882e2c57d5575f5f71bd8ed9efa9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c2e32b3fde2de964f6567083773d808636ce56584b1a06e925e95d119209fe05 +size 22464 diff --git a/data/2025/2504_04xxx/2504.04416/images/267e5f08e0159c02b9d2a45e85a5b30719fb24bc8fbcb5fa0bcb3f348900839a.jpg b/data/2025/2504_04xxx/2504.04416/images/267e5f08e0159c02b9d2a45e85a5b30719fb24bc8fbcb5fa0bcb3f348900839a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4be6d59fc1530aa5790603051b6cbffbeaa11929 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/images/267e5f08e0159c02b9d2a45e85a5b30719fb24bc8fbcb5fa0bcb3f348900839a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7437ca537348d87db26284931ce351c234201dd9b3f926d15745290345a99669 +size 3073 diff --git a/data/2025/2504_04xxx/2504.04416/images/27e37c0bfa938324d20725c153383fe2e008ba8d7c523465b4cde0d3211f1a38.jpg b/data/2025/2504_04xxx/2504.04416/images/27e37c0bfa938324d20725c153383fe2e008ba8d7c523465b4cde0d3211f1a38.jpg new file mode 100644 index 0000000000000000000000000000000000000000..97761ed3646e15c3658485ed81b83a5d58332811 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/images/27e37c0bfa938324d20725c153383fe2e008ba8d7c523465b4cde0d3211f1a38.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d67ce57ac17f9f873a1be22ff1ea0778827e999d4f016c8f195b87b74dcae450 +size 9065 diff --git a/data/2025/2504_04xxx/2504.04416/images/2d22254fa53ed9c0bfb4f7a3721e172f45299ac18002cfe7b6c71e2c693406ce.jpg b/data/2025/2504_04xxx/2504.04416/images/2d22254fa53ed9c0bfb4f7a3721e172f45299ac18002cfe7b6c71e2c693406ce.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..77325ea0f28b32d738f77434341ec5729961e271 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/images/2d22254fa53ed9c0bfb4f7a3721e172f45299ac18002cfe7b6c71e2c693406ce.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:08f24b6500ddbc043330ebee685283621857e68c2791614f79065f49a9e7b71e +size 15592 diff --git a/data/2025/2504_04xxx/2504.04416/images/361d2eabb4fbcc8569f1abf8fa96a20860733cd7792ec1b3bcbc2b9e891263e2.jpg b/data/2025/2504_04xxx/2504.04416/images/361d2eabb4fbcc8569f1abf8fa96a20860733cd7792ec1b3bcbc2b9e891263e2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fbb08a4afc23ca10bdfaca37321a28b53c7cb702 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/images/361d2eabb4fbcc8569f1abf8fa96a20860733cd7792ec1b3bcbc2b9e891263e2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:62ff946b7ef67767d51131b0bb32b080c75111370c1ecdf9b6fe80befc7aaf66 +size 7591 diff --git a/data/2025/2504_04xxx/2504.04416/images/38010ea5f5cc138fddb5b73510aa383621fffbca94c78430904f64e876ec2f19.jpg b/data/2025/2504_04xxx/2504.04416/images/38010ea5f5cc138fddb5b73510aa383621fffbca94c78430904f64e876ec2f19.jpg new file mode 100644 index 0000000000000000000000000000000000000000..885e27880e16fc3fbc9c69de3eff4320b2dc5a14 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/images/38010ea5f5cc138fddb5b73510aa383621fffbca94c78430904f64e876ec2f19.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e1374f0d8c09cebd58ab2cf2a53e98f1ead2071fed58cc9a7b8a845963bed363 +size 3134 diff --git a/data/2025/2504_04xxx/2504.04416/images/47aefef7c299135f8cd4a10560540de952da8874e8d904415659cff411b2bca0.jpg b/data/2025/2504_04xxx/2504.04416/images/47aefef7c299135f8cd4a10560540de952da8874e8d904415659cff411b2bca0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dc6ee9bf6d15fb68caa38ae21739d7e74fa83c48 --- /dev/null +++ 
b/data/2025/2504_04xxx/2504.04416/images/47aefef7c299135f8cd4a10560540de952da8874e8d904415659cff411b2bca0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:61cdb3647abe60c6fa778728672536c7b9caf54253b4e44edc65d44ca968a40f +size 9343 diff --git a/data/2025/2504_04xxx/2504.04416/images/56ac5bb88cdef5b62d57e1f085108ca60d496943a43c6de07136626b9a27bc0b.jpg b/data/2025/2504_04xxx/2504.04416/images/56ac5bb88cdef5b62d57e1f085108ca60d496943a43c6de07136626b9a27bc0b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8ac32989895e3eefa58b8d8032e3f47f1b7e82c8 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/images/56ac5bb88cdef5b62d57e1f085108ca60d496943a43c6de07136626b9a27bc0b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:93065d294af2b2ce6dafb5b4ba1ed020f9b9d9c97c90c5c277780b43b845f2c7 +size 4066 diff --git a/data/2025/2504_04xxx/2504.04416/images/61ab6dc3ffda86376e6eedf888d5e7150209d299534efacbd943d531c38369f0.jpg b/data/2025/2504_04xxx/2504.04416/images/61ab6dc3ffda86376e6eedf888d5e7150209d299534efacbd943d531c38369f0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2a21c9dcc0e40809951502e7be04c22213b87e46 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/images/61ab6dc3ffda86376e6eedf888d5e7150209d299534efacbd943d531c38369f0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:43efda077b6d196c335c172a881fa0dd8d2ce9e87563e58de8bf3eb810cdc7e0 +size 8066 diff --git a/data/2025/2504_04xxx/2504.04416/images/61b2543cf59e3a45cc3cc0df307cd1ce6e20839aa6386a8f2cbb2bede5fccc80.jpg b/data/2025/2504_04xxx/2504.04416/images/61b2543cf59e3a45cc3cc0df307cd1ce6e20839aa6386a8f2cbb2bede5fccc80.jpg new file mode 100644 index 0000000000000000000000000000000000000000..36b8f6b498a384e118b0eb014d8a0d80e9bd6cbf --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/images/61b2543cf59e3a45cc3cc0df307cd1ce6e20839aa6386a8f2cbb2bede5fccc80.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:0b7fe900f15d035dea37a66ccfa315b9397cc17a3ffb0fb67575d435af101c01 +size 21903 diff --git a/data/2025/2504_04xxx/2504.04416/images/68a518a72601171095d2ba1dea9ce54ca02f92cd0f23c4c4b899735da48a3aee.jpg b/data/2025/2504_04xxx/2504.04416/images/68a518a72601171095d2ba1dea9ce54ca02f92cd0f23c4c4b899735da48a3aee.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ec0f3166f763f1dc913c482a78c4999eda2f7a90 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/images/68a518a72601171095d2ba1dea9ce54ca02f92cd0f23c4c4b899735da48a3aee.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ef7a378f0d1a916849e65fb9d18c5102e431ce535219e42d5361d88732593d8 +size 13397 diff --git a/data/2025/2504_04xxx/2504.04416/images/6b4e6f119043345bb1f74729ca0f428dc004dced8f494f5cbf1d55ad04908e8b.jpg b/data/2025/2504_04xxx/2504.04416/images/6b4e6f119043345bb1f74729ca0f428dc004dced8f494f5cbf1d55ad04908e8b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d19e88fb3f4082ee6c79009fbcc4b202fd288a8a --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/images/6b4e6f119043345bb1f74729ca0f428dc004dced8f494f5cbf1d55ad04908e8b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bd81c03448d193ee3d725d5aeaade495ded399e291f8383667a88caa6edf3a4b +size 3820 diff --git a/data/2025/2504_04xxx/2504.04416/images/729d05ea8781301143b559b94849dadea509519b5a2544b3ea844ebee55114e4.jpg b/data/2025/2504_04xxx/2504.04416/images/729d05ea8781301143b559b94849dadea509519b5a2544b3ea844ebee55114e4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..46d0b39d39e76d0312741596f9fe8c19e673a692 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/images/729d05ea8781301143b559b94849dadea509519b5a2544b3ea844ebee55114e4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:32be5ba02f9cc22b360ba1d441ec73bad60529ce2794618a40e4ac2918b3bc1d +size 9591 diff --git 
a/data/2025/2504_04xxx/2504.04416/images/729decb105fb40164b936bb66a56656c3f63060a5e881226effea4eccac1ac9b.jpg b/data/2025/2504_04xxx/2504.04416/images/729decb105fb40164b936bb66a56656c3f63060a5e881226effea4eccac1ac9b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7249076ba111de7d47f31765efc86496b142ea2a --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/images/729decb105fb40164b936bb66a56656c3f63060a5e881226effea4eccac1ac9b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7350077aaa920c20324d6c28bbaab254d322c88d87f558f933fde930b4d9e244 +size 5627 diff --git a/data/2025/2504_04xxx/2504.04416/images/784fab6a74a130c6131dd8f85eae445ec182d7ab4a62fcc23bd371da93a48fba.jpg b/data/2025/2504_04xxx/2504.04416/images/784fab6a74a130c6131dd8f85eae445ec182d7ab4a62fcc23bd371da93a48fba.jpg new file mode 100644 index 0000000000000000000000000000000000000000..532998322f4ba10deab5e90793077cc3b517f020 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/images/784fab6a74a130c6131dd8f85eae445ec182d7ab4a62fcc23bd371da93a48fba.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8be08aab1dceb6cbd13fd0d309d87dcc382dc479252ac293ed57ef1896abb1ec +size 4038 diff --git a/data/2025/2504_04xxx/2504.04416/images/84a6feded65b490e5d36ea059c84406fff3021b7a55b5662776fb58139b9c1d2.jpg b/data/2025/2504_04xxx/2504.04416/images/84a6feded65b490e5d36ea059c84406fff3021b7a55b5662776fb58139b9c1d2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..de832765903d3e68bbc94785d74a42f6fc8053d2 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/images/84a6feded65b490e5d36ea059c84406fff3021b7a55b5662776fb58139b9c1d2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4d853456140a559f26e4b22d3782d4bbdbba08e9b45f9dd829e7f75997d95ffc +size 13006 diff --git a/data/2025/2504_04xxx/2504.04416/images/8736034b0bdaf59f539e97d72f6d54ac9a8a29fe6f511b800a01aafd1ae829b2.jpg 
b/data/2025/2504_04xxx/2504.04416/images/8736034b0bdaf59f539e97d72f6d54ac9a8a29fe6f511b800a01aafd1ae829b2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c9d8e7f3329bde4a34b5e3941c4611c08dbafc7f --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/images/8736034b0bdaf59f539e97d72f6d54ac9a8a29fe6f511b800a01aafd1ae829b2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:56e16db0c6b28615ef53a6d1b7d401ffb22bdad0adb43e875c5cdd29957bdc0d +size 7174 diff --git a/data/2025/2504_04xxx/2504.04416/images/9070a982ca734a985fe9029753fd5664fa385b28312c9e502177b89de18d5c68.jpg b/data/2025/2504_04xxx/2504.04416/images/9070a982ca734a985fe9029753fd5664fa385b28312c9e502177b89de18d5c68.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a8528fe7da045c978c838c42f6d44f25b7669277 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/images/9070a982ca734a985fe9029753fd5664fa385b28312c9e502177b89de18d5c68.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f1ac766852229fac45577aded60f0e4f47dfd68131ef4c2c5762d9ddabd87fe +size 6075 diff --git a/data/2025/2504_04xxx/2504.04416/images/9584c77630f177f4418c28a0f4cd991c9f3bf4a3f003fd5be36438ed24623c50.jpg b/data/2025/2504_04xxx/2504.04416/images/9584c77630f177f4418c28a0f4cd991c9f3bf4a3f003fd5be36438ed24623c50.jpg new file mode 100644 index 0000000000000000000000000000000000000000..43ef09e3bfebe0b36296459228f0cd260ffe2a74 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/images/9584c77630f177f4418c28a0f4cd991c9f3bf4a3f003fd5be36438ed24623c50.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7249d0f6107dff38dba689ecffe67670ab6681106b4cc811d53a6a4abb7e6593 +size 12196 diff --git a/data/2025/2504_04xxx/2504.04416/images/9681b22a56be492cc7b99827f95021b06a5aa66bc3d1a10fdc5984ba903abee6.jpg b/data/2025/2504_04xxx/2504.04416/images/9681b22a56be492cc7b99827f95021b06a5aa66bc3d1a10fdc5984ba903abee6.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..8013f4c891f9aa45f556c70491349e1caf161334 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/images/9681b22a56be492cc7b99827f95021b06a5aa66bc3d1a10fdc5984ba903abee6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9bc7ecbd601c8df535546eb00157b8aedcc5717e03e81691168b2ef826cc2e03 +size 2703 diff --git a/data/2025/2504_04xxx/2504.04416/images/97e19c7227dc6a422910962387040c1a3ea3553ff701001be225f0665f2b0fca.jpg b/data/2025/2504_04xxx/2504.04416/images/97e19c7227dc6a422910962387040c1a3ea3553ff701001be225f0665f2b0fca.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ae0b572873025d165b4d836ee32c80a33f1554cf --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/images/97e19c7227dc6a422910962387040c1a3ea3553ff701001be225f0665f2b0fca.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:267b3bc0772589d6dd6c2c30360bff6626b81722186db2708f01c6ac1d3ef03a +size 11383 diff --git a/data/2025/2504_04xxx/2504.04416/images/a3f7f1062be71f63c4bae1d01e8f8738c6ae6508294074041d1049150ddf8ed7.jpg b/data/2025/2504_04xxx/2504.04416/images/a3f7f1062be71f63c4bae1d01e8f8738c6ae6508294074041d1049150ddf8ed7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ad7416d2155882955ee6ee64f62fc566f20b15a2 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/images/a3f7f1062be71f63c4bae1d01e8f8738c6ae6508294074041d1049150ddf8ed7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a7f092f7b5ba19804f97b55e1dcb586a64a06793ac578250baf40fb63826cd57 +size 5542 diff --git a/data/2025/2504_04xxx/2504.04416/images/af85018eec449fc68ac5144ad9df85fd8e0cadbddda6b88cbc23494fde490a97.jpg b/data/2025/2504_04xxx/2504.04416/images/af85018eec449fc68ac5144ad9df85fd8e0cadbddda6b88cbc23494fde490a97.jpg new file mode 100644 index 0000000000000000000000000000000000000000..49de5d7669db44fc1f7fc1372c5f4a5867966f9d --- /dev/null +++ 
b/data/2025/2504_04xxx/2504.04416/images/af85018eec449fc68ac5144ad9df85fd8e0cadbddda6b88cbc23494fde490a97.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:95edafc747087b40622d3d5d4d3565dce2d374cad826d58e1b322e62fc9bc4fe +size 10572 diff --git a/data/2025/2504_04xxx/2504.04416/images/b0a2eeb12deb5de8850dde1f39f87edcad7f4345aa159c0e8f18fb0e71060441.jpg b/data/2025/2504_04xxx/2504.04416/images/b0a2eeb12deb5de8850dde1f39f87edcad7f4345aa159c0e8f18fb0e71060441.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e0dc791b5c66eca082e6a4852b222670a410466a --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/images/b0a2eeb12deb5de8850dde1f39f87edcad7f4345aa159c0e8f18fb0e71060441.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2866d8c2f3b70b36ba74eeabefeafbb62933b868dd92c63095d0d6526139f8a4 +size 4949 diff --git a/data/2025/2504_04xxx/2504.04416/images/b25695a790d83f0d9d9673d679882ad1f227b367f941835bfd97862a8c0677dc.jpg b/data/2025/2504_04xxx/2504.04416/images/b25695a790d83f0d9d9673d679882ad1f227b367f941835bfd97862a8c0677dc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f5f6b5f4c89da92ad40bcc9f91f02baa106e42ee --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/images/b25695a790d83f0d9d9673d679882ad1f227b367f941835bfd97862a8c0677dc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a06e30a1234da264bf1987f04832f9832a1c4c871cbbe3f3e1e85d96d6f6cd30 +size 6530 diff --git a/data/2025/2504_04xxx/2504.04416/images/b5c9afe59b4dccf1a42f8b72e20121e99efe9a248bbba6067906f88014c41a74.jpg b/data/2025/2504_04xxx/2504.04416/images/b5c9afe59b4dccf1a42f8b72e20121e99efe9a248bbba6067906f88014c41a74.jpg new file mode 100644 index 0000000000000000000000000000000000000000..51a5e60f8f26938353bdc3c314022b9ed2d78af2 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/images/b5c9afe59b4dccf1a42f8b72e20121e99efe9a248bbba6067906f88014c41a74.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:b8cef5947de0023e930297a56aa3e603eae51029485fde5c2a35ffb25ddea393 +size 6977 diff --git a/data/2025/2504_04xxx/2504.04416/images/b6eb4a1d2800300276e2151ee5678efe9ec3eeeb215dd857f8f025d396b05673.jpg b/data/2025/2504_04xxx/2504.04416/images/b6eb4a1d2800300276e2151ee5678efe9ec3eeeb215dd857f8f025d396b05673.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b1847c95e32e434cb56435488d761467e8ba475d --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/images/b6eb4a1d2800300276e2151ee5678efe9ec3eeeb215dd857f8f025d396b05673.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d79de54113dfeda5a2211172c00191690d792fa315cdf16a0b7c4c3ef70942ef +size 6587 diff --git a/data/2025/2504_04xxx/2504.04416/images/bbaa66f9f2156b78f7fbc5df612f9584c7c94f840efaecc66a6d8a01f859f0b1.jpg b/data/2025/2504_04xxx/2504.04416/images/bbaa66f9f2156b78f7fbc5df612f9584c7c94f840efaecc66a6d8a01f859f0b1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0fa78ea8bda7657546c7aa9067fd87363a7a3799 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/images/bbaa66f9f2156b78f7fbc5df612f9584c7c94f840efaecc66a6d8a01f859f0b1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:beef16ccd48d36ef27172bc69d86fd02846b9f0e30d9b35224f5288b038de09e +size 9299 diff --git a/data/2025/2504_04xxx/2504.04416/images/be44fa32397a16fb9f227ae605d2a0d7b3a5acb9546bc2e5c59552538eea9337.jpg b/data/2025/2504_04xxx/2504.04416/images/be44fa32397a16fb9f227ae605d2a0d7b3a5acb9546bc2e5c59552538eea9337.jpg new file mode 100644 index 0000000000000000000000000000000000000000..51bcc4499eccd378b05556e0a25a0e6d126e0fa0 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/images/be44fa32397a16fb9f227ae605d2a0d7b3a5acb9546bc2e5c59552538eea9337.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b34facc0276865dfe4342d37dae1adf6315d3038ea57fb97102a9a05ed035f9 +size 7062 diff --git 
a/data/2025/2504_04xxx/2504.04416/images/c2770011c91d0f3fb1fef3645a44f5372fec17fd3463618290bb484c84ce9f32.jpg b/data/2025/2504_04xxx/2504.04416/images/c2770011c91d0f3fb1fef3645a44f5372fec17fd3463618290bb484c84ce9f32.jpg new file mode 100644 index 0000000000000000000000000000000000000000..49e0e20eab0deafbbf1dc1098fda385507b82fde --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/images/c2770011c91d0f3fb1fef3645a44f5372fec17fd3463618290bb484c84ce9f32.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c054a763f661ead89eb2e41ea383e0bb2433975f56b24221925cd9029e4ad799 +size 6646 diff --git a/data/2025/2504_04xxx/2504.04416/images/cd7d69136e335ce215baac5231ff78cba27879367511c6bcd933384dcaafd79b.jpg b/data/2025/2504_04xxx/2504.04416/images/cd7d69136e335ce215baac5231ff78cba27879367511c6bcd933384dcaafd79b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0ba8fa1df237e181ea65d5c56446f706d2b7a346 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/images/cd7d69136e335ce215baac5231ff78cba27879367511c6bcd933384dcaafd79b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7e7432f2b115061b81ff42cb040686fbf3005e21ad52186753f49694ec89c5b5 +size 10230 diff --git a/data/2025/2504_04xxx/2504.04416/images/da27cfab6dfa7405c6fad9190dc61ca497cb4658c7aa6dbdc279516ce52aa7b4.jpg b/data/2025/2504_04xxx/2504.04416/images/da27cfab6dfa7405c6fad9190dc61ca497cb4658c7aa6dbdc279516ce52aa7b4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..48073d61cee664151bb7fbcf0ed23c2daab42e82 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/images/da27cfab6dfa7405c6fad9190dc61ca497cb4658c7aa6dbdc279516ce52aa7b4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:593a0aeb8eda4e1ab676c8c4632b235a8766b7d38e05dd67d25d041bb307e90e +size 3072 diff --git a/data/2025/2504_04xxx/2504.04416/images/dc520830c34ac7e220b6224cb4a3b131068da212eb12dd170b5700b5edf69529.jpg 
b/data/2025/2504_04xxx/2504.04416/images/dc520830c34ac7e220b6224cb4a3b131068da212eb12dd170b5700b5edf69529.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e0d8dd55d03e713f50d5eb29ab250499b38a4bb9 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/images/dc520830c34ac7e220b6224cb4a3b131068da212eb12dd170b5700b5edf69529.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6536e1db55c202ee8834ed25fef53e712e457db0727ae7a0c19bb61d5d76dd94 +size 10284 diff --git a/data/2025/2504_04xxx/2504.04416/images/e7bf767ab905fa1fdcb5a401caf0e03f3d616f352134053d81f729940d213325.jpg b/data/2025/2504_04xxx/2504.04416/images/e7bf767ab905fa1fdcb5a401caf0e03f3d616f352134053d81f729940d213325.jpg new file mode 100644 index 0000000000000000000000000000000000000000..43b2bd770c5235d7eedd04cb6ea70b6b488ac71c --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/images/e7bf767ab905fa1fdcb5a401caf0e03f3d616f352134053d81f729940d213325.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9a10af090d2de67dd7f8582a6903e7e918b545befcc5079a8cea6b2485a2f2f4 +size 12188 diff --git a/data/2025/2504_04xxx/2504.04416/images/f2a95969060ff18ce584a40691a0daa08a51df76663be0ca7c0ca01091aed99e.jpg b/data/2025/2504_04xxx/2504.04416/images/f2a95969060ff18ce584a40691a0daa08a51df76663be0ca7c0ca01091aed99e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..16c17c7e73e6408823b101cedbf850a4067a7487 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/images/f2a95969060ff18ce584a40691a0daa08a51df76663be0ca7c0ca01091aed99e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:120aa468e6e5edf27678a0cd7fe605328307fdd3203682ad35075df44d553372 +size 3329 diff --git a/data/2025/2504_04xxx/2504.04416/images/f2b95e442cd23b5fd71f2717372c082e92136b92b77792ee291f7ee40cf352c2.jpg b/data/2025/2504_04xxx/2504.04416/images/f2b95e442cd23b5fd71f2717372c082e92136b92b77792ee291f7ee40cf352c2.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..230049108cb6f3884411a765f84e0906408f8219 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/images/f2b95e442cd23b5fd71f2717372c082e92136b92b77792ee291f7ee40cf352c2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:30a6896c6bc49d3685ea88593467d73ec2f4606b7e2d8860d39cf418d424c857 +size 8496 diff --git a/data/2025/2504_04xxx/2504.04416/images/f8c01d27d4a2e2889ddd669ad8e3f7405e8fe86a6ea0eff1ea0f08c3ec72955f.jpg b/data/2025/2504_04xxx/2504.04416/images/f8c01d27d4a2e2889ddd669ad8e3f7405e8fe86a6ea0eff1ea0f08c3ec72955f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..36a239085de1c8c519cf781d9c724f5a9eb16120 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/images/f8c01d27d4a2e2889ddd669ad8e3f7405e8fe86a6ea0eff1ea0f08c3ec72955f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:029f622bd6558598b453943fd5debd8a69032d7852f5e3f8db433043cdf68c48 +size 5570 diff --git a/data/2025/2504_04xxx/2504.04416/layout.json b/data/2025/2504_04xxx/2504.04416/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..2157092ab950e17a7ec2db317bb8911bcc905794 --- /dev/null +++ b/data/2025/2504_04xxx/2504.04416/layout.json @@ -0,0 +1,40022 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 177, + 69, + 434, + 87 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 177, + 69, + 434, + 87 + ], + "spans": [ + { + "bbox": [ + 177, + 69, + 434, + 87 + ], + "type": "text", + "content": "SIGACT News Complexity Theory Column" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 128, + 91, + 482, + 110 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 128, + 91, + 482, + 110 + ], + "spans": [ + { + "bbox": [ + 128, + 91, + 482, + 110 + ], + "type": "text", + "content": "Meta-Mathematics of Computational Complexity Theory" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 255, + 118, + 353, + 134 + ], + "type": 
"text", + "angle": 0, + "lines": [ + { + "bbox": [ + 255, + 118, + 353, + 134 + ], + "spans": [ + { + "bbox": [ + 255, + 118, + 353, + 134 + ], + "type": "text", + "content": "Igor C. Oliveira1" + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 271, + 141, + 339, + 223 + ], + "blocks": [ + { + "bbox": [ + 271, + 141, + 339, + 223 + ], + "lines": [ + { + "bbox": [ + 271, + 141, + 339, + 223 + ], + "spans": [ + { + "bbox": [ + 271, + 141, + 339, + 223 + ], + "type": "image", + "image_path": "9070a982ca734a985fe9029753fd5664fa385b28312c9e502177b89de18d5c68.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 285, + 234, + 326, + 245 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 285, + 234, + 326, + 245 + ], + "spans": [ + { + "bbox": [ + 285, + 234, + 326, + 245 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 95, + 253, + 514, + 302 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 253, + 514, + 302 + ], + "spans": [ + { + "bbox": [ + 95, + 253, + 514, + 302 + ], + "type": "text", + "content": "We survey results on the formalization and independence of mathematical statements related to major open problems in computational complexity theory. Our primary focus is on recent findings concerning the (un)provability of complexity bounds within theories of bounded arithmetic. This includes the techniques employed and related open problems, such as the (non)existence of a feasible proof that " + }, + { + "bbox": [ + 95, + 253, + 514, + 302 + ], + "type": "inline_equation", + "content": "\\mathsf{P} = \\mathsf{NP}" + }, + { + "bbox": [ + 95, + 253, + 514, + 302 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 312, + 128, + 326 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 312, + 128, + 326 + ], + "spans": [ + { + "bbox": [ + 69, + 312, + 128, + 326 + ], + "type": "text", + "content": "Contents" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 337, + 541, + 369 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 71, + 337, + 541, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 337, + 541, + 348 + ], + "spans": [ + { + "bbox": [ + 71, + 337, + 541, + 348 + ], + "type": "text", + "content": "1 Introduction 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 358, + 541, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 358, + 541, + 369 + ], + "spans": [ + { + "bbox": [ + 70, + 358, + 541, + 369 + ], + "type": "text", + "content": "2 Preliminaries 3" + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 85, + 371, + 541, + 394 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 85, + 371, + 541, + 383 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 371, + 541, + 383 + ], + "spans": [ + { + "bbox": [ + 85, + 371, + 541, + 383 + ], + "type": "text", + "content": "2.1 Complexity Theory 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 85, + 384, + 541, + 394 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 384, + 541, + 394 + ], + "spans": [ + { + "bbox": [ + 85, + 384, + 541, + 394 + ], + "type": "text", + "content": "2.2 Theories of Bounded Arithmetic 3" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 107, + 395, + 541, + 430 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 107, + 395, + 541, + 406 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 395, + 541, + 406 
+ ], + "spans": [ + { + "bbox": [ + 107, + 395, + 541, + 406 + ], + "type": "text", + "content": "2.2.1 " + }, + { + "bbox": [ + 107, + 395, + 541, + 406 + ], + "type": "inline_equation", + "content": "\\mathrm{PV}_1" + }, + { + "bbox": [ + 107, + 395, + 541, + 406 + ], + "type": "text", + "content": " 4" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 407, + 541, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 407, + 541, + 418 + ], + "spans": [ + { + "bbox": [ + 107, + 407, + 541, + 418 + ], + "type": "text", + "content": "2.2.2 " + }, + { + "bbox": [ + 107, + 407, + 541, + 418 + ], + "type": "inline_equation", + "content": "\\mathbf{S}_2^1,\\mathbf{T}_2^1" + }, + { + "bbox": [ + 107, + 407, + 541, + 418 + ], + "type": "text", + "content": " , and Beyond 4" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 418, + 541, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 418, + 541, + 430 + ], + "spans": [ + { + "bbox": [ + 107, + 418, + 541, + 430 + ], + "type": "text", + "content": "2.2.3 " + }, + { + "bbox": [ + 107, + 418, + 541, + 430 + ], + "type": "inline_equation", + "content": "\\mathsf{APC}_1" + }, + { + "bbox": [ + 107, + 418, + 541, + 430 + ], + "type": "text", + "content": " 6" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 70, + 441, + 541, + 452 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 441, + 541, + 452 + ], + "spans": [ + { + "bbox": [ + 70, + 441, + 541, + 452 + ], + "type": "text", + "content": "3 Auxiliary Definitions and Results 6" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 85, + 453, + 541, + 487 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 85, + 453, + 541, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 453, + 541, + 464 + ], + "spans": [ + { + "bbox": [ + 85, + 453, + 541, + 464 + ], + "type": "text", + 
"content": "3.1 Witnessing Theorems 6" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 85, + 465, + 541, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 465, + 541, + 475 + ], + "spans": [ + { + "bbox": [ + 85, + 465, + 541, + 475 + ], + "type": "text", + "content": "3.2 Bounded Arithmetic and Propositional Proofs 7" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 85, + 476, + 541, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 476, + 541, + 487 + ], + "spans": [ + { + "bbox": [ + 85, + 476, + 541, + 487 + ], + "type": "text", + "content": "3.3 Cuts of Models of Bounded Arithmetic 8" + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 70, + 498, + 541, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 498, + 541, + 510 + ], + "spans": [ + { + "bbox": [ + 70, + 498, + 541, + 510 + ], + "type": "text", + "content": "4 The Strength of Bounded Arithmetic 9" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 85, + 511, + 541, + 534 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 85, + 511, + 541, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 511, + 541, + 521 + ], + "spans": [ + { + "bbox": [ + 85, + 511, + 541, + 521 + ], + "type": "text", + "content": "4.1 Formalization of Results from Algorithms and Complexity 9" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 85, + 522, + 541, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 522, + 541, + 534 + ], + "spans": [ + { + "bbox": [ + 85, + 522, + 541, + 534 + ], + "type": "text", + "content": "4.2 Concrete Example: Subbotovskaya's Formula Lower Bound in " + }, + { + "bbox": [ + 85, + 522, + 541, + 534 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 85, + 522, + 541, + 534 + ], + "type": "text", + "content": " 10" + } + ] + } + ], + 
"index": 25 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 70, + 544, + 541, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 544, + 541, + 555 + ], + "spans": [ + { + "bbox": [ + 70, + 544, + 541, + 555 + ], + "type": "text", + "content": "5 Unprovability of Complexity Bounds 14" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 85, + 557, + 541, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 557, + 541, + 567 + ], + "spans": [ + { + "bbox": [ + 85, + 557, + 541, + 567 + ], + "type": "text", + "content": "5.1 Unprovability of Upper Bounds 14" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 107, + 569, + 541, + 592 + ], + "type": "list", + "angle": 0, + "index": 31, + "blocks": [ + { + "bbox": [ + 107, + 569, + 541, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 569, + 541, + 579 + ], + "spans": [ + { + "bbox": [ + 107, + 569, + 541, + 579 + ], + "type": "text", + "content": "5.1.1 LEARN-Uniform Circuits and Unprovability 14" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 107, + 580, + 541, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 580, + 541, + 592 + ], + "spans": [ + { + "bbox": [ + 107, + 580, + 541, + 592 + ], + "type": "text", + "content": "5.1.2 " + }, + { + "bbox": [ + 107, + 580, + 541, + 592 + ], + "type": "inline_equation", + "content": "\\mathsf{P} = \\mathsf{NP}" + }, + { + "bbox": [ + 107, + 580, + 541, + 592 + ], + "type": "text", + "content": " and Propositional Proof Complexity 17" + } + ] + } + ], + "index": 30 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 85, + 593, + 541, + 603 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 593, + 541, + 603 + ], + "spans": [ + { + "bbox": [ + 85, + 593, + 541, + 603 + ], + "type": "text", + "content": "5.2 Unprovability of Lower Bounds 18" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 107, + 605, + 541, + 627 + ], 
+ "type": "list", + "angle": 0, + "index": 35, + "blocks": [ + { + "bbox": [ + 107, + 605, + 541, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 605, + 541, + 615 + ], + "spans": [ + { + "bbox": [ + 107, + 605, + 541, + 615 + ], + "type": "text", + "content": "5.2.1 Average-Case Circuit Lower Bounds 18" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 107, + 616, + 541, + 627 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 616, + 541, + 627 + ], + "spans": [ + { + "bbox": [ + 107, + 616, + 541, + 627 + ], + "type": "text", + "content": "5.2.2 Extended Frege Lower Bounds 21" + } + ] + } + ], + "index": 34 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 85, + 628, + 541, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 628, + 541, + 639 + ], + "spans": [ + { + "bbox": [ + 85, + 628, + 541, + 639 + ], + "type": "text", + "content": "5.3 Connection Between Upper Bounds and Lower Bounds 22" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 70, + 650, + 541, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 650, + 541, + 662 + ], + "spans": [ + { + "bbox": [ + 70, + 650, + 541, + 662 + ], + "type": "text", + "content": "6 Additional Recent Developments 23" + } + ] + } + ], + "index": 37 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 213, + 37, + 555 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 213, + 37, + 555 + ], + "spans": [ + { + "bbox": [ + 14, + 213, + 37, + 555 + ], + "type": "text", + "content": "arXiv:2504.04416v1 [cs.CC] 6 Apr 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 83, + 670, + 490, + 681 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 670, + 490, + 681 + ], + "spans": [ + { + "bbox": [ + 83, + 670, + 490, + 681 + ], + "type": "text", + "content": "1Department of Computer Science, University of Warwick, UK. 
Email: igor.oliveira@warwick.ac.uk." + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 302, + 712, + 308, + 720 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 712, + 308, + 720 + ], + "spans": [ + { + "bbox": [ + 302, + 712, + 308, + 720 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 39 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 70, + 173, + 86 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 70, + 173, + 86 + ], + "spans": [ + { + "bbox": [ + 69, + 70, + 173, + 86 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 97, + 541, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 97, + 541, + 178 + ], + "spans": [ + { + "bbox": [ + 67, + 97, + 541, + 178 + ], + "type": "text", + "content": "The investigation of the inherent complexity of computational tasks is a central research direction in theoretical computer science. While unconditional results are known in a variety of restricted contexts (i.e., with respect to weak models of computation), despite significant efforts, several central questions of the field remain wide open. Prominent examples include the relation between complexity classes P and NP, understanding the power of non-uniform Boolean circuits, and bounding the length of proofs in propositional proof systems such as Frege and extended Frege." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 178, + 541, + 287 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 178, + 541, + 287 + ], + "spans": [ + { + "bbox": [ + 67, + 178, + 541, + 287 + ], + "type": "text", + "content": "The investigation of the difficulty of settling these problems has long been an important and influential area of research by itself (e.g., barrier results such as [BGS75, RR97, AW09, " + }, + { + "bbox": [ + 67, + 178, + 541, + 287 + ], + "type": "inline_equation", + "content": "\\mathrm{CHO}^{+}22" + }, + { + "bbox": [ + 67, + 178, + 541, + 287 + ], + "type": "text", + "content": "]). Unfortunately, these results tend to be ad-hoc and do not consider a standard and robust notion of proof. In order to build a general theory, several works have considered provability in the usual sense of mathematical logic. Most importantly, this enables a deeper investigation of complexity theory that considers not only the running time of a program or the size of a circuit but also the feasibility of proving their existence and correctness. In particular, we can explore the fundamental question of what can and cannot be feasibly computed, along with the meta-question of what lower and upper bounds can and cannot be feasibly proven." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 86, + 288, + 267, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 288, + 267, + 300 + ], + "spans": [ + { + "bbox": [ + 86, + 288, + 267, + 300 + ], + "type": "text", + "content": "A fundamental goal of this research is to" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 308, + 541, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 308, + 541, + 350 + ], + "spans": [ + { + "bbox": [ + 67, + 308, + 541, + 350 + ], + "type": "inline_equation", + "content": "(\\star)" + }, + { + "bbox": [ + 67, + 308, + 541, + 350 + ], + "type": "text", + "content": " identify a suitable logical theory capable of formalizing most, if not all, known results in algorithms and complexity, and determine whether the major open problems mentioned above are provable or unprovable within this theory.2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 357, + 541, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 357, + 541, + 533 + ], + "spans": [ + { + "bbox": [ + 67, + 357, + 541, + 533 + ], + "type": "text", + "content": "Although we are still far from reaching this goal, progress has been made in understanding the (un)provability of statements concerning the complexity of computations within certain fragments of Peano Arithmetic, collectively known as Bounded Arithmetic. These theories are designed to capture proofs that manipulate and reason with concepts from a specified complexity class. For instance, a proof by induction whose inductive hypothesis can be expressed as an NP predicate is one such example. 
The earliest theory of this kind was " + }, + { + "bbox": [ + 67, + 357, + 541, + 533 + ], + "type": "inline_equation", + "content": "\\mathsf{I}\\Delta_0" + }, + { + "bbox": [ + 67, + 357, + 541, + 533 + ], + "type": "text", + "content": ", introduced by Parikh [Par71], who explored the intuitive concept of feasibility in arithmetic and addressed the infeasibility of exponentiation. The relationship between Parikh's theory and computational complexity was fully recognized and advanced by Paris and Wilkie in a series of influential papers during the 1980s (see [WP87]). Other significant theories include Cook's theory " + }, + { + "bbox": [ + 67, + 357, + 541, + 533 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 357, + 541, + 533 + ], + "type": "text", + "content": " [Coo75], which formalizes polynomial-time reasoning; Jerabek's theory " + }, + { + "bbox": [ + 67, + 357, + 541, + 533 + ], + "type": "inline_equation", + "content": "\\mathsf{APC}_1" + }, + { + "bbox": [ + 67, + 357, + 541, + 533 + ], + "type": "text", + "content": " [Jer04, Jer05, Jer07], which extends " + }, + { + "bbox": [ + 67, + 357, + 541, + 533 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 357, + 541, + 533 + ], + "type": "text", + "content": " by incorporating the dual weak pigeonhole principle for polynomial-time functions and formalizes probabilistic polynomial-time reasoning; and Buss's theories " + }, + { + "bbox": [ + 67, + 357, + 541, + 533 + ], + "type": "inline_equation", + "content": "\\mathsf{S}_2^i" + }, + { + "bbox": [ + 67, + 357, + 541, + 533 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 357, + 541, + 533 + ], + "type": "inline_equation", + "content": "\\mathsf{T}_2^i" + }, + { + "bbox": [ + 67, + 357, + 541, + 533 + ], + "type": "text", + "content": " [Bus86], which include induction principles corresponding to various levels of the polynomial-time hierarchy." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 533, + 541, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 533, + 541, + 601 + ], + "spans": [ + { + "bbox": [ + 67, + 533, + 541, + 601 + ], + "type": "text", + "content": "These theories are capable of formalizing advanced results. For instance, it is known that " + }, + { + "bbox": [ + 67, + 533, + 541, + 601 + ], + "type": "inline_equation", + "content": "\\mathrm{PV}_1" + }, + { + "bbox": [ + 67, + 533, + 541, + 601 + ], + "type": "text", + "content": " can prove the PCP Theorem [Pic15b], while " + }, + { + "bbox": [ + 67, + 533, + 541, + 601 + ], + "type": "inline_equation", + "content": "\\mathrm{APC}_1" + }, + { + "bbox": [ + 67, + 533, + 541, + 601 + ], + "type": "text", + "content": " can establish several significant circuit lower bounds [MP20], including monotone circuit lower bounds for " + }, + { + "bbox": [ + 67, + 533, + 541, + 601 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 67, + 533, + 541, + 601 + ], + "type": "text", + "content": "-Clique and bounded-depth circuit lower bounds for the Parity function. Further examples include the explicit construction of expander graphs [BKKK20] and the correctness of randomized polynomial-time matching algorithms [LC11], among many others." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 601, + 541, + 656 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 601, + 541, + 656 + ], + "spans": [ + { + "bbox": [ + 67, + 601, + 541, + 656 + ], + "type": "text", + "content": "Given the expressive power of these theories, even if we are not yet able to establish a breakthrough result of the magnitude of " + }, + { + "bbox": [ + 67, + 601, + 541, + 656 + ], + "type": "inline_equation", + "content": "(\\star)" + }, + { + "bbox": [ + 67, + 601, + 541, + 656 + ], + "type": "text", + "content": ", determining the (un)provability of complexity bounds of interest in theories of bounded arithmetic still represents significant progress towards our understanding of the power and limits of feasible computations and proofs. This survey aims to provide an introduction to some of these results," + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 67, + 662, + 542, + 686 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 662, + 542, + 686 + ], + "spans": [ + { + "bbox": [ + 67, + 662, + 542, + 686 + ], + "type": "text", + "content": "As we elaborate in Section 5, the unprovability of a statement is equivalent to the consistency of its negation, which can be at least as important." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 712, + 309, + 720 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 712, + 309, + 720 + ], + "spans": [ + { + "bbox": [ + 302, + 712, + 309, + 720 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 539, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 539, + 125 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 539, + 125 + ], + "type": "text", + "content": "the underlying techniques, and related open problems. While our primary focus is on recent developments, in order to provide a broader perspective we also cover some classical results. Due to space limitations, the survey is not exhaustive, and several references had to be omitted (although some recent developments are mentioned in Section 6)." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 146, + 177, + 160 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 146, + 177, + 160 + ], + "spans": [ + { + "bbox": [ + 69, + 146, + 177, + 160 + ], + "type": "text", + "content": "2 Preliminaries" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 173, + 198, + 186 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 173, + 198, + 186 + ], + "spans": [ + { + "bbox": [ + 69, + 173, + 198, + 186 + ], + "type": "text", + "content": "2.1 Complexity Theory" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 194, + 541, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 194, + 541, + 247 + ], + "spans": [ + { + "bbox": [ + 67, + 194, + 541, + 247 + ], + "type": "text", + "content": "We will rely on a few additional standard definitions from complexity theory, such as basic complexity classes, Boolean circuits and formulas, and propositional proof systems. 
These can be found in textbooks such as [AB09] and [Kra19]. Below we only establish notation and review a classical result that offers a convenient way to talk about polynomial-time computations in some logical theories." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 85, + 248, + 479, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 248, + 479, + 262 + ], + "spans": [ + { + "bbox": [ + 85, + 248, + 479, + 262 + ], + "type": "text", + "content": "We use " + }, + { + "bbox": [ + 85, + 248, + 479, + 262 + ], + "type": "inline_equation", + "content": "\\mathsf{SIZE}[s]" + }, + { + "bbox": [ + 85, + 248, + 479, + 262 + ], + "type": "text", + "content": " to denote the set of languages computed by Boolean circuits of size " + }, + { + "bbox": [ + 85, + 248, + 479, + 262 + ], + "type": "inline_equation", + "content": "s(n)" + }, + { + "bbox": [ + 85, + 248, + 479, + 262 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 262, + 541, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 262, + 541, + 384 + ], + "spans": [ + { + "bbox": [ + 67, + 262, + 541, + 384 + ], + "type": "text", + "content": "In theoretical computer science, one typically considers functions and predicates that operate over binary strings. This is equivalent to operations on integers, by identifying each non-negative integer with its binary representation. Let " + }, + { + "bbox": [ + 67, + 262, + 541, + 384 + ], + "type": "inline_equation", + "content": "\\mathbb{N}" + }, + { + "bbox": [ + 67, + 262, + 541, + 384 + ], + "type": "text", + "content": " denote the set of non-negative integers. 
For " + }, + { + "bbox": [ + 67, + 262, + 541, + 384 + ], + "type": "inline_equation", + "content": "a\\in \\mathbb{N}" + }, + { + "bbox": [ + 67, + 262, + 541, + 384 + ], + "type": "text", + "content": ", we let " + }, + { + "bbox": [ + 67, + 262, + 541, + 384 + ], + "type": "inline_equation", + "content": "|a|\\triangleq \\lceil \\log_2(a + 1)\\rceil" + }, + { + "bbox": [ + 67, + 262, + 541, + 384 + ], + "type": "text", + "content": " denote the length of the binary representation of " + }, + { + "bbox": [ + 67, + 262, + 541, + 384 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 67, + 262, + 541, + 384 + ], + "type": "text", + "content": ". For a constant " + }, + { + "bbox": [ + 67, + 262, + 541, + 384 + ], + "type": "inline_equation", + "content": "k\\geq 1" + }, + { + "bbox": [ + 67, + 262, + 541, + 384 + ], + "type": "text", + "content": ", we say that a function " + }, + { + "bbox": [ + 67, + 262, + 541, + 384 + ], + "type": "inline_equation", + "content": "f\\colon \\mathbb{N}^k\\to \\mathbb{N}" + }, + { + "bbox": [ + 67, + 262, + 541, + 384 + ], + "type": "text", + "content": " is computable in polynomial time if " + }, + { + "bbox": [ + 67, + 262, + 541, + 384 + ], + "type": "inline_equation", + "content": "f(x_{1},\\ldots ,x_{k})" + }, + { + "bbox": [ + 67, + 262, + 541, + 384 + ], + "type": "text", + "content": " can be computed in time polynomial in " + }, + { + "bbox": [ + 67, + 262, + 541, + 384 + ], + "type": "inline_equation", + "content": "|x_{1}|,\\ldots ,|x_{k}|" + }, + { + "bbox": [ + 67, + 262, + 541, + 384 + ], + "type": "text", + "content": ". (For convenience, we might write " + }, + { + "bbox": [ + 67, + 262, + 541, + 384 + ], + "type": "inline_equation", + "content": "|\\vec{x} |\\triangleq |x_1|,\\dots ,|x_k|." + }, + { + "bbox": [ + 67, + 262, + 541, + 384 + ], + "type": "text", + "content": ") Recall that FP denotes the set of polynomial time functions. 
While the definition of polynomial time refers to a machine model, FP can also be introduced in a machine independent way as the closure of a set of base functions under composition and limited recursion on notation. In more detail, we can consider the following class " + }, + { + "bbox": [ + 67, + 262, + 541, + 384 + ], + "type": "inline_equation", + "content": "\\mathcal{F}" + }, + { + "bbox": [ + 67, + 262, + 541, + 384 + ], + "type": "text", + "content": " of base functions:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 79, + 393, + 530, + 453 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 393, + 530, + 453 + ], + "spans": [ + { + "bbox": [ + 79, + 393, + 530, + 453 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} c (x) \\triangleq 0, \\quad s (x) \\triangleq x + 1, \\quad a (x) \\triangleq \\lfloor x / 2 \\rfloor , \\quad d (x) \\triangleq 2 \\cdot x, \\quad \\pi_ {\\ell} ^ {i} (x _ {1}, \\ldots , x _ {\\ell}) \\triangleq x _ {i}, \\quad x \\# y \\triangleq 2 ^ {| x | \\cdot | y |}, \\\\ x \\leq y \\triangleq \\left\\{ \\begin{array}{l l} 1 & \\text {i f} x \\leq y \\\\ 0 & \\text {o t h e r w i s e ,} \\end{array} \\right. \\quad \\text {C h o i c e} (x, y, z) \\triangleq \\left\\{ \\begin{array}{l l} y & \\text {i f} x > 0 \\\\ z & \\text {o t h e r w i s e .} \\end{array} \\right. 
\\\\ \\end{array}", + "image_path": "235d6586c0ce6341c4f21d8c4228405d2b57882e2c57d5575f5f71bd8ed9efa9.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 462, + 539, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 462, + 539, + 487 + ], + "spans": [ + { + "bbox": [ + 67, + 462, + 539, + 487 + ], + "type": "text", + "content": "We say that a function " + }, + { + "bbox": [ + 67, + 462, + 539, + 487 + ], + "type": "inline_equation", + "content": "f(\\vec{x},y)" + }, + { + "bbox": [ + 67, + 462, + 539, + 487 + ], + "type": "text", + "content": " is defined from functions " + }, + { + "bbox": [ + 67, + 462, + 539, + 487 + ], + "type": "inline_equation", + "content": "g(\\vec{x})" + }, + { + "bbox": [ + 67, + 462, + 539, + 487 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 67, + 462, + 539, + 487 + ], + "type": "inline_equation", + "content": "h(\\vec{x},y,z)" + }, + { + "bbox": [ + 67, + 462, + 539, + 487 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 67, + 462, + 539, + 487 + ], + "type": "inline_equation", + "content": "k(\\vec{x},y)" + }, + { + "bbox": [ + 67, + 462, + 539, + 487 + ], + "type": "text", + "content": " by limited recursion on notation if" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 230, + 500, + 312, + 513 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 500, + 312, + 513 + ], + "spans": [ + { + "bbox": [ + 230, + 500, + 312, + 513 + ], + "type": "interline_equation", + "content": "f (\\vec {x}, 0) = g (\\vec {x})", + "image_path": "9681b22a56be492cc7b99827f95021b06a5aa66bc3d1a10fdc5984ba903abee6.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 230, + 517, + 380, + 530 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 517, + 380, + 530 + ], + "spans": [ + { + "bbox": [ + 230, + 517, + 380, + 530 + ], + "type": "interline_equation", + "content": "f (\\vec {x}, y) = h (\\vec 
{x}, y, f (\\vec {x}, \\lfloor y / 2 \\rfloor))", + "image_path": "b0a2eeb12deb5de8850dde1f39f87edcad7f4345aa159c0e8f18fb0e71060441.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 230, + 533, + 322, + 546 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 533, + 322, + 546 + ], + "spans": [ + { + "bbox": [ + 230, + 533, + 322, + 546 + ], + "type": "interline_equation", + "content": "f (\\vec {x}, y) \\leq k (\\vec {x}, y)", + "image_path": "267e5f08e0159c02b9d2a45e85a5b30719fb24bc8fbcb5fa0bcb3f348900839a.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 557, + 539, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 557, + 539, + 583 + ], + "spans": [ + { + "bbox": [ + 67, + 557, + 539, + 583 + ], + "type": "text", + "content": "for every sequence " + }, + { + "bbox": [ + 67, + 557, + 539, + 583 + ], + "type": "inline_equation", + "content": "(\\vec{x},y)" + }, + { + "bbox": [ + 67, + 557, + 539, + 583 + ], + "type": "text", + "content": " of natural numbers. Cobham [Cob65] proved that FP is the least class of functions that contains " + }, + { + "bbox": [ + 67, + 557, + 539, + 583 + ], + "type": "inline_equation", + "content": "\\mathcal{F}" + }, + { + "bbox": [ + 67, + 557, + 539, + 583 + ], + "type": "text", + "content": " and is closed under composition and limited recursion on notation." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 600, + 264, + 613 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 600, + 264, + 613 + ], + "spans": [ + { + "bbox": [ + 69, + 600, + 264, + 613 + ], + "type": "text", + "content": "2.2 Theories of Bounded Arithmetic" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 67, + 622, + 539, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 622, + 539, + 689 + ], + "spans": [ + { + "bbox": [ + 67, + 622, + 539, + 689 + ], + "type": "text", + "content": "Bounded arithmetic has a long and rich history (see [Bus97] for an introduction, and [HP93, Kra95, CN10] for a detailed treatment). The correspondence between the theories and complexity classes manifests in multiple ways. For instance, witnessing results show that every provably total function in a given theory " + }, + { + "bbox": [ + 67, + 622, + 539, + 689 + ], + "type": "inline_equation", + "content": "\\mathsf{T}_{\\mathcal{C}}" + }, + { + "bbox": [ + 67, + 622, + 539, + 689 + ], + "type": "text", + "content": " (i.e., when " + }, + { + "bbox": [ + 67, + 622, + 539, + 689 + ], + "type": "inline_equation", + "content": "\\forall x \\exists!y \\psi(x,y)" + }, + { + "bbox": [ + 67, + 622, + 539, + 689 + ], + "type": "text", + "content": " is provable, for certain formulas " + }, + { + "bbox": [ + 67, + 622, + 539, + 689 + ], + "type": "inline_equation", + "content": "\\psi" + }, + { + "bbox": [ + 67, + 622, + 539, + 689 + ], + "type": "text", + "content": ") is computable within the corresponding complexity class " + }, + { + "bbox": [ + 67, + 622, + 539, + 689 + ], + "type": "inline_equation", + "content": "\\mathcal{C}" + }, + { + "bbox": [ + 67, + 622, + 539, + 689 + ], + "type": "text", + "content": " (i.e., the function " + }, + { + "bbox": [ + 67, + 622, + 539, + 689 + ], + "type": "inline_equation", + "content": "y = f(x)" + }, + { + "bbox": [ + 67, + 622, + 539, + 689 + ], + "type": "text", + 
"content": " is in " + }, + { + "bbox": [ + 67, + 622, + 539, + 689 + ], + "type": "inline_equation", + "content": "\\mathcal{C}" + }, + { + "bbox": [ + 67, + 622, + 539, + 689 + ], + "type": "text", + "content": "). There is also a close connection between" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 712, + 308, + 720 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 712, + 308, + 720 + ], + "spans": [ + { + "bbox": [ + 302, + 712, + 308, + 720 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 543, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 543, + 156 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 543, + 156 + ], + "type": "text", + "content": "theories of bounded arithmetic and propositional proof systems, e.g., propositional translations between proofs of certain sentences in " + }, + { + "bbox": [ + 67, + 72, + 543, + 156 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 72, + 543, + 156 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 67, + 72, + 543, + 156 + ], + "type": "inline_equation", + "content": "\\mathsf{S}_2^1" + }, + { + "bbox": [ + 67, + 72, + 543, + 156 + ], + "type": "text", + "content": " and polynomial-size proofs in the extended Frege proof system of the corresponding propositional formulas. We review some related results in Section 3.1 and Section 3.2, respectively. 
In this section, we provide an overview of some widely investigated theories of bounded arithmetic: " + }, + { + "bbox": [ + 67, + 72, + 543, + 156 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 72, + 543, + 156 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 67, + 72, + 543, + 156 + ], + "type": "inline_equation", + "content": "\\mathsf{S}_2^1" + }, + { + "bbox": [ + 67, + 72, + 543, + 156 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 67, + 72, + 543, + 156 + ], + "type": "inline_equation", + "content": "\\mathsf{T}_2^1" + }, + { + "bbox": [ + 67, + 72, + 543, + 156 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 67, + 72, + 543, + 156 + ], + "type": "inline_equation", + "content": "\\mathsf{APC}_1" + }, + { + "bbox": [ + 67, + 72, + 543, + 156 + ], + "type": "text", + "content": ". We assume basic familiarity with first-order logic. Results claimed below without reference can be found in [Kra95]." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 170, + 124, + 183 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 170, + 124, + 183 + ], + "spans": [ + { + "bbox": [ + 69, + 170, + 124, + 183 + ], + "type": "text", + "content": "2.2.1 PV" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 190, + 541, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 190, + 541, + 258 + ], + "spans": [ + { + "bbox": [ + 67, + 190, + 541, + 258 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 190, + 541, + 258 + ], + "type": "text", + "content": " [Coo75] (see also [KPT91]) is a first-order theory whose intended model is the set " + }, + { + "bbox": [ + 67, + 190, + 541, + 258 + ], + "type": "inline_equation", + "content": "\\mathbb{N}" + }, + { + "bbox": [ + 67, + 190, + 541, + 258 + ], + "type": "text", + "content": " of natural numbers, together with the standard interpretation for constants and functions symbols such as " + }, + { + "bbox": [ + 67, + 190, + 541, + 258 + ], + "type": "inline_equation", + "content": "0, +, \\times, \\text{etc.}" + }, + { + "bbox": [ + 67, + 190, + 541, + 258 + ], + "type": "text", + "content": ". 
The vocabulary (language) of " + }, + { + "bbox": [ + 67, + 190, + 541, + 258 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 190, + 541, + 258 + ], + "type": "text", + "content": ", denoted " + }, + { + "bbox": [ + 67, + 190, + 541, + 258 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathsf{PV}_1}" + }, + { + "bbox": [ + 67, + 190, + 541, + 258 + ], + "type": "text", + "content": ", contains a function symbol for each polynomial-time algorithm " + }, + { + "bbox": [ + 67, + 190, + 541, + 258 + ], + "type": "inline_equation", + "content": "f: \\mathbb{N}^k \\to \\mathbb{N}" + }, + { + "bbox": [ + 67, + 190, + 541, + 258 + ], + "type": "text", + "content": " (where " + }, + { + "bbox": [ + 67, + 190, + 541, + 258 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 67, + 190, + 541, + 258 + ], + "type": "text", + "content": " is any constant). These function symbols, and the axioms defining them, are obtained through Cobham's characterization of polynomial-time functions discussed in Section 2.1." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 258, + 541, + 298 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 258, + 541, + 298 + ], + "spans": [ + { + "bbox": [ + 67, + 258, + 541, + 298 + ], + "type": "inline_equation", + "content": "\\mathrm{PV}_1" + }, + { + "bbox": [ + 67, + 258, + 541, + 298 + ], + "type": "text", + "content": " also postulates an induction axiom scheme that simulates binary search, and one can show that it admits induction over quantifier-free formulas (i.e., polynomial-time predicates). We discuss induction axioms in more detail in Section 2.2.2." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 68, + 299, + 541, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 299, + 541, + 326 + ], + "spans": [ + { + "bbox": [ + 68, + 299, + 541, + 326 + ], + "type": "text", + "content": "We will use later in the text that " + }, + { + "bbox": [ + 68, + 299, + 541, + 326 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 68, + 299, + 541, + 326 + ], + "type": "text", + "content": " admits a formulation where all axioms are universal formulas (i.e., " + }, + { + "bbox": [ + 68, + 299, + 541, + 326 + ], + "type": "inline_equation", + "content": "\\forall \\vec{x}\\phi (\\vec{x})" + }, + { + "bbox": [ + 68, + 299, + 541, + 326 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 68, + 299, + 541, + 326 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 68, + 299, + 541, + 326 + ], + "type": "text", + "content": " is a quantifier-free formula). In other words, " + }, + { + "bbox": [ + 68, + 299, + 541, + 326 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 68, + 299, + 541, + 326 + ], + "type": "text", + "content": " is a universal theory." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 326, + 541, + 367 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 326, + 541, + 367 + ], + "spans": [ + { + "bbox": [ + 67, + 326, + 541, + 367 + ], + "type": "text", + "content": "While the details of the definition of " + }, + { + "bbox": [ + 67, + 326, + 541, + 367 + ], + "type": "inline_equation", + "content": "\\mathrm{PV}_1" + }, + { + "bbox": [ + 67, + 326, + 541, + 367 + ], + "type": "text", + "content": " are fairly technical (see, e.g., the longer overview in [CLO24b] or the exposition in [Kra95]), such details are often not needed. 
In particular, " + }, + { + "bbox": [ + 67, + 326, + 541, + 367 + ], + "type": "inline_equation", + "content": "\\mathrm{PV}_1" + }, + { + "bbox": [ + 67, + 326, + 541, + 367 + ], + "type": "text", + "content": " has an equivalent formalization that does not require Cobham's result [Jef06]." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 381, + 195, + 396 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 381, + 195, + 396 + ], + "spans": [ + { + "bbox": [ + 69, + 381, + 195, + 396 + ], + "type": "text", + "content": "2.2.2 " + }, + { + "bbox": [ + 69, + 381, + 195, + 396 + ], + "type": "inline_equation", + "content": "\\mathsf{S}_2^1,\\mathsf{T}_2^1" + }, + { + "bbox": [ + 69, + 381, + 195, + 396 + ], + "type": "text", + "content": " , and Beyond" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 403, + 541, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 403, + 541, + 443 + ], + "spans": [ + { + "bbox": [ + 67, + 403, + 541, + 443 + ], + "type": "text", + "content": "While " + }, + { + "bbox": [ + 67, + 403, + 541, + 443 + ], + "type": "inline_equation", + "content": "\\mathrm{PV}_1" + }, + { + "bbox": [ + 67, + 403, + 541, + 443 + ], + "type": "text", + "content": " can be related to polynomial-time computations and feasible proofs, Buss [Bus86] introduced a hierarchy of theories with close ties to the different levels of the polynomial hierarchy. To specify the theories, we will need a few definitions." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 68, + 445, + 541, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 445, + 541, + 485 + ], + "spans": [ + { + "bbox": [ + 68, + 445, + 541, + 485 + ], + "type": "text", + "content": "The language " + }, + { + "bbox": [ + 68, + 445, + 541, + 485 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathsf{B}}" + }, + { + "bbox": [ + 68, + 445, + 541, + 485 + ], + "type": "text", + "content": " of these theories contains the predicate symbols " + }, + { + "bbox": [ + 68, + 445, + 541, + 485 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 68, + 445, + 541, + 485 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 68, + 445, + 541, + 485 + ], + "type": "inline_equation", + "content": "\\leq" + }, + { + "bbox": [ + 68, + 445, + 541, + 485 + ], + "type": "text", + "content": ", the constant symbols 0 and 1, and function symbols " + }, + { + "bbox": [ + 68, + 445, + 541, + 485 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 68, + 445, + 541, + 485 + ], + "type": "text", + "content": " (successor), " + }, + { + "bbox": [ + 68, + 445, + 541, + 485 + ], + "type": "inline_equation", + "content": "+" + }, + { + "bbox": [ + 68, + 445, + 541, + 485 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 68, + 445, + 541, + 485 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 68, + 445, + 541, + 485 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 68, + 445, + 541, + 485 + ], + "type": "inline_equation", + "content": "\\lfloor x / 2 \\rfloor" + }, + { + "bbox": [ + 68, + 445, + 541, + 485 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 68, + 445, + 541, + 485 + ], + "type": "inline_equation", + "content": "|x|" + }, + { + "bbox": [ + 68, + 445, + 541, + 485 + ], + "type": "text", + "content": " (interpreted as the length of " + }, + { + "bbox": [ + 68, + 
445, + 541, + 485 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 68, + 445, + 541, + 485 + ], + "type": "text", + "content": " as in Section 2.1), and " + }, + { + "bbox": [ + 68, + 445, + 541, + 485 + ], + "type": "inline_equation", + "content": "\\#" + }, + { + "bbox": [ + 68, + 445, + 541, + 485 + ], + "type": "text", + "content": " (\"smash\"; interpreted as " + }, + { + "bbox": [ + 68, + 445, + 541, + 485 + ], + "type": "inline_equation", + "content": "x \\# y = 2^{|x| \\cdot |y|}" + }, + { + "bbox": [ + 68, + 445, + 541, + 485 + ], + "type": "text", + "content": ")." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 485, + 541, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 485, + 541, + 525 + ], + "spans": [ + { + "bbox": [ + 67, + 485, + 541, + 525 + ], + "type": "text", + "content": "A bounded quantifier is a quantifier of the form " + }, + { + "bbox": [ + 67, + 485, + 541, + 525 + ], + "type": "inline_equation", + "content": "Qy \\leq t" + }, + { + "bbox": [ + 67, + 485, + 541, + 525 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 67, + 485, + 541, + 525 + ], + "type": "inline_equation", + "content": "Q \\in \\{\\exists, \\forall\\}" + }, + { + "bbox": [ + 67, + 485, + 541, + 525 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 485, + 541, + 525 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 67, + 485, + 541, + 525 + ], + "type": "text", + "content": " is a term not involving " + }, + { + "bbox": [ + 67, + 485, + 541, + 525 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 67, + 485, + 541, + 525 + ], + "type": "text", + "content": ". Similarly, a sharply bounded quantifier is one of the form " + }, + { + "bbox": [ + 67, + 485, + 541, + 525 + ], + "type": "inline_equation", + "content": "Qy \\leq |t|" + }, + { + "bbox": [ + 67, + 485, + 541, + 525 + ], + "type": "text", + "content": ". 
Formally, such quantifiers are simply abbreviations. For instance," + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 179, + 536, + 429, + 551 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 179, + 536, + 429, + 551 + ], + "spans": [ + { + "bbox": [ + 179, + 536, + 429, + 551 + ], + "type": "interline_equation", + "content": "\\forall y \\leq t (\\vec {x}) \\varphi (\\vec {x}, y) \\triangleq \\forall y (y \\leq t (\\vec {x}) \\rightarrow \\varphi (\\vec {x}, y)), a n d", + "image_path": "8736034b0bdaf59f539e97d72f6d54ac9a8a29fe6f511b800a01aafd1ae829b2.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 181, + 552, + 403, + 567 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 181, + 552, + 403, + 567 + ], + "spans": [ + { + "bbox": [ + 181, + 552, + 403, + 567 + ], + "type": "interline_equation", + "content": "\\exists y \\leq t (\\vec {x}) \\varphi (\\vec {x}, y) \\triangleq \\exists y (y \\leq t (\\vec {x}) \\wedge \\varphi (\\vec {x}, y)).", + "image_path": "b6eb4a1d2800300276e2151ee5678efe9ec3eeeb215dd857f8f025d396b05673.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 67, + 578, + 541, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 578, + 541, + 632 + ], + "spans": [ + { + "bbox": [ + 67, + 578, + 541, + 632 + ], + "type": "text", + "content": "A formula where each quantifier appears bounded (resp., sharply bounded) is said to be a bounded (resp., sharply bounded) formula. It is not hard to show that every sharply bounded formula defines a polynomial-time predicate over the standard model " + }, + { + "bbox": [ + 67, + 578, + 541, + 632 + ], + "type": "inline_equation", + "content": "\\mathbb{N}" + }, + { + "bbox": [ + 67, + 578, + 541, + 632 + ], + "type": "text", + "content": " under its usual operations. On the other hand, bounded quantifiers allow us to define predicates in NP, coNP, and beyond." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 68, + 632, + 541, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 632, + 541, + 688 + ], + "spans": [ + { + "bbox": [ + 68, + 632, + 541, + 688 + ], + "type": "text", + "content": "We can introduce a hierarchy of formulas by counting alternations of bounded quantifiers. The class " + }, + { + "bbox": [ + 68, + 632, + 541, + 688 + ], + "type": "inline_equation", + "content": "\\Pi_0^b = \\Sigma_0^b" + }, + { + "bbox": [ + 68, + 632, + 541, + 688 + ], + "type": "text", + "content": " contains the sharply bounded formulas. We then recursively define, for each " + }, + { + "bbox": [ + 68, + 632, + 541, + 688 + ], + "type": "inline_equation", + "content": "i\\geq 1" + }, + { + "bbox": [ + 68, + 632, + 541, + 688 + ], + "type": "text", + "content": ", the classes " + }, + { + "bbox": [ + 68, + 632, + 541, + 688 + ], + "type": "inline_equation", + "content": "\\Sigma_i^b" + }, + { + "bbox": [ + 68, + 632, + 541, + 688 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 68, + 632, + 541, + 688 + ], + "type": "inline_equation", + "content": "\\Pi_{i}^{b}" + }, + { + "bbox": [ + 68, + 632, + 541, + 688 + ], + "type": "text", + "content": " according to the quantifier structure of the sentence, ignoring the appearance of sharply bounded quantifiers. 
For instance, if " + }, + { + "bbox": [ + 68, + 632, + 541, + 688 + ], + "type": "inline_equation", + "content": "\\varphi \\in \\Sigma_0^b" + }, + { + "bbox": [ + 68, + 632, + 541, + 688 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 68, + 632, + 541, + 688 + ], + "type": "inline_equation", + "content": "\\psi \\triangleq \\exists y\\leq t(\\vec{x})\\varphi (y,\\vec{x})" + }, + { + "bbox": [ + 68, + 632, + 541, + 688 + ], + "type": "text", + "content": ", then " + }, + { + "bbox": [ + 68, + 632, + 541, + 688 + ], + "type": "inline_equation", + "content": "\\psi \\in \\Sigma_1^b" + }, + { + "bbox": [ + 68, + 632, + 541, + 688 + ], + "type": "text", + "content": " (see, e.g., [Kra95] for the" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 712, + 309, + 720 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 712, + 309, + 720 + ], + "spans": [ + { + "bbox": [ + 302, + 712, + 309, + 720 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 539, + 110 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 539, + 110 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 539, + 110 + ], + "type": "text", + "content": "technical details in the general case). 
As alluded to above, it is known that, for each " + }, + { + "bbox": [ + 67, + 72, + 539, + 110 + ], + "type": "inline_equation", + "content": "i \\geq 1" + }, + { + "bbox": [ + 67, + 72, + 539, + 110 + ], + "type": "text", + "content": ", a predicate " + }, + { + "bbox": [ + 67, + 72, + 539, + 110 + ], + "type": "inline_equation", + "content": "P(\\vec{x})" + }, + { + "bbox": [ + 67, + 72, + 539, + 110 + ], + "type": "text", + "content": " is in " + }, + { + "bbox": [ + 67, + 72, + 539, + 110 + ], + "type": "inline_equation", + "content": "\\Sigma_i^p" + }, + { + "bbox": [ + 67, + 72, + 539, + 110 + ], + "type": "text", + "content": " (the " + }, + { + "bbox": [ + 67, + 72, + 539, + 110 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 67, + 72, + 539, + 110 + ], + "type": "text", + "content": "-th level of the polynomial hierarchy) if and only if there is a " + }, + { + "bbox": [ + 67, + 72, + 539, + 110 + ], + "type": "inline_equation", + "content": "\\Sigma_i^b" + }, + { + "bbox": [ + 67, + 72, + 539, + 110 + ], + "type": "text", + "content": "-formula that agrees with it over " + }, + { + "bbox": [ + 67, + 72, + 539, + 110 + ], + "type": "inline_equation", + "content": "\\mathbb{N}" + }, + { + "bbox": [ + 67, + 72, + 539, + 110 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 68, + 113, + 539, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 113, + 539, + 167 + ], + "spans": [ + { + "bbox": [ + 68, + 113, + 539, + 167 + ], + "type": "text", + "content": "The theories introduced by Buss share a common set BASIC of finitely many axioms postulating the expected arithmetic behavior of the constants, predicates, and function symbols, e.g., " + }, + { + "bbox": [ + 68, + 113, + 539, + 167 + ], + "type": "inline_equation", + "content": "x + y = y + x" + }, + { + "bbox": [ + 68, + 113, + 539, + 167 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 68, + 113, + 539, + 167 + ], + "type": "inline_equation", + "content": "|1| = 1" + }, + { + "bbox": [ + 68, + 113, + 539, + 167 + ], + "type": "text", + "content": " (see, e.g., [Kra95, Page 68] for the complete list). The only difference among the theories is the kind of induction axiom scheme that each of them postulates." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 182, + 493, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 182, + 493, + 196 + ], + "spans": [ + { + "bbox": [ + 68, + 182, + 493, + 196 + ], + "type": "text", + "content": "Theory " + }, + { + "bbox": [ + 68, + 182, + 493, + 196 + ], + "type": "inline_equation", + "content": "\\mathsf{T}_2^1" + }, + { + "bbox": [ + 68, + 182, + 493, + 196 + ], + "type": "text", + "content": ". 
This is a theory in the language " + }, + { + "bbox": [ + 68, + 182, + 493, + 196 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathbb{B}}" + }, + { + "bbox": [ + 68, + 182, + 493, + 196 + ], + "type": "text", + "content": " extending BASIC by the induction axiom IND" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 207, + 205, + 402, + 220 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 207, + 205, + 402, + 220 + ], + "spans": [ + { + "bbox": [ + 207, + 205, + 402, + 220 + ], + "type": "interline_equation", + "content": "\\varphi (0) \\wedge \\forall x (\\varphi (x) \\rightarrow \\varphi (x + 1)) \\rightarrow \\forall x \\varphi (x)", + "image_path": "729decb105fb40164b936bb66a56656c3f63060a5e881226effea4eccac1ac9b.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 68, + 228, + 480, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 228, + 480, + 243 + ], + "spans": [ + { + "bbox": [ + 68, + 228, + 480, + 243 + ], + "type": "text", + "content": "for all " + }, + { + "bbox": [ + 68, + 228, + 480, + 243 + ], + "type": "inline_equation", + "content": "\\Sigma_1^b" + }, + { + "bbox": [ + 68, + 228, + 480, + 243 + ], + "type": "text", + "content": "-formulas " + }, + { + "bbox": [ + 68, + 228, + 480, + 243 + ], + "type": "inline_equation", + "content": "\\varphi(a)" + }, + { + "bbox": [ + 68, + 228, + 480, + 243 + ], + "type": "text", + "content": ". The formula " + }, + { + "bbox": [ + 68, + 228, + 480, + 243 + ], + "type": "inline_equation", + "content": "\\varphi(a)" + }, + { + "bbox": [ + 68, + 228, + 480, + 243 + ], + "type": "text", + "content": " may contain other free variables in addition to " + }, + { + "bbox": [ + 68, + 228, + 480, + 243 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 68, + 228, + 480, + 243 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 255, + 539, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 255, + 539, + 310 + ], + "spans": [ + { + "bbox": [ + 67, + 255, + 539, + 310 + ], + "type": "text", + "content": "We say that " + }, + { + "bbox": [ + 67, + 255, + 539, + 310 + ], + "type": "inline_equation", + "content": "\\mathsf{T}_2^1" + }, + { + "bbox": [ + 67, + 255, + 539, + 310 + ], + "type": "text", + "content": " supports induction for NP predicates. Intuitively, this means that we can aim to prove a result in " + }, + { + "bbox": [ + 67, + 255, + 539, + 310 + ], + "type": "inline_equation", + "content": "\\mathsf{T}_2^1" + }, + { + "bbox": [ + 67, + 255, + 539, + 310 + ], + "type": "text", + "content": " by induction, provided the induction hypothesis is defined by a predicate computable in NP. This definition can be extended to a theory that postulates induction for " + }, + { + "bbox": [ + 67, + 255, + 539, + 310 + ], + "type": "inline_equation", + "content": "\\Sigma_i^b" + }, + { + "bbox": [ + 67, + 255, + 539, + 310 + ], + "type": "text", + "content": "-formulas, which gives rise to the theory " + }, + { + "bbox": [ + 67, + 255, + 539, + 310 + ], + "type": "inline_equation", + "content": "\\mathsf{T}_2^i" + }, + { + "bbox": [ + 67, + 255, + 539, + 310 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 325, + 539, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 325, + 539, + 350 + ], + "spans": [ + { + "bbox": [ + 67, + 325, + 539, + 350 + ], + "type": "text", + "content": "Theory " + }, + { + "bbox": [ + 67, + 325, + 539, + 350 + ], + "type": "inline_equation", + "content": "S_2^1" + }, + { + "bbox": [ + 67, + 325, + 539, + 350 + ], + "type": "text", + "content": ". 
This is a theory in the language " + }, + { + "bbox": [ + 67, + 325, + 539, + 350 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathsf{B}}" + }, + { + "bbox": [ + 67, + 325, + 539, + 350 + ], + "type": "text", + "content": " extending BASIC by the polynomial induction axiom PIND" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 206, + 352, + 403, + 367 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 206, + 352, + 403, + 367 + ], + "spans": [ + { + "bbox": [ + 206, + 352, + 403, + 367 + ], + "type": "interline_equation", + "content": "\\varphi (0) \\wedge \\forall x (\\varphi (\\lfloor x / 2 \\rfloor) \\rightarrow \\varphi (x)) \\rightarrow \\forall x \\varphi (x)", + "image_path": "a3f7f1062be71f63c4bae1d01e8f8738c6ae6508294074041d1049150ddf8ed7.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 68, + 371, + 479, + 385 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 371, + 479, + 385 + ], + "spans": [ + { + "bbox": [ + 68, + 371, + 479, + 385 + ], + "type": "text", + "content": "for all " + }, + { + "bbox": [ + 68, + 371, + 479, + 385 + ], + "type": "inline_equation", + "content": "\\Sigma_1^b" + }, + { + "bbox": [ + 68, + 371, + 479, + 385 + ], + "type": "text", + "content": "-formulas " + }, + { + "bbox": [ + 68, + 371, + 479, + 385 + ], + "type": "inline_equation", + "content": "\\varphi(a)" + }, + { + "bbox": [ + 68, + 371, + 479, + 385 + ], + "type": "text", + "content": ". The formula " + }, + { + "bbox": [ + 68, + 371, + 479, + 385 + ], + "type": "inline_equation", + "content": "\\varphi(a)" + }, + { + "bbox": [ + 68, + 371, + 479, + 385 + ], + "type": "text", + "content": " may contain other free variables in addition to " + }, + { + "bbox": [ + 68, + 371, + 479, + 385 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 68, + 371, + 479, + 385 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 399, + 539, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 399, + 539, + 479 + ], + "spans": [ + { + "bbox": [ + 67, + 399, + 539, + 479 + ], + "type": "text", + "content": "Intuitively, polynomial induction reduces the proof of " + }, + { + "bbox": [ + 67, + 399, + 539, + 479 + ], + "type": "inline_equation", + "content": "\\varphi(x)" + }, + { + "bbox": [ + 67, + 399, + 539, + 479 + ], + "type": "text", + "content": " to proving " + }, + { + "bbox": [ + 67, + 399, + 539, + 479 + ], + "type": "inline_equation", + "content": "\\varphi(\\lfloor x/2 \\rfloor)" + }, + { + "bbox": [ + 67, + 399, + 539, + 479 + ], + "type": "text", + "content": ". Unlike the standard induction axiom, this approach allows us to reach the base case in just " + }, + { + "bbox": [ + 67, + 399, + 539, + 479 + ], + "type": "inline_equation", + "content": "\\mathrm{poly}(n)" + }, + { + "bbox": [ + 67, + 399, + 539, + 479 + ], + "type": "text", + "content": " steps when starting with an integer " + }, + { + "bbox": [ + 67, + 399, + 539, + 479 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 67, + 399, + 539, + 479 + ], + "type": "text", + "content": " represented by " + }, + { + "bbox": [ + 67, + 399, + 539, + 479 + ], + "type": "inline_equation", + "content": "\\mathrm{poly}(n)" + }, + { + "bbox": [ + 67, + 399, + 539, + 479 + ], + "type": "text", + "content": " bits. This has implications for the efficiency of translating certain proofs in " + }, + { + "bbox": [ + 67, + 399, + 539, + 479 + ], + "type": "inline_equation", + "content": "\\mathsf{S}_2^1" + }, + { + "bbox": [ + 67, + 399, + 539, + 479 + ], + "type": "text", + "content": " into sequences of propositional proofs and for the extraction of polynomial-time algorithms from proofs (see Section 3.1 and Section 3.2). 
Analogously to " + }, + { + "bbox": [ + 67, + 399, + 539, + 479 + ], + "type": "inline_equation", + "content": "\\mathsf{T}_2^i" + }, + { + "bbox": [ + 67, + 399, + 539, + 479 + ], + "type": "text", + "content": ", we can define the theories " + }, + { + "bbox": [ + 67, + 399, + 539, + 479 + ], + "type": "inline_equation", + "content": "\\mathsf{S}_2^i" + }, + { + "bbox": [ + 67, + 399, + 539, + 479 + ], + "type": "text", + "content": " via polynomial induction for " + }, + { + "bbox": [ + 67, + 399, + 539, + 479 + ], + "type": "inline_equation", + "content": "\\Sigma_i^b" + }, + { + "bbox": [ + 67, + 399, + 539, + 479 + ], + "type": "text", + "content": "-formulas." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 68, + 480, + 538, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 480, + 538, + 506 + ], + "spans": [ + { + "bbox": [ + 68, + 480, + 538, + 506 + ], + "type": "text", + "content": "It is known that " + }, + { + "bbox": [ + 68, + 480, + 538, + 506 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 68, + 480, + 538, + 506 + ], + "type": "text", + "content": " is essentially equivalent to " + }, + { + "bbox": [ + 68, + 480, + 538, + 506 + ], + "type": "inline_equation", + "content": "\\mathsf{T}_2^0" + }, + { + "bbox": [ + 68, + 480, + 538, + 506 + ], + "type": "text", + "content": " under an appropriate vocabulary and axioms [Jer'06], and that " + }, + { + "bbox": [ + 68, + 480, + 538, + 506 + ], + "type": "inline_equation", + "content": "\\mathsf{S}_2^i \\subseteq \\mathsf{T}_2^i \\subseteq \\mathsf{S}_2^{i+1}" + }, + { + "bbox": [ + 68, + 480, + 538, + 506 + ], + "type": "text", + "content": " for every " + }, + { + "bbox": [ + 68, + 480, + 538, + 506 + ], + "type": "inline_equation", + "content": "i \\geq 1" + }, + { + "bbox": [ + 68, + 480, + 538, + 506 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 68, + 508, + 539, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 508, + 539, + 575 + ], + "spans": [ + { + "bbox": [ + 68, + 508, + 539, + 575 + ], + "type": "text", + "content": "When stating and proving results in " + }, + { + "bbox": [ + 68, + 508, + 539, + 575 + ], + "type": "inline_equation", + "content": "\\mathsf{S}_2^1" + }, + { + "bbox": [ + 68, + 508, + 539, + 575 + ], + "type": "text", + "content": ", it is convenient to employ a more expressive vocabulary under which any polynomial-time function can be easily described. Moreover, it is possible to achieve this in a conservative way, i.e., without increasing the power of the theory. In more detail, let " + }, + { + "bbox": [ + 68, + 508, + 539, + 575 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 68, + 508, + 539, + 575 + ], + "type": "text", + "content": " be a set of " + }, + { + "bbox": [ + 68, + 508, + 539, + 575 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathsf{B}}" + }, + { + "bbox": [ + 68, + 508, + 539, + 575 + ], + "type": "text", + "content": "-formulas. 
We say that a polynomial-time function " + }, + { + "bbox": [ + 68, + 508, + 539, + 575 + ], + "type": "inline_equation", + "content": "f\\colon \\mathbb{N}^k\\to \\mathbb{N}" + }, + { + "bbox": [ + 68, + 508, + 539, + 575 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 68, + 508, + 539, + 575 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 68, + 508, + 539, + 575 + ], + "type": "text", + "content": "-definable in " + }, + { + "bbox": [ + 68, + 508, + 539, + 575 + ], + "type": "inline_equation", + "content": "\\mathsf{S}_2^1" + }, + { + "bbox": [ + 68, + 508, + 539, + 575 + ], + "type": "text", + "content": " if there is a formula " + }, + { + "bbox": [ + 68, + 508, + 539, + 575 + ], + "type": "inline_equation", + "content": "\\psi (\\vec{x},y)\\in \\Gamma" + }, + { + "bbox": [ + 68, + 508, + 539, + 575 + ], + "type": "text", + "content": " for which the following conditions hold:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 77, + 582, + 340, + 618 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 80, + 582, + 340, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 582, + 340, + 597 + ], + "spans": [ + { + "bbox": [ + 80, + 582, + 340, + 597 + ], + "type": "text", + "content": "(i) For every " + }, + { + "bbox": [ + 80, + 582, + 340, + 597 + ], + "type": "inline_equation", + "content": "a \\in \\mathbb{N}^k" + }, + { + "bbox": [ + 80, + 582, + 340, + 597 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 80, + 582, + 340, + 597 + ], + "type": "inline_equation", + "content": "f(\\vec{a}) = b" + }, + { + "bbox": [ + 80, + 582, + 340, + 597 + ], + "type": "text", + "content": " if and only if " + }, + { + "bbox": [ + 80, + 582, + 340, + 597 + ], + "type": "inline_equation", + "content": "\\mathbb{N} \\models \\varphi(\\vec{a}, b)" + }, + { + "bbox": [ + 80, + 582, + 340, + 597 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 77, + 603, + 310, + 618 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 603, + 310, + 618 + ], + "spans": [ + { + "bbox": [ + 77, + 603, + 310, + 618 + ], + "type": "text", + "content": "(ii) " + }, + { + "bbox": [ + 77, + 603, + 310, + 618 + ], + "type": "inline_equation", + "content": "\\mathsf{S}_2^1\\vdash \\forall \\vec{x}\\left(\\exists y\\left(\\varphi (\\vec{x},y)\\land \\forall z\\left(\\varphi (\\vec{x},z)\\to y = z\\right)\\right). \\right." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 67, + 624, + 539, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 624, + 539, + 693 + ], + "spans": [ + { + "bbox": [ + 67, + 624, + 539, + 693 + ], + "type": "text", + "content": "Every function " + }, + { + "bbox": [ + 67, + 624, + 539, + 693 + ], + "type": "inline_equation", + "content": "f \\in \\mathsf{FP}" + }, + { + "bbox": [ + 67, + 624, + 539, + 693 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 67, + 624, + 539, + 693 + ], + "type": "inline_equation", + "content": "\\Sigma_1^b" + }, + { + "bbox": [ + 67, + 624, + 539, + 693 + ], + "type": "text", + "content": "-definable in " + }, + { + "bbox": [ + 67, + 624, + 539, + 693 + ], + "type": "inline_equation", + "content": "S_2^1" + }, + { + "bbox": [ + 67, + 624, + 539, + 693 + ], + "type": "text", + "content": ". 
By adding all functions in " + }, + { + "bbox": [ + 67, + 624, + 539, + 693 + ], + "type": "inline_equation", + "content": "\\mathsf{FP}" + }, + { + "bbox": [ + 67, + 624, + 539, + 693 + ], + "type": "text", + "content": " to the vocabulary of " + }, + { + "bbox": [ + 67, + 624, + 539, + 693 + ], + "type": "inline_equation", + "content": "S_2^1" + }, + { + "bbox": [ + 67, + 624, + 539, + 693 + ], + "type": "text", + "content": " and by extending " + }, + { + "bbox": [ + 67, + 624, + 539, + 693 + ], + "type": "inline_equation", + "content": "S_2^1" + }, + { + "bbox": [ + 67, + 624, + 539, + 693 + ], + "type": "text", + "content": " with their defining axioms (i.e., " + }, + { + "bbox": [ + 67, + 624, + 539, + 693 + ], + "type": "inline_equation", + "content": "\\forall x \\varphi(x, f(x))" + }, + { + "bbox": [ + 67, + 624, + 539, + 693 + ], + "type": "text", + "content": "), we obtain a theory " + }, + { + "bbox": [ + 67, + 624, + 539, + 693 + ], + "type": "inline_equation", + "content": "S_2^1(\\mathcal{L}_{\\mathsf{PV}})" + }, + { + "bbox": [ + 67, + 624, + 539, + 693 + ], + "type": "text", + "content": " that can refer to polynomial-time predicates using quantifier-free formulas. " + }, + { + "bbox": [ + 67, + 624, + 539, + 693 + ], + "type": "inline_equation", + "content": "S_2^1(\\mathcal{L}_{\\mathsf{PV}})" + }, + { + "bbox": [ + 67, + 624, + 539, + 693 + ], + "type": "text", + "content": " proves the polynomial induction scheme for both " + }, + { + "bbox": [ + 67, + 624, + 539, + 693 + ], + "type": "inline_equation", + "content": "\\Sigma_1^b" + }, + { + "bbox": [ + 67, + 624, + 539, + 693 + ], + "type": "text", + "content": "-formulas and " + }, + { + "bbox": [ + 67, + 624, + 539, + 693 + ], + "type": "inline_equation", + "content": "\\Pi_1^b" + }, + { + "bbox": [ + 67, + 624, + 539, + 693 + ], + "type": "text", + "content": "-formulas in the extended vocabulary. 
" + }, + { + "bbox": [ + 67, + 624, + 539, + 693 + ], + "type": "inline_equation", + "content": "S_2^1(\\mathcal{L}_{\\mathsf{PV}})" + }, + { + "bbox": [ + 67, + 624, + 539, + 693 + ], + "type": "text", + "content": " is conservative over " + }, + { + "bbox": [ + 67, + 624, + 539, + 693 + ], + "type": "inline_equation", + "content": "S_2^1" + }, + { + "bbox": [ + 67, + 624, + 539, + 693 + ], + "type": "text", + "content": ", in the sense that any " + }, + { + "bbox": [ + 67, + 624, + 539, + 693 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathsf{B}}" + }, + { + "bbox": [ + 67, + 624, + 539, + 693 + ], + "type": "text", + "content": "-sentence provable in " + }, + { + "bbox": [ + 67, + 624, + 539, + 693 + ], + "type": "inline_equation", + "content": "S_2^1(\\mathcal{L}_{\\mathsf{PV}})" + }, + { + "bbox": [ + 67, + 624, + 539, + 693 + ], + "type": "text", + "content": " is also provable in " + }, + { + "bbox": [ + 67, + 624, + 539, + 693 + ], + "type": "inline_equation", + "content": "S_2^1" + }, + { + "bbox": [ + 67, + 624, + 539, + 693 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 712, + 308, + 720 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 712, + 308, + 720 + ], + "spans": [ + { + "bbox": [ + 302, + 712, + 308, + 720 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 539, + 114 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 539, + 114 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 539, + 114 + ], + "type": "text", + "content": "A " + }, + { + "bbox": [ + 67, + 72, + 539, + 114 + ], + "type": "inline_equation", + "content": "\\forall \\Sigma_{i}^{b}" + }, + { + "bbox": [ + 67, + 72, + 539, + 114 + ], + "type": "text", + "content": "-sentence is simply a sentence " + }, + { + "bbox": [ + 67, + 72, + 539, + 114 + ], + "type": "inline_equation", + "content": "\\psi = \\forall \\vec{x} \\varphi(\\vec{x})" + }, + { + "bbox": [ + 67, + 72, + 539, + 114 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 67, + 72, + 539, + 114 + ], + "type": "inline_equation", + "content": "\\varphi \\in \\Sigma_{i}^{b}" + }, + { + "bbox": [ + 67, + 72, + 539, + 114 + ], + "type": "text", + "content": ". 
Every " + }, + { + "bbox": [ + 67, + 72, + 539, + 114 + ], + "type": "inline_equation", + "content": "\\forall \\Sigma_{1}^{b}" + }, + { + "bbox": [ + 67, + 72, + 539, + 114 + ], + "type": "text", + "content": "-sentence provable in " + }, + { + "bbox": [ + 67, + 72, + 539, + 114 + ], + "type": "inline_equation", + "content": "S_{2}^{1}(\\mathcal{L}_{\\mathsf{PV}})" + }, + { + "bbox": [ + 67, + 72, + 539, + 114 + ], + "type": "text", + "content": " is also provable in " + }, + { + "bbox": [ + 67, + 72, + 539, + 114 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 72, + 539, + 114 + ], + "type": "text", + "content": ". In other words, " + }, + { + "bbox": [ + 67, + 72, + 539, + 114 + ], + "type": "inline_equation", + "content": "S_{2}^{1}(\\mathcal{L}_{\\mathsf{PV}})" + }, + { + "bbox": [ + 67, + 72, + 539, + 114 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 67, + 72, + 539, + 114 + ], + "type": "inline_equation", + "content": "\\forall \\Sigma_{1}^{b}" + }, + { + "bbox": [ + 67, + 72, + 539, + 114 + ], + "type": "text", + "content": "-conservative over " + }, + { + "bbox": [ + 67, + 72, + 539, + 114 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 72, + 539, + 114 + ], + "type": "text", + "content": ". On the other hand, it is known that if " + }, + { + "bbox": [ + 67, + 72, + 539, + 114 + ], + "type": "inline_equation", + "content": "S_{2}^{1}(\\mathcal{L}_{\\mathsf{PV}}) = \\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 72, + 539, + 114 + ], + "type": "text", + "content": ", then the polynomial-time hierarchy collapses." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 129, + 130, + 141 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 129, + 130, + 141 + ], + "spans": [ + { + "bbox": [ + 69, + 129, + 130, + 141 + ], + "type": "text", + "content": "2.2.3 APC" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 150, + 541, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 150, + 541, + 191 + ], + "spans": [ + { + "bbox": [ + 67, + 150, + 541, + 191 + ], + "type": "text", + "content": "In order to formalize probabilistic methods and randomized algorithms, Jeřábek [Jeř04, Jeř05, Jeř07] formulated the theory " + }, + { + "bbox": [ + 67, + 150, + 541, + 191 + ], + "type": "inline_equation", + "content": "\\mathsf{APC}_1" + }, + { + "bbox": [ + 67, + 150, + 541, + 191 + ], + "type": "text", + "content": " (this terminology is from [BKT14]) by extending " + }, + { + "bbox": [ + 67, + 150, + 541, + 191 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 150, + 541, + 191 + ], + "type": "text", + "content": " with the dual Weak Pigeonhole Principle (dWPHP) for " + }, + { + "bbox": [ + 67, + 150, + 541, + 191 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 150, + 541, + 191 + ], + "type": "text", + "content": " functions:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 209, + 201, + 400, + 216 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 209, + 201, + 400, + 216 + ], + "spans": [ + { + "bbox": [ + 209, + 201, + 400, + 216 + ], + "type": "interline_equation", + "content": "\\mathsf {A P C} _ {1} \\triangleq \\mathsf {P V} _ {1} \\cup \\{\\mathsf {d W P H P} (f) \\mid f \\in \\mathcal {L} _ {\\mathsf {P V}} \\}.", + "image_path": "f8c01d27d4a2e2889ddd669ad8e3f7405e8fe86a6ea0eff1ea0f08c3ec72955f.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 226, + 541, + 252 + ], + "type": 
"text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 226, + 541, + 252 + ], + "spans": [ + { + "bbox": [ + 67, + 226, + 541, + 252 + ], + "type": "text", + "content": "Informally, each sentence " + }, + { + "bbox": [ + 67, + 226, + 541, + 252 + ], + "type": "inline_equation", + "content": "\\mathrm{dWPHP}(f)" + }, + { + "bbox": [ + 67, + 226, + 541, + 252 + ], + "type": "text", + "content": " postulates that, for every length " + }, + { + "bbox": [ + 67, + 226, + 541, + 252 + ], + "type": "inline_equation", + "content": "n = |N|" + }, + { + "bbox": [ + 67, + 226, + 541, + 252 + ], + "type": "text", + "content": ", there is " + }, + { + "bbox": [ + 67, + 226, + 541, + 252 + ], + "type": "inline_equation", + "content": "y < (1 + 1/n) \\cdot N" + }, + { + "bbox": [ + 67, + 226, + 541, + 252 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 67, + 226, + 541, + 252 + ], + "type": "inline_equation", + "content": "f(x) \\neq y" + }, + { + "bbox": [ + 67, + 226, + 541, + 252 + ], + "type": "text", + "content": " for every " + }, + { + "bbox": [ + 67, + 226, + 541, + 252 + ], + "type": "inline_equation", + "content": "x < N" + }, + { + "bbox": [ + 67, + 226, + 541, + 252 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 253, + 541, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 253, + 541, + 281 + ], + "spans": [ + { + "bbox": [ + 68, + 253, + 541, + 281 + ], + "type": "text", + "content": "It is known that the dual Weak Pigeonhole Principle for polynomial-time predicates can be proved in " + }, + { + "bbox": [ + 68, + 253, + 541, + 281 + ], + "type": "inline_equation", + "content": "\\mathsf{T}_2^2" + }, + { + "bbox": [ + 68, + 253, + 541, + 281 + ], + "type": "text", + "content": " [MPW02], and consequently " + }, + { + "bbox": [ + 68, + 253, + 541, + 281 + ], + "type": "inline_equation", + "content": "\\mathsf{APC}_1 \\subseteq \\mathsf{T}_2^2(\\mathcal{L}_{\\mathsf{PV}})" + }, + { + "bbox": [ + 68, + 253, + 541, + 281 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 300, + 297, + 316 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 300, + 297, + 316 + ], + "spans": [ + { + "bbox": [ + 68, + 300, + 297, + 316 + ], + "type": "text", + "content": "3 Auxiliary Definitions and Results" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 327, + 211, + 341 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 327, + 211, + 341 + ], + "spans": [ + { + "bbox": [ + 69, + 327, + 211, + 341 + ], + "type": "text", + "content": "3.1 Witnessing Theorems" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 348, + 541, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 348, + 541, + 389 + ], + "spans": [ + { + "bbox": [ + 67, + 348, + 541, + 389 + ], + "type": "text", + "content": "Suppose a sentence " + }, + { + "bbox": [ + 67, + 348, + 541, + 389 + ], + "type": "inline_equation", + "content": "\\psi" + }, + { + "bbox": [ + 67, + 348, + 541, + 389 + ], + "type": "text", + "content": " of a certain syntactic form admits a proof in a theory " + }, + { + "bbox": [ + 
67, + 348, + 541, + 389 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 67, + 348, + 541, + 389 + ], + "type": "text", + "content": " over a vocabulary " + }, + { + "bbox": [ + 67, + 348, + 541, + 389 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 67, + 348, + 541, + 389 + ], + "type": "text", + "content": ". A witnessing theorem allows us to extract computational information from any such proof, by showing that an existential quantifier in " + }, + { + "bbox": [ + 67, + 348, + 541, + 389 + ], + "type": "inline_equation", + "content": "\\psi" + }, + { + "bbox": [ + 67, + 348, + 541, + 389 + ], + "type": "text", + "content": " can be witnessed by " + }, + { + "bbox": [ + 67, + 348, + 541, + 389 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 67, + 348, + 541, + 389 + ], + "type": "text", + "content": "-terms. The simplest example of such a result is stated next." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 398, + 541, + 439 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 398, + 541, + 439 + ], + "spans": [ + { + "bbox": [ + 67, + 398, + 541, + 439 + ], + "type": "text", + "content": "Theorem 3.1 (Herbrand's Theorem (see, e.g., [Bus94, McK10])). Let " + }, + { + "bbox": [ + 67, + 398, + 541, + 439 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 67, + 398, + 541, + 439 + ], + "type": "text", + "content": " be a universal theory over a vocabulary " + }, + { + "bbox": [ + 67, + 398, + 541, + 439 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 67, + 398, + 541, + 439 + ], + "type": "text", + "content": ". 
Let " + }, + { + "bbox": [ + 67, + 398, + 541, + 439 + ], + "type": "inline_equation", + "content": "\\varphi(x,y)" + }, + { + "bbox": [ + 67, + 398, + 541, + 439 + ], + "type": "text", + "content": " be a quantifier-free " + }, + { + "bbox": [ + 67, + 398, + 541, + 439 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 67, + 398, + 541, + 439 + ], + "type": "text", + "content": "-formula, and suppose that " + }, + { + "bbox": [ + 67, + 398, + 541, + 439 + ], + "type": "inline_equation", + "content": "T \\vdash \\forall x \\exists y \\varphi(x,y)" + }, + { + "bbox": [ + 67, + 398, + 541, + 439 + ], + "type": "text", + "content": ". There is a constant " + }, + { + "bbox": [ + 67, + 398, + 541, + 439 + ], + "type": "inline_equation", + "content": "k \\geq 1" + }, + { + "bbox": [ + 67, + 398, + 541, + 439 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 398, + 541, + 439 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 67, + 398, + 541, + 439 + ], + "type": "text", + "content": "-terms " + }, + { + "bbox": [ + 67, + 398, + 541, + 439 + ], + "type": "inline_equation", + "content": "t_1(x),\\ldots ,t_k(x)" + }, + { + "bbox": [ + 67, + 398, + 541, + 439 + ], + "type": "text", + "content": " such that" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 189, + 449, + 419, + 464 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 189, + 449, + 419, + 464 + ], + "spans": [ + { + "bbox": [ + 189, + 449, + 419, + 464 + ], + "type": "interline_equation", + "content": "T \\vdash \\varphi (x, t _ {1} (x)) \\lor \\varphi (x, t _ {2} (x)) \\lor \\dots \\lor \\varphi (x, t _ {k} (x)).", + "image_path": "c2770011c91d0f3fb1fef3645a44f5372fec17fd3463618290bb484c84ce9f32.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 474, + 541, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 474, + 541, + 514 + ], + "spans": [ + 
{ + "bbox": [ + 67, + 474, + 541, + 514 + ], + "type": "text", + "content": "As an immediate consequence, if we apply Theorem 3.1 to " + }, + { + "bbox": [ + 67, + 474, + 541, + 514 + ], + "type": "inline_equation", + "content": "T \\triangleq \\mathrm{PV}_1" + }, + { + "bbox": [ + 67, + 474, + 541, + 514 + ], + "type": "text", + "content": ", we obtain " + }, + { + "bbox": [ + 67, + 474, + 541, + 514 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{PV}}" + }, + { + "bbox": [ + 67, + 474, + 541, + 514 + ], + "type": "text", + "content": "-terms (corresponding to polynomial-time functions over " + }, + { + "bbox": [ + 67, + 474, + 541, + 514 + ], + "type": "inline_equation", + "content": "\\mathbb{N}" + }, + { + "bbox": [ + 67, + 474, + 541, + 514 + ], + "type": "text", + "content": ") such that, given " + }, + { + "bbox": [ + 67, + 474, + 541, + 514 + ], + "type": "inline_equation", + "content": "a \\in \\mathbb{N}" + }, + { + "bbox": [ + 67, + 474, + 541, + 514 + ], + "type": "text", + "content": ", at least one of them produces a witness " + }, + { + "bbox": [ + 67, + 474, + 541, + 514 + ], + "type": "inline_equation", + "content": "b \\in \\mathbb{N}" + }, + { + "bbox": [ + 67, + 474, + 541, + 514 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 67, + 474, + 541, + 514 + ], + "type": "inline_equation", + "content": "\\mathbb{N} \\models \\varphi(a, b)" + }, + { + "bbox": [ + 67, + 474, + 541, + 514 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 85, + 515, + 451, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 515, + 451, + 529 + ], + "spans": [ + { + "bbox": [ + 85, + 515, + 451, + 529 + ], + "type": "text", + "content": "Next, we consider the provability of more complex sentences in a universal theory." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 67, + 537, + 541, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 537, + 541, + 578 + ], + "spans": [ + { + "bbox": [ + 67, + 537, + 541, + 578 + ], + "type": "text", + "content": "Theorem 3.2 (KPT Theorem [KPT91]). Let " + }, + { + "bbox": [ + 67, + 537, + 541, + 578 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 67, + 537, + 541, + 578 + ], + "type": "text", + "content": " be a universal theory with vocabulary " + }, + { + "bbox": [ + 67, + 537, + 541, + 578 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 67, + 537, + 541, + 578 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 67, + 537, + 541, + 578 + ], + "type": "inline_equation", + "content": "\\varphi(w, u, v)" + }, + { + "bbox": [ + 67, + 537, + 541, + 578 + ], + "type": "text", + "content": " be a quantifier-free " + }, + { + "bbox": [ + 67, + 537, + 541, + 578 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 67, + 537, + 541, + 578 + ], + "type": "text", + "content": "-formula, and suppose that " + }, + { + "bbox": [ + 67, + 537, + 541, + 578 + ], + "type": "inline_equation", + "content": "T \\vdash \\forall w \\exists u \\forall v \\varphi(w, u, v)" + }, + { + "bbox": [ + 67, + 537, + 541, + 578 + ], + "type": "text", + "content": ". 
Then there exist a constant " + }, + { + "bbox": [ + 67, + 537, + 541, + 578 + ], + "type": "inline_equation", + "content": "k \\geq 1" + }, + { + "bbox": [ + 67, + 537, + 541, + 578 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 537, + 541, + 578 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 67, + 537, + 541, + 578 + ], + "type": "text", + "content": "-terms " + }, + { + "bbox": [ + 67, + 537, + 541, + 578 + ], + "type": "inline_equation", + "content": "t_1, \\ldots, t_k" + }, + { + "bbox": [ + 67, + 537, + 541, + 578 + ], + "type": "text", + "content": " such that" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 121, + 588, + 487, + 603 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 588, + 487, + 603 + ], + "spans": [ + { + "bbox": [ + 121, + 588, + 487, + 603 + ], + "type": "interline_equation", + "content": "T \\vdash \\varphi (w, t _ {1} (w), v _ {1}) \\vee \\varphi (w, t _ {2} (w, v _ {1}), v _ {2}) \\vee \\dots \\vee \\varphi (w, t _ {k} (w, v _ {1}, \\dots , v _ {k - 1}), v _ {k}),", + "image_path": "27e37c0bfa938324d20725c153383fe2e008ba8d7c523465b4cde0d3211f1a38.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 68, + 613, + 493, + 627 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 613, + 493, + 627 + ], + "spans": [ + { + "bbox": [ + 68, + 613, + 493, + 627 + ], + "type": "text", + "content": "where the notation " + }, + { + "bbox": [ + 68, + 613, + 493, + 627 + ], + "type": "inline_equation", + "content": "t_i(w, v_1, \\ldots, v_{i-1})" + }, + { + "bbox": [ + 68, + 613, + 493, + 627 + ], + "type": "text", + "content": " indicates that these are the only variables occurring in " + }, + { + "bbox": [ + 68, + 613, + 493, + 627 + ], + "type": "inline_equation", + "content": "t_i" + }, + { + "bbox": [ + 68, + 613, + 493, + 627 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 67, + 636, + 541, + 663 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 636, + 541, + 663 + ], + "spans": [ + { + "bbox": [ + 67, + 636, + 541, + 663 + ], + "type": "text", + "content": "Theorem 3.2 has a natural interpretation as an interactive game with finitely many rounds, which we revisit in Section 5.1.1 in the context of the provability of circuit upper bounds." + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 81, + 670, + 486, + 682 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 670, + 486, + 682 + ], + "spans": [ + { + "bbox": [ + 81, + 670, + 486, + 682 + ], + "type": "text", + "content": "3The dWPHP axiom scheme is also referred to as the surjective Weak Pigeonhole Principle in some references." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 302, + 712, + 308, + 720 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 712, + 308, + 720 + ], + "spans": [ + { + "bbox": [ + 302, + 712, + 308, + 720 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 539, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 539, + 125 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 539, + 125 + ], + "type": "text", + "content": "A similar form of Theorem 3.2 holds under the provability of a " + }, + { + "bbox": [ + 67, + 72, + 539, + 125 + ], + "type": "inline_equation", + "content": "\\forall \\exists \\forall \\exists" + }, + { + "bbox": [ + 67, + 72, + 539, + 125 + ], + "type": "text", + "content": "-sentence (see, e.g., " + }, + { + "bbox": [ + 67, + 72, + 539, + 125 + ], + "type": "inline_equation", + "content": "\\mathrm{[CKK^{+}24]}" + }, + { + "bbox": [ + 67, + 72, + 539, + 125 + ], + "type": "text", + "content": " for 
a concrete application in the context of circuit lower bounds). In contrast, there is no straightforward analogue of the KPT Theorem for a larger number of quantifier alternations. In this case, more general formulations are needed, such as the ones considered in [Pud06, BKT14, LO23]." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 127, + 541, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 127, + 541, + 236 + ], + "spans": [ + { + "bbox": [ + 67, + 127, + 541, + 236 + ], + "type": "text", + "content": "It is also possible to establish witnessing theorems for theories that are not universal. This can be done either by first transforming the theory into a universal theory through the inclusion of new function symbols and quantifier elimination, or via direct approaches (see, e.g., [Kra95, Section 7.3]). Another example is Buss's Theorem for " + }, + { + "bbox": [ + 67, + 127, + 541, + 236 + ], + "type": "inline_equation", + "content": "S_2^1" + }, + { + "bbox": [ + 67, + 127, + 541, + 236 + ], + "type": "text", + "content": ", which can be used to show that every " + }, + { + "bbox": [ + 67, + 127, + 541, + 236 + ], + "type": "inline_equation", + "content": "\\forall \\Sigma_1^b" + }, + { + "bbox": [ + 67, + 127, + 541, + 236 + ], + "type": "text", + "content": "-sentence provable in " + }, + { + "bbox": [ + 67, + 127, + 541, + 236 + ], + "type": "inline_equation", + "content": "S_2^1(\\mathcal{L}_{\\mathsf{PV}})" + }, + { + "bbox": [ + 67, + 127, + 541, + 236 + ], + "type": "text", + "content": " is also provable in " + }, + { + "bbox": [ + 67, + 127, + 541, + 236 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 127, + 541, + 236 + ], + "type": "text", + "content": ". This has two implications. 
First, we can combine this result with Theorem 3.1, which yields polynomial-time algorithms from proofs of " + }, + { + "bbox": [ + 67, + 127, + 541, + 236 + ], + "type": "inline_equation", + "content": "\\forall \\Sigma_1^b" + }, + { + "bbox": [ + 67, + 127, + 541, + 236 + ], + "type": "text", + "content": "-sentences in " + }, + { + "bbox": [ + 67, + 127, + 541, + 236 + ], + "type": "inline_equation", + "content": "S_2^1(\\mathcal{L}_{\\mathsf{PV}})" + }, + { + "bbox": [ + 67, + 127, + 541, + 236 + ], + "type": "text", + "content": ". Second, this means that in some situations we can establish the provability of a sentence in " + }, + { + "bbox": [ + 67, + 127, + 541, + 236 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 127, + 541, + 236 + ], + "type": "text", + "content": " using the more convenient theory " + }, + { + "bbox": [ + 67, + 127, + 541, + 236 + ], + "type": "inline_equation", + "content": "S_2^1(\\mathcal{L}_{\\mathsf{PV}})" + }, + { + "bbox": [ + 67, + 127, + 541, + 236 + ], + "type": "text", + "content": " (see Section 4.2 for an example)." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 251, + 334, + 264 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 251, + 334, + 264 + ], + "spans": [ + { + "bbox": [ + 67, + 251, + 334, + 264 + ], + "type": "text", + "content": "3.2 Bounded Arithmetic and Propositional Proofs" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 272, + 541, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 272, + 541, + 326 + ], + "spans": [ + { + "bbox": [ + 67, + 272, + 541, + 326 + ], + "type": "text", + "content": "In this section, we explain a connection between " + }, + { + "bbox": [ + 67, + 272, + 541, + 326 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 272, + 541, + 326 + ], + "type": "text", + "content": " and the extended Frege proof system discovered by [Coo75]. In short, it says that if a universal " + }, + { + "bbox": [ + 67, + 272, + 541, + 326 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathsf{PV}}" + }, + { + "bbox": [ + 67, + 272, + 541, + 326 + ], + "type": "text", + "content": "-sentence " + }, + { + "bbox": [ + 67, + 272, + 541, + 326 + ], + "type": "inline_equation", + "content": "\\phi(x)" + }, + { + "bbox": [ + 67, + 272, + 541, + 326 + ], + "type": "text", + "content": " is provable in " + }, + { + "bbox": [ + 67, + 272, + 541, + 326 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 272, + 541, + 326 + ], + "type": "text", + "content": ", then there is a translation of " + }, + { + "bbox": [ + 67, + 272, + 541, + 326 + ], + "type": "inline_equation", + "content": "\\phi(x)" + }, + { + "bbox": [ + 67, + 272, + 541, + 326 + ], + "type": "text", + "content": " into a sequence " + }, + { + "bbox": [ + 67, + 272, + 541, + 326 + ], + "type": "inline_equation", + "content": "\\{G_n\\}_{n \\geq 1}" + }, + { + "bbox": [ + 67, + 272, + 541, + 326 + ], + "type": "text", + "content": " of 
propositional formulas " + }, + { + "bbox": [ + 67, + 272, + 541, + 326 + ], + "type": "inline_equation", + "content": "G_n(p_1, \\ldots, p_n)" + }, + { + "bbox": [ + 67, + 272, + 541, + 326 + ], + "type": "text", + "content": " such that each " + }, + { + "bbox": [ + 67, + 272, + 541, + 326 + ], + "type": "inline_equation", + "content": "G_n" + }, + { + "bbox": [ + 67, + 272, + 541, + 326 + ], + "type": "text", + "content": " has an extended Frege proof " + }, + { + "bbox": [ + 67, + 272, + 541, + 326 + ], + "type": "inline_equation", + "content": "\\pi_n" + }, + { + "bbox": [ + 67, + 272, + 541, + 326 + ], + "type": "text", + "content": " of size polynomial in " + }, + { + "bbox": [ + 67, + 272, + 541, + 326 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 67, + 272, + 541, + 326 + ], + "type": "text", + "content": ".4" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 327, + 541, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 327, + 541, + 422 + ], + "spans": [ + { + "bbox": [ + 67, + 327, + 541, + 422 + ], + "type": "text", + "content": "First, we review some concepts and fix notation, deferring the details to a standard textbook (e.g., [Kra19]). 
Recall that a propositional formula " + }, + { + "bbox": [ + 67, + 327, + 541, + 422 + ], + "type": "inline_equation", + "content": "G(p_{1},\\ldots ,p_{n})" + }, + { + "bbox": [ + 67, + 327, + 541, + 422 + ], + "type": "text", + "content": " is formed using variables " + }, + { + "bbox": [ + 67, + 327, + 541, + 422 + ], + "type": "inline_equation", + "content": "p_1,\\dots ,p_n" + }, + { + "bbox": [ + 67, + 327, + 541, + 422 + ], + "type": "text", + "content": ", constants 0 and 1, and logical connectives " + }, + { + "bbox": [ + 67, + 327, + 541, + 422 + ], + "type": "inline_equation", + "content": "\\land ,\\lor" + }, + { + "bbox": [ + 67, + 327, + 541, + 422 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 67, + 327, + 541, + 422 + ], + "type": "inline_equation", + "content": "\\neg" + }, + { + "bbox": [ + 67, + 327, + 541, + 422 + ], + "type": "text", + "content": ". A Frege " + }, + { + "bbox": [ + 67, + 327, + 541, + 422 + ], + "type": "inline_equation", + "content": "(\\mathcal{F})" + }, + { + "bbox": [ + 67, + 327, + 541, + 422 + ], + "type": "text", + "content": " proof system is a \"textbook\" style proof system for propositional logic. It can be formulated as a finite set of axiom schemes together with the modus ponens rule. " + }, + { + "bbox": [ + 67, + 327, + 541, + 422 + ], + "type": "inline_equation", + "content": "\\mathcal{F}" + }, + { + "bbox": [ + 67, + 327, + 541, + 422 + ], + "type": "text", + "content": " is known to be sound and complete. The size of a Frege proof is the total number of symbols occurring in the proof. In the extended Frege " + }, + { + "bbox": [ + 67, + 327, + 541, + 422 + ], + "type": "inline_equation", + "content": "(e\\mathcal{F})" + }, + { + "bbox": [ + 67, + 327, + 541, + 422 + ], + "type": "text", + "content": " proof system, we also allow repeated subformulas appearing in a proof to be abbreviated via new variables." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 436, + 541, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 436, + 541, + 478 + ], + "spans": [ + { + "bbox": [ + 67, + 436, + 541, + 478 + ], + "type": "text", + "content": "Cook's Translation [Coo75]. Let " + }, + { + "bbox": [ + 67, + 436, + 541, + 478 + ], + "type": "inline_equation", + "content": "\\varphi" + }, + { + "bbox": [ + 67, + 436, + 541, + 478 + ], + "type": "text", + "content": " be a universal " + }, + { + "bbox": [ + 67, + 436, + 541, + 478 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{PV}}" + }, + { + "bbox": [ + 67, + 436, + 541, + 478 + ], + "type": "text", + "content": "-sentence of the form " + }, + { + "bbox": [ + 67, + 436, + 541, + 478 + ], + "type": "inline_equation", + "content": "\\varphi \\triangleq \\forall x \\psi(x)" + }, + { + "bbox": [ + 67, + 436, + 541, + 478 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 67, + 436, + 541, + 478 + ], + "type": "inline_equation", + "content": "\\psi(x)" + }, + { + "bbox": [ + 67, + 436, + 541, + 478 + ], + "type": "text", + "content": " is a quantifier-free formula. 
Cook [Coo75] established that if " + }, + { + "bbox": [ + 67, + 436, + 541, + 478 + ], + "type": "inline_equation", + "content": "\\varphi" + }, + { + "bbox": [ + 67, + 436, + 541, + 478 + ], + "type": "text", + "content": " is provable in " + }, + { + "bbox": [ + 67, + 436, + 541, + 478 + ], + "type": "inline_equation", + "content": "\\mathrm{PV}_1" + }, + { + "bbox": [ + 67, + 436, + 541, + 478 + ], + "type": "text", + "content": ", then there is a sequence " + }, + { + "bbox": [ + 67, + 436, + 541, + 478 + ], + "type": "inline_equation", + "content": "\\{G_n\\}_{n \\geq 1}" + }, + { + "bbox": [ + 67, + 436, + 541, + 478 + ], + "type": "text", + "content": " of propositional tautologies such that" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 84, + 483, + 538, + 577 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 85, + 483, + 326, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 483, + 326, + 497 + ], + "spans": [ + { + "bbox": [ + 85, + 483, + 326, + 497 + ], + "type": "text", + "content": "- Each " + }, + { + "bbox": [ + 85, + 483, + 326, + 497 + ], + "type": "inline_equation", + "content": "G_{n}(p_{1},\\ldots ,p_{n})" + }, + { + "bbox": [ + 85, + 483, + 326, + 497 + ], + "type": "text", + "content": " is a polynomial-size formula." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 84, + 506, + 508, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 506, + 508, + 520 + ], + "spans": [ + { + "bbox": [ + 84, + 506, + 508, + 520 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 84, + 506, + 508, + 520 + ], + "type": "inline_equation", + "content": "G_{n}" + }, + { + "bbox": [ + 84, + 506, + 508, + 520 + ], + "type": "text", + "content": " encodes that " + }, + { + "bbox": [ + 84, + 506, + 508, + 520 + ], + "type": "inline_equation", + "content": "\\psi(x)" + }, + { + "bbox": [ + 84, + 506, + 508, + 520 + ], + "type": "text", + "content": " is true whenever " + }, + { + "bbox": [ + 84, + 506, + 508, + 520 + ], + "type": "inline_equation", + "content": "|x| \\leq n" + }, + { + "bbox": [ + 84, + 506, + 508, + 520 + ], + "type": "text", + "content": ", i.e., over all integers encoded as " + }, + { + "bbox": [ + 84, + 506, + 508, + 520 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 84, + 506, + 508, + 520 + ], + "type": "text", + "content": "-bit strings." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 85, + 528, + 269, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 528, + 269, + 540 + ], + "spans": [ + { + "bbox": [ + 85, + 528, + 269, + 540 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 85, + 528, + 269, + 540 + ], + "type": "inline_equation", + "content": "G_{n}" + }, + { + "bbox": [ + 85, + 528, + 269, + 540 + ], + "type": "text", + "content": " admits polynomial-size " + }, + { + "bbox": [ + 85, + 528, + 269, + 540 + ], + "type": "inline_equation", + "content": "e\\mathcal{F}" + }, + { + "bbox": [ + 85, + 528, + 269, + 540 + ], + "type": "text", + "content": "-proofs." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 85, + 549, + 538, + 577 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 549, + 538, + 577 + ], + "spans": [ + { + "bbox": [ + 85, + 549, + 538, + 577 + ], + "type": "text", + "content": "- Moreover, the existence of polynomial-size " + }, + { + "bbox": [ + 85, + 549, + 538, + 577 + ], + "type": "inline_equation", + "content": "e\\mathcal{F}" + }, + { + "bbox": [ + 85, + 549, + 538, + 577 + ], + "type": "text", + "content": "-proofs for each " + }, + { + "bbox": [ + 85, + 549, + 538, + 577 + ], + "type": "inline_equation", + "content": "G_{n}" + }, + { + "bbox": [ + 85, + 549, + 538, + 577 + ], + "type": "text", + "content": " is provable in " + }, + { + "bbox": [ + 85, + 549, + 538, + 577 + ], + "type": "inline_equation", + "content": "\\mathrm{PV}_1" + }, + { + "bbox": [ + 85, + 549, + 538, + 577 + ], + "type": "text", + "content": ". (We will need this additional property of the translation in Section 5.2.2.)" + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 67, + 584, + 539, + 611 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 584, + 539, + 611 + ], + "spans": [ + { + "bbox": [ + 67, + 584, + 539, + 611 + ], + "type": "text", + "content": "For a formula " + }, + { + "bbox": [ + 67, + 584, + 539, + 611 + ], + "type": "inline_equation", + "content": "\\psi(x)" + }, + { + "bbox": [ + 67, + 584, + 539, + 611 + ], + "type": "text", + "content": " as above, we often write " + }, + { + "bbox": [ + 67, + 584, + 539, + 611 + ], + "type": "inline_equation", + "content": "||\\psi||_n" + }, + { + "bbox": [ + 67, + 584, + 539, + 611 + ], + "type": "text", + "content": " to denote the corresponding propositional formula over inputs of length " + }, + { + "bbox": [ + 67, + 584, + 539, + 611 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 67, + 584, + 539, + 611 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 67, + 624, + 539, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 624, + 539, + 651 + ], + "spans": [ + { + "bbox": [ + 67, + 624, + 539, + 651 + ], + "type": "text", + "content": "For more information about the relation between proofs in bounded arithmetic and propositional proofs, including additional examples of propositional translations, we refer to [Bey09, Kra19]." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 67, + 658, + 541, + 681 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 658, + 541, + 681 + ], + "spans": [ + { + "bbox": [ + 67, + 658, + 541, + 681 + ], + "type": "text", + "content": "Conceptually, this is analogous to the translation of a polynomial-time Turing machine " + }, + { + "bbox": [ + 67, + 658, + 541, + 681 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 67, + 658, + 541, + 681 + ], + "type": "text", + "content": " into a sequence " + }, + { + "bbox": [ + 67, + 658, + 541, + 681 + ], + "type": "inline_equation", + "content": "\\{C_n\\}_{n\\geq 1}" + }, + { + "bbox": [ + 67, + 658, + 541, + 681 + ], + "type": "text", + "content": " of polynomial-size Boolean circuits, one for each input length " + }, + { + "bbox": [ + 67, + 658, + 541, + 681 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 67, + 658, + 541, + 681 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 82, + 681, + 386, + 693 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 681, + 386, + 693 + ], + "spans": [ + { + "bbox": [ + 82, + 681, + 386, + 693 + ], + "type": "text", + "content": "5We note that " + }, + { + "bbox": [ + 82, + 681, + 386, + 693 + ], + "type": "inline_equation", + "content": "G_{n}(p_{1},\\ldots ,p_{n})" + }, + { + "bbox": [ + 82, + 681, + 386, + 693 + ], + "type": "text", + "content": " might contain auxiliary variables beyond " + }, + { + "bbox": [ + 82, + 681, + 386, + 693 + ], + "type": "inline_equation", + "content": "p_1,\\dots ,p_n" + }, + { + "bbox": [ + 82, + 681, + 386, + 693 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 302, + 712, + 308, + 720 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 712, + 308, + 720 + ], + "spans": [ + { + "bbox": [ + 302, + 712, + 308, + 720 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 298, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 298, + 85 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 298, + 85 + ], + "type": "text", + "content": "3.3 Cuts of Models of Bounded Arithmetic" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 68, + 94, + 541, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 94, + 541, + 133 + ], + "spans": [ + { + "bbox": [ + 68, + 94, + 541, + 133 + ], + "type": "text", + "content": "Many fundamental results in bounded arithmetic are established using model-theoretic techniques (see, e.g., the exposition of Parikh's Theorem in [Kra95]). We will provide an example in Section 5.2.2. In this section, we include the required background for the result. We assume basic familiarity with model theory." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 134, + 541, + 162 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 134, + 541, + 162 + ], + "spans": [ + { + "bbox": [ + 68, + 134, + 541, + 162 + ], + "type": "text", + "content": "While the definitions and results presented below can be adapted to other theories of bounded arithmetic, we focus on the theory " + }, + { + "bbox": [ + 68, + 134, + 541, + 162 + ], + "type": "inline_equation", + "content": "S_2^1" + }, + { + "bbox": [ + 68, + 134, + 541, + 162 + ], + "type": "text", + "content": " for concreteness." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 167, + 541, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 167, + 541, + 194 + ], + "spans": [ + { + "bbox": [ + 68, + 167, + 541, + 194 + ], + "type": "text", + "content": "Definition 3.3 (Cut in a Model of Arithmetic). A cut in a model " + }, + { + "bbox": [ + 68, + 167, + 541, + 194 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 68, + 167, + 541, + 194 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 68, + 167, + 541, + 194 + ], + "type": "inline_equation", + "content": "\\mathsf{S}_2^1" + }, + { + "bbox": [ + 68, + 167, + 541, + 194 + ], + "type": "text", + "content": " is a nonempty set " + }, + { + "bbox": [ + 68, + 167, + 541, + 194 + ], + "type": "inline_equation", + "content": "I \\subseteq M" + }, + { + "bbox": [ + 68, + 167, + 541, + 194 + ], + "type": "text", + "content": " such that:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 82, + 202, + 321, + 237 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 82, + 202, + 321, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 202, + 321, + 216 + ], + "spans": [ + { + "bbox": [ + 82, + 202, + 321, + 216 + ], + "type": "text", + "content": "1. 
For every " + }, + { + "bbox": [ + 82, + 202, + 321, + 216 + ], + "type": "inline_equation", + "content": "a, b \\in M" + }, + { + "bbox": [ + 82, + 202, + 321, + 216 + ], + "type": "text", + "content": ", if " + }, + { + "bbox": [ + 82, + 202, + 321, + 216 + ], + "type": "inline_equation", + "content": "b \\in I" + }, + { + "bbox": [ + 82, + 202, + 321, + 216 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 82, + 202, + 321, + 216 + ], + "type": "inline_equation", + "content": "a < b" + }, + { + "bbox": [ + 82, + 202, + 321, + 216 + ], + "type": "text", + "content": " then " + }, + { + "bbox": [ + 82, + 202, + 321, + 216 + ], + "type": "inline_equation", + "content": "a \\in I" + }, + { + "bbox": [ + 82, + 202, + 321, + 216 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 82, + 224, + 286, + 237 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 224, + 286, + 237 + ], + "spans": [ + { + "bbox": [ + 82, + 224, + 286, + 237 + ], + "type": "text", + "content": "2. For every " + }, + { + "bbox": [ + 82, + 224, + 286, + 237 + ], + "type": "inline_equation", + "content": "a \\in M" + }, + { + "bbox": [ + 82, + 224, + 286, + 237 + ], + "type": "text", + "content": ", if " + }, + { + "bbox": [ + 82, + 224, + 286, + 237 + ], + "type": "inline_equation", + "content": "a \\in I" + }, + { + "bbox": [ + 82, + 224, + 286, + 237 + ], + "type": "text", + "content": " then " + }, + { + "bbox": [ + 82, + 224, + 286, + 237 + ], + "type": "inline_equation", + "content": "a + 1 \\in I" + }, + { + "bbox": [ + 82, + 224, + 286, + 237 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 69, + 244, + 206, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 244, + 206, + 258 + ], + "spans": [ + { + "bbox": [ + 69, + 244, + 206, + 258 + ], + "type": "text", + "content": "In this case, we write " + }, + { + "bbox": [ + 69, + 244, + 206, + 258 + ], + "type": "inline_equation", + "content": "I \\subseteq_{e} M" + }, + { + "bbox": [ + 69, + 244, + 206, + 258 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 85, + 265, + 495, + 278 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 265, + 495, + 278 + ], + "spans": [ + { + "bbox": [ + 85, + 265, + 495, + 278 + ], + "type": "text", + "content": "Note that a cut is not necessarily closed under operations such as addition and multiplication." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 68, + 285, + 542, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 285, + 542, + 327 + ], + "spans": [ + { + "bbox": [ + 68, + 285, + 542, + 327 + ], + "type": "text", + "content": "Claim 3.4. Let " + }, + { + "bbox": [ + 68, + 285, + 542, + 327 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 68, + 285, + 542, + 327 + ], + "type": "text", + "content": " be a model of " + }, + { + "bbox": [ + 68, + 285, + 542, + 327 + ], + "type": "inline_equation", + "content": "S_2^1" + }, + { + "bbox": [ + 68, + 285, + 542, + 327 + ], + "type": "text", + "content": ", and let " + }, + { + "bbox": [ + 68, + 285, + 542, + 327 + ], + "type": "inline_equation", + "content": "I \\subseteq_e M" + }, + { + "bbox": [ + 68, + 285, + 542, + 327 + ], + "type": "text", + "content": ". 
Moreover, assume that " + }, + { + "bbox": [ + 68, + 285, + 542, + 327 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 68, + 285, + 542, + 327 + ], + "type": "text", + "content": " is closed under " + }, + { + "bbox": [ + 68, + 285, + 542, + 327 + ], + "type": "inline_equation", + "content": "+, \\cdot" + }, + { + "bbox": [ + 68, + 285, + 542, + 327 + ], + "type": "text", + "content": ", and # operations. Let " + }, + { + "bbox": [ + 68, + 285, + 542, + 327 + ], + "type": "inline_equation", + "content": "\\varphi(a, \\vec{b})" + }, + { + "bbox": [ + 68, + 285, + 542, + 327 + ], + "type": "text", + "content": " be a bounded formula with all free variables displayed. Let " + }, + { + "bbox": [ + 68, + 285, + 542, + 327 + ], + "type": "inline_equation", + "content": "\\vec{v}" + }, + { + "bbox": [ + 68, + 285, + 542, + 327 + ], + "type": "text", + "content": " be elements of " + }, + { + "bbox": [ + 68, + 285, + 542, + 327 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 68, + 285, + 542, + 327 + ], + "type": "text", + "content": ". 
Then for every " + }, + { + "bbox": [ + 68, + 285, + 542, + 327 + ], + "type": "inline_equation", + "content": "u \\in I" + }, + { + "bbox": [ + 68, + 285, + 542, + 327 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 220, + 327, + 389, + 342 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 220, + 327, + 389, + 342 + ], + "spans": [ + { + "bbox": [ + 220, + 327, + 389, + 342 + ], + "type": "interline_equation", + "content": "I \\vDash \\varphi (u, \\vec {v}) \\quad \\Longleftrightarrow \\quad M \\vDash \\varphi (u, \\vec {v}).", + "image_path": "56ac5bb88cdef5b62d57e1f085108ca60d496943a43c6de07136626b9a27bc0b.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 68, + 348, + 541, + 374 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 348, + 541, + 374 + ], + "spans": [ + { + "bbox": [ + 68, + 348, + 541, + 374 + ], + "type": "text", + "content": "Claim 3.4 can be proved by induction on the complexity of " + }, + { + "bbox": [ + 68, + 348, + 541, + 374 + ], + "type": "inline_equation", + "content": "\\varphi" + }, + { + "bbox": [ + 68, + 348, + 541, + 374 + ], + "type": "text", + "content": ". Using the claim, one can establish the following lemma." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 68, + 381, + 541, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 381, + 541, + 410 + ], + "spans": [ + { + "bbox": [ + 68, + 381, + 541, + 410 + ], + "type": "text", + "content": "Lemma 3.5. 
Let " + }, + { + "bbox": [ + 68, + 381, + 541, + 410 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 68, + 381, + 541, + 410 + ], + "type": "text", + "content": " be a model of " + }, + { + "bbox": [ + 68, + 381, + 541, + 410 + ], + "type": "inline_equation", + "content": "\\mathsf{S}_2^1" + }, + { + "bbox": [ + 68, + 381, + 541, + 410 + ], + "type": "text", + "content": ", and let " + }, + { + "bbox": [ + 68, + 381, + 541, + 410 + ], + "type": "inline_equation", + "content": "I \\subseteq_{e} M" + }, + { + "bbox": [ + 68, + 381, + 541, + 410 + ], + "type": "text", + "content": ". Moreover, assume that " + }, + { + "bbox": [ + 68, + 381, + 541, + 410 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 68, + 381, + 541, + 410 + ], + "type": "text", + "content": " is closed under " + }, + { + "bbox": [ + 68, + 381, + 541, + 410 + ], + "type": "inline_equation", + "content": "+, \\cdot," + }, + { + "bbox": [ + 68, + 381, + 541, + 410 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 68, + 381, + 541, + 410 + ], + "type": "inline_equation", + "content": "\\#" + }, + { + "bbox": [ + 68, + 381, + 541, + 410 + ], + "type": "text", + "content": " operations. Then " + }, + { + "bbox": [ + 68, + 381, + 541, + 410 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 68, + 381, + 541, + 410 + ], + "type": "text", + "content": " is a model of " + }, + { + "bbox": [ + 68, + 381, + 541, + 410 + ], + "type": "inline_equation", + "content": "\\mathsf{S}_2^1" + }, + { + "bbox": [ + 68, + 381, + 541, + 410 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 68, + 416, + 541, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 416, + 541, + 456 + ], + "spans": [ + { + "bbox": [ + 68, + 416, + 541, + 456 + ], + "type": "text", + "content": "Since it is not hard to check that a cut " + }, + { + "bbox": [ + 68, + 416, + 541, + 456 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 68, + 416, + 541, + 456 + ], + "type": "text", + "content": " as above satisfies the BASIC axioms of " + }, + { + "bbox": [ + 68, + 416, + 541, + 456 + ], + "type": "inline_equation", + "content": "S_2^1" + }, + { + "bbox": [ + 68, + 416, + 541, + 456 + ], + "type": "text", + "content": ", the proof of Lemma 3.5 essentially amounts to verifying that " + }, + { + "bbox": [ + 68, + 416, + 541, + 456 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 68, + 416, + 541, + 456 + ], + "type": "text", + "content": " satisfies the corresponding induction principle (see, e.g., [Kra95, Lemma 5.1.3] for a similar argument)." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 85, + 456, + 459, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 456, + 459, + 471 + ], + "spans": [ + { + "bbox": [ + 85, + 456, + 459, + 471 + ], + "type": "text", + "content": "For a model " + }, + { + "bbox": [ + 85, + 456, + 459, + 471 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 85, + 456, + 459, + 471 + ], + "type": "text", + "content": ", we say that " + }, + { + "bbox": [ + 85, + 456, + 459, + 471 + ], + "type": "inline_equation", + "content": "n \\in M" + }, + { + "bbox": [ + 85, + 456, + 459, + 471 + ], + "type": "text", + "content": " is a length if there is " + }, + { + "bbox": [ + 85, + 456, + 459, + 471 + ], + "type": "inline_equation", + "content": "N \\in M" + }, + { + "bbox": [ + 85, + 456, + 459, + 471 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 85, + 456, + 459, + 471 + ], + "type": "inline_equation", + "content": "n = |N|" + }, + { + "bbox": [ + 85, + 456, + 459, + 471 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 68, + 477, + 541, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 477, + 541, + 518 + ], + "spans": [ + { + "bbox": [ + 68, + 477, + 541, + 518 + ], + "type": "text", + "content": "Lemma 3.6. Let " + }, + { + "bbox": [ + 68, + 477, + 541, + 518 + ], + "type": "inline_equation", + "content": "M_0" + }, + { + "bbox": [ + 68, + 477, + 541, + 518 + ], + "type": "text", + "content": " be a nonstandard countable model of " + }, + { + "bbox": [ + 68, + 477, + 541, + 518 + ], + "type": "inline_equation", + "content": "\\mathsf{S}_2^1" + }, + { + "bbox": [ + 68, + 477, + 541, + 518 + ], + "type": "text", + "content": ". 
Then there is a (countable) cut " + }, + { + "bbox": [ + 68, + 477, + 541, + 518 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 68, + 477, + 541, + 518 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 68, + 477, + 541, + 518 + ], + "type": "inline_equation", + "content": "M_0" + }, + { + "bbox": [ + 68, + 477, + 541, + 518 + ], + "type": "text", + "content": " that is a model of " + }, + { + "bbox": [ + 68, + 477, + 541, + 518 + ], + "type": "inline_equation", + "content": "\\mathsf{S}_2^1" + }, + { + "bbox": [ + 68, + 477, + 541, + 518 + ], + "type": "text", + "content": " and a length " + }, + { + "bbox": [ + 68, + 477, + 541, + 518 + ], + "type": "inline_equation", + "content": "n \\in M" + }, + { + "bbox": [ + 68, + 477, + 541, + 518 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 68, + 477, + 541, + 518 + ], + "type": "inline_equation", + "content": "n = |e|" + }, + { + "bbox": [ + 68, + 477, + 541, + 518 + ], + "type": "text", + "content": " for some nonstandard " + }, + { + "bbox": [ + 68, + 477, + 541, + 518 + ], + "type": "inline_equation", + "content": "e \\in M" + }, + { + "bbox": [ + 68, + 477, + 541, + 518 + ], + "type": "text", + "content": ", for which the following holds. For every " + }, + { + "bbox": [ + 68, + 477, + 541, + 518 + ], + "type": "inline_equation", + "content": "b \\in M" + }, + { + "bbox": [ + 68, + 477, + 541, + 518 + ], + "type": "text", + "content": " there is a standard number " + }, + { + "bbox": [ + 68, + 477, + 541, + 518 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 68, + 477, + 541, + 518 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 68, + 477, + 541, + 518 + ], + "type": "inline_equation", + "content": "M \\models |b| \\leq n^k" + }, + { + "bbox": [ + 68, + 477, + 541, + 518 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 68, + 525, + 372, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 525, + 372, + 538 + ], + "spans": [ + { + "bbox": [ + 68, + 525, + 372, + 538 + ], + "type": "text", + "content": "Proof. Let " + }, + { + "bbox": [ + 68, + 525, + 372, + 538 + ], + "type": "inline_equation", + "content": "e \\in M_0" + }, + { + "bbox": [ + 68, + 525, + 372, + 538 + ], + "type": "text", + "content": " be nonstandard, and let " + }, + { + "bbox": [ + 68, + 525, + 372, + 538 + ], + "type": "inline_equation", + "content": "n \\triangleq |e|" + }, + { + "bbox": [ + 68, + 525, + 372, + 538 + ], + "type": "text", + "content": ". Consider the set" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 192, + 546, + 417, + 562 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 192, + 546, + 417, + 562 + ], + "spans": [ + { + "bbox": [ + 192, + 546, + 417, + 562 + ], + "type": "interline_equation", + "content": "I _ {e} \\triangleq \\left\\{a \\in M _ {0} \\mid a \\leq t (e) \\text {f o r s o m e} \\mathcal {L} _ {\\mathrm {B}} \\text {- t e r m} t (x) \\right\\},", + "image_path": "b5c9afe59b4dccf1a42f8b72e20121e99efe9a248bbba6067906f88014c41a74.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 68, + 569, + 541, + 637 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 569, + 541, + 637 + ], + "spans": [ + { + "bbox": [ + 68, + 569, + 541, + 637 + ], + "type": "text", + "content": "where we compare elements with respect to the interpretation of the relation symbol " + }, + { + "bbox": [ + 68, + 569, + 541, + 637 + ], + "type": "inline_equation", + "content": "\\leq" + }, + { + "bbox": [ + 68, + 569, + 541, + 637 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 68, + 569, + 541, + 637 + ], + "type": "inline_equation", + "content": "M_0" + }, + { + "bbox": [ + 68, + 569, + 541, + 637 + ], + "type": "text", + "content": ". 
Note that " + }, + { + "bbox": [ + 68, + 569, + 541, + 637 + ], + "type": "inline_equation", + "content": "I_e" + }, + { + "bbox": [ + 68, + 569, + 541, + 637 + ], + "type": "text", + "content": " is a cut of " + }, + { + "bbox": [ + 68, + 569, + 541, + 637 + ], + "type": "inline_equation", + "content": "M_0" + }, + { + "bbox": [ + 68, + 569, + 541, + 637 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 68, + 569, + 541, + 637 + ], + "type": "inline_equation", + "content": "e \\in I_e" + }, + { + "bbox": [ + 68, + 569, + 541, + 637 + ], + "type": "text", + "content": ". Moreover, it is not hard to check that it is closed under addition, multiplication, and smash operations. By Lemma 3.5, " + }, + { + "bbox": [ + 68, + 569, + 541, + 637 + ], + "type": "inline_equation", + "content": "I_e" + }, + { + "bbox": [ + 68, + 569, + 541, + 637 + ], + "type": "text", + "content": " is a model of " + }, + { + "bbox": [ + 68, + 569, + 541, + 637 + ], + "type": "inline_equation", + "content": "\\mathbb{S}_2^1" + }, + { + "bbox": [ + 68, + 569, + 541, + 637 + ], + "type": "text", + "content": ". Finally, by construction, for every " + }, + { + "bbox": [ + 68, + 569, + 541, + 637 + ], + "type": "inline_equation", + "content": "b \\in I_e" + }, + { + "bbox": [ + 68, + 569, + 541, + 637 + ], + "type": "text", + "content": " we have " + }, + { + "bbox": [ + 68, + 569, + 541, + 637 + ], + "type": "inline_equation", + "content": "b \\leq t(e)" + }, + { + "bbox": [ + 68, + 569, + 541, + 637 + ], + "type": "text", + "content": " for some " + }, + { + "bbox": [ + 68, + 569, + 541, + 637 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathsf{B}}" + }, + { + "bbox": [ + 68, + 569, + 541, + 637 + ], + "type": "text", + "content": "-term " + }, + { + "bbox": [ + 68, + 569, + 541, + 637 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 68, + 569, + 541, + 637 + ], + "type": "text", + "content": ". 
A simple induction on the structure of " + }, + { + "bbox": [ + 68, + 569, + 541, + 637 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 68, + 569, + 541, + 637 + ], + "type": "text", + "content": " shows the existence of a standard number " + }, + { + "bbox": [ + 68, + 569, + 541, + 637 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 68, + 569, + 541, + 637 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 68, + 569, + 541, + 637 + ], + "type": "inline_equation", + "content": "|b| \\leq n^k" + }, + { + "bbox": [ + 68, + 569, + 541, + 637 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 68, + 569, + 541, + 637 + ], + "type": "inline_equation", + "content": "I_e" + }, + { + "bbox": [ + 68, + 569, + 541, + 637 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 85, + 646, + 288, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 646, + 288, + 658 + ], + "spans": [ + { + "bbox": [ + 85, + 646, + 288, + 658 + ], + "type": "text", + "content": "Finally, we will need the following definition." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 68, + 666, + 541, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 666, + 541, + 693 + ], + "spans": [ + { + "bbox": [ + 68, + 666, + 541, + 693 + ], + "type": "text", + "content": "Definition 3.7 (Cofinal extension). 
We say that an extension " + }, + { + "bbox": [ + 68, + 666, + 541, + 693 + ], + "type": "inline_equation", + "content": "M'" + }, + { + "bbox": [ + 68, + 666, + 541, + 693 + ], + "type": "text", + "content": " of a model " + }, + { + "bbox": [ + 68, + 666, + 541, + 693 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 68, + 666, + 541, + 693 + ], + "type": "text", + "content": " is cofinal (or " + }, + { + "bbox": [ + 68, + 666, + 541, + 693 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 68, + 666, + 541, + 693 + ], + "type": "text", + "content": " is cofinal in " + }, + { + "bbox": [ + 68, + 666, + 541, + 693 + ], + "type": "inline_equation", + "content": "M'" + }, + { + "bbox": [ + 68, + 666, + 541, + 693 + ], + "type": "text", + "content": ") if for every " + }, + { + "bbox": [ + 68, + 666, + 541, + 693 + ], + "type": "inline_equation", + "content": "a \\in M'" + }, + { + "bbox": [ + 68, + 666, + 541, + 693 + ], + "type": "text", + "content": " there is " + }, + { + "bbox": [ + 68, + 666, + 541, + 693 + ], + "type": "inline_equation", + "content": "b \\in M" + }, + { + "bbox": [ + 68, + 666, + 541, + 693 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 68, + 666, + 541, + 693 + ], + "type": "inline_equation", + "content": "a \\leq b" + }, + { + "bbox": [ + 68, + 666, + 541, + 693 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 68, + 666, + 541, + 693 + ], + "type": "inline_equation", + "content": "M'" + }, + { + "bbox": [ + 68, + 666, + 541, + 693 + ], + "type": "text", + "content": ". If this is the case, we write " + }, + { + "bbox": [ + 68, + 666, + 541, + 693 + ], + "type": "inline_equation", + "content": "M' \\supseteq_{\\mathrm{cf}} M" + }, + { + "bbox": [ + 68, + 666, + 541, + 693 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 712, + 309, + 720 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 712, + 309, + 720 + ], + "spans": [ + { + "bbox": [ + 302, + 712, + 309, + 720 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 70, + 320, + 86 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 70, + 320, + 86 + ], + "spans": [ + { + "bbox": [ + 68, + 70, + 320, + 86 + ], + "type": "text", + "content": "4 The Strength of Bounded Arithmetic" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 97, + 541, + 151 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 97, + 541, + 151 + ], + "spans": [ + { + "bbox": [ + 67, + 97, + 541, + 151 + ], + "type": "text", + "content": "In connection with the fundamental research goal mentioned in Section 1, research on the provability of complexity bounds has achieved significant progress on two complementary fronts: the formalization of several established results from algorithms and complexity within theories of bounded arithmetic, and the unprovability of complexity bounds in the same theories, often conditional on a computational assumption." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 152, + 541, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 152, + 541, + 191 + ], + "spans": [ + { + "bbox": [ + 67, + 152, + 541, + 191 + ], + "type": "text", + "content": "In Section 4.1, we explore what it means to formalize results from algorithms and complexity theory within the framework of bounded arithmetic, highlighting some of the nuances involved. 
In Section 4.2, we present some concrete details of the formalization of a formula lower bound in " + }, + { + "bbox": [ + 67, + 152, + 541, + 191 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 152, + 541, + 191 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 208, + 398, + 222 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 208, + 398, + 222 + ], + "spans": [ + { + "bbox": [ + 68, + 208, + 398, + 222 + ], + "type": "text", + "content": "4.1 Formalization of Results from Algorithms and Complexity" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 229, + 541, + 309 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 229, + 541, + 309 + ], + "spans": [ + { + "bbox": [ + 67, + 229, + 541, + 309 + ], + "type": "text", + "content": "Several central theorems from mathematics and computer science can be proved in bounded arithmetic. They include results from number theory [Woo81, PWW88], graph theory and extremal combinatorics [Oja04], randomized algorithms and probabilistic arguments [Jer05, LC11, Lé14], probabilistic checkable proofs [Pic15b], circuit lower bounds [MP20], expander graphs [BKKK20], linear algebra [TC21], Zhuk's CSP algorithm [Gay23, Gay24], etc. The reader can find numerous other examples in [CN10, Kra19, MP20] and references therein." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 311, + 541, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 311, + 541, + 378 + ], + "spans": [ + { + "bbox": [ + 67, + 311, + 541, + 378 + ], + "type": "text", + "content": "In some cases, the formalization of an existing result in bounded arithmetic is straightforward, specially once an appropriate framework has been developed (e.g., the approximate counting framework of [Jér07], which enables the use of tools from probability theory in " + }, + { + "bbox": [ + 67, + 311, + 541, + 378 + ], + "type": "inline_equation", + "content": "\\mathsf{APC}_1" + }, + { + "bbox": [ + 67, + 311, + 541, + 378 + ], + "type": "text", + "content": "). However, sometimes one needs to discover a new proof whose concepts can be defined in the theory and their associated properties established using the available inductive axioms (e.g., Razborov's formalization of the Switching Lemma [Raz95a])." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 379, + 541, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 379, + 541, + 418 + ], + "spans": [ + { + "bbox": [ + 67, + 379, + 541, + 418 + ], + "type": "text", + "content": "We provide two instructive examples below. The first is a consequence of the formalization of the PCP Theorem in " + }, + { + "bbox": [ + 67, + 379, + 541, + 418 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 379, + 541, + 418 + ], + "type": "text", + "content": ", while the second concerns different ways of formulating a circuit lower bound statement in bounded arithmetic." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 435, + 541, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 435, + 541, + 517 + ], + "spans": [ + { + "bbox": [ + 67, + 435, + 541, + 517 + ], + "type": "text", + "content": "The PCP Theorem in " + }, + { + "bbox": [ + 67, + 435, + 541, + 517 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 435, + 541, + 517 + ], + "type": "text", + "content": ". Pich [Pic15b] proved the PCP Theorem in " + }, + { + "bbox": [ + 67, + 435, + 541, + 517 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 435, + 541, + 517 + ], + "type": "text", + "content": " by formalizing Dinur's proof [Din07]. Exploiting the standard connection between PCPs and hardness of approximation, Pich's result can be used to show that " + }, + { + "bbox": [ + 67, + 435, + 541, + 517 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 435, + 541, + 517 + ], + "type": "text", + "content": " establishes the NP-hardness of approximating the value of a " + }, + { + "bbox": [ + 67, + 435, + 541, + 517 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 67, + 435, + 541, + 517 + ], + "type": "text", + "content": "-SAT instance. 
This means in particular that, for a suitable " + }, + { + "bbox": [ + 67, + 435, + 541, + 517 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathsf{PV}}" + }, + { + "bbox": [ + 67, + 435, + 541, + 517 + ], + "type": "text", + "content": "-function symbol " + }, + { + "bbox": [ + 67, + 435, + 541, + 517 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 67, + 435, + 541, + 517 + ], + "type": "text", + "content": " obtained from Dinur's argument, " + }, + { + "bbox": [ + 67, + 435, + 541, + 517 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 435, + 541, + 517 + ], + "type": "text", + "content": " proves that " + }, + { + "bbox": [ + 67, + 435, + 541, + 517 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 67, + 435, + 541, + 517 + ], + "type": "text", + "content": " is a gap-inducing reduction from the Boolean Formula Satisfiability Problem to " + }, + { + "bbox": [ + 67, + 435, + 541, + 517 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 67, + 435, + 541, + 517 + ], + "type": "text", + "content": "-SAT (for a sufficiently large " + }, + { + "bbox": [ + 67, + 435, + 541, + 517 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 67, + 435, + 541, + 517 + ], + "type": "text", + "content": "):" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 117, + 525, + 489, + 572 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 525, + 489, + 572 + ], + "spans": [ + { + "bbox": [ + 117, + 525, + 489, + 572 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathrm {P V} _ {1} \\vdash \\forall \\varphi \\left(\\operatorname {F l a} (\\varphi) \\wedge \\exists y \\operatorname {S a t} (\\varphi , y) \\rightarrow k - C N F (f (\\varphi)) \\wedge \\exists z \\operatorname {S a t} (f (\\varphi), z)\\right) \\\\ \\mathrm {P V} _ {1} \\vdash \\forall \\varphi 
\\left(\\operatorname {F l a} (\\varphi) \\wedge \\forall y \\neg \\operatorname {S a t} (\\varphi , y) \\rightarrow k - \\operatorname {C N F} (f (\\varphi)) \\wedge \\forall z \\operatorname {V a l u e} _ {\\leq 1 - \\delta} (f (\\varphi), z)\\right) \\\\ \\end{array}", + "image_path": "61b2543cf59e3a45cc3cc0df307cd1ce6e20839aa6386a8f2cbb2bede5fccc80.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 580, + 541, + 634 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 580, + 541, + 634 + ], + "spans": [ + { + "bbox": [ + 67, + 580, + 541, + 634 + ], + "type": "text", + "content": "where all the expressions are quantifier-free " + }, + { + "bbox": [ + 67, + 580, + 541, + 634 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{PV}}" + }, + { + "bbox": [ + 67, + 580, + 541, + 634 + ], + "type": "text", + "content": "-formulas: " + }, + { + "bbox": [ + 67, + 580, + 541, + 634 + ], + "type": "inline_equation", + "content": "\\mathsf{Fla}(x)" + }, + { + "bbox": [ + 67, + 580, + 541, + 634 + ], + "type": "text", + "content": " checks if " + }, + { + "bbox": [ + 67, + 580, + 541, + 634 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 67, + 580, + 541, + 634 + ], + "type": "text", + "content": " is a valid description of a Boolean formula, " + }, + { + "bbox": [ + 67, + 580, + 541, + 634 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 67, + 580, + 541, + 634 + ], + "type": "text", + "content": "-CNF(x) checks if " + }, + { + "bbox": [ + 67, + 580, + 541, + 634 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 67, + 580, + 541, + 634 + ], + "type": "text", + "content": " is a valid description of a " + }, + { + "bbox": [ + 67, + 580, + 541, + 634 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 67, + 580, + 541, + 634 + ], + "type": "text", + "content": "-CNF, " + }, + { + "bbox": [ + 67, + 580, + 541, + 634 + ], + 
"type": "inline_equation", + "content": "\\mathsf{Sat}(u,v)" + }, + { + "bbox": [ + 67, + 580, + 541, + 634 + ], + "type": "text", + "content": " checks if " + }, + { + "bbox": [ + 67, + 580, + 541, + 634 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 67, + 580, + 541, + 634 + ], + "type": "text", + "content": " is a satisfying assignment for " + }, + { + "bbox": [ + 67, + 580, + 541, + 634 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 67, + 580, + 541, + 634 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 67, + 580, + 541, + 634 + ], + "type": "inline_equation", + "content": "\\mathsf{Value}_{\\leq 1 - \\delta}(u,v)" + }, + { + "bbox": [ + 67, + 580, + 541, + 634 + ], + "type": "text", + "content": " holds if " + }, + { + "bbox": [ + 67, + 580, + 541, + 634 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 67, + 580, + 541, + 634 + ], + "type": "text", + "content": " satisfies at most a " + }, + { + "bbox": [ + 67, + 580, + 541, + 634 + ], + "type": "inline_equation", + "content": "(1 - \\delta)" + }, + { + "bbox": [ + 67, + 580, + 541, + 634 + ], + "type": "text", + "content": "-fraction of the clauses in " + }, + { + "bbox": [ + 67, + 580, + 541, + 634 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 67, + 580, + 541, + 634 + ], + "type": "text", + "content": " (with " + }, + { + "bbox": [ + 67, + 580, + 541, + 634 + ], + "type": "inline_equation", + "content": "\\delta > 0" + }, + { + "bbox": [ + 67, + 580, + 541, + 634 + ], + "type": "text", + "content": " being a universal constant from the formalized Dinur's proof)." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 68, + 635, + 541, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 635, + 541, + 689 + ], + "spans": [ + { + "bbox": [ + 68, + 635, + 541, + 689 + ], + "type": "text", + "content": "In the formalization the key point is that " + }, + { + "bbox": [ + 68, + 635, + 541, + 689 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 68, + 635, + 541, + 689 + ], + "type": "text", + "content": " proves that the function symbol " + }, + { + "bbox": [ + 68, + 635, + 541, + 689 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 68, + 635, + 541, + 689 + ], + "type": "text", + "content": " behaves as expected. In practice, in order to achieve this, a typical formalization is presented in a semi-formal way, and might claim on a few occasions that some algorithm " + }, + { + "bbox": [ + 68, + 635, + 541, + 689 + ], + "type": "inline_equation", + "content": "f_1" + }, + { + "bbox": [ + 68, + 635, + 541, + 689 + ], + "type": "text", + "content": " constructed in a particular way from another algorithm " + }, + { + "bbox": [ + 68, + 635, + 541, + 689 + ], + "type": "inline_equation", + "content": "f_2" + }, + { + "bbox": [ + 68, + 635, + 541, + 689 + ], + "type": "text", + "content": " can be defined in " + }, + { + "bbox": [ + 68, + 635, + 541, + 689 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 68, + 635, + 541, + 689 + ], + "type": "text", + "content": ". This means that " + }, + { + "bbox": [ + 68, + 635, + 541, + 689 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 68, + 635, + 541, + 689 + ], + "type": "text", + "content": " proves that " + }, + { + "bbox": [ + 68, + 635, + 541, + 689 + ], + "type": "inline_equation", + "content": "f_1" + }, + { + "bbox": [ + 68, + 635, + 541, + 689 + ], + "type": "text", + "content": " behaves as described in the definition." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 712, + 308, + 720 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 712, + 308, + 720 + ], + "spans": [ + { + "bbox": [ + 302, + 712, + 308, + 720 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 543, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 543, + 140 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 543, + 140 + ], + "type": "text", + "content": "This is possible thanks to Cobham's characterization of FP and the axioms of " + }, + { + "bbox": [ + 67, + 72, + 543, + 140 + ], + "type": "inline_equation", + "content": "\\mathrm{PV}_1" + }, + { + "bbox": [ + 67, + 72, + 543, + 140 + ], + "type": "text", + "content": ", which ensure that the theory \"understands\" how different algorithms are constructed from one another. In many cases, the verification that " + }, + { + "bbox": [ + 67, + 72, + 543, + 140 + ], + "type": "inline_equation", + "content": "\\mathrm{PV}_1" + }, + { + "bbox": [ + 67, + 72, + 543, + 140 + ], + "type": "text", + "content": " proves the desired properties is straightforward but tedious, requiring some initial setup of basic capabilities of " + }, + { + "bbox": [ + 67, + 72, + 543, + 140 + ], + "type": "inline_equation", + "content": "\\mathrm{PV}_1" + }, + { + "bbox": [ + 67, + 72, + 543, + 140 + ], + "type": "text", + "content": " (often referred to as \"bootstrapping\") which is part of the standard background in bounded arithmetic." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 156, + 541, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 156, + 541, + 198 + ], + "spans": [ + { + "bbox": [ + 67, + 156, + 541, + 198 + ], + "type": "text", + "content": "Circuit Lower Bound Statements. 
We discuss two ways of formalizing a complexity lower bound. In this example, for a given size bound " + }, + { + "bbox": [ + 67, + 156, + 541, + 198 + ], + "type": "inline_equation", + "content": "s(n)" + }, + { + "bbox": [ + 67, + 156, + 541, + 198 + ], + "type": "text", + "content": " (e.g., " + }, + { + "bbox": [ + 67, + 156, + 541, + 198 + ], + "type": "inline_equation", + "content": "s(n) = n^2" + }, + { + "bbox": [ + 67, + 156, + 541, + 198 + ], + "type": "text", + "content": "), we consider an " + }, + { + "bbox": [ + 67, + 156, + 541, + 198 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{PV}}" + }, + { + "bbox": [ + 67, + 156, + 541, + 198 + ], + "type": "text", + "content": "-sentence " + }, + { + "bbox": [ + 67, + 156, + 541, + 198 + ], + "type": "inline_equation", + "content": "\\mathsf{FLB}_s^\\oplus" + }, + { + "bbox": [ + 67, + 156, + 541, + 198 + ], + "type": "text", + "content": " stating that Boolean formulas for the parity function on " + }, + { + "bbox": [ + 67, + 156, + 541, + 198 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 67, + 156, + 541, + 198 + ], + "type": "text", + "content": " bits require at least " + }, + { + "bbox": [ + 67, + 156, + 541, + 198 + ], + "type": "inline_equation", + "content": "s(n)" + }, + { + "bbox": [ + 67, + 156, + 541, + 198 + ], + "type": "text", + "content": " leaves:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 87, + 207, + 520, + 222 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 207, + 520, + 222 + ], + "spans": [ + { + "bbox": [ + 87, + 207, + 520, + 222 + ], + "type": "interline_equation", + "content": "\\forall N \\forall n \\forall F (n = | N | \\wedge n \\geq 1 \\wedge \\mathsf {F l a} (F) \\wedge \\mathsf {S i z e} (F) < s (n) \\rightarrow \\exists x (| x | \\leq n \\wedge \\mathsf {E v a l} (F, x) \\neq \\oplus (x)),", + "image_path": 
"97e19c7227dc6a422910962387040c1a3ea3553ff701001be225f0665f2b0fca.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 232, + 541, + 273 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 232, + 541, + 273 + ], + "spans": [ + { + "bbox": [ + 67, + 232, + 541, + 273 + ], + "type": "text", + "content": "where we identify " + }, + { + "bbox": [ + 67, + 232, + 541, + 273 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 67, + 232, + 541, + 273 + ], + "type": "text", + "content": "-bit strings with natural numbers of length at most " + }, + { + "bbox": [ + 67, + 232, + 541, + 273 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 67, + 232, + 541, + 273 + ], + "type": "text", + "content": ", and employ a well-behaved " + }, + { + "bbox": [ + 67, + 232, + 541, + 273 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{PV}}" + }, + { + "bbox": [ + 67, + 232, + 541, + 273 + ], + "type": "text", + "content": "-function symbol " + }, + { + "bbox": [ + 67, + 232, + 541, + 273 + ], + "type": "inline_equation", + "content": "\\oplus" + }, + { + "bbox": [ + 67, + 232, + 541, + 273 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 67, + 232, + 541, + 273 + ], + "type": "inline_equation", + "content": "\\mathrm{PV}_1" + }, + { + "bbox": [ + 67, + 232, + 541, + 273 + ], + "type": "text", + "content": " proves the basic properties of the parity function, e.g., " + }, + { + "bbox": [ + 67, + 232, + 541, + 273 + ], + "type": "inline_equation", + "content": "\\mathrm{PV}_1 \\vdash \\oplus (x1) = 1 - \\oplus (x)" + }, + { + "bbox": [ + 67, + 232, + 541, + 273 + ], + "type": "text", + "content": ".6" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 274, + 541, + 341 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 274, + 541, + 341 + ], + "spans": [ + { + "bbox": [ + 67, + 274, + 541, + 341 + ], + "type": "text", + "content": "Note 
that " + }, + { + "bbox": [ + 67, + 274, + 541, + 341 + ], + "type": "inline_equation", + "content": "\\mathsf{FLB}_s^\\oplus" + }, + { + "bbox": [ + 67, + 274, + 541, + 341 + ], + "type": "text", + "content": " is a " + }, + { + "bbox": [ + 67, + 274, + 541, + 341 + ], + "type": "inline_equation", + "content": "\\forall \\Sigma_1^b" + }, + { + "bbox": [ + 67, + 274, + 541, + 341 + ], + "type": "text", + "content": "-sentence. Consequently, if " + }, + { + "bbox": [ + 67, + 274, + 541, + 341 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1 \\vdash \\mathsf{FLB}_s^\\oplus" + }, + { + "bbox": [ + 67, + 274, + 541, + 341 + ], + "type": "text", + "content": ", we obtain via Herbrand's Theorem (Theorem 3.1) a polynomial-time algorithm " + }, + { + "bbox": [ + 67, + 274, + 541, + 341 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 67, + 274, + 541, + 341 + ], + "type": "text", + "content": " that, when given " + }, + { + "bbox": [ + 67, + 274, + 541, + 341 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 67, + 274, + 541, + 341 + ], + "type": "text", + "content": " of length " + }, + { + "bbox": [ + 67, + 274, + 541, + 341 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 67, + 274, + 541, + 341 + ], + "type": "text", + "content": " and the description of an " + }, + { + "bbox": [ + 67, + 274, + 541, + 341 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 67, + 274, + 541, + 341 + ], + "type": "text", + "content": "-bit formula " + }, + { + "bbox": [ + 67, + 274, + 541, + 341 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 67, + 274, + 541, + 341 + ], + "type": "text", + "content": " of size " + }, + { + "bbox": [ + 67, + 274, + 541, + 341 + ], + "type": "inline_equation", + "content": "< s(n)" + }, + { + "bbox": [ + 67, + 274, + 541, + 341 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 67, + 274, + 541, + 341 + ], + 
"type": "inline_equation", + "content": "A(N,F)" + }, + { + "bbox": [ + 67, + 274, + 541, + 341 + ], + "type": "text", + "content": " outputs a string " + }, + { + "bbox": [ + 67, + 274, + 541, + 341 + ], + "type": "inline_equation", + "content": "x \\in \\{0,1\\}^n" + }, + { + "bbox": [ + 67, + 274, + 541, + 341 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 67, + 274, + 541, + 341 + ], + "type": "inline_equation", + "content": "F(x) \\neq \\oplus(x)" + }, + { + "bbox": [ + 67, + 274, + 541, + 341 + ], + "type": "text", + "content": ". In other words, circuit lower bounds provable in " + }, + { + "bbox": [ + 67, + 274, + 541, + 341 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 274, + 541, + 341 + ], + "type": "text", + "content": " are constructive in the sense that they also provide an efficient refuter witnessing that " + }, + { + "bbox": [ + 67, + 274, + 541, + 341 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 67, + 274, + 541, + 341 + ], + "type": "text", + "content": " does not compute parity (see [CJSW21] for more on this topic)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 341, + 541, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 341, + 541, + 422 + ], + "spans": [ + { + "bbox": [ + 67, + 341, + 541, + 422 + ], + "type": "text", + "content": "The aforementioned formalization is informally referred to as a \"Log\" formalization of circuit lower bounds. 
This is because the main parameter " + }, + { + "bbox": [ + 67, + 341, + 541, + 422 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 67, + 341, + 541, + 422 + ], + "type": "text", + "content": " is the length of a variable " + }, + { + "bbox": [ + 67, + 341, + 541, + 422 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 67, + 341, + 541, + 422 + ], + "type": "text", + "content": " and all objects quantified over are of length polynomial in " + }, + { + "bbox": [ + 67, + 341, + 541, + 422 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 67, + 341, + 541, + 422 + ], + "type": "text", + "content": ". It is also possible to consider a formalization where " + }, + { + "bbox": [ + 67, + 341, + 541, + 422 + ], + "type": "inline_equation", + "content": "n = ||N||" + }, + { + "bbox": [ + 67, + 341, + 541, + 422 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 67, + 341, + 541, + 422 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 67, + 341, + 541, + 422 + ], + "type": "text", + "content": " is the length of the length of " + }, + { + "bbox": [ + 67, + 341, + 541, + 422 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 67, + 341, + 541, + 422 + ], + "type": "text", + "content": "), which is known as a \"LogLog\" formalization. This allows us to quantify over exponentially larger objects, e.g., under such a formalization the entire truth-table of a formula " + }, + { + "bbox": [ + 67, + 341, + 541, + 422 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 67, + 341, + 541, + 422 + ], + "type": "text", + "content": " has length polynomial in the length of " + }, + { + "bbox": [ + 67, + 341, + 541, + 422 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 67, + 341, + 541, + 422 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 422, + 541, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 422, + 541, + 502 + ], + "spans": [ + { + "bbox": [ + 67, + 422, + 541, + 502 + ], + "type": "text", + "content": "Obtaining a Log formalization (e.g., [MP20]) is a stronger result than obtaining a LogLog formalization (e.g., [Raz95a]). In particular, in contrast to the discussion above, a witnessing theorem applied to a LogLog formalization provides a refuter with access to " + }, + { + "bbox": [ + 67, + 422, + 541, + 502 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 67, + 422, + 541, + 502 + ], + "type": "text", + "content": " and thus running in time " + }, + { + "bbox": [ + 67, + 422, + 541, + 502 + ], + "type": "inline_equation", + "content": "\\mathrm{poly}(N) = \\mathrm{poly}(2^n)" + }, + { + "bbox": [ + 67, + 422, + 541, + 502 + ], + "type": "text", + "content": ". Conversely, the unprovability of a LogLog circuit lower bound statement (e.g., [PS21, LO23]) is a stronger result than the unprovability of a Log statement. We refer to the introduction of [MP20] for a more extensive discussion on this matter." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 519, + 443, + 533 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 519, + 443, + 533 + ], + "spans": [ + { + "bbox": [ + 67, + 519, + 443, + 533 + ], + "type": "text", + "content": "4.2 Concrete Example: Subbotovskaya's Formula Lower Bound in " + }, + { + "bbox": [ + 67, + 519, + 443, + 533 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 541, + 541, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 541, + 541, + 567 + ], + "spans": [ + { + "bbox": [ + 67, + 541, + 541, + 567 + ], + "type": "text", + "content": "In this section, we explore some details of a formalization in " + }, + { + "bbox": [ + 67, + 541, + 541, + 567 + ], + "type": "inline_equation", + "content": "\\mathrm{PV}_1" + }, + { + "bbox": [ + 67, + 541, + 541, + 567 + ], + "type": "text", + "content": " that the parity function " + }, + { + "bbox": [ + 67, + 541, + 541, + 567 + ], + "type": "inline_equation", + "content": "\\oplus" + }, + { + "bbox": [ + 67, + 541, + 541, + 567 + ], + "type": "text", + "content": " on " + }, + { + "bbox": [ + 67, + 541, + 541, + 567 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 67, + 541, + 541, + 567 + ], + "type": "text", + "content": " bits requires Boolean formulas of size " + }, + { + "bbox": [ + 67, + 541, + 541, + 567 + ], + "type": "inline_equation", + "content": "\\geq n^{3/2}" + }, + { + "bbox": [ + 67, + 541, + 541, + 567 + ], + "type": "text", + "content": " [Sub61]. We follow the notation introduced in Section 4.1." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 575, + 359, + 590 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 575, + 359, + 590 + ], + "spans": [ + { + "bbox": [ + 69, + 575, + 359, + 590 + ], + "type": "interline_equation", + "content": "\\text {T h e o r m 4 . 
1} \\left(\\left[ C K K ^ {+} 2 4 \\right]\\right). L e t s (n) \\triangleq n ^ {3 / 2}. T h e n P V _ {1} \\vdash F L B _ {s} ^ {\\oplus}.", + "image_path": "bbaa66f9f2156b78f7fbc5df612f9584c7c94f840efaecc66a6d8a01f859f0b1.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 599, + 541, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 599, + 541, + 625 + ], + "spans": [ + { + "bbox": [ + 67, + 599, + 541, + 625 + ], + "type": "text", + "content": "The formalization is an adaptation of the argument presented in [Juk12, Section 6.3], which proceeds as follows:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 82, + 635, + 541, + 663 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 635, + 541, + 663 + ], + "spans": [ + { + "bbox": [ + 82, + 635, + 541, + 663 + ], + "type": "text", + "content": "1. [Juk12, Lemma 6.8]: For any formula " + }, + { + "bbox": [ + 82, + 635, + 541, + 663 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 82, + 635, + 541, + 663 + ], + "type": "text", + "content": " on " + }, + { + "bbox": [ + 82, + 635, + 541, + 663 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 82, + 635, + 541, + 663 + ], + "type": "text", + "content": "-bit inputs, it is possible to fix one of its variables so that the resulting formula " + }, + { + "bbox": [ + 82, + 635, + 541, + 663 + ], + "type": "inline_equation", + "content": "F_{1}" + }, + { + "bbox": [ + 82, + 635, + 541, + 663 + ], + "type": "text", + "content": " satisfies " + }, + { + "bbox": [ + 82, + 635, + 541, + 663 + ], + "type": "inline_equation", + "content": "\\mathrm{Size}(F_1) \\leq (1 - 1 / n)^{3 / 2} \\cdot \\mathrm{Size}(F)" + }, + { + "bbox": [ + 82, + 635, + 541, + 663 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 81, + 670, + 355, + 682 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 670, + 355, + 682 + ], + "spans": [ + { + "bbox": [ + 81, + 670, + 355, + 682 + ], + "type": "text", + "content": "We often abuse notation and treat " + }, + { + "bbox": [ + 81, + 670, + 355, + 682 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 81, + 670, + 355, + 682 + ], + "type": "text", + "content": " as a string in semi-formal discussions." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 299, + 712, + 312, + 720 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 712, + 312, + 720 + ], + "spans": [ + { + "bbox": [ + 299, + 712, + 312, + 720 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 82, + 72, + 538, + 99 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 72, + 538, + 99 + ], + "spans": [ + { + "bbox": [ + 82, + 72, + 538, + 99 + ], + "type": "text", + "content": "2. 
[Juk12, Theorem 6.10]: If we apply this result " + }, + { + "bbox": [ + 82, + 72, + 538, + 99 + ], + "type": "inline_equation", + "content": "\\ell \\triangleq n - k" + }, + { + "bbox": [ + 82, + 72, + 538, + 99 + ], + "type": "text", + "content": " times, we obtain a formula " + }, + { + "bbox": [ + 82, + 72, + 538, + 99 + ], + "type": "inline_equation", + "content": "F_{\\ell}" + }, + { + "bbox": [ + 82, + 72, + 538, + 99 + ], + "type": "text", + "content": " on " + }, + { + "bbox": [ + 82, + 72, + 538, + 99 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 82, + 72, + 538, + 99 + ], + "type": "text", + "content": "-bit inputs such that" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 95, + 108, + 539, + 125 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 108, + 539, + 125 + ], + "spans": [ + { + "bbox": [ + 95, + 108, + 539, + 125 + ], + "type": "interline_equation", + "content": "\\operatorname {S i z e} (F _ {\\ell}) \\leq \\operatorname {S i z e} (F) \\cdot (1 - 1 / n) ^ {3 / 2} \\cdot (1 - 1 / (n - 1)) ^ {3 / 2} \\dots (1 - 1 / (k + 1)) ^ {3 / 2} = \\operatorname {S i z e} (F) \\cdot (k / n) ^ {3 / 2}.", + "image_path": "e7bf767ab905fa1fdcb5a401caf0e03f3d616f352134053d81f729940d213325.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 82, + 140, + 538, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 140, + 538, + 167 + ], + "spans": [ + { + "bbox": [ + 82, + 140, + 538, + 167 + ], + "type": "text", + "content": "3. 
[Juk12, Example 6.11]: Finally, if the initial formula " + }, + { + "bbox": [ + 82, + 140, + 538, + 167 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 82, + 140, + 538, + 167 + ], + "type": "text", + "content": " computes the parity function, by setting " + }, + { + "bbox": [ + 82, + 140, + 538, + 167 + ], + "type": "inline_equation", + "content": "\\ell = n - 1" + }, + { + "bbox": [ + 82, + 140, + 538, + 167 + ], + "type": "text", + "content": " we get " + }, + { + "bbox": [ + 82, + 140, + 538, + 167 + ], + "type": "inline_equation", + "content": "1 \\leq \\operatorname{Size}(F_{\\ell}) \\leq (1/n)^{3/2} \\cdot \\operatorname{Size}(F)" + }, + { + "bbox": [ + 82, + 140, + 538, + 167 + ], + "type": "text", + "content": ", and consequently " + }, + { + "bbox": [ + 82, + 140, + 538, + 167 + ], + "type": "inline_equation", + "content": "\\operatorname{Size}(F) \\geq n^{3/2}" + }, + { + "bbox": [ + 82, + 140, + 538, + 167 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 175, + 539, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 175, + 539, + 216 + ], + "spans": [ + { + "bbox": [ + 67, + 175, + 539, + 216 + ], + "type": "text", + "content": "We present the argument in a more constructive way when formalizing the result in " + }, + { + "bbox": [ + 67, + 175, + 539, + 216 + ], + "type": "inline_equation", + "content": "\\mathrm{PV}_1" + }, + { + "bbox": [ + 67, + 175, + 539, + 216 + ], + "type": "text", + "content": ". 
In more detail, given a small formula " + }, + { + "bbox": [ + 67, + 175, + 539, + 216 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 67, + 175, + 539, + 216 + ], + "type": "text", + "content": ", we recursively construct (and establish correctness by induction) an " + }, + { + "bbox": [ + 67, + 175, + 539, + 216 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 67, + 175, + 539, + 216 + ], + "type": "text", + "content": "-bit input " + }, + { + "bbox": [ + 67, + 175, + 539, + 216 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 67, + 175, + 539, + 216 + ], + "type": "text", + "content": " witnessing that " + }, + { + "bbox": [ + 67, + 175, + 539, + 216 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 67, + 175, + 539, + 216 + ], + "type": "text", + "content": " does not compute the parity function." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 225, + 539, + 279 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 225, + 539, + 279 + ], + "spans": [ + { + "bbox": [ + 67, + 225, + 539, + 279 + ], + "type": "text", + "content": "Proof. We follow closely the presentation from " + }, + { + "bbox": [ + 67, + 225, + 539, + 279 + ], + "type": "inline_equation", + "content": "\\left[\\mathrm{CKK}^{+}24\\right]" + }, + { + "bbox": [ + 67, + 225, + 539, + 279 + ], + "type": "text", + "content": ". For brevity, we only discuss the formalization of the main inductive argument. More details can be found in " + }, + { + "bbox": [ + 67, + 225, + 539, + 279 + ], + "type": "inline_equation", + "content": "\\left[\\mathrm{CKK}^{+}24\\right]" + }, + { + "bbox": [ + 67, + 225, + 539, + 279 + ], + "type": "text", + "content": ". 
Given " + }, + { + "bbox": [ + 67, + 225, + 539, + 279 + ], + "type": "inline_equation", + "content": "b \\in \\{0,1\\}" + }, + { + "bbox": [ + 67, + 225, + 539, + 279 + ], + "type": "text", + "content": ", we introduce the function " + }, + { + "bbox": [ + 67, + 225, + 539, + 279 + ], + "type": "inline_equation", + "content": "\\oplus^b(x) \\triangleq \\oplus(x) + b \\pmod{2}" + }, + { + "bbox": [ + 67, + 225, + 539, + 279 + ], + "type": "text", + "content": ". In order to prove " + }, + { + "bbox": [ + 67, + 225, + 539, + 279 + ], + "type": "inline_equation", + "content": "\\mathsf{FLB}_s^\\oplus" + }, + { + "bbox": [ + 67, + 225, + 539, + 279 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 67, + 225, + 539, + 279 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 225, + 539, + 279 + ], + "type": "text", + "content": ", we explicitly consider a polynomial-time function " + }, + { + "bbox": [ + 67, + 225, + 539, + 279 + ], + "type": "inline_equation", + "content": "R(1^n, F, b)" + }, + { + "bbox": [ + 67, + 225, + 539, + 279 + ], + "type": "text", + "content": " with the following property:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 95, + 285, + 511, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 285, + 511, + 300 + ], + "spans": [ + { + "bbox": [ + 95, + 285, + 511, + 300 + ], + "type": "text", + "content": "If " + }, + { + "bbox": [ + 95, + 285, + 511, + 300 + ], + "type": "inline_equation", + "content": "\\operatorname{Size}(F) < s(n)" + }, + { + "bbox": [ + 95, + 285, + 511, + 300 + ], + "type": "text", + "content": " then " + }, + { + "bbox": [ + 95, + 285, + 511, + 300 + ], + "type": "inline_equation", + "content": "R(1^n, F, b)" + }, + { + "bbox": [ + 95, + 285, + 511, + 300 + ], + "type": "text", + "content": " outputs an " + }, + { + "bbox": [ + 95, + 285, + 511, + 300 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 95, + 285, 
+ 511, + 300 + ], + "type": "text", + "content": "-bit string " + }, + { + "bbox": [ + 95, + 285, + 511, + 300 + ], + "type": "inline_equation", + "content": "y_n^b" + }, + { + "bbox": [ + 95, + 285, + 511, + 300 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 95, + 285, + 511, + 300 + ], + "type": "inline_equation", + "content": "\\operatorname{Eval}(F, y_n^b) \\neq \\oplus^b(y_n^b)" + }, + { + "bbox": [ + 95, + 285, + 511, + 300 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 304, + 538, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 304, + 538, + 331 + ], + "spans": [ + { + "bbox": [ + 67, + 304, + 538, + 331 + ], + "type": "text", + "content": "In other words, " + }, + { + "bbox": [ + 67, + 304, + 538, + 331 + ], + "type": "inline_equation", + "content": "R(1^n,F,b)" + }, + { + "bbox": [ + 67, + 304, + 538, + 331 + ], + "type": "text", + "content": " witnesses that the formula " + }, + { + "bbox": [ + 67, + 304, + 538, + 331 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 67, + 304, + 538, + 331 + ], + "type": "text", + "content": " does not compute the function " + }, + { + "bbox": [ + 67, + 304, + 538, + 331 + ], + "type": "inline_equation", + "content": "\\oplus^b" + }, + { + "bbox": [ + 67, + 304, + 538, + 331 + ], + "type": "text", + "content": " over " + }, + { + "bbox": [ + 67, + 304, + 538, + 331 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 67, + 304, + 538, + 331 + ], + "type": "text", + "content": "-bit strings. 
Note that the correctness of " + }, + { + "bbox": [ + 67, + 304, + 538, + 331 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 67, + 304, + 538, + 331 + ], + "type": "text", + "content": " is captured by a sentence " + }, + { + "bbox": [ + 67, + 304, + 538, + 331 + ], + "type": "inline_equation", + "content": "\\operatorname{Ref}_{R,s}" + }, + { + "bbox": [ + 67, + 304, + 538, + 331 + ], + "type": "text", + "content": " described as follows:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 86, + 341, + 520, + 357 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 341, + 520, + 357 + ], + "spans": [ + { + "bbox": [ + 86, + 341, + 520, + 357 + ], + "type": "interline_equation", + "content": "\\forall 1 ^ {n} \\forall F (\\mathsf {F l a} (F) \\wedge \\mathsf {S i z e} (F) < s (n) \\rightarrow | y _ {n} ^ {0} | _ {\\ell} = | y _ {n} ^ {1} | _ {\\ell} = n \\wedge F (y _ {n} ^ {0}) \\neq \\oplus^ {0} (y _ {n} ^ {0}) \\wedge F (y _ {n} ^ {1}) \\neq \\oplus^ {1} (y _ {n} ^ {1}))", + "image_path": "68a518a72601171095d2ba1dea9ce54ca02f92cd0f23c4c4b899735da48a3aee.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 366, + 538, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 366, + 538, + 407 + ], + "spans": [ + { + "bbox": [ + 67, + 366, + 538, + 407 + ], + "type": "text", + "content": "where we employ the abbreviations " + }, + { + "bbox": [ + 67, + 366, + 538, + 407 + ], + "type": "inline_equation", + "content": "y_{n}^{0} \\triangleq R(1^{n}, F, 0)" + }, + { + "bbox": [ + 67, + 366, + 538, + 407 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 366, + 538, + 407 + ], + "type": "inline_equation", + "content": "y_{n}^{1} \\triangleq R(1^{n}, F, 1)" + }, + { + "bbox": [ + 67, + 366, + 538, + 407 + ], + "type": "text", + "content": ", and for convenience use " + }, + { + "bbox": [ + 67, + 366, + 538, + 407 + ], + "type": "inline_equation", + 
"content": "|z|_{\\ell}" + }, + { + "bbox": [ + 67, + 366, + 538, + 407 + ], + "type": "text", + "content": " to denote the bitlength of " + }, + { + "bbox": [ + 67, + 366, + 538, + 407 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 67, + 366, + 538, + 407 + ], + "type": "text", + "content": ". Our plan is to define " + }, + { + "bbox": [ + 67, + 366, + 538, + 407 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 67, + 366, + 538, + 407 + ], + "type": "text", + "content": " and show that " + }, + { + "bbox": [ + 67, + 366, + 538, + 407 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1 \\vdash \\mathsf{Ref}_{R,s}" + }, + { + "bbox": [ + 67, + 366, + 538, + 407 + ], + "type": "text", + "content": ". Note that this implies " + }, + { + "bbox": [ + 67, + 366, + 538, + 407 + ], + "type": "inline_equation", + "content": "\\mathsf{FLB}_s^{\\oplus}" + }, + { + "bbox": [ + 67, + 366, + 538, + 407 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 67, + 366, + 538, + 407 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 366, + 538, + 407 + ], + "type": "text", + "content": " by standard first-order logic reasoning." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 408, + 538, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 408, + 538, + 486 + ], + "spans": [ + { + "bbox": [ + 67, + 408, + 538, + 486 + ], + "type": "text", + "content": "The correctness of " + }, + { + "bbox": [ + 67, + 408, + 538, + 486 + ], + "type": "inline_equation", + "content": "R(1^n, F, b)" + }, + { + "bbox": [ + 67, + 408, + 538, + 486 + ], + "type": "text", + "content": " will be established by polynomial induction on " + }, + { + "bbox": [ + 67, + 408, + 538, + 486 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 67, + 408, + 538, + 486 + ], + "type": "text", + "content": " (equivalently, induction on " + }, + { + "bbox": [ + 67, + 408, + 538, + 486 + ], + "type": "inline_equation", + "content": "n = |N|" + }, + { + "bbox": [ + 67, + 408, + 538, + 486 + ], + "type": "text", + "content": "). Since " + }, + { + "bbox": [ + 67, + 408, + 538, + 486 + ], + "type": "inline_equation", + "content": "\\operatorname{Ref}_{R,s}" + }, + { + "bbox": [ + 67, + 408, + 538, + 486 + ], + "type": "text", + "content": " is a universal sentence and " + }, + { + "bbox": [ + 67, + 408, + 538, + 486 + ], + "type": "inline_equation", + "content": "S_2^1(\\mathcal{L}_{\\mathsf{PV}})" + }, + { + "bbox": [ + 67, + 408, + 538, + 486 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 67, + 408, + 538, + 486 + ], + "type": "inline_equation", + "content": "\\forall \\Sigma_1^b" + }, + { + "bbox": [ + 67, + 408, + 538, + 486 + ], + "type": "text", + "content": "-conservative over " + }, + { + "bbox": [ + 67, + 408, + 538, + 486 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 408, + 538, + 486 + ], + "type": "text", + "content": " (i.e., provability of such a sentence in " + }, + { + "bbox": [ + 67, + 408, + 538, + 486 + ], + "type": "inline_equation", + "content": "S_2^1(\\mathcal{L}_{\\mathsf{PV}})" + }, + 
{ + "bbox": [ + 67, + 408, + 538, + 486 + ], + "type": "text", + "content": " implies its provability in " + }, + { + "bbox": [ + 67, + 408, + 538, + 486 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 408, + 538, + 486 + ], + "type": "text", + "content": "), it is sufficient to describe a formalization in the more convenient theory " + }, + { + "bbox": [ + 67, + 408, + 538, + 486 + ], + "type": "inline_equation", + "content": "S_2^1(\\mathcal{L}_{\\mathsf{PV}})" + }, + { + "bbox": [ + 67, + 408, + 538, + 486 + ], + "type": "text", + "content": ". For this reason, polynomial induction for NP and coNP predicates (admissible in " + }, + { + "bbox": [ + 67, + 408, + 538, + 486 + ], + "type": "inline_equation", + "content": "S_2^1(\\mathcal{L}_{\\mathsf{PV}})" + }, + { + "bbox": [ + 67, + 408, + 538, + 486 + ], + "type": "text", + "content": "; see, e.g., [Kra95, Section 5.2]) is available during the formalization. More details follow." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 490, + 538, + 516 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 490, + 538, + 516 + ], + "spans": [ + { + "bbox": [ + 67, + 490, + 538, + 516 + ], + "type": "text", + "content": "The procedure " + }, + { + "bbox": [ + 67, + 490, + 538, + 516 + ], + "type": "inline_equation", + "content": "R(1^n, F, b)" + }, + { + "bbox": [ + 67, + 490, + 538, + 516 + ], + "type": "text", + "content": " makes use of a few polynomial-time sub-routines (briefly discussed in the comments in the pseudocode below) and is defined in the following way:" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 68, + 534, + 538, + 566 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 534, + 538, + 566 + ], + "spans": [ + { + "bbox": [ + 68, + 534, + 538, + 566 + ], + "type": "text", + "content": "7 Actually, for technical reasons related to the induction step, we will 
simultaneously construct an " + }, + { + "bbox": [ + 68, + 534, + 538, + 566 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 68, + 534, + 538, + 566 + ], + "type": "text", + "content": "-bit input " + }, + { + "bbox": [ + 68, + 534, + 538, + 566 + ], + "type": "inline_equation", + "content": "y_{n}^{0}" + }, + { + "bbox": [ + 68, + 534, + 538, + 566 + ], + "type": "text", + "content": " witnessing that " + }, + { + "bbox": [ + 68, + 534, + 538, + 566 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 68, + 534, + 538, + 566 + ], + "type": "text", + "content": " does not compute the parity function and an " + }, + { + "bbox": [ + 68, + 534, + 538, + 566 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 68, + 534, + 538, + 566 + ], + "type": "text", + "content": "-bit input " + }, + { + "bbox": [ + 68, + 534, + 538, + 566 + ], + "type": "inline_equation", + "content": "y_{n}^{1}" + }, + { + "bbox": [ + 68, + 534, + 538, + 566 + ], + "type": "text", + "content": " witnessing that " + }, + { + "bbox": [ + 68, + 534, + 538, + 566 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 68, + 534, + 538, + 566 + ], + "type": "text", + "content": " does not compute the negation of the parity function." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 568, + 539, + 590 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 568, + 539, + 590 + ], + "spans": [ + { + "bbox": [ + 69, + 568, + 539, + 590 + ], + "type": "text", + "content": "For convenience, we often write " + }, + { + "bbox": [ + 69, + 568, + 539, + 590 + ], + "type": "inline_equation", + "content": "1^n" + }, + { + "bbox": [ + 69, + 568, + 539, + 590 + ], + "type": "text", + "content": " instead of explicitly considering parameters " + }, + { + "bbox": [ + 69, + 568, + 539, + 590 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 69, + 568, + 539, + 590 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 69, + 568, + 539, + 590 + ], + "type": "inline_equation", + "content": "n = |N|" + }, + { + "bbox": [ + 69, + 568, + 539, + 590 + ], + "type": "text", + "content": ". In practice, it means that " + }, + { + "bbox": [ + 69, + 568, + 539, + 590 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 69, + 568, + 539, + 590 + ], + "type": "text", + "content": " gets as input " + }, + { + "bbox": [ + 69, + 568, + 539, + 590 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 69, + 568, + 539, + 590 + ], + "type": "text", + "content": " (together with other parameters) but with respect to " + }, + { + "bbox": [ + 69, + 568, + 539, + 590 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 69, + 568, + 539, + 590 + ], + "type": "text", + "content": " it only depends on " + }, + { + "bbox": [ + 69, + 568, + 539, + 590 + ], + "type": "inline_equation", + "content": "n = |N|" + }, + { + "bbox": [ + 69, + 568, + 539, + 590 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 70, + 591, + 539, + 613 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 591, + 539, + 613 + ], + "spans": [ + { + "bbox": [ + 70, + 591, + 539, + 613 + ], + "type": "inline_equation", + "content": "{}^{9}" + }, + { + "bbox": [ + 70, + 591, + 539, + 613 + ], + "type": "text", + "content": " Similarly,the notation " + }, + { + "bbox": [ + 70, + 591, + 539, + 613 + ], + "type": "inline_equation", + "content": "{\\forall 1}^{n}" + }, + { + "bbox": [ + 70, + 591, + 539, + 613 + ], + "type": "text", + "content": " denotes " + }, + { + "bbox": [ + 70, + 591, + 539, + 613 + ], + "type": "inline_equation", + "content": "\\forall N\\forall n" + }, + { + "bbox": [ + 70, + 591, + 539, + 613 + ], + "type": "text", + "content": " but we add the condition that " + }, + { + "bbox": [ + 70, + 591, + 539, + 613 + ], + "type": "inline_equation", + "content": "n = \\left| N\\right|" + }, + { + "bbox": [ + 70, + 591, + 539, + 613 + ], + "type": "text", + "content": " in the subsequent formula. 
We might also write just " + }, + { + "bbox": [ + 70, + 591, + 539, + 613 + ], + "type": "inline_equation", + "content": "F\\left( x\\right)" + }, + { + "bbox": [ + 70, + 591, + 539, + 613 + ], + "type": "text", + "content": " instead of " + }, + { + "bbox": [ + 70, + 591, + 539, + 613 + ], + "type": "inline_equation", + "content": "\\operatorname{Eval}\\left( {F,x}\\right)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 299, + 711, + 310, + 719 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 711, + 310, + 719 + ], + "spans": [ + { + "bbox": [ + 299, + 711, + 310, + 719 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 86, + 76, + 382, + 89 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 76, + 382, + 89 + ], + "spans": [ + { + "bbox": [ + 86, + 76, + 382, + 89 + ], + "type": "text", + "content": "Input: " + }, + { + "bbox": [ + 86, + 76, + 382, + 89 + ], + "type": "inline_equation", + "content": "1^n" + }, + { + "bbox": [ + 86, + 76, + 382, + 89 + ], + "type": "text", + "content": " for some " + }, + { + "bbox": [ + 86, + 76, + 382, + 89 + ], + "type": "inline_equation", + "content": "n \\geq 1" + }, + { + "bbox": [ + 86, + 76, + 382, + 89 + ], + "type": "text", + "content": ", formula " + }, + { + "bbox": [ + 86, + 76, + 382, + 89 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 86, + 76, + 382, + 89 + ], + "type": "text", + "content": " over " + }, + { + "bbox": [ + 86, + 76, + 382, + 89 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 86, + 76, + 382, + 89 + ], + "type": "text", + "content": "-bit inputs, " + }, + { + "bbox": [ + 86, + 76, + 382, + 89 + ], + "type": "inline_equation", + "content": "b \\in \\{0,1\\}" + }, + { + "bbox": [ + 86, + 76, + 382, + 89 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 77, + 89, + 514, + 269 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 77, + 89, + 373, + 102 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 89, + 373, + 102 + ], + "spans": [ + { + "bbox": [ + 77, + 89, + 373, + 102 + ], + "type": "text", + "content": "1 Let " + }, + { + "bbox": [ + 77, + 89, + 373, + 102 + ], + "type": "inline_equation", + "content": "s(n) \\triangleq n^{3/2}" + }, + { + "bbox": [ + 77, + 89, + 373, + 102 + ], + "type": "text", + "content": ". If " + }, + { + "bbox": [ + 77, + 89, + 373, + 102 + ], + "type": "inline_equation", + "content": "\\operatorname{Size}(F) \\geq s(n)" + }, + { + "bbox": [ + 77, + 89, + 373, + 102 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 77, + 89, + 373, + 102 + ], + "type": "inline_equation", + "content": "\\neg \\mathsf{Fla}(F)" + }, + { + "bbox": [ + 77, + 89, + 373, + 102 + ], + "type": "text", + "content": " return \"error\";" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 77, + 103, + 512, + 132 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 103, + 512, + 132 + ], + "spans": [ + { + "bbox": [ + 77, + 103, + 512, + 132 + ], + "type": "text", + "content": "2 If " + }, + { + "bbox": [ + 77, + 103, + 512, + 132 + ], + "type": "inline_equation", + "content": "\\operatorname{Size}(F) = 0" + }, + { + "bbox": [ + 77, + 103, + 512, + 132 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 77, + 103, + 512, + 132 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 77, + 103, + 512, + 132 + ], + "type": "text", + "content": " computes a constant function " + }, + { + "bbox": [ + 77, + 103, + 512, + 132 + ], + "type": "inline_equation", + "content": "b_{F} \\in \\{0,1\\}" + }, + { + "bbox": [ + 77, + 103, + 512, + 132 + ], + "type": "text", + "content": ". 
In this case, return the " + }, + { + "bbox": [ + 77, + 103, + 512, + 132 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 77, + 103, + 512, + 132 + ], + "type": "text", + "content": "-bit string " + }, + { + "bbox": [ + 77, + 103, + 512, + 132 + ], + "type": "inline_equation", + "content": "y_{n}^{b} \\triangleq y_{1}^{b} 0^{n-1}" + }, + { + "bbox": [ + 77, + 103, + 512, + 132 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 77, + 103, + 512, + 132 + ], + "type": "inline_equation", + "content": "\\oplus^{b}(y_{1}^{b} 0^{n-1}) \\neq b_{F}" + }, + { + "bbox": [ + 77, + 103, + 512, + 132 + ], + "type": "text", + "content": ";" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 77, + 133, + 514, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 133, + 514, + 178 + ], + "spans": [ + { + "bbox": [ + 77, + 133, + 514, + 178 + ], + "type": "text", + "content": "3 Let " + }, + { + "bbox": [ + 77, + 133, + 514, + 178 + ], + "type": "inline_equation", + "content": "\\widetilde{F} \\triangleq \\text{Normalize}(1^n, F)" + }, + { + "bbox": [ + 77, + 133, + 514, + 178 + ], + "type": "text", + "content": "; // " + }, + { + "bbox": [ + 77, + 133, + 514, + 178 + ], + "type": "inline_equation", + "content": "\\widetilde{F}" + }, + { + "bbox": [ + 77, + 133, + 514, + 178 + ], + "type": "text", + "content": " satisfies the conditions in the proof of [Juk12, Claim 6.9], " + }, + { + "bbox": [ + 77, + 133, + 514, + 178 + ], + "type": "inline_equation", + "content": "\\text{Size}(\\widetilde{F}) \\leq \\text{Size}(F)" + }, + { + "bbox": [ + 77, + 133, + 514, + 178 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 77, + 133, + 514, + 178 + ], + "type": "inline_equation", + "content": "\\forall x \\in \\{0, 1\\}^n F(x) = \\widetilde{F}(x)" + }, + { + "bbox": [ + 77, + 133, + 514, + 178 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 77, + 180, + 496, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 180, + 496, + 220 + ], + "spans": [ + { + "bbox": [ + 77, + 180, + 496, + 220 + ], + "type": "text", + "content": "4 Let " + }, + { + "bbox": [ + 77, + 180, + 496, + 220 + ], + "type": "inline_equation", + "content": "\\rho \\triangleq \\text{Find-Restriction}(1^n, \\widetilde{F})" + }, + { + "bbox": [ + 77, + 180, + 496, + 220 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 77, + 180, + 496, + 220 + ], + "type": "inline_equation", + "content": "\\rho: [n] \\to \\{0, 1, \\star\\}" + }, + { + "bbox": [ + 77, + 180, + 496, + 220 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 77, + 180, + 496, + 220 + ], + "type": "inline_equation", + "content": "|\\rho^{-1}(\\star)| = n - 1" + }, + { + "bbox": [ + 77, + 180, + 496, + 220 + ], + "type": "text", + "content": "; // " + }, + { + "bbox": [ + 77, + 180, + 496, + 220 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 77, + 180, + 496, + 220 + ], + "type": "text", + "content": " restricts a suitable variable " + }, + { + "bbox": [ + 77, + 180, + 496, + 220 + ], + "type": "inline_equation", + "content": "x_i" + }, + { + "bbox": [ + 77, + 180, + 496, + 220 + ], + "type": "text", + "content": " to a bit " + }, + { + "bbox": [ + 77, + 180, + 496, + 220 + ], + "type": "inline_equation", + "content": "c_i" + }, + { + "bbox": [ + 77, + 180, + 496, + 220 + ], + "type": "text", + "content": ", as in [Juk12, Lemma 6.8]." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 77, + 222, + 460, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 222, + 460, + 253 + ], + "spans": [ + { + "bbox": [ + 77, + 222, + 460, + 253 + ], + "type": "text", + "content": "5 Let " + }, + { + "bbox": [ + 77, + 222, + 460, + 253 + ], + "type": "inline_equation", + "content": "F' \\triangleq \\text{Apply-Restriction}(1^n, \\widetilde{F}, \\rho)" + }, + { + "bbox": [ + 77, + 222, + 460, + 253 + ], + "type": "text", + "content": ". Moreover, let " + }, + { + "bbox": [ + 77, + 222, + 460, + 253 + ], + "type": "inline_equation", + "content": "b' \\triangleq b \\oplus c_i" + }, + { + "bbox": [ + 77, + 222, + 460, + 253 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 77, + 222, + 460, + 253 + ], + "type": "inline_equation", + "content": "n' \\triangleq n - 1" + }, + { + "bbox": [ + 77, + 222, + 460, + 253 + ], + "type": "text", + "content": "; // " + }, + { + "bbox": [ + 77, + 222, + 460, + 253 + ], + "type": "inline_equation", + "content": "F'" + }, + { + "bbox": [ + 77, + 222, + 460, + 253 + ], + "type": "text", + "content": " is an " + }, + { + "bbox": [ + 77, + 222, + 460, + 253 + ], + "type": "inline_equation", + "content": "n'" + }, + { + "bbox": [ + 77, + 222, + 460, + 253 + ], + "type": "text", + "content": "-bit formula; " + }, + { + "bbox": [ + 77, + 222, + 460, + 253 + ], + "type": "inline_equation", + "content": "\\forall z \\in \\{0, 1\\}^{\\rho^{-1}(\\star)} F'(z) = \\widetilde{F}(z \\cup x_i \\mapsto c_i)" + }, + { + "bbox": [ + 77, + 222, + 460, + 253 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 77, + 253, + 405, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 253, + 405, + 269 + ], + "spans": [ + { + "bbox": [ + 77, + 253, + 405, + 269 + ], + "type": "text", + "content": "6 Let " + }, + { + "bbox": [ + 77, + 253, + 405, + 269 + ], + "type": "inline_equation", + "content": "y_{n'}^{b'} \\triangleq R(1^{n'}, F', b')" + }, + { + "bbox": [ + 77, + 253, + 405, + 269 + ], + "type": "text", + "content": " and return the " + }, + { + "bbox": [ + 77, + 253, + 405, + 269 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 77, + 253, + 405, + 269 + ], + "type": "text", + "content": "-bit string " + }, + { + "bbox": [ + 77, + 253, + 405, + 269 + ], + "type": "inline_equation", + "content": "y_n^b \\triangleq y_{n'}^{b'} \\cup y_i \\mapsto c_i" + }, + { + "bbox": [ + 77, + 253, + 405, + 269 + ], + "type": "text", + "content": ";" + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 161, + 272, + 414, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 161, + 272, + 414, + 285 + ], + "spans": [ + { + "bbox": [ + 161, + 272, + 414, + 285 + ], + "type": "text", + "content": "Algorithm 1: Refuter Algorithm " + }, + { + "bbox": [ + 161, + 272, + 414, + 285 + ], + "type": "inline_equation", + "content": "R(1^n, F, b)" + }, + { + "bbox": [ + 161, + 272, + 414, + 285 + ], + "type": "text", + "content": " [CKK+24]." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 68, + 293, + 541, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 293, + 541, + 346 + ], + "spans": [ + { + "bbox": [ + 68, + 293, + 541, + 346 + ], + "type": "text", + "content": "(The pseudocode presented above is only an informal specification of " + }, + { + "bbox": [ + 68, + 293, + 541, + 346 + ], + "type": "inline_equation", + "content": "R(1^n, F, b)" + }, + { + "bbox": [ + 68, + 293, + 541, + 346 + ], + "type": "text", + "content": ". As mentioned in Section 4.1, a completely formal proof in " + }, + { + "bbox": [ + 68, + 293, + 541, + 346 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 68, + 293, + 541, + 346 + ], + "type": "text", + "content": " would employ Cobham's formalism and would specify how " + }, + { + "bbox": [ + 68, + 293, + 541, + 346 + ], + "type": "inline_equation", + "content": "R(1^n, F, b)" + }, + { + "bbox": [ + 68, + 293, + 541, + 346 + ], + "type": "text", + "content": " can be defined from previously defined algorithms (e.g., Apply-Restriction) via the allowed operations.)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 68, + 346, + 541, + 401 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 346, + 541, + 401 + ], + "spans": [ + { + "bbox": [ + 68, + 346, + 541, + 401 + ], + "type": "text", + "content": "We note that " + }, + { + "bbox": [ + 68, + 346, + 541, + 401 + ], + "type": "inline_equation", + "content": "R(1^n, F, b)" + }, + { + "bbox": [ + 68, + 346, + 541, + 401 + ], + "type": "text", + "content": " runs in time polynomial in " + }, + { + "bbox": [ + 68, + 346, + 541, + 401 + ], + "type": "inline_equation", + "content": "n + |F| + |b|" + }, + { + "bbox": [ + 68, + 346, + 541, + 401 + ], + "type": "text", + "content": " and that it is definable in " + }, + { + "bbox": [ + 68, + 346, + 541, + 401 + ], + "type": "inline_equation", + "content": 
"\\mathsf{S}_2^1(\\mathcal{L}_{\\mathsf{PV}})" + }, + { + "bbox": [ + 68, + 346, + 541, + 401 + ], + "type": "text", + "content": ". Next, as an instructive example, we establish the correctness " + }, + { + "bbox": [ + 68, + 346, + 541, + 401 + ], + "type": "inline_equation", + "content": "R(1^n, F, b)" + }, + { + "bbox": [ + 68, + 346, + 541, + 401 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 68, + 346, + 541, + 401 + ], + "type": "inline_equation", + "content": "\\mathsf{S}_2^1(\\mathcal{L}_{\\mathsf{PV}})" + }, + { + "bbox": [ + 68, + 346, + 541, + 401 + ], + "type": "text", + "content": " by polynomial induction (PIND) for " + }, + { + "bbox": [ + 68, + 346, + 541, + 401 + ], + "type": "inline_equation", + "content": "\\Pi_1^b" + }, + { + "bbox": [ + 68, + 346, + 541, + 401 + ], + "type": "text", + "content": "-formulas, assuming that the subroutines appearing in the pseudocode of " + }, + { + "bbox": [ + 68, + 346, + 541, + 401 + ], + "type": "inline_equation", + "content": "R(1^n, F, b)" + }, + { + "bbox": [ + 68, + 346, + 541, + 401 + ], + "type": "text", + "content": " satisfy the necessary properties (provably in " + }, + { + "bbox": [ + 68, + 346, + 541, + 401 + ], + "type": "inline_equation", + "content": "\\mathsf{S}_2^1(\\mathcal{L}_{\\mathsf{PV}})" + }, + { + "bbox": [ + 68, + 346, + 541, + 401 + ], + "type": "text", + "content": ")." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 406, + 316, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 406, + 316, + 421 + ], + "spans": [ + { + "bbox": [ + 69, + 406, + 316, + 421 + ], + "type": "text", + "content": "Lemma 4.2. Let " + }, + { + "bbox": [ + 69, + 406, + 316, + 421 + ], + "type": "inline_equation", + "content": "s(n) \\triangleq n^{3/2}" + }, + { + "bbox": [ + 69, + 406, + 316, + 421 + ], + "type": "text", + "content": ". 
Then " + }, + { + "bbox": [ + 69, + 406, + 316, + 421 + ], + "type": "inline_equation", + "content": "\\mathsf{S}_2^1(\\mathcal{L}_{\\mathsf{PV}}) \\vdash \\mathsf{Ref}_{R,s}" + }, + { + "bbox": [ + 69, + 406, + 316, + 421 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 430, + 288, + 442 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 430, + 288, + 442 + ], + "spans": [ + { + "bbox": [ + 69, + 430, + 288, + 442 + ], + "type": "text", + "content": "Proof. We consider the formula " + }, + { + "bbox": [ + 69, + 430, + 288, + 442 + ], + "type": "inline_equation", + "content": "\\varphi(N)" + }, + { + "bbox": [ + 69, + 430, + 288, + 442 + ], + "type": "text", + "content": " defined as" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 120, + 453, + 493, + 493 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 453, + 493, + 493 + ], + "spans": [ + { + "bbox": [ + 120, + 453, + 493, + 493 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\forall F \\forall n (n = | N | \\wedge n \\geq 1 \\wedge \\operatorname {F l a} (F) \\wedge \\operatorname {S i z e} (F) < s (n)) \\rightarrow \\\\ \\left(\\left| y _ {n} ^ {0} \\right| _ {\\ell} = \\left| y _ {n} ^ {1} \\right| _ {\\ell} = n \\wedge F \\left(y _ {n} ^ {0}\\right) \\neq \\oplus^ {0} \\left(y _ {n} ^ {0}\\right) \\wedge F \\left(y _ {n} ^ {1}\\right) \\neq \\oplus^ {1} \\left(y _ {n} ^ {1}\\right)\\right), \\\\ \\end{array}", + "image_path": "2d22254fa53ed9c0bfb4f7a3721e172f45299ac18002cfe7b6c71e2c693406ce.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 68, + 498, + 540, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 498, + 540, + 525 + ], + "spans": [ + { + "bbox": [ + 68, + 498, + 540, + 525 + ], + "type": "text", + "content": "where as before we use " + }, + { + "bbox": [ + 68, + 498, + 540, + 525 + ], + "type": "inline_equation", + 
"content": "y_{n}^{0} \\triangleq R(1^{n}, F, 0)" + }, + { + "bbox": [ + 68, + 498, + 540, + 525 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 68, + 498, + 540, + 525 + ], + "type": "inline_equation", + "content": "y_{n}^{1} \\triangleq R(1^{n}, F, 1)" + }, + { + "bbox": [ + 68, + 498, + 540, + 525 + ], + "type": "text", + "content": ". Note that " + }, + { + "bbox": [ + 68, + 498, + 540, + 525 + ], + "type": "inline_equation", + "content": "\\varphi(N)" + }, + { + "bbox": [ + 68, + 498, + 540, + 525 + ], + "type": "text", + "content": " is a " + }, + { + "bbox": [ + 68, + 498, + 540, + 525 + ], + "type": "inline_equation", + "content": "\\Pi_1^b" + }, + { + "bbox": [ + 68, + 498, + 540, + 525 + ], + "type": "text", + "content": "-formula. Below, we argue that" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 166, + 525, + 441, + 540 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 525, + 441, + 540 + ], + "spans": [ + { + "bbox": [ + 166, + 525, + 441, + 540 + ], + "type": "interline_equation", + "content": "\\mathsf {S} _ {2} ^ {1} (\\mathcal {L} _ {\\mathsf {P V}}) \\vdash \\varphi (1) \\quad \\text {a n d} \\quad \\mathsf {S} _ {2} ^ {1} (\\mathcal {L} _ {\\mathsf {P V}}) \\vdash \\forall N \\varphi (\\lfloor N / 2 \\rfloor) \\rightarrow \\varphi (N).", + "image_path": "361d2eabb4fbcc8569f1abf8fa96a20860733cd7792ec1b3bcbc2b9e891263e2.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 68, + 545, + 541, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 545, + 541, + 574 + ], + "spans": [ + { + "bbox": [ + 68, + 545, + 541, + 574 + ], + "type": "text", + "content": "Then, by polynomial induction for " + }, + { + "bbox": [ + 68, + 545, + 541, + 574 + ], + "type": "inline_equation", + "content": "\\Pi_1^b" + }, + { + "bbox": [ + 68, + 545, + 541, + 574 + ], + "type": "text", + "content": "-formulas (available in " + }, + { + "bbox": [ + 68, + 545, + 541, + 574 + 
], + "type": "inline_equation", + "content": "\\mathsf{S}_2^1(\\mathcal{L}_{\\mathsf{PV}})" + }, + { + "bbox": [ + 68, + 545, + 541, + 574 + ], + "type": "text", + "content": ") and using that " + }, + { + "bbox": [ + 68, + 545, + 541, + 574 + ], + "type": "inline_equation", + "content": "\\varphi(0)" + }, + { + "bbox": [ + 68, + 545, + 541, + 574 + ], + "type": "text", + "content": " trivially holds, it follows that " + }, + { + "bbox": [ + 68, + 545, + 541, + 574 + ], + "type": "inline_equation", + "content": "\\mathsf{S}_2^1(\\mathcal{L}_{\\mathsf{PV}}) \\vdash \\forall N \\varphi(N)" + }, + { + "bbox": [ + 68, + 545, + 541, + 574 + ], + "type": "text", + "content": ". In turn, this yields " + }, + { + "bbox": [ + 68, + 545, + 541, + 574 + ], + "type": "inline_equation", + "content": "\\mathsf{S}_2^1(\\mathcal{L}_{\\mathsf{PV}}) \\vdash \\mathsf{Ref}_{R,s}" + }, + { + "bbox": [ + 68, + 545, + 541, + 574 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 68, + 588, + 541, + 630 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 588, + 541, + 630 + ], + "spans": [ + { + "bbox": [ + 68, + 588, + 541, + 630 + ], + "type": "text", + "content": "Base Case: " + }, + { + "bbox": [ + 68, + 588, + 541, + 630 + ], + "type": "inline_equation", + "content": "\\mathsf{S}_2^1 (\\mathcal{L}_{\\mathrm{PV}})\\vdash \\varphi (1)" + }, + { + "bbox": [ + 68, + 588, + 541, + 630 + ], + "type": "text", + "content": ". 
In this case, for a given formula " + }, + { + "bbox": [ + 68, + 588, + 541, + 630 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 68, + 588, + 541, + 630 + ], + "type": "text", + "content": " and length " + }, + { + "bbox": [ + 68, + 588, + 541, + 630 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 68, + 588, + 541, + 630 + ], + "type": "text", + "content": ", the hypothesis of " + }, + { + "bbox": [ + 68, + 588, + 541, + 630 + ], + "type": "inline_equation", + "content": "\\varphi (1)" + }, + { + "bbox": [ + 68, + 588, + 541, + 630 + ], + "type": "text", + "content": " is satisfied only if " + }, + { + "bbox": [ + 68, + 588, + 541, + 630 + ], + "type": "inline_equation", + "content": "n = 1" + }, + { + "bbox": [ + 68, + 588, + 541, + 630 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 68, + 588, + 541, + 630 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 68, + 588, + 541, + 630 + ], + "type": "text", + "content": " is a valid description of a formula, and " + }, + { + "bbox": [ + 68, + 588, + 541, + 630 + ], + "type": "inline_equation", + "content": "\\operatorname {Size}(F) = 0" + }, + { + "bbox": [ + 68, + 588, + 541, + 630 + ], + "type": "text", + "content": ". Let " + }, + { + "bbox": [ + 68, + 588, + 541, + 630 + ], + "type": "inline_equation", + "content": "y_1^0\\triangleq R(1,F,0)" + }, + { + "bbox": [ + 68, + 588, + 541, + 630 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 68, + 588, + 541, + 630 + ], + "type": "inline_equation", + "content": "y_{1}^{1}\\triangleq R(1,F,1)" + }, + { + "bbox": [ + 68, + 588, + 541, + 630 + ], + "type": "text", + "content": ". 
We need to prove that" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 176, + 638, + 432, + 654 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 176, + 638, + 432, + 654 + ], + "spans": [ + { + "bbox": [ + 176, + 638, + 432, + 654 + ], + "type": "interline_equation", + "content": "\\left| y _ {1} ^ {0} \\right| _ {\\ell} = \\left| y _ {1} ^ {1} \\right| _ {\\ell} = 1 \\wedge F \\left(y _ {1} ^ {0}\\right) \\neq \\oplus^ {0} \\left(y _ {1} ^ {0}\\right) \\wedge F \\left(y _ {1} ^ {1}\\right) \\neq \\oplus^ {1} \\left(y _ {1} ^ {1}\\right).", + "image_path": "61ab6dc3ffda86376e6eedf888d5e7150209d299534efacbd943d531c38369f0.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 68, + 664, + 541, + 692 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 664, + 541, + 692 + ], + "spans": [ + { + "bbox": [ + 68, + 664, + 541, + 692 + ], + "type": "text", + "content": "Since " + }, + { + "bbox": [ + 68, + 664, + 541, + 692 + ], + "type": "inline_equation", + "content": "n = 1" + }, + { + "bbox": [ + 68, + 664, + 541, + 692 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 68, + 664, + 541, + 692 + ], + "type": "inline_equation", + "content": "\\mathrm{Size}(F) = 0" + }, + { + "bbox": [ + 68, + 664, + 541, + 692 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 68, + 664, + 541, + 692 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 68, + 664, + 541, + 692 + ], + "type": "text", + "content": " evaluates to a constant " + }, + { + "bbox": [ + 68, + 664, + 541, + 692 + ], + "type": "inline_equation", + "content": "b_{F}" + }, + { + "bbox": [ + 68, + 664, + 541, + 692 + ], + "type": "text", + "content": " on every input bit. 
The statement above is implied by Line 2 in the definition of " + }, + { + "bbox": [ + 68, + 664, + 541, + 692 + ], + "type": "inline_equation", + "content": "R(n,F,b)" + }, + { + "bbox": [ + 68, + 664, + 541, + 692 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 712, + 311, + 720 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 712, + 311, + 720 + ], + "spans": [ + { + "bbox": [ + 299, + 712, + 311, + 720 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 72, + 539, + 111 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 72, + 539, + 111 + ], + "spans": [ + { + "bbox": [ + 68, + 72, + 539, + 111 + ], + "type": "text", + "content": "(Polynomial) Induction Step: " + }, + { + "bbox": [ + 68, + 72, + 539, + 111 + ], + "type": "inline_equation", + "content": "\\mathsf{S}_2^1 (\\mathcal{L}_{\\mathsf{PV}})\\vdash \\forall N\\varphi (\\lfloor N / 2\\rfloor)\\to \\varphi (N)" + }, + { + "bbox": [ + 68, + 72, + 539, + 111 + ], + "type": "text", + "content": ". Fix an arbitrary " + }, + { + "bbox": [ + 68, + 72, + 539, + 111 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 68, + 72, + 539, + 111 + ], + "type": "text", + "content": ", let " + }, + { + "bbox": [ + 68, + 72, + 539, + 111 + ], + "type": "inline_equation", + "content": "n\\triangleq |N|" + }, + { + "bbox": [ + 68, + 72, + 539, + 111 + ], + "type": "text", + "content": ", and assume that " + }, + { + "bbox": [ + 68, + 72, + 539, + 111 + ], + "type": "inline_equation", + "content": "\\varphi (\\lfloor N / 2\\rfloor)" + }, + { + "bbox": [ + 68, + 72, + 539, + 111 + ], + "type": "text", + "content": " holds. 
By the induction hypothesis, for every valid formula " + }, + { + "bbox": [ + 68, + 72, + 539, + 111 + ], + "type": "inline_equation", + "content": "F^{\\prime}" + }, + { + "bbox": [ + 68, + 72, + 539, + 111 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 68, + 72, + 539, + 111 + ], + "type": "inline_equation", + "content": "\\mathrm{Size}(F^{\\prime}) < n'^{3 / 2}" + }, + { + "bbox": [ + 68, + 72, + 539, + 111 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 68, + 72, + 539, + 111 + ], + "type": "inline_equation", + "content": "n^\\prime \\triangleq n - 1" + }, + { + "bbox": [ + 68, + 72, + 539, + 111 + ], + "type": "text", + "content": ", we have" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 156, + 121, + 539, + 137 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 121, + 539, + 137 + ], + "spans": [ + { + "bbox": [ + 156, + 121, + 539, + 137 + ], + "type": "interline_equation", + "content": "\\left| y _ {n ^ {\\prime}} ^ {0} \\right| _ {\\ell} = \\left| y _ {n ^ {\\prime}} ^ {1} \\right| _ {\\ell} = n ^ {\\prime} \\wedge F ^ {\\prime} \\left(y _ {n ^ {\\prime}} ^ {0}\\right) \\neq \\oplus^ {0} \\left(y _ {n ^ {\\prime}} ^ {0}\\right) \\wedge F ^ {\\prime} \\left(y _ {n ^ {\\prime}} ^ {1}\\right) \\neq \\oplus^ {1} \\left(y _ {n ^ {\\prime}} ^ {1}\\right), \\tag {1}", + "image_path": "729d05ea8781301143b559b94849dadea509519b5a2544b3ea844ebee55114e4.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 144, + 296, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 144, + 296, + 159 + ], + "spans": [ + { + "bbox": [ + 68, + 144, + 296, + 159 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 68, + 144, + 296, + 159 + ], + "type": "inline_equation", + "content": "y_{n^{\\prime}}^{0}\\triangleq R(1^{n^{\\prime}},F^{\\prime},0)" + }, + { + "bbox": [ + 68, + 144, + 296, + 159 + ], + "type": "text", + "content": " and " + }, + { 
+ "bbox": [ + 68, + 144, + 296, + 159 + ], + "type": "inline_equation", + "content": "y_{n^{\\prime}}^{1}\\triangleq R(1^{n^{\\prime}},F^{\\prime},1)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 159, + 539, + 212 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 159, + 539, + 212 + ], + "spans": [ + { + "bbox": [ + 67, + 159, + 539, + 212 + ], + "type": "text", + "content": "Now let " + }, + { + "bbox": [ + 67, + 159, + 539, + 212 + ], + "type": "inline_equation", + "content": "n \\geq 2" + }, + { + "bbox": [ + 67, + 159, + 539, + 212 + ], + "type": "text", + "content": ", and let " + }, + { + "bbox": [ + 67, + 159, + 539, + 212 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 67, + 159, + 539, + 212 + ], + "type": "text", + "content": " be a valid description of a formula over " + }, + { + "bbox": [ + 67, + 159, + 539, + 212 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 67, + 159, + 539, + 212 + ], + "type": "text", + "content": "-bit inputs with " + }, + { + "bbox": [ + 67, + 159, + 539, + 212 + ], + "type": "inline_equation", + "content": "\\mathrm{Size}(F) < n^{3/2}" + }, + { + "bbox": [ + 67, + 159, + 539, + 212 + ], + "type": "text", + "content": ". By the size bound on " + }, + { + "bbox": [ + 67, + 159, + 539, + 212 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 67, + 159, + 539, + 212 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 67, + 159, + 539, + 212 + ], + "type": "inline_equation", + "content": "R(1^n, F, b)" + }, + { + "bbox": [ + 67, + 159, + 539, + 212 + ], + "type": "text", + "content": " ignores Line 1. 
If " + }, + { + "bbox": [ + 67, + 159, + 539, + 212 + ], + "type": "inline_equation", + "content": "\\mathrm{Size}(F) = 0" + }, + { + "bbox": [ + 67, + 159, + 539, + 212 + ], + "type": "text", + "content": ", then similarly to the base case it is trivial to check that the conclusion of " + }, + { + "bbox": [ + 67, + 159, + 539, + 212 + ], + "type": "inline_equation", + "content": "\\varphi(N)" + }, + { + "bbox": [ + 67, + 159, + 539, + 212 + ], + "type": "text", + "content": " holds. Therefore, we assume that " + }, + { + "bbox": [ + 67, + 159, + 539, + 212 + ], + "type": "inline_equation", + "content": "\\mathrm{Size}(F) \\geq 1" + }, + { + "bbox": [ + 67, + 159, + 539, + 212 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 159, + 539, + 212 + ], + "type": "inline_equation", + "content": "R(1^n, F, b)" + }, + { + "bbox": [ + 67, + 159, + 539, + 212 + ], + "type": "text", + "content": " does not stop at Line 2." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 85, + 213, + 240, + 225 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 213, + 240, + 225 + ], + "spans": [ + { + "bbox": [ + 85, + 213, + 240, + 225 + ], + "type": "text", + "content": "Consider the following definitions:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 83, + 236, + 242, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 236, + 242, + 251 + ], + "spans": [ + { + "bbox": [ + 83, + 236, + 242, + 251 + ], + "type": "text", + "content": "1. 
" + }, + { + "bbox": [ + 83, + 236, + 242, + 251 + ], + "type": "inline_equation", + "content": "\\widetilde{F} \\triangleq \\mathrm{Normalize}(1^n, F)" + }, + { + "bbox": [ + 83, + 236, + 242, + 251 + ], + "type": "text", + "content": " (Line 3)," + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 322, + 236, + 539, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 322, + 236, + 539, + 251 + ], + "spans": [ + { + "bbox": [ + 322, + 236, + 539, + 251 + ], + "type": "text", + "content": "5. " + }, + { + "bbox": [ + 322, + 236, + 539, + 251 + ], + "type": "inline_equation", + "content": "b' \\triangleq b \\oplus c_i" + }, + { + "bbox": [ + 322, + 236, + 539, + 251 + ], + "type": "text", + "content": " (Line 5), where " + }, + { + "bbox": [ + 322, + 236, + 539, + 251 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 322, + 236, + 539, + 251 + ], + "type": "text", + "content": " restricts " + }, + { + "bbox": [ + 322, + 236, + 539, + 251 + ], + "type": "inline_equation", + "content": "x_i" + }, + { + "bbox": [ + 322, + 236, + 539, + 251 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 322, + 236, + 539, + 251 + ], + "type": "inline_equation", + "content": "c_i" + }, + { + "bbox": [ + 322, + 236, + 539, + 251 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 83, + 259, + 266, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 259, + 266, + 275 + ], + "spans": [ + { + "bbox": [ + 83, + 259, + 266, + 275 + ], + "type": "text", + "content": "2. 
" + }, + { + "bbox": [ + 83, + 259, + 266, + 275 + ], + "type": "inline_equation", + "content": "\\rho \\triangleq" + }, + { + "bbox": [ + 83, + 259, + 266, + 275 + ], + "type": "text", + "content": " Find-Restriction " + }, + { + "bbox": [ + 83, + 259, + 266, + 275 + ], + "type": "inline_equation", + "content": "(1^n,\\widetilde{F})" + }, + { + "bbox": [ + 83, + 259, + 266, + 275 + ], + "type": "text", + "content": " (Line 4)," + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 322, + 258, + 466, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 322, + 258, + 466, + 275 + ], + "spans": [ + { + "bbox": [ + 322, + 258, + 466, + 275 + ], + "type": "text", + "content": "6. " + }, + { + "bbox": [ + 322, + 258, + 466, + 275 + ], + "type": "inline_equation", + "content": "y_{n^{\\prime}}^{b^{\\prime}}\\triangleq R(1^{n^{\\prime}},F^{\\prime},b^{\\prime})" + }, + { + "bbox": [ + 322, + 258, + 466, + 275 + ], + "type": "text", + "content": " (Line 6)," + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 83, + 282, + 288, + 298 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 282, + 288, + 298 + ], + "spans": [ + { + "bbox": [ + 83, + 282, + 288, + 298 + ], + "type": "text", + "content": "3. " + }, + { + "bbox": [ + 83, + 282, + 288, + 298 + ], + "type": "inline_equation", + "content": "F^{\\prime}\\triangleq" + }, + { + "bbox": [ + 83, + 282, + 288, + 298 + ], + "type": "text", + "content": " Apply-Restriction " + }, + { + "bbox": [ + 83, + 282, + 288, + 298 + ], + "type": "inline_equation", + "content": "(1^{n},\\widetilde{F},\\rho)" + }, + { + "bbox": [ + 83, + 282, + 288, + 298 + ], + "type": "text", + "content": " (Line 5)," + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 322, + 281, + 465, + 298 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 322, + 281, + 465, + 298 + ], + "spans": [ + { + "bbox": [ + 322, + 281, + 465, + 298 + ], + "type": "text", + "content": "7. 
" + }, + { + "bbox": [ + 322, + 281, + 465, + 298 + ], + "type": "inline_equation", + "content": "y_{n}^{b}\\triangleq y_{n^{\\prime}}^{b^{\\prime}}\\cup y_{i}\\mapsto c_{i}" + }, + { + "bbox": [ + 322, + 281, + 465, + 298 + ], + "type": "text", + "content": " (Line 6)," + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 82, + 306, + 189, + 320 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 306, + 189, + 320 + ], + "spans": [ + { + "bbox": [ + 82, + 306, + 189, + 320 + ], + "type": "text", + "content": "4. " + }, + { + "bbox": [ + 82, + 306, + 189, + 320 + ], + "type": "inline_equation", + "content": "n^{\\prime}\\triangleq n - 1" + }, + { + "bbox": [ + 82, + 306, + 189, + 320 + ], + "type": "text", + "content": " (Line 5)," + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 322, + 305, + 539, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 322, + 305, + 539, + 321 + ], + "spans": [ + { + "bbox": [ + 322, + 305, + 539, + 321 + ], + "type": "text", + "content": "8. " + }, + { + "bbox": [ + 322, + 305, + 539, + 321 + ], + "type": "inline_equation", + "content": "s \\triangleq \\operatorname{Size}(F)" + }, + { + "bbox": [ + 322, + 305, + 539, + 321 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 322, + 305, + 539, + 321 + ], + "type": "inline_equation", + "content": "\\widetilde{s} \\triangleq \\operatorname{Size}(\\widetilde{F})" + }, + { + "bbox": [ + 322, + 305, + 539, + 321 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 322, + 305, + 539, + 321 + ], + "type": "inline_equation", + "content": "s' \\triangleq \\operatorname{Size}(F')" + }, + { + "bbox": [ + 322, + 305, + 539, + 321 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 68, + 331, + 539, + 359 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 331, + 539, + 359 + ], + "spans": [ + { + "bbox": [ + 68, + 331, + 539, + 359 + ], + "type": "text", + "content": "We rely on the provability in " + }, + { + "bbox": [ + 68, + 331, + 539, + 359 + ], + "type": "inline_equation", + "content": "\\mathsf{S}_2^1 (\\mathcal{L}_{\\mathsf{PV}})" + }, + { + "bbox": [ + 68, + 331, + 539, + 359 + ], + "type": "text", + "content": " of the following statements about the subroutines of " + }, + { + "bbox": [ + 68, + 331, + 539, + 359 + ], + "type": "inline_equation", + "content": "R(1^{n},F,b)" + }, + { + "bbox": [ + 68, + 331, + 539, + 359 + ], + "type": "text", + "content": " (see [CKK+24]):" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 80, + 369, + 128, + 383 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 369, + 128, + 383 + ], + "spans": [ + { + "bbox": [ + 80, + 369, + 128, + 383 + ], + "type": "text", + "content": "(i) " + }, + { + "bbox": [ + 80, + 369, + 128, + 383 + ], + "type": "inline_equation", + "content": "\\widetilde{s}\\leq s" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 314, + 369, + 465, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 369, + 465, + 384 + ], + "spans": [ + { + "bbox": [ + 314, + 369, + 465, + 384 + ], + "type": "text", + "content": "(iii) " + }, + { + "bbox": [ + 314, + 369, + 465, + 384 + ], + "type": "inline_equation", + "content": "\\forall x\\in \\{0,1\\} ^n\\widetilde{F} (x) = F(x)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 78, + 392, + 197, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 392, + 197, + 407 + ], + "spans": [ + { + "bbox": [ + 78, + 392, + 197, + 407 + ], + "type": "text", + "content": "(ii) " + }, + { + "bbox": [ + 78, + 392, + 197, + 407 + ], + "type": "inline_equation", + "content": "s' \\leq 
\\widetilde{s} \\cdot (1 - 1/n)^{3/2}" + }, + { + "bbox": [ + 78, + 392, + 197, + 407 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 315, + 391, + 531, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 391, + 531, + 407 + ], + "spans": [ + { + "bbox": [ + 315, + 391, + 531, + 407 + ], + "type": "text", + "content": "(iv) " + }, + { + "bbox": [ + 315, + 391, + 531, + 407 + ], + "type": "inline_equation", + "content": "\\forall z\\in \\{0,1\\}^{\\rho^{-1}(\\star)}F'(z) = \\widetilde{F}\\big(z\\cup x_i\\mapsto c_i\\big)." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 69, + 417, + 310, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 417, + 310, + 431 + ], + "spans": [ + { + "bbox": [ + 69, + 417, + 310, + 431 + ], + "type": "text", + "content": "By Items (i) and (ii) together with the bound " + }, + { + "bbox": [ + 69, + 417, + 310, + 431 + ], + "type": "inline_equation", + "content": "s < n^{3/2}" + }, + { + "bbox": [ + 69, + 417, + 310, + 431 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 438, + 503, + 455 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 438, + 503, + 455 + ], + "spans": [ + { + "bbox": [ + 104, + 438, + 503, + 455 + ], + "type": "interline_equation", + "content": "\\mathsf {S} _ {2} ^ {1} \\left(\\mathcal {L} _ {\\mathsf {P V}}\\right) \\vdash s ^ {\\prime} \\leq \\widetilde {s} \\cdot (1 - 1 / n) ^ {3 / 2} \\leq s \\cdot (1 - 1 / n) ^ {3 / 2} < n ^ {3 / 2} \\cdot (1 - 1 / n) ^ {3 / 2} = (n - 1) ^ {3 / 2}.", + "image_path": "cd7d69136e335ce215baac5231ff78cba27879367511c6bcd933384dcaafd79b.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 68, + 463, + 539, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 463, + 539, + 518 + ], + "spans": [ + { + "bbox": [ + 68, + 463, + 539, + 518 + ], + "type": "text", 
+ "content": "Thus " + }, + { + "bbox": [ + 68, + 463, + 539, + 518 + ], + "type": "inline_equation", + "content": "F'" + }, + { + "bbox": [ + 68, + 463, + 539, + 518 + ], + "type": "text", + "content": " is a valid formula on " + }, + { + "bbox": [ + 68, + 463, + 539, + 518 + ], + "type": "inline_equation", + "content": "n'" + }, + { + "bbox": [ + 68, + 463, + 539, + 518 + ], + "type": "text", + "content": "-bit inputs of size " + }, + { + "bbox": [ + 68, + 463, + 539, + 518 + ], + "type": "inline_equation", + "content": "< n'^{3/2}" + }, + { + "bbox": [ + 68, + 463, + 539, + 518 + ], + "type": "text", + "content": ". By the first condition in the induction hypothesis (Equation (1)) and the definition of each " + }, + { + "bbox": [ + 68, + 463, + 539, + 518 + ], + "type": "inline_equation", + "content": "y_{n}^{b}" + }, + { + "bbox": [ + 68, + 463, + 539, + 518 + ], + "type": "text", + "content": ", we have " + }, + { + "bbox": [ + 68, + 463, + 539, + 518 + ], + "type": "inline_equation", + "content": "|y_{n}^{0}|_{\\ell} = |y_{n}^{1}|_{\\ell} = n" + }, + { + "bbox": [ + 68, + 463, + 539, + 518 + ], + "type": "text", + "content": ". 
Using the definitions listed above, the last two conditions in the induction hypothesis (Equation (1)), and Items (iii) and (iv), we derive in " + }, + { + "bbox": [ + 68, + 463, + 539, + 518 + ], + "type": "inline_equation", + "content": "S_{2}^{1}(\\mathcal{L}_{\\mathsf{PV}})" + }, + { + "bbox": [ + 68, + 463, + 539, + 518 + ], + "type": "text", + "content": " the following statements for each " + }, + { + "bbox": [ + 68, + 463, + 539, + 518 + ], + "type": "inline_equation", + "content": "b \\in \\{0, 1\\}" + }, + { + "bbox": [ + 68, + 463, + 539, + 518 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 250, + 525, + 357, + 543 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 250, + 525, + 357, + 543 + ], + "spans": [ + { + "bbox": [ + 250, + 525, + 357, + 543 + ], + "type": "interline_equation", + "content": "F ^ {\\prime} \\left(y _ {n ^ {\\prime}} ^ {b ^ {\\prime}}\\right) \\neq \\oplus^ {b ^ {\\prime}} \\left(y _ {n ^ {\\prime}} ^ {b ^ {\\prime}}\\right),", + "image_path": "1eaa6e0d10a6bef8d8ba21407da2a82fb6e533d4ab45bfd8bf39b85cb2fc26aa.jpg" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 257, + 544, + 353, + 559 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 257, + 544, + 353, + 559 + ], + "spans": [ + { + "bbox": [ + 257, + 544, + 353, + 559 + ], + "type": "interline_equation", + "content": "F (y _ {n} ^ {b}) = F ^ {\\prime} (y _ {n ^ {\\prime}} ^ {b ^ {\\prime}}),", + "image_path": "da27cfab6dfa7405c6fad9190dc61ca497cb4658c7aa6dbdc279516ce52aa7b4.jpg" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 257, + 559, + 356, + 575 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 257, + 559, + 356, + 575 + ], + "spans": [ + { + "bbox": [ + 257, + 559, + 356, + 575 + ], + "type": "interline_equation", + "content": "F (y _ {n} ^ {b}) \\neq \\oplus^ {b ^ {\\prime}} (y _ {n ^ {\\prime}} ^ {b ^ {\\prime}}).", + 
"image_path": "f2a95969060ff18ce584a40691a0daa08a51df76663be0ca7c0ca01091aed99e.jpg" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 69, + 583, + 367, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 583, + 367, + 597 + ], + "spans": [ + { + "bbox": [ + 69, + 583, + 367, + 597 + ], + "type": "text", + "content": "Therefore, using basic facts about the function symbols " + }, + { + "bbox": [ + 69, + 583, + 367, + 597 + ], + "type": "inline_equation", + "content": "\\oplus^0" + }, + { + "bbox": [ + 69, + 583, + 367, + 597 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 69, + 583, + 367, + 597 + ], + "type": "inline_equation", + "content": "\\oplus^1" + }, + { + "bbox": [ + 69, + 583, + 367, + 597 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 137, + 605, + 469, + 622 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 605, + 469, + 622 + ], + "spans": [ + { + "bbox": [ + 137, + 605, + 469, + 622 + ], + "type": "interline_equation", + "content": "\\oplus^ {b ^ {\\prime}} \\left(y _ {n ^ {\\prime}} ^ {b ^ {\\prime}}\\right) = \\oplus^ {b \\oplus c _ {i}} \\left(y _ {n ^ {\\prime}} ^ {b ^ {\\prime}}\\right) = c _ {i} \\oplus \\left(\\oplus^ {b} \\left(y _ {n ^ {\\prime}} ^ {b ^ {\\prime}}\\right)\\right) = c _ {i} \\oplus \\left(\\oplus^ {b} \\left(y _ {n} ^ {b}\\right) \\oplus c _ {i}\\right) = \\oplus^ {b} \\left(y _ {n} ^ {b}\\right).", + "image_path": "dc520830c34ac7e220b6224cb4a3b131068da212eb12dd170b5700b5edf69529.jpg" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 68, + 630, + 539, + 657 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 630, + 539, + 657 + ], + "spans": [ + { + "bbox": [ + 68, + 630, + 539, + 657 + ], + "type": "text", + "content": "These statements imply that, for each " + }, + { + "bbox": [ + 68, + 630, + 539, + 657 + ], + "type": "inline_equation", + "content": "b \\in 
\\{0,1\\}" + }, + { + "bbox": [ + 68, + 630, + 539, + 657 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 68, + 630, + 539, + 657 + ], + "type": "inline_equation", + "content": "F(y_{n}^{b}) \\neq \\oplus^{b}(y_{n}^{b})" + }, + { + "bbox": [ + 68, + 630, + 539, + 657 + ], + "type": "text", + "content": ". In other words, the conclusion of " + }, + { + "bbox": [ + 68, + 630, + 539, + 657 + ], + "type": "inline_equation", + "content": "\\varphi(N)" + }, + { + "bbox": [ + 68, + 630, + 539, + 657 + ], + "type": "text", + "content": " holds. This completes the proof of the induction step." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 68, + 666, + 539, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 666, + 539, + 693 + ], + "spans": [ + { + "bbox": [ + 68, + 666, + 539, + 693 + ], + "type": "text", + "content": "As explained above, the provability of " + }, + { + "bbox": [ + 68, + 666, + 539, + 693 + ], + "type": "inline_equation", + "content": "\\operatorname{Ref}_{R,s}" + }, + { + "bbox": [ + 68, + 666, + 539, + 693 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 68, + 666, + 539, + 693 + ], + "type": "inline_equation", + "content": "\\mathsf{S}_2^1 (\\mathcal{L}_{\\mathsf{PV}})" + }, + { + "bbox": [ + 68, + 666, + 539, + 693 + ], + "type": "text", + "content": " implies its provability in " + }, + { + "bbox": [ + 68, + 666, + 539, + 693 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 68, + 666, + 539, + 693 + ], + "type": "text", + "content": ". Since " + }, + { + "bbox": [ + 68, + 666, + 539, + 693 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1 \\vdash \\operatorname{Ref}_{R,s} \\to \\mathsf{FLB}_s^\\oplus" + }, + { + "bbox": [ + 68, + 666, + 539, + 693 + ], + "type": "text", + "content": ", this completes the proof of Theorem 4.1." 
+ } + ] + } + ], + "index": 27 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 712, + 310, + 720 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 712, + 310, + 720 + ], + "spans": [ + { + "bbox": [ + 299, + 712, + 310, + 720 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 542, + 116 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 542, + 116 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 542, + 116 + ], + "type": "text", + "content": "We have seen that a non-trivial formula size lower bound can be established in " + }, + { + "bbox": [ + 67, + 72, + 542, + 116 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 72, + 542, + 116 + ], + "type": "text", + "content": ". More advanced circuit lower bounds are known to be provable assuming additional axioms extending " + }, + { + "bbox": [ + 67, + 72, + 542, + 116 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 72, + 542, + 116 + ], + "type": "text", + "content": " (e.g., [Kra95, Section 15.2] and [MP20]), but their provability in " + }, + { + "bbox": [ + 67, + 72, + 542, + 116 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 72, + 542, + 116 + ], + "type": "text", + "content": " (or equivalently, in " + }, + { + "bbox": [ + 67, + 72, + 542, + 116 + ], + "type": "inline_equation", + "content": "\\mathsf{S}_2^1(\\mathcal{L}_{\\mathsf{PV}})" + }, + { + "bbox": [ + 67, + 72, + 542, + 116 + ], + "type": "text", + "content": ") is less clear." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 122, + 542, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 122, + 542, + 150 + ], + "spans": [ + { + "bbox": [ + 67, + 122, + 542, + 150 + ], + "type": "text", + "content": "Open Problem 4.3. For each " + }, + { + "bbox": [ + 67, + 122, + 542, + 150 + ], + "type": "inline_equation", + "content": "d \\geq 1" + }, + { + "bbox": [ + 67, + 122, + 542, + 150 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 122, + 542, + 150 + ], + "type": "inline_equation", + "content": "\\ell \\geq 1" + }, + { + "bbox": [ + 67, + 122, + 542, + 150 + ], + "type": "text", + "content": ", can " + }, + { + "bbox": [ + 67, + 122, + 542, + 150 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 122, + 542, + 150 + ], + "type": "text", + "content": " prove that the parity function on " + }, + { + "bbox": [ + 67, + 122, + 542, + 150 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 67, + 122, + 542, + 150 + ], + "type": "text", + "content": " bits cannot be computed by depth- " + }, + { + "bbox": [ + 67, + 122, + 542, + 150 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 67, + 122, + 542, + 150 + ], + "type": "text", + "content": " circuits of size " + }, + { + "bbox": [ + 67, + 122, + 542, + 150 + ], + "type": "inline_equation", + "content": "n^\\ell" + }, + { + "bbox": [ + 67, + 122, + 542, + 150 + ], + "type": "text", + "content": "?" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 158, + 542, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 158, + 542, + 185 + ], + "spans": [ + { + "bbox": [ + 67, + 158, + 542, + 185 + ], + "type": "text", + "content": "Open Problem 4.4. 
For each " + }, + { + "bbox": [ + 67, + 158, + 542, + 185 + ], + "type": "inline_equation", + "content": "\\ell \\geq 1" + }, + { + "bbox": [ + 67, + 158, + 542, + 185 + ], + "type": "text", + "content": ", is there a constant " + }, + { + "bbox": [ + 67, + 158, + 542, + 185 + ], + "type": "inline_equation", + "content": "k = k(\\ell)" + }, + { + "bbox": [ + 67, + 158, + 542, + 185 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 67, + 158, + 542, + 185 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 158, + 542, + 185 + ], + "type": "text", + "content": " proves that every monotone circuit for the " + }, + { + "bbox": [ + 67, + 158, + 542, + 185 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 67, + 158, + 542, + 185 + ], + "type": "text", + "content": "-clique problem on " + }, + { + "bbox": [ + 67, + 158, + 542, + 185 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 67, + 158, + 542, + 185 + ], + "type": "text", + "content": "-vertex graphs must be of size at least " + }, + { + "bbox": [ + 67, + 158, + 542, + 185 + ], + "type": "inline_equation", + "content": "n^\\ell" + }, + { + "bbox": [ + 67, + 158, + 542, + 185 + ], + "type": "text", + "content": "?" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 205, + 321, + 222 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 205, + 321, + 222 + ], + "spans": [ + { + "bbox": [ + 67, + 205, + 321, + 222 + ], + "type": "text", + "content": "5 Unprovability of Complexity Bounds" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 232, + 541, + 299 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 232, + 541, + 299 + ], + "spans": [ + { + "bbox": [ + 67, + 232, + 541, + 299 + ], + "type": "text", + "content": "The investigation of the unprovability of complexity bounds within theories of bounded arithmetic has a long and rich history. 
Much of the early work took place in the nineties, with significant results obtained by Razborov [Raz95a, Raz95b], Krajicek [Kra97], and other researchers. Since then, and in particular over the last decade, there has been renewed interest and progress in establishing unprovability results (see, e.g., [CK07, PS21, CKKO21, LO23, ABM23] and references therein)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 300, + 541, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 300, + 541, + 407 + ], + "spans": [ + { + "bbox": [ + 67, + 300, + 541, + 407 + ], + "type": "text", + "content": "In Section 5.1, we consider the unprovability of complexity upper bounds. The unprovability of an inclusion such as " + }, + { + "bbox": [ + 67, + 300, + 541, + 407 + ], + "type": "inline_equation", + "content": "\\mathsf{NP} \\subseteq \\mathsf{SIZE}[n^k]" + }, + { + "bbox": [ + 67, + 300, + 541, + 407 + ], + "type": "text", + "content": " is equivalent to the consistency of NP " + }, + { + "bbox": [ + 67, + 300, + 541, + 407 + ], + "type": "inline_equation", + "content": "\\not\\subseteq \\mathsf{SIZE}[n^k]" + }, + { + "bbox": [ + 67, + 300, + 541, + 407 + ], + "type": "text", + "content": " with the corresponding theory. Such a consistency result establishes that, while we cannot confirm the separation is true in the standard model of natural numbers, we know it holds in a non-standard model of a theory so strong that complexity theory appears almost indistinguishable from the standard one. We stress that establishing the consistency of a lower bound is a necessary step towards showing that the lower bound is true. For this reason, the unprovability of upper bounds can be formally seen as progress towards showing unconditional complexity lower bounds." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 408, + 541, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 408, + 541, + 489 + ], + "spans": [ + { + "bbox": [ + 67, + 408, + 541, + 489 + ], + "type": "text", + "content": "In Section 5.2, we turn our attention to the unprovability of complexity lower bounds. This direction is partly driven by the desire to formally understand why proving complexity lower bounds is challenging, and to explore the possibility of a more fundamental underlying reason for this difficulty. Moreover, it might provide examples of hard sentences for logical theories and of hard propositional tautologies for proof systems. The investigation of the meta-mathematics of lower bounds has also found unexpected applications in algorithms and complexity (e.g., [CIKK16])." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 490, + 541, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 490, + 541, + 544 + ], + "spans": [ + { + "bbox": [ + 67, + 490, + 541, + 544 + ], + "type": "text", + "content": "Finally, in Section 5.3 we connect the two directions and explain how the unprovability of circuit lower bounds in " + }, + { + "bbox": [ + 67, + 490, + 541, + 544 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 490, + 541, + 544 + ], + "type": "text", + "content": " yields the unprovability of " + }, + { + "bbox": [ + 67, + 490, + 541, + 544 + ], + "type": "inline_equation", + "content": "\\mathsf{P} = \\mathsf{NP}" + }, + { + "bbox": [ + 67, + 490, + 541, + 544 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 67, + 490, + 541, + 544 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 490, + 541, + 544 + ], + "type": "text", + "content": ". 
The latter can be seen as a weakening of the " + }, + { + "bbox": [ + 67, + 490, + 541, + 544 + ], + "type": "inline_equation", + "content": "\\mathsf{P}" + }, + { + "bbox": [ + 67, + 490, + 541, + 544 + ], + "type": "text", + "content": " versus NP problem that considers the existence of feasible proofs that " + }, + { + "bbox": [ + 67, + 490, + 541, + 544 + ], + "type": "inline_equation", + "content": "\\mathsf{P} = \\mathsf{NP}" + }, + { + "bbox": [ + 67, + 490, + 541, + 544 + ], + "type": "text", + "content": ". This further motivates the investigation of the unprovability of lower bounds." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 559, + 262, + 574 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 559, + 262, + 574 + ], + "spans": [ + { + "bbox": [ + 67, + 559, + 262, + 574 + ], + "type": "text", + "content": "5.1 Unprovability of Upper Bounds" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 581, + 316, + 594 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 581, + 316, + 594 + ], + "spans": [ + { + "bbox": [ + 67, + 581, + 316, + 594 + ], + "type": "text", + "content": "5.1.1 LEARN-Uniform Circuits and Unprovability" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 601, + 541, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 601, + 541, + 684 + ], + "spans": [ + { + "bbox": [ + 67, + 601, + 541, + 684 + ], + "type": "text", + "content": "Cook and Krajicek [CK07] considered the provability of NP " + }, + { + "bbox": [ + 67, + 601, + 541, + 684 + ], + "type": "inline_equation", + "content": "\\subseteq" + }, + { + "bbox": [ + 67, + 601, + 541, + 684 + ], + "type": "text", + "content": " SIZE[poly] in bounded arithmetic and obtained a number of conditional negative results. 
[KO17], building on techniques from [CK07], showed that for no integer " + }, + { + "bbox": [ + 67, + 601, + 541, + 684 + ], + "type": "inline_equation", + "content": "k\\geq 1" + }, + { + "bbox": [ + 67, + 601, + 541, + 684 + ], + "type": "text", + "content": " the theory " + }, + { + "bbox": [ + 67, + 601, + 541, + 684 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 601, + 541, + 684 + ], + "type": "text", + "content": " proves that " + }, + { + "bbox": [ + 67, + 601, + 541, + 684 + ], + "type": "inline_equation", + "content": "\\mathsf{P}\\subseteq \\mathsf{SIZE}[n^k ]" + }, + { + "bbox": [ + 67, + 601, + 541, + 684 + ], + "type": "text", + "content": " . Note that this is an unconditional result. Thus, for a natural theory capable of formalizing advanced results from complexity theory, such as the PCP Theorem, we can unconditionally rule out the provability of " + }, + { + "bbox": [ + 67, + 601, + 541, + 684 + ], + "type": "inline_equation", + "content": "\\mathsf{P}\\subseteq \\mathsf{SIZE}[n^{k}]" + }, + { + "bbox": [ + 67, + 601, + 541, + 684 + ], + "type": "text", + "content": " . A slightly stronger model-theoretic formulation of the result of [KO17] appears in [BM20]." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 712, + 312, + 721 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 712, + 312, + 721 + ], + "spans": [ + { + "bbox": [ + 299, + 712, + 312, + 721 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 72, + 539, + 99 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 72, + 539, + 99 + ], + "spans": [ + { + "bbox": [ + 68, + 72, + 539, + 99 + ], + "type": "text", + "content": "[BKO20] obtained results for stronger theories and ruled out the provability of infinitely often inclusions. In more detail, for an " + }, + { + "bbox": [ + 68, + 72, + 539, + 99 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{PV}}" + }, + { + "bbox": [ + 68, + 72, + 539, + 99 + ], + "type": "text", + "content": "-function symbol " + }, + { + "bbox": [ + 68, + 72, + 539, + 99 + ], + "type": "inline_equation", + "content": "h" + }, + { + "bbox": [ + 68, + 72, + 539, + 99 + ], + "type": "text", + "content": ", consider the sentence" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 113, + 109, + 494, + 125 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 109, + 494, + 125 + ], + "spans": [ + { + "bbox": [ + 113, + 109, + 494, + 125 + ], + "type": "interline_equation", + "content": "\\mathsf{UB}_k^{i.o.}[h] \\triangleq \\forall 1^m \\exists 1^n \\exists C_n \\forall x \\left(n \\geq m \\wedge |C_n| \\leq n^k \\wedge \\left(|x| \\leq n \\rightarrow \\psi(n, C_n, x, h)\\right)\\right),",
+ "image_path": "1ea9e01081d47172c398dfa6f9930dc757af6069b9c122853a9410bf9e9309a1.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 135, + 541, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 135, + 541, + 202 + ], + "spans": [ + { + "bbox": [ + 67, + 135, + 541, + 202 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 67, + 135, + 541, + 202 + ], + "type": "inline_equation", + "content": "\\psi" + }, + { + "bbox": [ + 67, + 135, + 541, + 202 + ], + "type": "text", + "content": " is a quantifier-free " + }, + { + "bbox": [ + 67, + 135, + 541, + 202 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathsf{PV}}" + }, + { + "bbox": [ + 67, + 135, + 541, + 202 + ], + "type": "text", + "content": "-formula stating that " + }, + { + "bbox": [ + 67, + 135, + 541, + 202 + ], + "type": "inline_equation", + "content": "h(x) \\neq 0" + }, + { + "bbox": [ + 67, + 135, + 541, + 202 + ], + "type": "text", + "content": " if and only if the evaluation of the circuit " + }, + { + "bbox": [ + 67, + 135, + 541, + 202 + ], + "type": "inline_equation", + "content": "C_n" + }, + { + "bbox": [ + 67, + 135, + 541, + 202 + ], + "type": "text", + "content": " on " + }, + { + "bbox": [ + 67, + 135, + 541, + 202 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 67, + 135, + 541, + 202 + ], + "type": "text", + "content": " (viewed as an " + }, + { + "bbox": [ + 67, + 135, + 541, + 202 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 67, + 135, + 541, + 202 + ], + "type": "text", + "content": "-bit string) is 1. 
In other words, " + }, + { + "bbox": [ + 67, + 135, + 541, + 202 + ], + "type": "inline_equation", + "content": "\\mathsf{UB}_k^{i.o.}[h]" + }, + { + "bbox": [ + 67, + 135, + 541, + 202 + ], + "type": "text", + "content": " states that the language defined by " + }, + { + "bbox": [ + 67, + 135, + 541, + 202 + ], + "type": "inline_equation", + "content": "h" + }, + { + "bbox": [ + 67, + 135, + 541, + 202 + ], + "type": "text", + "content": " (which is in " + }, + { + "bbox": [ + 67, + 135, + 541, + 202 + ], + "type": "inline_equation", + "content": "\\mathsf{P}" + }, + { + "bbox": [ + 67, + 135, + 541, + 202 + ], + "type": "text", + "content": ") admits circuits of size at most " + }, + { + "bbox": [ + 67, + 135, + 541, + 202 + ], + "type": "inline_equation", + "content": "n^k" + }, + { + "bbox": [ + 67, + 135, + 541, + 202 + ], + "type": "text", + "content": " on infinitely many input lengths " + }, + { + "bbox": [ + 67, + 135, + 541, + 202 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 67, + 135, + 541, + 202 + ], + "type": "text", + "content": ". 
[BKO20] showed that for each " + }, + { + "bbox": [ + 67, + 135, + 541, + 202 + ], + "type": "inline_equation", + "content": "k \\geq 1" + }, + { + "bbox": [ + 67, + 135, + 541, + 202 + ], + "type": "text", + "content": ", there is an " + }, + { + "bbox": [ + 67, + 135, + 541, + 202 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathsf{PV}}" + }, + { + "bbox": [ + 67, + 135, + 541, + 202 + ], + "type": "text", + "content": "-function symbol " + }, + { + "bbox": [ + 67, + 135, + 541, + 202 + ], + "type": "inline_equation", + "content": "h" + }, + { + "bbox": [ + 67, + 135, + 541, + 202 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 67, + 135, + 541, + 202 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 135, + 541, + 202 + ], + "type": "text", + "content": " does not prove " + }, + { + "bbox": [ + 67, + 135, + 541, + 202 + ], + "type": "inline_equation", + "content": "\\mathsf{UB}_k^{i.o.}[h]" + }, + { + "bbox": [ + 67, + 135, + 541, + 202 + ], + "type": "text", + "content": ". Similarly, they established that " + }, + { + "bbox": [ + 67, + 135, + 541, + 202 + ], + "type": "inline_equation", + "content": "\\mathsf{S}_2^1 \\not\\vdash \\mathsf{NP} \\subseteq \\text{i.o.SIZE}[n^k]" + }, + { + "bbox": [ + 67, + 135, + 541, + 202 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 135, + 541, + 202 + ], + "type": "inline_equation", + "content": "\\mathsf{T}_2^1 \\not\\vdash \\mathsf{P}^{\\mathsf{NP}} \\subseteq \\text{i.o.SIZE}[n^k]" + }, + { + "bbox": [ + 67, + 135, + 541, + 202 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 203, + 541, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 203, + 541, + 270 + ], + "spans": [ + { + "bbox": [ + 67, + 203, + 541, + 270 + ], + "type": "text", + "content": "Building on these results, [CKKO21] introduced a modular framework to establish the unprovability of circuit upper bounds in bounded arithmetic using a learning-theoretic perspective. Next, we describe how their approach can be used to show a slightly weaker form of the result from [BKO20] described above. For an " + }, + { + "bbox": [ + 67, + 203, + 541, + 270 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{PV}}" + }, + { + "bbox": [ + 67, + 203, + 541, + 270 + ], + "type": "text", + "content": "-function symbol " + }, + { + "bbox": [ + 67, + 203, + 541, + 270 + ], + "type": "inline_equation", + "content": "h" + }, + { + "bbox": [ + 67, + 203, + 541, + 270 + ], + "type": "text", + "content": ", we consider a sentence " + }, + { + "bbox": [ + 67, + 203, + 541, + 270 + ], + "type": "inline_equation", + "content": "\\mathsf{UB}_{c,k}[h]" + }, + { + "bbox": [ + 67, + 203, + 541, + 270 + ], + "type": "text", + "content": " stating that " + }, + { + "bbox": [ + 67, + 203, + 541, + 270 + ], + "type": "inline_equation", + "content": "L_{h} \\in \\mathsf{SIZE}[c \\cdot n^{k}]" + }, + { + "bbox": [ + 67, + 203, + 541, + 270 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 67, + 203, + 541, + 270 + ], + "type": "inline_equation", + "content": "x \\in L_{h}" + }, + { + "bbox": [ + 67, + 203, + 541, + 270 + ], + "type": "text", + "content": " if and only if " + }, + { + "bbox": [ + 67, + 203, + 541, + 270 + ], + "type": "inline_equation", + "content": "h(x) \\neq 0" + }, + { + "bbox": [ + 67, + 203, + 541, + 270 + ], + "type": "text", + "content": ", i.e.," + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 102, + 280, + 541, + 296 + ], + "type": "interline_equation", + 
"angle": 0, + "lines": [ + { + "bbox": [ + 102, + 280, + 541, + 296 + ], + "spans": [ + { + "bbox": [ + 102, + 280, + 541, + 296 + ], + "type": "interline_equation", + "content": "\\mathsf{UB}_{c,k}[h] \\triangleq \\forall 1^n \\exists C_n \\forall x \\left(|C_n| \\leq c \\cdot n^k \\wedge \\left(|x| \\leq n \\rightarrow (\\operatorname{Eval}(C_n, x, n) = 1 \\leftrightarrow h(x) \\neq 0)\\right)\\right), \\tag {2}", + "image_path": "9584c77630f177f4418c28a0f4cd991c9f3bf4a3f003fd5be36438ed24623c50.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 306, + 541, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 306, + 541, + 360 + ], + "spans": [ + { + "bbox": [ + 67, + 306, + 541, + 360 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 67, + 306, + 541, + 360 + ], + "type": "inline_equation", + "content": "\\operatorname{Eval}(C_n, x, n)" + }, + { + "bbox": [ + 67, + 306, + 541, + 360 + ], + "type": "text", + "content": " is an " + }, + { + "bbox": [ + 67, + 306, + 541, + 360 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{PV}}" + }, + { + "bbox": [ + 67, + 306, + 541, + 360 + ], + "type": "text", + "content": "-function that evaluates the circuit " + }, + { + "bbox": [ + 67, + 306, + 541, + 360 + ], + "type": "inline_equation", + "content": "C_n" + }, + { + "bbox": [ + 67, + 306, + 541, + 360 + ], + "type": "text", + "content": " on the " + }, + { + "bbox": [ + 67, + 306, + 541, + 360 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 67, + 306, + 541, + 360 + ], + "type": "text", + "content": "-bit string described by " + }, + { + "bbox": [ + 67, + 306, + 541, + 360 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 67, + 306, + 541, + 360 + ], + "type": "text", + "content": ". 
Our goal is to show that for every " + }, + { + "bbox": [ + 67, + 306, + 541, + 360 + ], + "type": "inline_equation", + "content": "k \\geq 1" + }, + { + "bbox": [ + 67, + 306, + 541, + 360 + ], + "type": "text", + "content": " there is a function symbol " + }, + { + "bbox": [ + 67, + 306, + 541, + 360 + ], + "type": "inline_equation", + "content": "h" + }, + { + "bbox": [ + 67, + 306, + 541, + 360 + ], + "type": "text", + "content": " such that, for no choice of " + }, + { + "bbox": [ + 67, + 306, + 541, + 360 + ], + "type": "inline_equation", + "content": "c \\geq 1" + }, + { + "bbox": [ + 67, + 306, + 541, + 360 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 67, + 306, + 541, + 360 + ], + "type": "inline_equation", + "content": "\\mathrm{PV}_1" + }, + { + "bbox": [ + 67, + 306, + 541, + 360 + ], + "type": "text", + "content": " proves " + }, + { + "bbox": [ + 67, + 306, + 541, + 360 + ], + "type": "inline_equation", + "content": "\\mathrm{UB}_{c,k}[h]" + }, + { + "bbox": [ + 67, + 306, + 541, + 360 + ], + "type": "text", + "content": ". (Note that in all results discussed in this section, we consider Log formalizations, as explained in Section 4.1.)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 373, + 541, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 373, + 541, + 469 + ], + "spans": [ + { + "bbox": [ + 67, + 373, + 541, + 469 + ], + "type": "text", + "content": "Overview of the Approach. Note that " + }, + { + "bbox": [ + 67, + 373, + 541, + 469 + ], + "type": "inline_equation", + "content": "\\mathsf{UB}_{c,k}[h]" + }, + { + "bbox": [ + 67, + 373, + 541, + 469 + ], + "type": "text", + "content": " claims the existence of circuits for " + }, + { + "bbox": [ + 67, + 373, + 541, + 469 + ], + "type": "inline_equation", + "content": "L_{h}" + }, + { + "bbox": [ + 67, + 373, + 541, + 469 + ], + "type": "text", + "content": ", i.e., it states a non-uniform upper bound. 
We explore the constructive aspect of " + }, + { + "bbox": [ + 67, + 373, + 541, + 469 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 373, + 541, + 469 + ], + "type": "text", + "content": " proofs, by extracting computational information from a " + }, + { + "bbox": [ + 67, + 373, + 541, + 469 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 373, + 541, + 469 + ], + "type": "text", + "content": "-proof that such circuits exist. The argument has a logical component, where we extract from a proof of " + }, + { + "bbox": [ + 67, + 373, + 541, + 469 + ], + "type": "inline_equation", + "content": "\\mathsf{UB}_{c,k}[h]" + }, + { + "bbox": [ + 67, + 373, + 541, + 469 + ], + "type": "text", + "content": " a \"LEARN-uniform\" construction of a sequence " + }, + { + "bbox": [ + 67, + 373, + 541, + 469 + ], + "type": "inline_equation", + "content": "\\{C_n\\}_n" + }, + { + "bbox": [ + 67, + 373, + 541, + 469 + ], + "type": "text", + "content": " of circuits for " + }, + { + "bbox": [ + 67, + 373, + 541, + 469 + ], + "type": "inline_equation", + "content": "L_{h}" + }, + { + "bbox": [ + 67, + 373, + 541, + 469 + ], + "type": "text", + "content": " and a complexity-theoretic component, where we unconditionally establish that for each " + }, + { + "bbox": [ + 67, + 373, + 541, + 469 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 67, + 373, + 541, + 469 + ], + "type": "text", + "content": " LEARN-uniform circuits of this form do not exist for some " + }, + { + "bbox": [ + 67, + 373, + 541, + 469 + ], + "type": "inline_equation", + "content": "h" + }, + { + "bbox": [ + 67, + 373, + 541, + 469 + ], + "type": "text", + "content": ". 
Altogether, we get that for some " + }, + { + "bbox": [ + 67, + 373, + 541, + 469 + ], + "type": "inline_equation", + "content": "h" + }, + { + "bbox": [ + 67, + 373, + 541, + 469 + ], + "type": "text", + "content": " theory " + }, + { + "bbox": [ + 67, + 373, + 541, + 469 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 373, + 541, + 469 + ], + "type": "text", + "content": " does not prove " + }, + { + "bbox": [ + 67, + 373, + 541, + 469 + ], + "type": "inline_equation", + "content": "\\mathsf{UB}_{c,k}[h]" + }, + { + "bbox": [ + 67, + 373, + 541, + 469 + ], + "type": "text", + "content": " (no matter the choice of " + }, + { + "bbox": [ + 67, + 373, + 541, + 469 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 67, + 373, + 541, + 469 + ], + "type": "text", + "content": ")." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 68, + 482, + 541, + 564 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 482, + 541, + 564 + ], + "spans": [ + { + "bbox": [ + 68, + 482, + 541, + 564 + ], + "type": "text", + "content": "LEARN-uniform circuits. We will be interested in languages that can be efficiently learned with a bounded number of equivalence queries, in the following sense. 
For functions " + }, + { + "bbox": [ + 68, + 482, + 541, + 564 + ], + "type": "inline_equation", + "content": "s, q \\colon \\mathbb{N} \\to \\mathbb{N}" + }, + { + "bbox": [ + 68, + 482, + 541, + 564 + ], + "type": "text", + "content": ", we say that a language " + }, + { + "bbox": [ + 68, + 482, + 541, + 564 + ], + "type": "inline_equation", + "content": "L \\subseteq \\{0,1\\}^*" + }, + { + "bbox": [ + 68, + 482, + 541, + 564 + ], + "type": "text", + "content": " is in LEARN-uniform " + }, + { + "bbox": [ + 68, + 482, + 541, + 564 + ], + "type": "inline_equation", + "content": "^{\\mathsf{EQ}[q]}" + }, + { + "bbox": [ + 68, + 482, + 541, + 564 + ], + "type": "text", + "content": " SIZE[s] if there is a polynomial-time algorithm " + }, + { + "bbox": [ + 68, + 482, + 541, + 564 + ], + "type": "inline_equation", + "content": "A^{\\mathsf{EQ}(L_n)}(1^n)" + }, + { + "bbox": [ + 68, + 482, + 541, + 564 + ], + "type": "text", + "content": " that outputs a circuit of size at most " + }, + { + "bbox": [ + 68, + 482, + 541, + 564 + ], + "type": "inline_equation", + "content": "s(n)" + }, + { + "bbox": [ + 68, + 482, + 541, + 564 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 68, + 482, + 541, + 564 + ], + "type": "inline_equation", + "content": "L_n" + }, + { + "bbox": [ + 68, + 482, + 541, + 564 + ], + "type": "text", + "content": " after making at most " + }, + { + "bbox": [ + 68, + 482, + 541, + 564 + ], + "type": "inline_equation", + "content": "q(n)" + }, + { + "bbox": [ + 68, + 482, + 541, + 564 + ], + "type": "text", + "content": " equivalence queries to " + }, + { + "bbox": [ + 68, + 482, + 541, + 564 + ], + "type": "inline_equation", + "content": "L_n" + }, + { + "bbox": [ + 68, + 482, + 541, + 564 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 68, + 482, + 541, + 564 + ], + "type": "inline_equation", + "content": "L_n = L \\cap \\{0,1\\}^n" + }, + { + "bbox": [ + 68, + 482, + 541, + 564 + ], + "type": "text", + 
"content": ". The equivalence query oracle, given the description of an " + }, + { + "bbox": [ + 68, + 482, + 541, + 564 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 68, + 482, + 541, + 564 + ], + "type": "text", + "content": "-bit circuit " + }, + { + "bbox": [ + 68, + 482, + 541, + 564 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 68, + 482, + 541, + 564 + ], + "type": "text", + "content": " of size at most " + }, + { + "bbox": [ + 68, + 482, + 541, + 564 + ], + "type": "inline_equation", + "content": "s(n)" + }, + { + "bbox": [ + 68, + 482, + 541, + 564 + ], + "type": "text", + "content": ", replies \"yes\" if " + }, + { + "bbox": [ + 68, + 482, + 541, + 564 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 68, + 482, + 541, + 564 + ], + "type": "text", + "content": " computes " + }, + { + "bbox": [ + 68, + 482, + 541, + 564 + ], + "type": "inline_equation", + "content": "L_n" + }, + { + "bbox": [ + 68, + 482, + 541, + 564 + ], + "type": "text", + "content": ", or provides some counter-example " + }, + { + "bbox": [ + 68, + 482, + 541, + 564 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 68, + 482, + 541, + 564 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 68, + 482, + 541, + 564 + ], + "type": "inline_equation", + "content": "D(w) \\neq L_n(w)" + }, + { + "bbox": [ + 68, + 482, + 541, + 564 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 577, + 541, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 577, + 541, + 632 + ], + "spans": [ + { + "bbox": [ + 67, + 577, + 541, + 632 + ], + "type": "text", + "content": "Extracting LEARN-uniform circuits from " + }, + { + "bbox": [ + 67, + 577, + 541, + 632 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 577, + 541, + 632 + ], + "type": "text", + "content": " proofs. For convenience, write " + }, + { + "bbox": [ + 67, + 577, + 541, + 632 + ], + "type": "inline_equation", + "content": "\\mathsf{UB}_{c,k}[h] = \\forall 1^n \\exists C_n \\forall x \\phi(1^n, C_n, x)" + }, + { + "bbox": [ + 67, + 577, + 541, + 632 + ], + "type": "text", + "content": " in Equation (2), where " + }, + { + "bbox": [ + 67, + 577, + 541, + 632 + ], + "type": "inline_equation", + "content": "\\phi(1^n, C_n, x)" + }, + { + "bbox": [ + 67, + 577, + 541, + 632 + ], + "type": "text", + "content": " is a quantifier-free formula. 
Since " + }, + { + "bbox": [ + 67, + 577, + 541, + 632 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 577, + 541, + 632 + ], + "type": "text", + "content": " is a universal theory, under the assumption that " + }, + { + "bbox": [ + 67, + 577, + 541, + 632 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1 \\vdash \\mathsf{UB}_{c,k}[h]" + }, + { + "bbox": [ + 67, + 577, + 541, + 632 + ], + "type": "text", + "content": ", we can apply Theorem 3.2 (KPT Witnessing Theorem) to obtain the provability in " + }, + { + "bbox": [ + 67, + 577, + 541, + 632 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 577, + 541, + 632 + ], + "type": "text", + "content": " of the disjunction" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 73, + 640, + 541, + 663 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 640, + 541, + 663 + ], + "spans": [ + { + "bbox": [ + 73, + 640, + 541, + 663 + ], + "type": "interline_equation", + "content": "\\forall 1 ^ {n} \\forall x _ {1} \\dots \\forall x _ {k} (\\phi (1 ^ {n}, t _ {1} (1 ^ {n}), x _ {1}) \\vee \\phi (1 ^ {n}, t _ {2} (1 ^ {n}, x _ {1}), x _ {2}) \\vee \\dots \\vee \\phi (1 ^ {n}, t _ {k} (1 ^ {n}, x _ {1}, \\dots , x _ {k - 1}), x _ {k})) \\tag {3}", + "image_path": "84a6feded65b490e5d36ea059c84406fff3021b7a55b5662776fb58139b9c1d2.jpg" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 78, + 670, + 473, + 682 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 670, + 473, + 682 + ], + "spans": [ + { + "bbox": [ + 78, + 670, + 473, + 682 + ], + "type": "text", + "content": "10Recall that " + }, + { + "bbox": [ + 78, + 670, + 473, + 682 + ], + "type": "inline_equation", + "content": "1^n" + }, + { + "bbox": [ + 78, + 670, + 473, + 682 + ], + "type": "text", + "content": " is simply a convenient notation to refer to a variable " + }, + { 
+ "bbox": [ + 78, + 670, + 473, + 682 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 78, + 670, + 473, + 682 + ], + "type": "text", + "content": " that is set to " + }, + { + "bbox": [ + 78, + 670, + 473, + 682 + ], + "type": "inline_equation", + "content": "|N|" + }, + { + "bbox": [ + 78, + 670, + 473, + 682 + ], + "type": "text", + "content": " for some variable " + }, + { + "bbox": [ + 78, + 670, + 473, + 682 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 78, + 670, + 473, + 682 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 299, + 712, + 310, + 720 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 712, + 310, + 720 + ], + "spans": [ + { + "bbox": [ + 299, + 712, + 310, + 720 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 541, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 541, + 125 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 541, + 125 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 67, + 72, + 541, + 125 + ], + "type": "inline_equation", + "content": "t_1, \\ldots, t_k" + }, + { + "bbox": [ + 67, + 72, + 541, + 125 + ], + "type": "text", + "content": " are " + }, + { + "bbox": [ + 67, + 72, + 541, + 125 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathsf{PV}}" + }, + { + "bbox": [ + 67, + 72, + 541, + 125 + ], + "type": "text", + "content": "-terms and " + }, + { + "bbox": [ + 67, + 72, + 541, + 125 + ], + "type": "inline_equation", + "content": "k = O(1)" + }, + { + "bbox": [ + 67, + 72, + 541, + 125 + ], + "type": "text", + "content": ". 
Most importantly, due to the soundness of " + }, + { + "bbox": [ + 67, + 72, + 541, + 125 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 72, + 541, + 125 + ], + "type": "text", + "content": ", this statement is true over the standard model " + }, + { + "bbox": [ + 67, + 72, + 541, + 125 + ], + "type": "inline_equation", + "content": "\\mathbb{N}" + }, + { + "bbox": [ + 67, + 72, + 541, + 125 + ], + "type": "text", + "content": ". Additionally, the terms in " + }, + { + "bbox": [ + 67, + 72, + 541, + 125 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 72, + 541, + 125 + ], + "type": "text", + "content": " correspond to polynomial-time algorithms. Next, we will discuss how to interpret Equation (3) over " + }, + { + "bbox": [ + 67, + 72, + 541, + 125 + ], + "type": "inline_equation", + "content": "\\mathbb{N}" + }, + { + "bbox": [ + 67, + 72, + 541, + 125 + ], + "type": "text", + "content": " as an interactive protocol and how this perspective leads to a LEARN-uniform construction." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "spans": [ + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "text", + "content": "The KPT Witnessing Theorem can be intuitively understood as follows [KPS90]. 
Consider a search problem " + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "inline_equation", + "content": "Q(1^n)" + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "text", + "content": ", where given the input " + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "inline_equation", + "content": "1^n" + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "text", + "content": ", we need to find " + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "inline_equation", + "content": "\\forall x \\phi(1^n, D, x)" + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "text", + "content": ". The problem " + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "inline_equation", + "content": "Q(1^n)" + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "text", + "content": " can be solved using a " + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "text", + "content": "-round Student-Teacher protocol. In the first round, the student proposes " + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "inline_equation", + "content": "D_1 = t_1(1^n)" + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "text", + "content": " as a solution to the search problem " + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "inline_equation", + "content": "Q(1^n)" + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "text", + "content": ". 
This solution is either correct, or there exists a counterexample " + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "inline_equation", + "content": "w_1" + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "inline_equation", + "content": "\\neg \\phi(1^n, t_1(1^n), w_1)" + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "text", + "content": ". The teacher then provides this counterexample value " + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "inline_equation", + "content": "w_1" + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "text", + "content": ", and the protocol moves to the next round. In each subsequent round " + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "inline_equation", + "content": "1 \\leq i < k" + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "text", + "content": ", the student computes " + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "inline_equation", + "content": "D_i = t_i(1^n, w_1, \\ldots, w_{i-1})" + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "text", + "content": " based on the counterexamples " + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "inline_equation", + "content": "w_1, \\ldots, w_{i-1}" + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "text", + "content": " received in the previous rounds. 
This " + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "inline_equation", + "content": "D_i" + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "text", + "content": " is either a correct solution for " + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "inline_equation", + "content": "Q(1^n)" + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "text", + "content": ", in which case the problem is solved, or there is another counterexample " + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "inline_equation", + "content": "w_i" + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "text", + "content": " provided by the teacher such that " + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "inline_equation", + "content": "\\neg \\phi(1^n, t_i(1^n, w_1, \\ldots, w_{i-1}), w_i)" + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "text", + "content": ". If the latter is the case, the protocol continues to the next round " + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "inline_equation", + "content": "i + 1" + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "text", + "content": ". The theorem guarantees that for every input " + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "inline_equation", + "content": "1^n" + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "text", + "content": ", the student will successfully solve the search problem " + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "inline_equation", + "content": "Q(1^n)" + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "text", + "content": " within some round " + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "inline_equation", + "content": "1 \\leq i \\leq k" + }, + { + "bbox": [ + 67, + 127, + 541, + 275 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 276, + 541, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 276, + 541, + 426 + ], + "spans": [ + { + "bbox": [ + 67, + 276, + 541, + 426 + ], + "type": "text", + "content": "From a " + }, + { + "bbox": [ + 67, + 276, + 541, + 426 + ], + "type": "inline_equation", + "content": "\\mathrm{PV}_1" + }, + { + "bbox": [ + 67, + 276, + 541, + 426 + ], + "type": "text", + "content": " proof of a circuit upper bound for a language " + }, + { + "bbox": [ + 67, + 276, + 541, + 426 + ], + "type": "inline_equation", + "content": "L_h" + }, + { + "bbox": [ + 67, + 276, + 541, + 426 + ], + "type": "text", + "content": ", we can derive a Student-Teacher protocol for the search problem " + }, + { + "bbox": [ + 67, + 276, + 541, + 426 + ], + "type": "inline_equation", + "content": "Q(1^n)" + }, + { + "bbox": [ + 67, + 276, + 541, + 426 + ], + "type": "text", + "content": " corresponding to Equation (3). In this protocol, the student proposes a candidate circuit " + }, + { + "bbox": [ + 67, + 276, + 541, + 426 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 67, + 276, + 541, + 426 + ], + "type": "text", + "content": ", and the teacher provides a counterexample " + }, + { + "bbox": [ + 67, + 276, + 541, + 426 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 67, + 276, + 541, + 426 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 67, + 276, + 541, + 426 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 67, + 276, + 541, + 426 + ], + "type": "text", + "content": " (an input " + }, + { + "bbox": [ + 67, + 276, + 541, + 426 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 67, + 276, + 541, + 426 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 67, + 276, + 541, + 426 + ], + "type": "inline_equation", + "content": "D(w) \\neq L_h(w)" + }, + { + "bbox": [ + 67, + 
276, + 541, + 426 + ], + "type": "text", + "content": ") if one exists. (Note that " + }, + { + "bbox": [ + 67, + 276, + 541, + 426 + ], + "type": "inline_equation", + "content": "\\phi(1^n, D, x)" + }, + { + "bbox": [ + 67, + 276, + 541, + 426 + ], + "type": "text", + "content": " might not be true for other reasons, e.g., if " + }, + { + "bbox": [ + 67, + 276, + 541, + 426 + ], + "type": "inline_equation", + "content": "|D| > c \\cdot n^k" + }, + { + "bbox": [ + 67, + 276, + 541, + 426 + ], + "type": "text", + "content": ", but in such cases there is no need to invoke the equivalence query oracle and we can proceed in the Student-Teacher protocol with, say, " + }, + { + "bbox": [ + 67, + 276, + 541, + 426 + ], + "type": "inline_equation", + "content": "w = 0^n" + }, + { + "bbox": [ + 67, + 276, + 541, + 426 + ], + "type": "text", + "content": ".) The student is guaranteed to succeed after at most " + }, + { + "bbox": [ + 67, + 276, + 541, + 426 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 67, + 276, + 541, + 426 + ], + "type": "text", + "content": " queries, regardless of the counterexamples provided by the teacher. Finally, for every input " + }, + { + "bbox": [ + 67, + 276, + 541, + 426 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 67, + 276, + 541, + 426 + ], + "type": "text", + "content": ", the student computes according to a constant number of fixed " + }, + { + "bbox": [ + 67, + 276, + 541, + 426 + ], + "type": "inline_equation", + "content": "\\mathrm{PV}_1" + }, + { + "bbox": [ + 67, + 276, + 541, + 426 + ], + "type": "text", + "content": " terms " + }, + { + "bbox": [ + 67, + 276, + 541, + 426 + ], + "type": "inline_equation", + "content": "t_1, \\ldots, t_k" + }, + { + "bbox": [ + 67, + 276, + 541, + 426 + ], + "type": "text", + "content": ". 
Since a " + }, + { + "bbox": [ + 67, + 276, + 541, + 426 + ], + "type": "inline_equation", + "content": "\\mathrm{PV}_1" + }, + { + "bbox": [ + 67, + 276, + 541, + 426 + ], + "type": "text", + "content": " term is merely a composition of a finite number of " + }, + { + "bbox": [ + 67, + 276, + 541, + 426 + ], + "type": "inline_equation", + "content": "\\mathrm{PV}_1" + }, + { + "bbox": [ + 67, + 276, + 541, + 426 + ], + "type": "text", + "content": " function symbols (polynomial-time algorithms), the student's computation runs in polynomial time. Therefore, from the provability in " + }, + { + "bbox": [ + 67, + 276, + 541, + 426 + ], + "type": "inline_equation", + "content": "\\mathrm{PV}_1" + }, + { + "bbox": [ + 67, + 276, + 541, + 426 + ], + "type": "text", + "content": " of a non-uniform circuit upper bound for a language " + }, + { + "bbox": [ + 67, + 276, + 541, + 426 + ], + "type": "inline_equation", + "content": "L \\in \\mathsf{P}" + }, + { + "bbox": [ + 67, + 276, + 541, + 426 + ], + "type": "text", + "content": ", we can extract a LEARN-uniform family of circuits for " + }, + { + "bbox": [ + 67, + 276, + 541, + 426 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 67, + 276, + 541, + 426 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 438, + 541, + 560 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 438, + 541, + 560 + ], + "spans": [ + { + "bbox": [ + 67, + 438, + 541, + 560 + ], + "type": "text", + "content": "Unconditional lower bound against LEARN-uniform circuits. The argument described above reduces the unprovability of upper bounds to a complexity-theoretic question with no reference to logic. 
To complete the proof, it is enough to show that for each " + }, + { + "bbox": [ + 67, + 438, + 541, + 560 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 67, + 438, + 541, + 560 + ], + "type": "text", + "content": " there is a language " + }, + { + "bbox": [ + 67, + 438, + 541, + 560 + ], + "type": "inline_equation", + "content": "L \\in \\mathbb{P}" + }, + { + "bbox": [ + 67, + 438, + 541, + 560 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 67, + 438, + 541, + 560 + ], + "type": "inline_equation", + "content": "L \\notin \\mathrm{LEARN-uniform}^{\\mathrm{EQ}[O(1)]} \\mathrm{SIZE}[O(n^{k})]" + }, + { + "bbox": [ + 67, + 438, + 541, + 560 + ], + "type": "text", + "content": ". This unconditional lower bound against LEARN-uniform circuits is established in [CKKO21] by generalizing a lower bound from [SW14] against P-uniform circuits, which can be interpreted as LEARN-uniform constructions with " + }, + { + "bbox": [ + 67, + 438, + 541, + 560 + ], + "type": "inline_equation", + "content": "q = 0" + }, + { + "bbox": [ + 67, + 438, + 541, + 560 + ], + "type": "text", + "content": " queries. Roughly speaking, [CKKO21] shows that one can eliminate each equivalence query using a small amount of non-uniform advice, and that the base case where no queries are present (as in [SW14]) can be extended to a lower bound against a bounded amount of advice." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 574, + 541, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 574, + 541, + 615 + ], + "spans": [ + { + "bbox": [ + 67, + 574, + 541, + 615 + ], + "type": "text", + "content": "This completes the sketch of the argument. The approach is fairly general and can be adapted to other theories. The strength of the theory affects the learning model against which one needs to obtain lower bounds (e.g., by increasing the number of queries or allowing randomized learners)." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 620, + 373, + 634 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 620, + 373, + 634 + ], + "spans": [ + { + "bbox": [ + 67, + 620, + 373, + 634 + ], + "type": "text", + "content": "Open Problem 5.1. Show that " + }, + { + "bbox": [ + 67, + 620, + 373, + 634 + ], + "type": "inline_equation", + "content": "\\mathsf{S}_2^1" + }, + { + "bbox": [ + 67, + 620, + 373, + 634 + ], + "type": "text", + "content": " does not prove that " + }, + { + "bbox": [ + 67, + 620, + 373, + 634 + ], + "type": "inline_equation", + "content": "\\mathsf{P} \\subseteq \\mathsf{SIZE}[n^k]" + }, + { + "bbox": [ + 67, + 620, + 373, + 634 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 639, + 541, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 639, + 541, + 693 + ], + "spans": [ + { + "bbox": [ + 67, + 639, + 541, + 693 + ], + "type": "text", + "content": "In order to solve Open Problem 5.1, using the connection from [CKKO21] it is sufficient to show that " + }, + { + "bbox": [ + 67, + 639, + 541, + 693 + ], + "type": "inline_equation", + "content": "\\mathsf{P} \\not\\subset \\mathsf{LEARN}\\text{-uniform}^{\\mathsf{EQ}[q]} \\mathsf{SIZE}[O(n^{k})]" + }, + { + "bbox": [ + 67, + 639, + 541, + 693 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 67, + 639, + 541, + 693 + ], + "type": "inline_equation", + "content": "q = \\mathrm{poly}(n)" + }, + { + "bbox": [ + 67, + 639, + 541, + 693 + ], + "type": "text", + "content": ". In other words, this amounts to understanding the class of languages that admit circuits that can be produced with a polynomial number of equivalence queries." 
+ } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 712, + 311, + 720 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 712, + 311, + 720 + ], + "spans": [ + { + "bbox": [ + 299, + 712, + 311, + 720 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 72, + 383, + 87 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 72, + 383, + 87 + ], + "spans": [ + { + "bbox": [ + 68, + 72, + 383, + 87 + ], + "type": "text", + "content": "Open Problem 5.2. Show that " + }, + { + "bbox": [ + 68, + 72, + 383, + 87 + ], + "type": "inline_equation", + "content": "\\mathsf{T}_2^1" + }, + { + "bbox": [ + 68, + 72, + 383, + 87 + ], + "type": "text", + "content": " does not prove that " + }, + { + "bbox": [ + 68, + 72, + 383, + 87 + ], + "type": "inline_equation", + "content": "\\mathsf{NP} \\subseteq \\mathsf{SIZE}[n^k]" + }, + { + "bbox": [ + 68, + 72, + 383, + 87 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 68, + 102, + 313, + 117 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 102, + 313, + 117 + ], + "spans": [ + { + "bbox": [ + 68, + 102, + 313, + 117 + ], + "type": "text", + "content": "5.1.2 " + }, + { + "bbox": [ + 68, + 102, + 313, + 117 + ], + "type": "inline_equation", + "content": "\\mathsf{P} = \\mathsf{NP}" + }, + { + "bbox": [ + 68, + 102, + 313, + 117 + ], + "type": "text", + "content": " and Propositional Proof Complexity" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 123, + 542, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 123, + 542, + 178 + ], + "spans": [ + { + "bbox": [ + 68, + 123, + 542, + 178 + ], + "type": "text", + "content": "Suppose that " + }, + { + "bbox": [ + 68, + 123, + 542, + 178 + ], + "type": "inline_equation", + "content": "\\mathsf{P}" + }, + { + "bbox": [ + 68, + 123, + 542, + 178 + ], + "type": "text", + "content": " is actually equal to NP. In this scenario, there exists a polynomial-time algorithm " + }, + { + "bbox": [ + 68, + 123, + 542, + 178 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 68, + 123, + 542, + 178 + ], + "type": "text", + "content": " (i.e., a " + }, + { + "bbox": [ + 68, + 123, + 542, + 178 + ], + "type": "inline_equation", + "content": "\\mathrm{PV}_1" + }, + { + "bbox": [ + 68, + 123, + 542, + 178 + ], + "type": "text", + "content": " function symbol) that can find a satisfying assignment for any given satisfiable formula. 
In other words, if " + }, + { + "bbox": [ + 68, + 123, + 542, + 178 + ], + "type": "inline_equation", + "content": "\\operatorname{Formula}(F, 1^n)" + }, + { + "bbox": [ + 68, + 123, + 542, + 178 + ], + "type": "text", + "content": " denotes an " + }, + { + "bbox": [ + 68, + 123, + 542, + 178 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathsf{PV}}" + }, + { + "bbox": [ + 68, + 123, + 542, + 178 + ], + "type": "text", + "content": "-formula that checks if " + }, + { + "bbox": [ + 68, + 123, + 542, + 178 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 68, + 123, + 542, + 178 + ], + "type": "text", + "content": " is a valid description of a formula over " + }, + { + "bbox": [ + 68, + 123, + 542, + 178 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 68, + 123, + 542, + 178 + ], + "type": "text", + "content": " input bits, and " + }, + { + "bbox": [ + 68, + 123, + 542, + 178 + ], + "type": "inline_equation", + "content": "\\operatorname{Sat}(F, x)" + }, + { + "bbox": [ + 68, + 123, + 542, + 178 + ], + "type": "text", + "content": " is an " + }, + { + "bbox": [ + 68, + 123, + 542, + 178 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathsf{PV}}" + }, + { + "bbox": [ + 68, + 123, + 542, + 178 + ], + "type": "text", + "content": "-formula that checks if " + }, + { + "bbox": [ + 68, + 123, + 542, + 178 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 68, + 123, + 542, + 178 + ], + "type": "text", + "content": " satisfies the formula encoded by " + }, + { + "bbox": [ + 68, + 123, + 542, + 178 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 68, + 123, + 542, + 178 + ], + "type": "text", + "content": ", the sentence" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 137, + 186, + 541, + 202 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 186, + 541, + 202 + ], + "spans": [ + { + "bbox": [ + 137, 
+ 186, + 541, + 202 + ], + "type": "interline_equation", + "content": "\\varphi_ {\\mathrm {P} = \\mathrm {N P}} [ g ] \\triangleq \\forall 1 ^ {n} \\forall F \\forall x \\left(\\left(\\operatorname {F o r m u l a} (F, 1 ^ {n}) \\wedge \\operatorname {S a t} (F, x)\\right)\\rightarrow \\operatorname {S a t} (F, g (F))\\right) \\tag {4}", + "image_path": "19428c7bc08f88c4b13a686b11ea2e157a026aa70c539069b4339f8f9f77ff5c.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 68, + 210, + 210, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 210, + 210, + 222 + ], + "spans": [ + { + "bbox": [ + 68, + 210, + 210, + 222 + ], + "type": "text", + "content": "is true in the standard model " + }, + { + "bbox": [ + 68, + 210, + 210, + 222 + ], + "type": "inline_equation", + "content": "\\mathbb{N}" + }, + { + "bbox": [ + 68, + 210, + 210, + 222 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 232, + 542, + 260 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 232, + 542, + 260 + ], + "spans": [ + { + "bbox": [ + 68, + 232, + 542, + 260 + ], + "type": "text", + "content": "Open Problem 5.3. Show that for no polynomial-time function symbol " + }, + { + "bbox": [ + 68, + 232, + 542, + 260 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 68, + 232, + 542, + 260 + ], + "type": "text", + "content": " theory " + }, + { + "bbox": [ + 68, + 232, + 542, + 260 + ], + "type": "inline_equation", + "content": "\\mathrm{PV}_1" + }, + { + "bbox": [ + 68, + 232, + 542, + 260 + ], + "type": "text", + "content": " proves the sentence " + }, + { + "bbox": [ + 68, + 232, + 542, + 260 + ], + "type": "inline_equation", + "content": "\\varphi_{\\mathrm{P} = \\mathrm{NP}}[g]" + }, + { + "bbox": [ + 68, + 232, + 542, + 260 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 266, + 541, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 266, + 541, + 350 + ], + "spans": [ + { + "bbox": [ + 68, + 266, + 541, + 350 + ], + "type": "text", + "content": "Equivalently, Open Problem 5.3 states that " + }, + { + "bbox": [ + 68, + 266, + 541, + 350 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 68, + 266, + 541, + 350 + ], + "type": "text", + "content": " (and by standard conservation results " + }, + { + "bbox": [ + 68, + 266, + 541, + 350 + ], + "type": "inline_equation", + "content": "S_2^1" + }, + { + "bbox": [ + 68, + 266, + 541, + 350 + ], + "type": "text", + "content": ") is consistent with " + }, + { + "bbox": [ + 68, + 266, + 541, + 350 + ], + "type": "inline_equation", + "content": "\\mathsf{P} \\neq \\mathsf{NP}" + }, + { + "bbox": [ + 68, + 266, + 541, + 350 + ], + "type": "text", + "content": ". This means that either " + }, + { + "bbox": [ + 68, + 266, + 541, + 350 + ], + "type": "inline_equation", + "content": "\\mathsf{P} \\neq \\mathsf{NP}" + }, + { + "bbox": [ + 68, + 266, + 541, + 350 + ], + "type": "text", + "content": ", as is commonly assumed, making the conjecture trivially true, or " + }, + { + "bbox": [ + 68, + 266, + 541, + 350 + ], + "type": "inline_equation", + "content": "\\mathsf{P} = \\mathsf{NP}" + }, + { + "bbox": [ + 68, + 266, + 541, + 350 + ], + "type": "text", + "content": ", but this cannot be proven using only polynomial-time concepts and reasoning. Therefore, Open Problem 5.3 represents a formal weakening of the conjecture that " + }, + { + "bbox": [ + 68, + 266, + 541, + 350 + ], + "type": "inline_equation", + "content": "\\mathsf{P} \\neq \\mathsf{NP}" + }, + { + "bbox": [ + 68, + 266, + 541, + 350 + ], + "type": "text", + "content": ". 
The statement is known to follow from the purely combinatorial conjecture that the extended Frege propositional proof system " + }, + { + "bbox": [ + 68, + 266, + 541, + 350 + ], + "type": "inline_equation", + "content": "e\\mathcal{F}" + }, + { + "bbox": [ + 68, + 266, + 541, + 350 + ], + "type": "text", + "content": " (see Section 3.2) is not polynomially bounded, which is a major open problem in proof complexity." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 356, + 541, + 398 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 356, + 541, + 398 + ], + "spans": [ + { + "bbox": [ + 67, + 356, + 541, + 398 + ], + "type": "text", + "content": "Theorem 5.4 ([Coo75]). Suppose that there is a sequence " + }, + { + "bbox": [ + 67, + 356, + 541, + 398 + ], + "type": "inline_equation", + "content": "\\{F_n\\}_{n\\geq 1}" + }, + { + "bbox": [ + 67, + 356, + 541, + 398 + ], + "type": "text", + "content": " of propositional tautologies of size polynomial in " + }, + { + "bbox": [ + 67, + 356, + 541, + 398 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 67, + 356, + 541, + 398 + ], + "type": "text", + "content": " that require eF proofs of size " + }, + { + "bbox": [ + 67, + 356, + 541, + 398 + ], + "type": "inline_equation", + "content": "n^{\\omega (1)}" + }, + { + "bbox": [ + 67, + 356, + 541, + 398 + ], + "type": "text", + "content": ". 
Then there is no function symbol " + }, + { + "bbox": [ + 67, + 356, + 541, + 398 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 67, + 356, + 541, + 398 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 67, + 356, + 541, + 398 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 356, + 541, + 398 + ], + "type": "text", + "content": " proves " + }, + { + "bbox": [ + 67, + 356, + 541, + 398 + ], + "type": "inline_equation", + "content": "\\varphi_{\\mathsf{P} = \\mathsf{NP}}[g]" + }, + { + "bbox": [ + 67, + 356, + 541, + 398 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 68, + 404, + 541, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 404, + 541, + 445 + ], + "spans": [ + { + "bbox": [ + 68, + 404, + 541, + 445 + ], + "type": "text", + "content": "Proof. Here we only provide a sketch of the proof. More details and extensions of the result can be found in the textbooks [Kra95, Kra19]. We establish that if " + }, + { + "bbox": [ + 68, + 404, + 541, + 445 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1 \\vdash \\varphi_{\\mathsf{P} = \\mathsf{NP}}[g]" + }, + { + "bbox": [ + 68, + 404, + 541, + 445 + ], + "type": "text", + "content": " for some " + }, + { + "bbox": [ + 68, + 404, + 541, + 445 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 68, + 404, + 541, + 445 + ], + "type": "text", + "content": ", then every tautology has a polynomial size " + }, + { + "bbox": [ + 68, + 404, + 541, + 445 + ], + "type": "inline_equation", + "content": "e\\mathcal{F}" + }, + { + "bbox": [ + 68, + 404, + 541, + 445 + ], + "type": "text", + "content": " proof." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 445, + 541, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 445, + 541, + 472 + ], + "spans": [ + { + "bbox": [ + 69, + 445, + 541, + 472 + ], + "type": "text", + "content": "Recall the definitions and results from Section 3.2. For a propositional proof system " + }, + { + "bbox": [ + 69, + 445, + 541, + 472 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 69, + 445, + 541, + 472 + ], + "type": "text", + "content": " (described by an " + }, + { + "bbox": [ + 69, + 445, + 541, + 472 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{PV}}" + }, + { + "bbox": [ + 69, + 445, + 541, + 472 + ], + "type": "text", + "content": " function symbol), we consider an " + }, + { + "bbox": [ + 69, + 445, + 541, + 472 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{PV}}" + }, + { + "bbox": [ + 69, + 445, + 541, + 472 + ], + "type": "text", + "content": "-sentence stating the soundness of " + }, + { + "bbox": [ + 69, + 445, + 541, + 472 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 69, + 445, + 541, + 472 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 108, + 481, + 501, + 497 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 481, + 501, + 497 + ], + "spans": [ + { + "bbox": [ + 108, + 481, + 501, + 497 + ], + "type": "interline_equation", + "content": "\\mathsf {S o u n d} _ {P} \\triangleq \\forall 1 ^ {n} \\forall F \\forall \\pi (\\mathsf {F o r m u l a} (F, 1 ^ {n}) \\land \\mathsf {P r o o f} _ {P} (F, \\pi)) \\to \\forall x (| x | \\leq n \\to \\mathsf {S a t} (F, x)),", + "image_path": "af85018eec449fc68ac5144ad9df85fd8e0cadbddda6b88cbc23494fde490a97.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 68, + 505, + 324, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 505, 
+ 324, + 518 + ], + "spans": [ + { + "bbox": [ + 68, + 505, + 324, + 518 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 68, + 505, + 324, + 518 + ], + "type": "inline_equation", + "content": "\\operatorname{Proof}_P(F, \\pi)" + }, + { + "bbox": [ + 68, + 505, + 324, + 518 + ], + "type": "text", + "content": " states that " + }, + { + "bbox": [ + 68, + 505, + 324, + 518 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 68, + 505, + 324, + 518 + ], + "type": "text", + "content": " is a valid " + }, + { + "bbox": [ + 68, + 505, + 324, + 518 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 68, + 505, + 324, + 518 + ], + "type": "text", + "content": "-proof of " + }, + { + "bbox": [ + 68, + 505, + 324, + 518 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 68, + 505, + 324, + 518 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 68, + 519, + 542, + 572 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 519, + 542, + 572 + ], + "spans": [ + { + "bbox": [ + 68, + 519, + 542, + 572 + ], + "type": "text", + "content": "Note that if " + }, + { + "bbox": [ + 68, + 519, + 542, + 572 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 68, + 519, + 542, + 572 + ], + "type": "text", + "content": " is not a tautology then " + }, + { + "bbox": [ + 68, + 519, + 542, + 572 + ], + "type": "inline_equation", + "content": "g(\\neg F)" + }, + { + "bbox": [ + 68, + 519, + 542, + 572 + ], + "type": "text", + "content": " outputs a satisfying assignment of " + }, + { + "bbox": [ + 68, + 519, + 542, + 572 + ], + "type": "inline_equation", + "content": "\\neg F" + }, + { + "bbox": [ + 68, + 519, + 542, + 572 + ], + "type": "text", + "content": ", while if " + }, + { + "bbox": [ + 68, + 519, + 542, + 572 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 68, + 519, + 542, + 572 + ], + 
"type": "text", + "content": " is a tautology then " + }, + { + "bbox": [ + 68, + 519, + 542, + 572 + ], + "type": "inline_equation", + "content": "\\neg F" + }, + { + "bbox": [ + 68, + 519, + 542, + 572 + ], + "type": "text", + "content": " admits no satisfying assignment. We consider a proof system " + }, + { + "bbox": [ + 68, + 519, + 542, + 572 + ], + "type": "inline_equation", + "content": "P_g" + }, + { + "bbox": [ + 68, + 519, + 542, + 572 + ], + "type": "text", + "content": " defined as follows: Given a valid description of an " + }, + { + "bbox": [ + 68, + 519, + 542, + 572 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 68, + 519, + 542, + 572 + ], + "type": "text", + "content": "-bit propositional formula " + }, + { + "bbox": [ + 68, + 519, + 542, + 572 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 68, + 519, + 542, + 572 + ], + "type": "text", + "content": " and a candidate proof " + }, + { + "bbox": [ + 68, + 519, + 542, + 572 + ], + "type": "inline_equation", + "content": "\\widetilde{\\pi}" + }, + { + "bbox": [ + 68, + 519, + 542, + 572 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 68, + 519, + 542, + 572 + ], + "type": "inline_equation", + "content": "P_g" + }, + { + "bbox": [ + 68, + 519, + 542, + 572 + ], + "type": "text", + "content": " accepts " + }, + { + "bbox": [ + 68, + 519, + 542, + 572 + ], + "type": "inline_equation", + "content": "\\widetilde{\\pi}" + }, + { + "bbox": [ + 68, + 519, + 542, + 572 + ], + "type": "text", + "content": " as a proof of " + }, + { + "bbox": [ + 68, + 519, + 542, + 572 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 68, + 519, + 542, + 572 + ], + "type": "text", + "content": " if and only if" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 227, + 573, + 382, + 588 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 573, + 382, + 588 + ], + "spans": [ + { + "bbox": [ + 227, 
+ 573, + 382, + 588 + ], + "type": "interline_equation", + "content": "g (\\neg F) = \\widetilde {\\pi} \\quad \\text {a n d} \\quad \\neg \\operatorname {S a t} (\\neg F, \\widetilde {\\pi}) ,", + "image_path": "784fab6a74a130c6131dd8f85eae445ec182d7ab4a62fcc23bd371da93a48fba.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 68, + 592, + 542, + 618 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 592, + 542, + 618 + ], + "spans": [ + { + "bbox": [ + 68, + 592, + 542, + 618 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 68, + 592, + 542, + 618 + ], + "type": "inline_equation", + "content": "\\neg F" + }, + { + "bbox": [ + 68, + 592, + 542, + 618 + ], + "type": "text", + "content": " represents the negation of " + }, + { + "bbox": [ + 68, + 592, + 542, + 618 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 68, + 592, + 542, + 618 + ], + "type": "text", + "content": ". Observe that for any tautology " + }, + { + "bbox": [ + 68, + 592, + 542, + 618 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 68, + 592, + 542, + 618 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 68, + 592, + 542, + 618 + ], + "type": "inline_equation", + "content": "\\pi_F \\triangleq g(\\neg F)" + }, + { + "bbox": [ + 68, + 592, + 542, + 618 + ], + "type": "text", + "content": " is a valid " + }, + { + "bbox": [ + 68, + 592, + 542, + 618 + ], + "type": "inline_equation", + "content": "P_g" + }, + { + "bbox": [ + 68, + 592, + 542, + 618 + ], + "type": "text", + "content": "-proof of " + }, + { + "bbox": [ + 68, + 592, + 542, + 618 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 68, + 592, + 542, + 618 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 68, + 620, + 541, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 620, + 541, + 647 + ], + "spans": [ + { + "bbox": [ + 68, + 620, + 541, + 647 + ], + "type": "text", + "content": "Note that " + }, + { + "bbox": [ + 68, + 620, + 541, + 647 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1\\vdash \\mathsf{Sound}_{P_g}" + }, + { + "bbox": [ + 68, + 620, + 541, + 647 + ], + "type": "text", + "content": ", which follows from the provability of Equation (4) and the definition of " + }, + { + "bbox": [ + 68, + 620, + 541, + 647 + ], + "type": "inline_equation", + "content": "P_{g}" + }, + { + "bbox": [ + 68, + 620, + 541, + 647 + ], + "type": "text", + "content": " using " + }, + { + "bbox": [ + 68, + 620, + 541, + 647 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 68, + 620, + 541, + 647 + ], + "type": "text", + "content": ". Now consider the quantifier-free " + }, + { + "bbox": [ + 68, + 620, + 541, + 647 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathsf{PV}}" + }, + { + "bbox": [ + 68, + 620, + 541, + 647 + ], + "type": "text", + "content": "-formula" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 160, + 655, + 449, + 671 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 655, + 449, + 671 + ], + "spans": [ + { + "bbox": [ + 160, + 655, + 449, + 671 + ], + "type": "interline_equation", + "content": "\\psi \\triangleq \\neg \\operatorname {F o r m u l a} (F, 1 ^ {n}) \\vee \\neg \\operatorname {P r o o f} _ {P _ {g}} (F, \\pi) \\vee | x | > n \\vee \\operatorname {S a t} (F, x).", + "image_path": "f2b95e442cd23b5fd71f2717372c082e92136b92b77792ee291f7ee40cf352c2.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 68, + 679, + 427, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 679, + 427, + 694 + ], + "spans": [ + { + "bbox": [ + 68, + 679, + 
427, + 694 + ], + "type": "text", + "content": "The provability of " + }, + { + "bbox": [ + 68, + 679, + 427, + 694 + ], + "type": "inline_equation", + "content": "\\forall 1^n\\forall F\\forall \\pi \\psi" + }, + { + "bbox": [ + 68, + 679, + 427, + 694 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 68, + 679, + 427, + 694 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 68, + 679, + 427, + 694 + ], + "type": "text", + "content": " follows from the provability of " + }, + { + "bbox": [ + 68, + 679, + 427, + 694 + ], + "type": "inline_equation", + "content": "\\mathsf{Sound}_{P_g}" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 712, + 312, + 721 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 712, + 312, + 721 + ], + "spans": [ + { + "bbox": [ + 299, + 712, + 312, + 721 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 542, + 155 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 542, + 155 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 542, + 155 + ], + "type": "text", + "content": "Using Cook's translation (Section 3.2), the sequence of propositional formulas " + }, + { + "bbox": [ + 67, + 72, + 542, + 155 + ], + "type": "inline_equation", + "content": "||\\psi||_m" + }, + { + "bbox": [ + 67, + 72, + 542, + 155 + ], + "type": "text", + "content": " admits " + }, + { + "bbox": [ + 67, + 72, + 542, + 155 + ], + "type": "inline_equation", + "content": "e\\mathcal{F}" + }, + { + "bbox": [ + 67, + 72, + 542, + 155 + ], + "type": "text", + "content": "-proofs of polynomial size. 
Moreover, given an actual " + }, + { + "bbox": [ + 67, + 72, + 542, + 155 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 67, + 72, + 542, + 155 + ], + "type": "text", + "content": "-bit propositional formula " + }, + { + "bbox": [ + 67, + 72, + 542, + 155 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 67, + 72, + 542, + 155 + ], + "type": "text", + "content": " of polynomial size and the corresponding " + }, + { + "bbox": [ + 67, + 72, + 542, + 155 + ], + "type": "inline_equation", + "content": "P_g" + }, + { + "bbox": [ + 67, + 72, + 542, + 155 + ], + "type": "text", + "content": "-proof " + }, + { + "bbox": [ + 67, + 72, + 542, + 155 + ], + "type": "inline_equation", + "content": "\\pi_F" + }, + { + "bbox": [ + 67, + 72, + 542, + 155 + ], + "type": "text", + "content": " (represented by fixed strings " + }, + { + "bbox": [ + 67, + 72, + 542, + 155 + ], + "type": "inline_equation", + "content": "\\langle F\\rangle" + }, + { + "bbox": [ + 67, + 72, + 542, + 155 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 72, + 542, + 155 + ], + "type": "inline_equation", + "content": "\\langle \\pi_F\\rangle" + }, + { + "bbox": [ + 67, + 72, + 542, + 155 + ], + "type": "text", + "content": "), one can show that there are polynomial size " + }, + { + "bbox": [ + 67, + 72, + 542, + 155 + ], + "type": "inline_equation", + "content": "e\\mathcal{F}" + }, + { + "bbox": [ + 67, + 72, + 542, + 155 + ], + "type": "text", + "content": " proofs of both " + }, + { + "bbox": [ + 67, + 72, + 542, + 155 + ], + "type": "inline_equation", + "content": "||\\mathrm{Formula}(\\langle F\\rangle,1^n)||_{\\mathrm{poly}(n)}" + }, + { + "bbox": [ + 67, + 72, + 542, + 155 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 72, + 542, + 155 + ], + "type": "inline_equation", + "content": "||\\mathrm{Proof}_{P_g}(\\langle F\\rangle,\\langle \\pi_F\\rangle)||_{\\mathrm{poly}(n)}" + }, + { + "bbox": [ + 67, + 
72, + 542, + 155 + ], + "type": "text", + "content": ". (Intuitively, this follows by an evaluation of the expressions on these fixed inputs.) Since " + }, + { + "bbox": [ + 67, + 72, + 542, + 155 + ], + "type": "inline_equation", + "content": "e\\mathcal{F}" + }, + { + "bbox": [ + 67, + 72, + 542, + 155 + ], + "type": "text", + "content": " is closed under substitution, we can derive in " + }, + { + "bbox": [ + 67, + 72, + 542, + 155 + ], + "type": "inline_equation", + "content": "e\\mathcal{F}" + }, + { + "bbox": [ + 67, + 72, + 542, + 155 + ], + "type": "text", + "content": " with a polynomial size proof the formula " + }, + { + "bbox": [ + 67, + 72, + 542, + 155 + ], + "type": "inline_equation", + "content": "||\\mathrm{Sat}(\\langle F\\rangle,x)||_{\\mathrm{poly}(n)}" + }, + { + "bbox": [ + 67, + 72, + 542, + 155 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 156, + 542, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 156, + 542, + 222 + ], + "spans": [ + { + "bbox": [ + 67, + 156, + 542, + 222 + ], + "type": "text", + "content": "Finally, for every propositional formula " + }, + { + "bbox": [ + 67, + 156, + 542, + 222 + ], + "type": "inline_equation", + "content": "F(x)" + }, + { + "bbox": [ + 67, + 156, + 542, + 222 + ], + "type": "text", + "content": " on " + }, + { + "bbox": [ + 67, + 156, + 542, + 222 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 67, + 156, + 542, + 222 + ], + "type": "text", + "content": "-bit inputs, it is possible to efficiently prove in " + }, + { + "bbox": [ + 67, + 156, + 542, + 222 + ], + "type": "inline_equation", + "content": "e\\mathcal{F}" + }, + { + "bbox": [ + 67, + 156, + 542, + 222 + ], + "type": "text", + "content": " the propositional formula " + }, + { + "bbox": [ + 67, + 156, + 542, + 222 + ], + "type": "inline_equation", + "content": "||\\mathrm{Sat}(\\langle F\\rangle ,x)||_{\\mathrm{poly}(n)}\\to F(x)" + }, 
+ { + "bbox": [ + 67, + 156, + 542, + 222 + ], + "type": "text", + "content": ". (This can be established by a slightly more general structural induction on formulas " + }, + { + "bbox": [ + 67, + 156, + 542, + 222 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 67, + 156, + 542, + 222 + ], + "type": "text", + "content": " using information about " + }, + { + "bbox": [ + 67, + 156, + 542, + 222 + ], + "type": "inline_equation", + "content": "||\\cdot||" + }, + { + "bbox": [ + 67, + 156, + 542, + 222 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 156, + 542, + 222 + ], + "type": "inline_equation", + "content": "\\langle \\cdot \\rangle" + }, + { + "bbox": [ + 67, + 156, + 542, + 222 + ], + "type": "text", + "content": ".) Overall, since " + }, + { + "bbox": [ + 67, + 156, + 542, + 222 + ], + "type": "inline_equation", + "content": "e\\mathcal{F}" + }, + { + "bbox": [ + 67, + 156, + 542, + 222 + ], + "type": "text", + "content": " is closed under implication, it follows from these derivations that there is a polynomial size " + }, + { + "bbox": [ + 67, + 156, + 542, + 222 + ], + "type": "inline_equation", + "content": "e\\mathcal{F}" + }, + { + "bbox": [ + 67, + 156, + 542, + 222 + ], + "type": "text", + "content": " proof of " + }, + { + "bbox": [ + 67, + 156, + 542, + 222 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 67, + 156, + 542, + 222 + ], + "type": "text", + "content": ". This completes the sketch of the proof of the result." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 230, + 542, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 230, + 542, + 270 + ], + "spans": [ + { + "bbox": [ + 67, + 230, + 542, + 270 + ], + "type": "text", + "content": "Open Problem 5.3 would also follow from a proof that Buss's hierarchy of theories " + }, + { + "bbox": [ + 67, + 230, + 542, + 270 + ], + "type": "inline_equation", + "content": "\\mathsf{T}_2^i" + }, + { + "bbox": [ + 67, + 230, + 542, + 270 + ], + "type": "text", + "content": " does not collapse [KPT91], another central problem in bounded arithmetic. More precisely, it is enough to obtain the following separation." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 279, + 378, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 279, + 378, + 295 + ], + "spans": [ + { + "bbox": [ + 67, + 279, + 378, + 295 + ], + "type": "text", + "content": "Open Problem 5.5. Show that for some " + }, + { + "bbox": [ + 67, + 279, + 378, + 295 + ], + "type": "inline_equation", + "content": "i > j \\geq 1" + }, + { + "bbox": [ + 67, + 279, + 378, + 295 + ], + "type": "text", + "content": " we have " + }, + { + "bbox": [ + 67, + 279, + 378, + 295 + ], + "type": "inline_equation", + "content": "\\mathsf{T}_2^i \\neq \\mathsf{T}_2^j" + }, + { + "bbox": [ + 67, + 279, + 378, + 295 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 302, + 541, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 302, + 541, + 342 + ], + "spans": [ + { + "bbox": [ + 67, + 302, + 541, + 342 + ], + "type": "text", + "content": "It is known that " + }, + { + "bbox": [ + 67, + 302, + 541, + 342 + ], + "type": "inline_equation", + "content": "\\mathrm{PV}_1" + }, + { + "bbox": [ + 67, + 302, + 541, + 342 + ], + "type": "text", + "content": " proves that " + }, + { + "bbox": [ + 67, + 302, + 541, + 342 + ], + "type": "inline_equation", + "content": "\\mathsf{P} = \\mathsf{NP}" + }, + { + "bbox": [ + 67, + 302, + 541, + 342 + ], + "type": "text", + "content": " if and only if it proves that " + }, + { + "bbox": [ + 67, + 302, + 541, + 342 + ], + "type": "inline_equation", + "content": "\\mathsf{NP} = \\mathsf{coNP}" + }, + { + "bbox": [ + 67, + 302, + 541, + 342 + ], + "type": "text", + "content": ". Consequently, a super-polynomial lower bound on the length of " + }, + { + "bbox": [ + 67, + 302, + 541, + 342 + ], + "type": "inline_equation", + "content": "e\\mathcal{F}" + }, + { + "bbox": [ + 67, + 302, + 541, + 342 + ], + "type": "text", + "content": " proofs also yields the consistency of " + }, + { + "bbox": [ + 67, + 302, + 541, + 342 + ], + "type": "inline_equation", + "content": "\\mathsf{NP} \\neq \\mathsf{coNP}" + }, + { + "bbox": [ + 67, + 302, + 541, + 342 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 67, + 302, + 541, + 342 + ], + "type": "inline_equation", + "content": "\\mathrm{PV}_1" + }, + { + "bbox": [ + 67, + 302, + 541, + 342 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 343, + 541, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 343, + 541, + 426 + ], + "spans": [ + { + "bbox": [ + 67, + 343, + 541, + 426 + ], + "type": "text", + "content": "Finally, we remark that the use of witnessing theorems alone (as done in Section 5.1.1) is probably not sufficient to settle Open Problem 5.3. This is because these theorems typically also hold when we extend the theory with all true universal statements. Thus an unprovability argument that only employs the witnessing theorem would establish unconditionally that each sentence " + }, + { + "bbox": [ + 67, + 343, + 541, + 426 + ], + "type": "inline_equation", + "content": "\\varphi_{\\mathsf{P} = \\mathsf{NP}}[g]" + }, + { + "bbox": [ + 67, + 343, + 541, + 426 + ], + "type": "text", + "content": " is false and therefore " + }, + { + "bbox": [ + 67, + 343, + 541, + 426 + ], + "type": "inline_equation", + "content": "\\mathsf{P}\\neq \\mathsf{NP}" + }, + { + "bbox": [ + 67, + 343, + 541, + 426 + ], + "type": "text", + "content": ". Some researchers interpret this as evidence that the investigation of propositional proof complexity might be unavoidable. Another approach to Open Problem 5.3 is discussed in Section 5.3." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 441, + 262, + 454 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 441, + 262, + 454 + ], + "spans": [ + { + "bbox": [ + 67, + 441, + 262, + 454 + ], + "type": "text", + "content": "5.2 Unprovability of Lower Bounds" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 462, + 276, + 475 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 462, + 276, + 475 + ], + "spans": [ + { + "bbox": [ + 67, + 462, + 276, + 475 + ], + "type": "text", + "content": "5.2.1 Average-Case Circuit Lower Bounds" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 483, + 541, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 483, + 541, + 523 + ], + "spans": [ + { + "bbox": [ + 67, + 483, + 541, + 523 + ], + "type": "text", + "content": "In this section, we discuss the unprovability of strong average-case lower bounds in " + }, + { + "bbox": [ + 67, + 483, + 541, + 523 + ], + "type": "inline_equation", + "content": "\\mathrm{PV}_1" + }, + { + "bbox": [ + 67, + 483, + 541, + 523 + ], + "type": "text", + "content": ". We focus on an unprovability result from [PS21], stated and proved in a slightly stronger form in [LO23]. The proof is based on a technique introduced by [Kra11] and further explored in [Pic15a]." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 524, + 541, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 524, + 541, + 578 + ], + "spans": [ + { + "bbox": [ + 67, + 524, + 541, + 578 + ], + "type": "text", + "content": "We consider an average-case separation of co-nondeterministic circuits against non-deterministic circuits of subexponential size. 
In more detail, we investigate the provability of a sentence " + }, + { + "bbox": [ + 67, + 524, + 541, + 578 + ], + "type": "inline_equation", + "content": "\\mathsf{LB}^1 (s_1,s_2,m,n_0)" + }, + { + "bbox": [ + 67, + 524, + 541, + 578 + ], + "type": "text", + "content": " stating that, for every input length " + }, + { + "bbox": [ + 67, + 524, + 541, + 578 + ], + "type": "inline_equation", + "content": "n\\geq n_0" + }, + { + "bbox": [ + 67, + 524, + 541, + 578 + ], + "type": "text", + "content": ", there is a co-nondeterministic circuit " + }, + { + "bbox": [ + 67, + 524, + 541, + 578 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 67, + 524, + 541, + 578 + ], + "type": "text", + "content": " of size " + }, + { + "bbox": [ + 67, + 524, + 541, + 578 + ], + "type": "inline_equation", + "content": "\\leq s_{1}(n)" + }, + { + "bbox": [ + 67, + 524, + 541, + 578 + ], + "type": "text", + "content": " such that, for every nondeterministic circuit " + }, + { + "bbox": [ + 67, + 524, + 541, + 578 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 67, + 524, + 541, + 578 + ], + "type": "text", + "content": " of size " + }, + { + "bbox": [ + 67, + 524, + 541, + 578 + ], + "type": "inline_equation", + "content": "\\leq s_2(n)" + }, + { + "bbox": [ + 67, + 524, + 541, + 578 + ], + "type": "text", + "content": ", we have" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 212, + 587, + 397, + 616 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 212, + 587, + 397, + 616 + ], + "spans": [ + { + "bbox": [ + 212, + 587, + 397, + 616 + ], + "type": "interline_equation", + "content": "\\operatorname * {P r} _ {x \\sim \\{0, 1 \\} ^ {n}} \\Big [ C (x) = D (x) \\Big ] \\leq 1 - \\frac {m (n)}{2 ^ {n}}.", + "image_path": "b25695a790d83f0d9d9673d679882ad1f227b367f941835bfd97862a8c0677dc.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 625, + 542, + 653 + ], + "type": "text", + "angle": 
0, + "lines": [ + { + "bbox": [ + 67, + 625, + 542, + 653 + ], + "spans": [ + { + "bbox": [ + 67, + 625, + 542, + 653 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 67, + 625, + 542, + 653 + ], + "type": "inline_equation", + "content": "\\mathrm{coNSIZE}[s(n)]" + }, + { + "bbox": [ + 67, + 625, + 542, + 653 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 625, + 542, + 653 + ], + "type": "inline_equation", + "content": "\\mathrm{NSIZE}[s(n)]" + }, + { + "bbox": [ + 67, + 625, + 542, + 653 + ], + "type": "text", + "content": " refer to co-nondeterministic circuits and nondeterministic circuits of size " + }, + { + "bbox": [ + 67, + 625, + 542, + 653 + ], + "type": "inline_equation", + "content": "s(n)" + }, + { + "bbox": [ + 67, + 625, + 542, + 653 + ], + "type": "text", + "content": ", respectively. More formally, " + }, + { + "bbox": [ + 67, + 625, + 542, + 653 + ], + "type": "inline_equation", + "content": "\\mathrm{LB}^1(s_1, s_2, m, n_0)" + }, + { + "bbox": [ + 67, + 625, + 542, + 653 + ], + "type": "text", + "content": " is an " + }, + { + "bbox": [ + 67, + 625, + 542, + 653 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{PV}}" + }, + { + "bbox": [ + 67, + 625, + 542, + 653 + ], + "type": "text", + "content": "-sentence capturing the following lower" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 78, + 660, + 386, + 673 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 660, + 386, + 673 + ], + "spans": [ + { + "bbox": [ + 78, + 660, + 386, + 673 + ], + "type": "text", + "content": "11Due to space constraints, we do not elaborate on the formalization of NP = coNP." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 300, + 712, + 311, + 720 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 712, + 311, + 720 + ], + "spans": [ + { + "bbox": [ + 300, + 712, + 311, + 720 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 73, + 147, + 83 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 73, + 147, + 83 + ], + "spans": [ + { + "bbox": [ + 69, + 73, + 147, + 83 + ], + "type": "text", + "content": "bound statement:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 125, + 96, + 441, + 111 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 125, + 96, + 441, + 111 + ], + "spans": [ + { + "bbox": [ + 125, + 96, + 441, + 111 + ], + "type": "interline_equation", + "content": "\\forall n \\in \\operatorname {L o g L o g} \\text {w i t h} n \\geq n _ {0} \\exists C \\in \\operatorname {c o N S I Z E} \\left[ s _ {1} (n) \\right] \\forall D \\in \\operatorname {N S I Z E} \\left[ s _ {2} (n) \\right]", + "image_path": "47aefef7c299135f8cd4a10560540de952da8874e8d904415659cff411b2bca0.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 125, + 114, + 482, + 129 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 125, + 114, + 482, + 129 + ], + "spans": [ + { + "bbox": [ + 125, + 114, + 482, + 129 + ], + "type": "interline_equation", + "content": "\\exists m = m (n) \\text {d i s t i n c t} x ^ {1}, \\dots , x ^ {m} \\text {s . t . 
E r r o r} (C, D, x ^ {i}) \\text {f o r a l l} i \\in [ m ],", + "image_path": "1e719365c9b8487be9ebe15f82e9d7c05674d60514e56e0d115615219d9d1d93.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 139, + 542, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 139, + 542, + 193 + ], + "spans": [ + { + "bbox": [ + 68, + 139, + 542, + 193 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 68, + 139, + 542, + 193 + ], + "type": "inline_equation", + "content": "\\operatorname{Error}(C, D, x)" + }, + { + "bbox": [ + 68, + 139, + 542, + 193 + ], + "type": "text", + "content": " means that the circuits " + }, + { + "bbox": [ + 68, + 139, + 542, + 193 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 68, + 139, + 542, + 193 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 68, + 139, + 542, + 193 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 68, + 139, + 542, + 193 + ], + "type": "text", + "content": " disagree on the input " + }, + { + "bbox": [ + 68, + 139, + 542, + 193 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 68, + 139, + 542, + 193 + ], + "type": "text", + "content": ". 
This statement can be seen as an average-case form of the coNP " + }, + { + "bbox": [ + 68, + 139, + 542, + 193 + ], + "type": "inline_equation", + "content": "\\nsubseteq \\mathsf{NP} / \\mathsf{poly}" + }, + { + "bbox": [ + 68, + 139, + 542, + 193 + ], + "type": "text", + "content": " conjecture if we let " + }, + { + "bbox": [ + 68, + 139, + 542, + 193 + ], + "type": "inline_equation", + "content": "s_1(n) = n^{O(1)}" + }, + { + "bbox": [ + 68, + 139, + 542, + 193 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 68, + 139, + 542, + 193 + ], + "type": "inline_equation", + "content": "s_2(n) = n^{\\omega(1)}" + }, + { + "bbox": [ + 68, + 139, + 542, + 193 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 68, + 139, + 542, + 193 + ], + "type": "inline_equation", + "content": "m(n) = 2^n / n" + }, + { + "bbox": [ + 68, + 139, + 542, + 193 + ], + "type": "text", + "content": ". (Note that we consider in this section a LogLog formalization, according to the notation explained in Section 4.1.)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 68, + 201, + 541, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 201, + 541, + 232 + ], + "spans": [ + { + "bbox": [ + 68, + 201, + 541, + 232 + ], + "type": "text", + "content": "Theorem 5.6 ([PS21, LO23]). 
Let " + }, + { + "bbox": [ + 68, + 201, + 541, + 232 + ], + "type": "inline_equation", + "content": "d \\geq 1" + }, + { + "bbox": [ + 68, + 201, + 541, + 232 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 68, + 201, + 541, + 232 + ], + "type": "inline_equation", + "content": "\\delta > 0" + }, + { + "bbox": [ + 68, + 201, + 541, + 232 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 68, + 201, + 541, + 232 + ], + "type": "inline_equation", + "content": "n_0 \\geq 1" + }, + { + "bbox": [ + 68, + 201, + 541, + 232 + ], + "type": "text", + "content": " be arbitrary parameters, and let " + }, + { + "bbox": [ + 68, + 201, + 541, + 232 + ], + "type": "inline_equation", + "content": "s_1(n) = n^d" + }, + { + "bbox": [ + 68, + 201, + 541, + 232 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 68, + 201, + 541, + 232 + ], + "type": "inline_equation", + "content": "s_2(n) = 2^{n^\\delta}" + }, + { + "bbox": [ + 68, + 201, + 541, + 232 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 68, + 201, + 541, + 232 + ], + "type": "inline_equation", + "content": "m(n) = 2^n / n" + }, + { + "bbox": [ + 68, + 201, + 541, + 232 + ], + "type": "text", + "content": ". Then " + }, + { + "bbox": [ + 68, + 201, + 541, + 232 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 68, + 201, + 541, + 232 + ], + "type": "text", + "content": " does not prove the sentence " + }, + { + "bbox": [ + 68, + 201, + 541, + 232 + ], + "type": "inline_equation", + "content": "\\mathsf{LB}^1(s_1, s_2, m, n_0)" + }, + { + "bbox": [ + 68, + 201, + 541, + 232 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 85, + 239, + 473, + 252 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 239, + 473, + 252 + ], + "spans": [ + { + "bbox": [ + 85, + 239, + 473, + 252 + ], + "type": "text", + "content": "In the remainder of this section, we provide some intuition about the proof of this result." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 266, + 541, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 266, + 541, + 334 + ], + "spans": [ + { + "bbox": [ + 68, + 266, + 541, + 334 + ], + "type": "text", + "content": "Overview of the Argument. Suppose, towards a contradiction, that " + }, + { + "bbox": [ + 68, + 266, + 541, + 334 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1\\vdash \\mathsf{LB}^1 (s_1,s_2,m,n_0)" + }, + { + "bbox": [ + 68, + 266, + 541, + 334 + ], + "type": "text", + "content": " with parameters as above. The central idea of the argument is that establishing a strong complexity lower bound within bounded arithmetic leads to a corresponding complexity upper bound. These lower and upper bounds contradict each other. Consequently, this contradiction implies the unprovability of the lower bound statement. 
In a bit more detail, the argument proceeds as follows:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 74, + 342, + 542, + 500 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 80, + 342, + 541, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 342, + 541, + 386 + ], + "spans": [ + { + "bbox": [ + 80, + 342, + 541, + 386 + ], + "type": "text", + "content": "(i) The provability of the average-case lower bound sentence " + }, + { + "bbox": [ + 80, + 342, + 541, + 386 + ], + "type": "inline_equation", + "content": "\\mathsf{LB}^1 (s_1,s_2,m,n_0)" + }, + { + "bbox": [ + 80, + 342, + 541, + 386 + ], + "type": "text", + "content": " implies the provability of a worst-case lower bound for coNSIZE" + }, + { + "bbox": [ + 80, + 342, + 541, + 386 + ], + "type": "inline_equation", + "content": "[n^d]" + }, + { + "bbox": [ + 80, + 342, + 541, + 386 + ], + "type": "text", + "content": " against NSIZE" + }, + { + "bbox": [ + 80, + 342, + 541, + 386 + ], + "type": "inline_equation", + "content": "[2^{n^\\delta}]" + }, + { + "bbox": [ + 80, + 342, + 541, + 386 + ], + "type": "text", + "content": ". We formalize the latter by a sentence " + }, + { + "bbox": [ + 80, + 342, + 541, + 386 + ], + "type": "inline_equation", + "content": "\\mathsf{LB}_{\\mathrm{wst}}^1 (s_1,s_2,n_0)" + }, + { + "bbox": [ + 80, + 342, + 541, + 386 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 77, + 393, + 542, + 452 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 393, + 542, + 452 + ], + "spans": [ + { + "bbox": [ + 77, + 393, + 542, + 452 + ], + "type": "text", + "content": "(ii) Given any proof of " + }, + { + "bbox": [ + 77, + 393, + 542, + 452 + ], + "type": "inline_equation", + "content": "\\mathsf{LB}_{\\mathsf{wst}}^{1}(s_{1}, s_{2}, n_{0})" + }, + { + "bbox": [ + 77, + 393, + 542, + 452 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 77, + 393, + 542, + 452 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 77, + 393, + 542, + 452 + ], + "type": "text", + "content": ", we extract a complexity upper bound for an arbitrary co-nondeterministic circuit " + }, + { + "bbox": [ + 77, + 393, + 542, + 452 + ], + "type": "inline_equation", + "content": "E_{m}(x)" + }, + { + "bbox": [ + 77, + 393, + 542, + 452 + ], + "type": "text", + "content": " over an input " + }, + { + "bbox": [ + 77, + 393, + 542, + 452 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 77, + 393, + 542, + 452 + ], + "type": "text", + "content": " of length " + }, + { + "bbox": [ + 77, + 393, + 542, + 452 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 77, + 393, + 542, + 452 + ], + "type": "text", + "content": " and of size at most " + }, + { + "bbox": [ + 77, + 393, + 542, + 452 + ], + "type": "inline_equation", + "content": "\\mathrm{poly}(m)" + }, + { + "bbox": [ + 77, + 393, + 542, + 452 + ], + "type": "text", + "content": ". 
More precisely, we show that there is a deterministic circuit " + }, + { + "bbox": [ + 77, + 393, + 542, + 452 + ], + "type": "inline_equation", + "content": "B_{m}" + }, + { + "bbox": [ + 77, + 393, + 542, + 452 + ], + "type": "text", + "content": " of size " + }, + { + "bbox": [ + 77, + 393, + 542, + 452 + ], + "type": "inline_equation", + "content": "\\leq 2^{m^{o(1)}}" + }, + { + "bbox": [ + 77, + 393, + 542, + 452 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 77, + 393, + 542, + 452 + ], + "type": "inline_equation", + "content": "\\operatorname{Pr}_{x \\sim \\{0,1\\}^m}[E_m(x) = B_m(x)] \\geq 1/2 + 2^{-m^{o(1)}}" + }, + { + "bbox": [ + 77, + 393, + 542, + 452 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 74, + 459, + 542, + 500 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 459, + 542, + 500 + ], + "spans": [ + { + "bbox": [ + 74, + 459, + 542, + 500 + ], + "type": "text", + "content": "(iii) We invoke an existing hardness amplification result to conclude that, on any large enough input length " + }, + { + "bbox": [ + 74, + 459, + 542, + 500 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 74, + 459, + 542, + 500 + ], + "type": "text", + "content": ", every co-nondeterministic circuit " + }, + { + "bbox": [ + 74, + 459, + 542, + 500 + ], + "type": "inline_equation", + "content": "C_n" + }, + { + "bbox": [ + 74, + 459, + 542, + 500 + ], + "type": "text", + "content": " of size " + }, + { + "bbox": [ + 74, + 459, + 542, + 500 + ], + "type": "inline_equation", + "content": "\\leq n^d" + }, + { + "bbox": [ + 74, + 459, + 542, + 500 + ], + "type": "text", + "content": " agrees with some nondeterministic circuit " + }, + { + "bbox": [ + 74, + 459, + 542, + 500 + ], + "type": "inline_equation", + "content": "D_n" + }, + { + "bbox": [ + 74, + 459, + 542, + 500 + ], + "type": "text", + "content": " of size " + }, + { + "bbox": [ + 74, + 459, + 
542, + 500 + ], + "type": "inline_equation", + "content": "\\leq 2^{n^\\delta}" + }, + { + "bbox": [ + 74, + 459, + 542, + 500 + ], + "type": "text", + "content": " on more than a " + }, + { + "bbox": [ + 74, + 459, + 542, + 500 + ], + "type": "inline_equation", + "content": "1 - 1/n" + }, + { + "bbox": [ + 74, + 459, + 542, + 500 + ], + "type": "text", + "content": " fraction of the inputs." + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 68, + 508, + 541, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 508, + 541, + 536 + ], + "spans": [ + { + "bbox": [ + 68, + 508, + 541, + 536 + ], + "type": "text", + "content": "Since " + }, + { + "bbox": [ + 68, + 508, + 541, + 536 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 68, + 508, + 541, + 536 + ], + "type": "text", + "content": " is a sound theory, i.e., every theorem of " + }, + { + "bbox": [ + 68, + 508, + 541, + 536 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 68, + 508, + 541, + 536 + ], + "type": "text", + "content": " is a true sentence, Item (iii) is in contradiction with the complexity lower bound stated in " + }, + { + "bbox": [ + 68, + 508, + 541, + 536 + ], + "type": "inline_equation", + "content": "\\mathsf{LB}^1(s_1, s_2, m, n_0)" + }, + { + "bbox": [ + 68, + 508, + 541, + 536 + ], + "type": "text", + "content": ". Consequently, " + }, + { + "bbox": [ + 68, + 508, + 541, + 536 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 68, + 508, + 541, + 536 + ], + "type": "text", + "content": " does not prove this sentence." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 68, + 549, + 542, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 549, + 542, + 631 + ], + "spans": [ + { + "bbox": [ + 68, + 549, + 542, + 631 + ], + "type": "text", + "content": "The most interesting step of the argument is the proof of Item (ii). The key point is that the proof of a lower bound in " + }, + { + "bbox": [ + 68, + 549, + 542, + 631 + ], + "type": "inline_equation", + "content": "\\mathrm{PV}_1" + }, + { + "bbox": [ + 68, + 549, + 542, + 631 + ], + "type": "text", + "content": " must be somewhat constructive, in the sense that it not only shows that every small circuit " + }, + { + "bbox": [ + 68, + 549, + 542, + 631 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 68, + 549, + 542, + 631 + ], + "type": "text", + "content": " fails to solve the problem but also produces a string " + }, + { + "bbox": [ + 68, + 549, + 542, + 631 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 68, + 549, + 542, + 631 + ], + "type": "text", + "content": " witnessing this fact. Below we give a simple example of its usefulness, showing a setting where a constructive lower bound yields an upper bound. Note that the application of a witnessing theorem to a LogLog formalization provides algorithms running in time poly" + }, + { + "bbox": [ + 68, + 549, + 542, + 631 + ], + "type": "inline_equation", + "content": "(2^n)" + }, + { + "bbox": [ + 68, + 549, + 542, + 631 + ], + "type": "text", + "content": ". The example provided next shows that this is still useful." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 67, + 639, + 542, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 639, + 542, + 694 + ], + "spans": [ + { + "bbox": [ + 67, + 639, + 542, + 694 + ], + "type": "text", + "content": "Lemma 5.7 ([CLO24a]). 
Let " + }, + { + "bbox": [ + 67, + 639, + 542, + 694 + ], + "type": "inline_equation", + "content": "L \\in \\mathsf{NP}" + }, + { + "bbox": [ + 67, + 639, + 542, + 694 + ], + "type": "text", + "content": ". Suppose that there is a uniform algorithm " + }, + { + "bbox": [ + 67, + 639, + 542, + 694 + ], + "type": "inline_equation", + "content": "R(1^n, D)" + }, + { + "bbox": [ + 67, + 639, + 542, + 694 + ], + "type": "text", + "content": " such that, for every co-nondeterministic circuit " + }, + { + "bbox": [ + 67, + 639, + 542, + 694 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 67, + 639, + 542, + 694 + ], + "type": "text", + "content": " on " + }, + { + "bbox": [ + 67, + 639, + 542, + 694 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 67, + 639, + 542, + 694 + ], + "type": "text", + "content": " input variables and of size at most " + }, + { + "bbox": [ + 67, + 639, + 542, + 694 + ], + "type": "inline_equation", + "content": "n^{\\log n}" + }, + { + "bbox": [ + 67, + 639, + 542, + 694 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 67, + 639, + 542, + 694 + ], + "type": "inline_equation", + "content": "R(1^n, D)" + }, + { + "bbox": [ + 67, + 639, + 542, + 694 + ], + "type": "text", + "content": " runs in time " + }, + { + "bbox": [ + 67, + 639, + 542, + 694 + ], + "type": "inline_equation", + "content": "2^{O(n)}" + }, + { + "bbox": [ + 67, + 639, + 542, + 694 + ], + "type": "text", + "content": " and outputs a string " + }, + { + "bbox": [ + 67, + 639, + 542, + 694 + ], + "type": "inline_equation", + "content": "w \\in \\{0, 1\\}^n" + }, + { + "bbox": [ + 67, + 639, + 542, + 694 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 67, + 639, + 542, + 694 + ], + "type": "inline_equation", + "content": "D(w) \\neq L(w)" + }, + { + "bbox": [ + 67, + 639, + 542, + 694 + ], + "type": "text", + "content": ". 
Then, for every language " + }, + { + "bbox": [ + 67, + 639, + 542, + 694 + ], + "type": "inline_equation", + "content": "L' \\in \\mathsf{NP}" + }, + { + "bbox": [ + 67, + 639, + 542, + 694 + ], + "type": "text", + "content": " and for every constant " + }, + { + "bbox": [ + 67, + 639, + 542, + 694 + ], + "type": "inline_equation", + "content": "\\varepsilon > 0" + }, + { + "bbox": [ + 67, + 639, + 542, + 694 + ], + "type": "text", + "content": ", we have " + }, + { + "bbox": [ + 67, + 639, + 542, + 694 + ], + "type": "inline_equation", + "content": "L' \\in \\mathsf{DTIME}[2^{n^\\varepsilon}]" + }, + { + "bbox": [ + 67, + 639, + 542, + 694 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 712, + 312, + 720 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 712, + 312, + 720 + ], + "spans": [ + { + "bbox": [ + 299, + 712, + 312, + 720 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 72, + 543, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 72, + 543, + 128 + ], + "spans": [ + { + "bbox": [ + 68, + 72, + 543, + 128 + ], + "type": "text", + "content": "Proof. Suppose that " + }, + { + "bbox": [ + 68, + 72, + 543, + 128 + ], + "type": "inline_equation", + "content": "L \\in \\mathsf{NTIME}[n^d]" + }, + { + "bbox": [ + 68, + 72, + 543, + 128 + ], + "type": "text", + "content": " for some " + }, + { + "bbox": [ + 68, + 72, + 543, + 128 + ], + "type": "inline_equation", + "content": "d \\in \\mathbb{N}" + }, + { + "bbox": [ + 68, + 72, + 543, + 128 + ], + "type": "text", + "content": ". 
Let " + }, + { + "bbox": [ + 68, + 72, + 543, + 128 + ], + "type": "inline_equation", + "content": "M'" + }, + { + "bbox": [ + 68, + 72, + 543, + 128 + ], + "type": "text", + "content": " be a nondeterministic machine that decides " + }, + { + "bbox": [ + 68, + 72, + 543, + 128 + ], + "type": "inline_equation", + "content": "L'" + }, + { + "bbox": [ + 68, + 72, + 543, + 128 + ], + "type": "text", + "content": " and runs in time at most " + }, + { + "bbox": [ + 68, + 72, + 543, + 128 + ], + "type": "inline_equation", + "content": "n^{c'}" + }, + { + "bbox": [ + 68, + 72, + 543, + 128 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 68, + 72, + 543, + 128 + ], + "type": "inline_equation", + "content": "c' \\in \\mathbb{N}" + }, + { + "bbox": [ + 68, + 72, + 543, + 128 + ], + "type": "text", + "content": ". Let " + }, + { + "bbox": [ + 68, + 72, + 543, + 128 + ], + "type": "inline_equation", + "content": "\\varepsilon > 0" + }, + { + "bbox": [ + 68, + 72, + 543, + 128 + ], + "type": "text", + "content": " be an arbitrary constant. Let " + }, + { + "bbox": [ + 68, + 72, + 543, + 128 + ], + "type": "inline_equation", + "content": "\\gamma = \\gamma(d, \\varepsilon) > 0" + }, + { + "bbox": [ + 68, + 72, + 543, + 128 + ], + "type": "text", + "content": " be a small enough constant to be defined later. Finally, let " + }, + { + "bbox": [ + 68, + 72, + 543, + 128 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 68, + 72, + 543, + 128 + ], + "type": "text", + "content": " be the algorithm provided by the hypothesis of the lemma. 
We show that the following deterministic algorithm " + }, + { + "bbox": [ + 68, + 72, + 543, + 128 + ], + "type": "inline_equation", + "content": "B^{\\gamma}(x)" + }, + { + "bbox": [ + 68, + 72, + 543, + 128 + ], + "type": "text", + "content": " decides " + }, + { + "bbox": [ + 68, + 72, + 543, + 128 + ], + "type": "inline_equation", + "content": "L'" + }, + { + "bbox": [ + 68, + 72, + 543, + 128 + ], + "type": "text", + "content": " in time " + }, + { + "bbox": [ + 68, + 72, + 543, + 128 + ], + "type": "inline_equation", + "content": "O(2^{n^{\\varepsilon}})" + }, + { + "bbox": [ + 68, + 72, + 543, + 128 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 86, + 141, + 247, + 155 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 141, + 247, + 155 + ], + "spans": [ + { + "bbox": [ + 86, + 141, + 247, + 155 + ], + "type": "text", + "content": "Input: " + }, + { + "bbox": [ + 86, + 141, + 247, + 155 + ], + "type": "inline_equation", + "content": "x \\in \\{0,1\\}^n" + }, + { + "bbox": [ + 86, + 141, + 247, + 155 + ], + "type": "text", + "content": " for some " + }, + { + "bbox": [ + 86, + 141, + 247, + 155 + ], + "type": "inline_equation", + "content": "n \\geq 1" + }, + { + "bbox": [ + 86, + 141, + 247, + 155 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 77, + 156, + 511, + 291 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 77, + 156, + 507, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 156, + 507, + 180 + ], + "spans": [ + { + "bbox": [ + 77, + 156, + 507, + 180 + ], + "type": "text", + "content": "1 Compute the description of a co-nondeterministic circuit " + }, + { + "bbox": [ + 77, + 156, + 507, + 180 + ], + "type": "inline_equation", + "content": "E'" + }, + { + "bbox": [ + 77, + 156, + 507, + 180 + ], + "type": "text", + "content": " of size at most " + }, + { + "bbox": [ + 77, + 156, + 507, + 180 + ], + "type": "inline_equation", + "content": "n^{2c'}" + }, + { + "bbox": [ + 77, + 156, + 507, + 180 + ], + "type": "text", + "content": " that decides the complement of " + }, + { + "bbox": [ + 77, + 156, + 507, + 180 + ], + "type": "inline_equation", + "content": "L'" + }, + { + "bbox": [ + 77, + 156, + 507, + 180 + ], + "type": "text", + "content": ";" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 86, + 182, + 471, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 182, + 471, + 196 + ], + "spans": [ + { + "bbox": [ + 86, + 182, + 471, + 196 + ], + "type": "text", + "content": "// In other words, " + }, + { + "bbox": [ + 86, + 182, + 471, + 196 + ], + "type": "inline_equation", + "content": "E'(u) = 1 - L'(u)" + }, + { + "bbox": [ + 86, + 182, + 471, + 196 + ], + "type": "text", + "content": " for every string " + }, + { + "bbox": [ + 86, + 182, + 471, + 196 + ], + "type": "inline_equation", + "content": "u \\in \\{0,1\\}^n" + }, + { + "bbox": [ + 86, + 182, + 471, + 196 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 77, + 196, + 499, + 223 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 196, + 499, + 223 + ], + "spans": [ + { + "bbox": [ + 77, + 196, + 499, + 223 + ], + "type": "text", + "content": "2 Produce the description of a co-nondeterministic circuit " + }, + { + "bbox": [ + 77, + 196, + 499, + 223 + ], + "type": "inline_equation", + "content": "D_{x}(y)" + }, + { + "bbox": [ + 77, + 196, + 499, + 223 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 77, + 196, + 499, + 223 + ], + "type": "inline_equation", + "content": "y \\in \\{0,1\\}^{n^{\\gamma}}" + }, + { + "bbox": [ + 77, + 196, + 499, + 223 + ], + "type": "text", + "content": ", such that " + }, + { + "bbox": [ + 77, + 196, + 499, + 223 + ], + "type": "inline_equation", + "content": "D_{x}(y)" + }, + { + "bbox": [ + 77, + 196, + 499, + 223 + ], + "type": "text", + "content": " ignores its input " + }, + { + "bbox": [ + 77, + 196, + 499, + 223 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 77, + 196, + 499, + 223 + ], + "type": "text", + "content": " and computes according to " + }, + { + "bbox": [ + 77, + 196, + 499, + 223 + ], + "type": "inline_equation", + "content": "E'(x)" + }, + { + "bbox": [ + 77, + 196, + 499, + 223 + ], + "type": "text", + "content": ";" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 86, + 224, + 511, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 224, + 511, + 262 + ], + "spans": [ + { + "bbox": [ + 86, + 224, + 511, + 262 + ], + "type": "text", + "content": "// While the length of " + }, + { + "bbox": [ + 86, + 224, + 511, + 262 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 86, + 224, + 511, + 262 + ], + "type": "text", + "content": " is smaller than the length of " + }, + { + "bbox": [ + 86, + 224, + 511, + 262 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 86, + 224, 
+ 511, + 262 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 86, + 224, + 511, + 262 + ], + "type": "inline_equation", + "content": "D_x" + }, + { + "bbox": [ + 86, + 224, + 511, + 262 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 86, + 224, + 511, + 262 + ], + "type": "inline_equation", + "content": "E'" + }, + { + "bbox": [ + 86, + 224, + 511, + 262 + ], + "type": "text", + "content": " share the same nondeterministic input string, and " + }, + { + "bbox": [ + 86, + 224, + 511, + 262 + ], + "type": "inline_equation", + "content": "E'" + }, + { + "bbox": [ + 86, + 224, + 511, + 262 + ], + "type": "text", + "content": " sets " + }, + { + "bbox": [ + 86, + 224, + 511, + 262 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 86, + 224, + 511, + 262 + ], + "type": "text", + "content": " to be the fixed string " + }, + { + "bbox": [ + 86, + 224, + 511, + 262 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 86, + 224, + 511, + 262 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 77, + 263, + 260, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 263, + 260, + 277 + ], + "spans": [ + { + "bbox": [ + 77, + 263, + 260, + 277 + ], + "type": "text", + "content": "3 Compute " + }, + { + "bbox": [ + 77, + 263, + 260, + 277 + ], + "type": "inline_equation", + "content": "w = R(1^{n^{\\gamma}}, D_x) \\in \\{0, 1\\}^{n^{\\gamma}}" + }, + { + "bbox": [ + 77, + 263, + 260, + 277 + ], + "type": "text", + "content": ";" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 77, + 278, + 407, + 291 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 278, + 407, + 291 + ], + "spans": [ + { + "bbox": [ + 77, + 278, + 407, + 291 + ], + "type": "text", + "content": "4 Determine the bit " + }, + { + "bbox": [ + 77, + 278, + 407, + 291 + ], + "type": "inline_equation", + "content": "b = L(w)" + }, + { + "bbox": [ + 77, + 278, + 407, + 291 + ], + "type": "text", + "content": " by a brute force computation, then return " + }, + { + "bbox": [ + 77, + 278, + 407, + 291 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 77, + 278, + 407, + 291 + ], + "type": "text", + "content": ";" + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 159, + 295, + 416, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 295, + 416, + 308 + ], + "spans": [ + { + "bbox": [ + 159, + 295, + 416, + 308 + ], + "type": "text", + "content": "Algorithm 2: Algorithm " + }, + { + "bbox": [ + 159, + 295, + 416, + 308 + ], + "type": "inline_equation", + "content": "B^{\\gamma}(x)" + }, + { + "bbox": [ + 159, + 295, + 416, + 308 + ], + "type": "text", + "content": " for deciding language " + }, + { + "bbox": [ + 159, + 295, + 416, + 308 + ], + "type": "inline_equation", + "content": "L'" + }, + { + "bbox": [ + 159, + 295, + 416, + 308 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 68, + 316, + 542, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 316, + 542, + 357 + ], + "spans": [ + { + "bbox": [ + 68, + 316, + 542, + 357 + ], + "type": "text", + "content": "First, we argue that " + }, + { + "bbox": [ + 68, + 316, + 542, + 357 + ], + "type": "inline_equation", + "content": "B^{\\gamma}" + }, + { + "bbox": [ + 68, + 316, + 542, + 357 + ], + "type": "text", + "content": " decides " + }, + { + "bbox": [ + 68, + 316, + 542, + 357 + ], + "type": "inline_equation", + "content": "L'" + }, + { + "bbox": [ + 68, + 316, + 542, + 357 + ], + "type": "text", + "content": ". Since " + }, + { + "bbox": [ + 68, + 316, + 542, + 357 + ], + "type": "inline_equation", + "content": "D_x" + }, + { + "bbox": [ + 68, + 316, + 542, + 357 + ], + "type": "text", + "content": " is a co-nondeterministic circuit over inputs of length " + }, + { + "bbox": [ + 68, + 316, + 542, + 357 + ], + "type": "inline_equation", + "content": "m \\triangleq n^{\\gamma}" + }, + { + "bbox": [ + 68, + 316, + 542, + 357 + ], + "type": "text", + "content": " and has size at most " + }, + { + "bbox": [ + 68, + 316, + 542, + 357 + ], + "type": "inline_equation", + "content": "n^{2c'} = m^{2c'/\\gamma} \\leq m^{\\log m}" + }, + { + "bbox": [ + 68, + 316, + 542, + 357 + ], + "type": "text", + "content": " (for a large enough " + }, + { + "bbox": [ + 68, + 316, + 542, + 357 + ], + "type": "inline_equation", + "content": "m" + }, + { + "bbox": [ + 68, + 316, + 542, + 357 + ], + "type": "text", + "content": "), " + }, + { + "bbox": [ + 68, + 316, + 542, + 357 + ], + "type": "inline_equation", + "content": "R(1^{n^{\\gamma}}, D_x)" + }, + { + "bbox": [ + 68, + 316, + 542, + 357 + ], + "type": "text", + "content": " outputs a string " + }, + { + "bbox": [ + 68, + 316, + 542, + 357 + ], + "type": "inline_equation", + "content": "w \\in \\{0, 1\\}^{n^{\\gamma}}" + }, + { + "bbox": [ + 68, + 316, + 542, + 357 + ], + 
"type": "text", + "content": " such that " + }, + { + "bbox": [ + 68, + 316, + 542, + 357 + ], + "type": "inline_equation", + "content": "L(w) = 1 - D_x(w)" + }, + { + "bbox": [ + 68, + 316, + 542, + 357 + ], + "type": "text", + "content": ". Consequently," + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 153, + 366, + 455, + 380 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 153, + 366, + 455, + 380 + ], + "spans": [ + { + "bbox": [ + 153, + 366, + 455, + 380 + ], + "type": "interline_equation", + "content": "b = L (w) = 1 - D _ {x} (w) = 1 - E ^ {\\prime} (x) = 1 - \\left(1 - L ^ {\\prime} (x)\\right) = L ^ {\\prime} (x),", + "image_path": "be44fa32397a16fb9f227ae605d2a0d7b3a5acb9546bc2e5c59552538eea9337.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 68, + 392, + 238, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 392, + 238, + 403 + ], + "spans": [ + { + "bbox": [ + 68, + 392, + 238, + 403 + ], + "type": "text", + "content": "i.e., the output bit of " + }, + { + "bbox": [ + 68, + 392, + 238, + 403 + ], + "type": "inline_equation", + "content": "B^{\\gamma}(x)" + }, + { + "bbox": [ + 68, + 392, + 238, + 403 + ], + "type": "text", + "content": " is correct." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 68, + 405, + 543, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 405, + 543, + 487 + ], + "spans": [ + { + "bbox": [ + 68, + 405, + 543, + 487 + ], + "type": "text", + "content": "Next, we argue that " + }, + { + "bbox": [ + 68, + 405, + 543, + 487 + ], + "type": "inline_equation", + "content": "B^{\\gamma}" + }, + { + "bbox": [ + 68, + 405, + 543, + 487 + ], + "type": "text", + "content": " runs in time at most " + }, + { + "bbox": [ + 68, + 405, + 543, + 487 + ], + "type": "inline_equation", + "content": "O(2^{n^{\\varepsilon}})" + }, + { + "bbox": [ + 68, + 405, + 543, + 487 + ], + "type": "text", + "content": ". 
Clearly, Steps 1-2 run in " + }, + { + "bbox": [ + 68, + 405, + 543, + 487 + ], + "type": "inline_equation", + "content": "\\mathrm{poly}(n)" + }, + { + "bbox": [ + 68, + 405, + 543, + 487 + ], + "type": "text", + "content": " time. Moreover, Step 3 runs in time " + }, + { + "bbox": [ + 68, + 405, + 543, + 487 + ], + "type": "inline_equation", + "content": "2^{O(n^{\\gamma})}" + }, + { + "bbox": [ + 68, + 405, + 543, + 487 + ], + "type": "text", + "content": " under the assumption on the running time of " + }, + { + "bbox": [ + 68, + 405, + 543, + 487 + ], + "type": "inline_equation", + "content": "R(1^{n^{\\gamma}}, D_x)" + }, + { + "bbox": [ + 68, + 405, + 543, + 487 + ], + "type": "text", + "content": ". This is at most " + }, + { + "bbox": [ + 68, + 405, + 543, + 487 + ], + "type": "inline_equation", + "content": "2^{n^{\\varepsilon}}" + }, + { + "bbox": [ + 68, + 405, + 543, + 487 + ], + "type": "text", + "content": " if we set " + }, + { + "bbox": [ + 68, + 405, + 543, + 487 + ], + "type": "inline_equation", + "content": "\\gamma \\leq \\varepsilon / 2" + }, + { + "bbox": [ + 68, + 405, + 543, + 487 + ], + "type": "text", + "content": ". Finally, since " + }, + { + "bbox": [ + 68, + 405, + 543, + 487 + ], + "type": "inline_equation", + "content": "L \\in \\mathsf{NTIME}[n^d]" + }, + { + "bbox": [ + 68, + 405, + 543, + 487 + ], + "type": "text", + "content": ", the brute force computation in Step 4 can be performed in deterministic time " + }, + { + "bbox": [ + 68, + 405, + 543, + 487 + ], + "type": "inline_equation", + "content": "2^{O(\\ell^d)}" + }, + { + "bbox": [ + 68, + 405, + 543, + 487 + ], + "type": "text", + "content": " over an input of length " + }, + { + "bbox": [ + 68, + 405, + 543, + 487 + ], + "type": "inline_equation", + "content": "\\ell" + }, + { + "bbox": [ + 68, + 405, + 543, + 487 + ], + "type": "text", + "content": ". 
Since " + }, + { + "bbox": [ + 68, + 405, + 543, + 487 + ], + "type": "inline_equation", + "content": "\\ell = n^{\\gamma} = |w|" + }, + { + "bbox": [ + 68, + 405, + 543, + 487 + ], + "type": "text", + "content": " in our case, if " + }, + { + "bbox": [ + 68, + 405, + 543, + 487 + ], + "type": "inline_equation", + "content": "\\gamma \\leq \\varepsilon / 2d" + }, + { + "bbox": [ + 68, + 405, + 543, + 487 + ], + "type": "text", + "content": " we get that Step 4 runs in time at most " + }, + { + "bbox": [ + 68, + 405, + 543, + 487 + ], + "type": "inline_equation", + "content": "2^{n^{\\varepsilon}}" + }, + { + "bbox": [ + 68, + 405, + 543, + 487 + ], + "type": "text", + "content": ". Overall, if we set " + }, + { + "bbox": [ + 68, + 405, + 543, + 487 + ], + "type": "inline_equation", + "content": "\\gamma \\triangleq \\varepsilon / 2d" + }, + { + "bbox": [ + 68, + 405, + 543, + 487 + ], + "type": "text", + "content": ", it follows that " + }, + { + "bbox": [ + 68, + 405, + 543, + 487 + ], + "type": "inline_equation", + "content": "B^{\\gamma}" + }, + { + "bbox": [ + 68, + 405, + 543, + 487 + ], + "type": "text", + "content": " runs in time at most " + }, + { + "bbox": [ + 68, + 405, + 543, + 487 + ], + "type": "inline_equation", + "content": "O(2^{n^{\\varepsilon}})" + }, + { + "bbox": [ + 68, + 405, + 543, + 487 + ], + "type": "text", + "content": ". This completes the proof that " + }, + { + "bbox": [ + 68, + 405, + 543, + 487 + ], + "type": "inline_equation", + "content": "L' \\in \\mathsf{DTIME}[2^{n^{\\varepsilon}}]" + }, + { + "bbox": [ + 68, + 405, + 543, + 487 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 68, + 497, + 543, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 497, + 543, + 552 + ], + "spans": [ + { + "bbox": [ + 68, + 497, + 543, + 552 + ], + "type": "text", + "content": "The proof of Item (ii) is significantly more sophisticated, since one does not get an algorithm " + }, + { + "bbox": [ + 68, + 497, + 543, + 552 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 68, + 497, + 543, + 552 + ], + "type": "text", + "content": " as above from a " + }, + { + "bbox": [ + 68, + 497, + 543, + 552 + ], + "type": "inline_equation", + "content": "\\mathrm{PV}_1" + }, + { + "bbox": [ + 68, + 497, + 543, + 552 + ], + "type": "text", + "content": " proof of the lower bound sentence " + }, + { + "bbox": [ + 68, + 497, + 543, + 552 + ], + "type": "inline_equation", + "content": "\\mathsf{LB}^1(s_1, s_2, m, n_0)" + }, + { + "bbox": [ + 68, + 497, + 543, + 552 + ], + "type": "text", + "content": ". The argument combines a witnessing theorem for sentences with more than four quantifier alternations and an ingenious technique from [Kra11] that relies on ideas from the theory of computational pseudorandomness." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 68, + 559, + 517, + 573 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 559, + 517, + 573 + ], + "spans": [ + { + "bbox": [ + 68, + 559, + 517, + 573 + ], + "type": "text", + "content": "Open Problem 5.8. 
Strengthen the unprovability result from Theorem 5.6 in the following directions:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 77, + 582, + 518, + 662 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 77, + 582, + 518, + 596 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 582, + 518, + 596 + ], + "spans": [ + { + "bbox": [ + 77, + 582, + 518, + 596 + ], + "type": "text", + "content": "(a) show that it holds in the polynomial size regime, i.e., with " + }, + { + "bbox": [ + 77, + 582, + 518, + 596 + ], + "type": "inline_equation", + "content": "s_1(n) = n^a" + }, + { + "bbox": [ + 77, + 582, + 518, + 596 + ], + "type": "text", + "content": " and for some " + }, + { + "bbox": [ + 77, + 582, + 518, + 596 + ], + "type": "inline_equation", + "content": "s_2(n) = n^b" + }, + { + "bbox": [ + 77, + 582, + 518, + 596 + ], + "type": "text", + "content": ";" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 77, + 605, + 487, + 618 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 605, + 487, + 618 + ], + "spans": [ + { + "bbox": [ + 77, + 605, + 487, + 618 + ], + "type": "text", + "content": "(b) establish the unprovability of worst-case lower bounds against nondeterministic circuits;" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 78, + 627, + 465, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 627, + 465, + 640 + ], + "spans": [ + { + "bbox": [ + 78, + 627, + 465, + 640 + ], + "type": "text", + "content": "(c) show the unprovability of average-case lower bounds against deterministic circuits;" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 77, + 650, + 353, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 650, + 353, + 662 + ], + "spans": [ + { + "bbox": [ + 77, + 650, + 353, + 662 + ], + "type": "text", + "content": "(d) establish the same result with respect to a stronger theory." 
+ } + ] + } + ], + "index": 19 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 68, + 672, + 439, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 672, + 439, + 685 + ], + "spans": [ + { + "bbox": [ + 68, + 672, + 439, + 685 + ], + "type": "text", + "content": "We refer to [LO23, CLO24a, CLO24b] for some related results and partial progress." + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 712, + 311, + 720 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 712, + 311, + 720 + ], + "spans": [ + { + "bbox": [ + 299, + 712, + 311, + 720 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 250, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 250, + 85 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 250, + 85 + ], + "type": "text", + "content": "5.2.2 Extended Frege Lower Bounds" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 94, + 541, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 94, + 541, + 133 + ], + "spans": [ + { + "bbox": [ + 67, + 94, + 541, + 133 + ], + "type": "text", + "content": "This section covers a result on the unprovability of super-polynomial size extended Frege " + }, + { + "bbox": [ + 67, + 94, + 541, + 133 + ], + "type": "inline_equation", + "content": "(e\\mathcal{F})" + }, + { + "bbox": [ + 67, + 94, + 541, + 133 + ], + "type": "text", + "content": " lower bounds in " + }, + { + "bbox": [ + 67, + 94, + 541, + 133 + ], + "type": "inline_equation", + "content": "\\mathrm{PV}_1" + }, + { + "bbox": [ + 67, + 94, + 541, + 133 + ], + "type": "text", + "content": " [KP89] (see also [CU93, Bus90]). We refer to Section 3.2 for the necessary background. We will also need the definitions and results from Section 3.3." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 134, + 541, + 161 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 134, + 541, + 161 + ], + "spans": [ + { + "bbox": [ + 67, + 134, + 541, + 161 + ], + "type": "text", + "content": "We adapt the presentation from [Kra19]. Consider the theory " + }, + { + "bbox": [ + 67, + 134, + 541, + 161 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 134, + 541, + 161 + ], + "type": "text", + "content": " and its language " + }, + { + "bbox": [ + 67, + 134, + 541, + 161 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathsf{PV}}" + }, + { + "bbox": [ + 67, + 134, + 541, + 161 + ], + "type": "text", + "content": ". We shall use the following " + }, + { + "bbox": [ + 67, + 134, + 541, + 161 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathsf{PV}}" + }, + { + "bbox": [ + 67, + 134, + 541, + 161 + ], + "type": "text", + "content": " formulas:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 86, + 169, + 538, + 239 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 86, + 169, + 538, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 169, + 538, + 194 + ], + "spans": [ + { + "bbox": [ + 86, + 169, + 538, + 194 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 86, + 169, + 538, + 194 + ], + "type": "inline_equation", + "content": "\\operatorname{Sat}(x, y)" + }, + { + "bbox": [ + 86, + 169, + 538, + 194 + ], + "type": "text", + "content": ": a quantifier-free formula formalizing that " + }, + { + "bbox": [ + 86, + 169, + 538, + 194 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 86, + 169, + 538, + 194 + ], + "type": "text", + "content": " is a satisfying assignment of the Boolean formula " + }, + { + "bbox": [ + 86, + 169, + 538, + 194 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 86, + 169, 
+ 538, + 194 + ], + "type": "text", + "content": ";" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 86, + 204, + 228, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 204, + 228, + 217 + ], + "spans": [ + { + "bbox": [ + 86, + 204, + 228, + 217 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 86, + 204, + 228, + 217 + ], + "type": "inline_equation", + "content": "\\operatorname{Taut}(x) \\triangleq \\forall y \\leq x \\operatorname{Sat}(x, y)" + }, + { + "bbox": [ + 86, + 204, + 228, + 217 + ], + "type": "text", + "content": ";" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 86, + 227, + 430, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 227, + 430, + 239 + ], + "spans": [ + { + "bbox": [ + 86, + 227, + 430, + 239 + ], + "type": "text", + "content": "Proof " + }, + { + "bbox": [ + 86, + 227, + 430, + 239 + ], + "type": "inline_equation", + "content": "P(x,z)" + }, + { + "bbox": [ + 86, + 227, + 430, + 239 + ], + "type": "text", + "content": " : a quantifier-free formula formalizing that " + }, + { + "bbox": [ + 86, + 227, + 430, + 239 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 86, + 227, + 430, + 239 + ], + "type": "text", + "content": " is a " + }, + { + "bbox": [ + 86, + 227, + 430, + 239 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 86, + 227, + 430, + 239 + ], + "type": "text", + "content": " -proof of " + }, + { + "bbox": [ + 86, + 227, + 430, + 239 + ], + "type": "inline_equation", + "content": "x" + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 85, + 248, + 343, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 248, + 343, + 262 + ], + "spans": [ + { + "bbox": [ + 85, + 248, + 343, + 262 + ], + "type": "text", + "content": "The following lemma is central to the unprovability result." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 270, + 541, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 270, + 541, + 297 + ], + "spans": [ + { + "bbox": [ + 67, + 270, + 541, + 297 + ], + "type": "text", + "content": "Lemma 5.9. Let " + }, + { + "bbox": [ + 67, + 270, + 541, + 297 + ], + "type": "inline_equation", + "content": "M \\models \\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 270, + 541, + 297 + ], + "type": "text", + "content": ", and assume that " + }, + { + "bbox": [ + 67, + 270, + 541, + 297 + ], + "type": "inline_equation", + "content": "\\phi \\in M" + }, + { + "bbox": [ + 67, + 270, + 541, + 297 + ], + "type": "text", + "content": " is a propositional formula. The following statements are equivalent:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 80, + 305, + 241, + 318 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 305, + 241, + 318 + ], + "spans": [ + { + "bbox": [ + 80, + 305, + 241, + 318 + ], + "type": "text", + "content": "(i) There is no eF-proof of " + }, + { + "bbox": [ + 80, + 305, + 241, + 318 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 80, + 305, + 241, + 318 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 80, + 305, + 241, + 318 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 80, + 305, + 241, + 318 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 258, + 328, + 377, + 342 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 258, + 328, + 377, + 342 + ], + "spans": [ + { + "bbox": [ + 258, + 328, + 377, + 342 + ], + "type": "interline_equation", + "content": "M \\models \\forall z \\neg \\operatorname {P r o o f} _ {e \\mathcal {F}} (\\phi , z).", + "image_path": "1722095f7b04eddf62a8433353d1f5dce7d563215069760618d773acd4b5bfae.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 77, + 355, + 434, + 369 + 
], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 355, + 434, + 369 + ], + "spans": [ + { + "bbox": [ + 77, + 355, + 434, + 369 + ], + "type": "text", + "content": "(ii) There is an extension " + }, + { + "bbox": [ + 77, + 355, + 434, + 369 + ], + "type": "inline_equation", + "content": "M^{\\prime}\\supseteq M" + }, + { + "bbox": [ + 77, + 355, + 434, + 369 + ], + "type": "text", + "content": " (also a model of " + }, + { + "bbox": [ + 77, + 355, + 434, + 369 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 77, + 355, + 434, + 369 + ], + "type": "text", + "content": ") in which " + }, + { + "bbox": [ + 77, + 355, + 434, + 369 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 77, + 355, + 434, + 369 + ], + "type": "text", + "content": " is falsified:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 268, + 378, + 368, + 393 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 268, + 378, + 368, + 393 + ], + "spans": [ + { + "bbox": [ + 268, + 378, + 368, + 393 + ], + "type": "interline_equation", + "content": "M ^ {\\prime} \\vDash \\exists y \\operatorname {S a t} (\\neg \\phi , y).", + "image_path": "38010ea5f5cc138fddb5b73510aa383621fffbca94c78430904f64e876ec2f19.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 67, + 407, + 541, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 407, + 541, + 434 + ], + "spans": [ + { + "bbox": [ + 67, + 407, + 541, + 434 + ], + "type": "text", + "content": "The proof of Lemma 5.9 proceeds by compactness and uses that the correctness of the propositional translation from " + }, + { + "bbox": [ + 67, + 407, + 541, + 434 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 407, + 541, + 434 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 67, + 407, + 541, + 434 + ], + "type": "inline_equation", + "content": 
"e\\mathcal{F}" + }, + { + "bbox": [ + 67, + 407, + 541, + 434 + ], + "type": "text", + "content": " (Section 3.2) is also provable in " + }, + { + "bbox": [ + 67, + 407, + 541, + 434 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 407, + 541, + 434 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 67, + 441, + 541, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 441, + 541, + 469 + ], + "spans": [ + { + "bbox": [ + 67, + 441, + 541, + 469 + ], + "type": "text", + "content": "Lemma 5.10. Let " + }, + { + "bbox": [ + 67, + 441, + 541, + 469 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 67, + 441, + 541, + 469 + ], + "type": "text", + "content": " be a nonstandard countable model of " + }, + { + "bbox": [ + 67, + 441, + 541, + 469 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 441, + 541, + 469 + ], + "type": "text", + "content": ". Then it has a cofinal extension " + }, + { + "bbox": [ + 67, + 441, + 541, + 469 + ], + "type": "inline_equation", + "content": "M' \\supseteq_{\\mathrm{cf}} M" + }, + { + "bbox": [ + 67, + 441, + 541, + 469 + ], + "type": "text", + "content": " (also a model of " + }, + { + "bbox": [ + 67, + 441, + 541, + 469 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 441, + 541, + 469 + ], + "type": "text", + "content": ") such that every tautology in " + }, + { + "bbox": [ + 67, + 441, + 541, + 469 + ], + "type": "inline_equation", + "content": "M'" + }, + { + "bbox": [ + 67, + 441, + 541, + 469 + ], + "type": "text", + "content": " has an eF-proof in " + }, + { + "bbox": [ + 67, + 441, + 541, + 469 + ], + "type": "inline_equation", + "content": "M'" + }, + { + "bbox": [ + 67, + 441, + 541, + 469 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 67, + 476, + 541, + 516 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 476, + 541, + 516 + ], + "spans": [ + { + "bbox": [ + 67, + 476, + 541, + 516 + ], + "type": "text", + "content": "The proof of Lemma 5.10 iterates Lemma 5.9 while taking cuts to ensure that the limit extension " + }, + { + "bbox": [ + 67, + 476, + 541, + 516 + ], + "type": "inline_equation", + "content": "M' = \\bigcup_{i} M_i" + }, + { + "bbox": [ + 67, + 476, + 541, + 516 + ], + "type": "text", + "content": " (where " + }, + { + "bbox": [ + 67, + 476, + 541, + 516 + ], + "type": "inline_equation", + "content": "M_0 = M" + }, + { + "bbox": [ + 67, + 476, + 541, + 516 + ], + "type": "text", + "content": ") is cofinal in " + }, + { + "bbox": [ + 67, + 476, + 541, + 516 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 67, + 476, + 541, + 516 + ], + "type": "text", + "content": ". Since each " + }, + { + "bbox": [ + 67, + 476, + 541, + 516 + ], + "type": "inline_equation", + "content": "M_i \\models \\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 476, + 541, + 516 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 476, + 541, + 516 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 476, + 541, + 516 + ], + "type": "text", + "content": " is universal, we also have " + }, + { + "bbox": [ + 67, + 476, + 541, + 516 + ], + "type": "inline_equation", + "content": "M' \\models \\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 476, + 541, + 516 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 86, + 517, + 353, + 530 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 517, + 353, + 530 + ], + "spans": [ + { + "bbox": [ + 86, + 517, + 353, + 530 + ], + "type": "text", + "content": "We will need the following analogue of Lemma 3.6 for " + }, + { + "bbox": [ + 86, + 517, + 353, + 530 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 86, + 517, + 353, + 530 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 67, + 539, + 541, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 539, + 541, + 579 + ], + "spans": [ + { + "bbox": [ + 67, + 539, + 541, + 579 + ], + "type": "text", + "content": "Fact 5.11. Let " + }, + { + "bbox": [ + 67, + 539, + 541, + 579 + ], + "type": "inline_equation", + "content": "M_0" + }, + { + "bbox": [ + 67, + 539, + 541, + 579 + ], + "type": "text", + "content": " be a nonstandard countable model of " + }, + { + "bbox": [ + 67, + 539, + 541, + 579 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 539, + 541, + 579 + ], + "type": "text", + "content": ". 
Then there is a (countable) cut " + }, + { + "bbox": [ + 67, + 539, + 541, + 579 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 67, + 539, + 541, + 579 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 67, + 539, + 541, + 579 + ], + "type": "inline_equation", + "content": "M_0" + }, + { + "bbox": [ + 67, + 539, + 541, + 579 + ], + "type": "text", + "content": " that is a (nonstandard) model of " + }, + { + "bbox": [ + 67, + 539, + 541, + 579 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 539, + 541, + 579 + ], + "type": "text", + "content": " and a length " + }, + { + "bbox": [ + 67, + 539, + 541, + 579 + ], + "type": "inline_equation", + "content": "n \\in M" + }, + { + "bbox": [ + 67, + 539, + 541, + 579 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 67, + 539, + 541, + 579 + ], + "type": "inline_equation", + "content": "n = |a|" + }, + { + "bbox": [ + 67, + 539, + 541, + 579 + ], + "type": "text", + "content": " for some nonstandard " + }, + { + "bbox": [ + 67, + 539, + 541, + 579 + ], + "type": "inline_equation", + "content": "a \\in M" + }, + { + "bbox": [ + 67, + 539, + 541, + 579 + ], + "type": "text", + "content": ", such that for every " + }, + { + "bbox": [ + 67, + 539, + 541, + 579 + ], + "type": "inline_equation", + "content": "b \\in M" + }, + { + "bbox": [ + 67, + 539, + 541, + 579 + ], + "type": "text", + "content": " we have " + }, + { + "bbox": [ + 67, + 539, + 541, + 579 + ], + "type": "inline_equation", + "content": "M \\models |b| \\leq n^k" + }, + { + "bbox": [ + 67, + 539, + 541, + 579 + ], + "type": "text", + "content": " for some standard number " + }, + { + "bbox": [ + 67, + 539, + 541, + 579 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 67, + 539, + 541, + 579 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 67, + 588, + 541, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 588, + 541, + 613 + ], + "spans": [ + { + "bbox": [ + 67, + 588, + 541, + 613 + ], + "type": "text", + "content": "The next result is a consequence of the existence of nonstandard countable models, Fact 5.11, and Lemma 5.10." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 67, + 622, + 436, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 622, + 436, + 635 + ], + "spans": [ + { + "bbox": [ + 67, + 622, + 436, + 635 + ], + "type": "text", + "content": "Lemma 5.12. There is a model " + }, + { + "bbox": [ + 67, + 622, + 436, + 635 + ], + "type": "inline_equation", + "content": "M^{*}" + }, + { + "bbox": [ + 67, + 622, + 436, + 635 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 67, + 622, + 436, + 635 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 622, + 436, + 635 + ], + "type": "text", + "content": " such that the following properties hold:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 77, + 644, + 539, + 693 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 80, + 644, + 298, + 657 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 644, + 298, + 657 + ], + "spans": [ + { + "bbox": [ + 80, + 644, + 298, + 657 + ], + "type": "text", + "content": "(i) Any tautology in " + }, + { + "bbox": [ + 80, + 644, + 298, + 657 + ], + "type": "inline_equation", + "content": "M^{*}" + }, + { + "bbox": [ + 80, + 644, + 298, + 657 + ], + "type": "text", + "content": " has an eF-proof in " + }, + { + "bbox": [ + 80, + 644, + 298, + 657 + ], + "type": "inline_equation", + "content": "M^{*}" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 77, + 666, + 539, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 666, + 539, + 693 + ], + "spans": 
[ + { + "bbox": [ + 77, + 666, + 539, + 693 + ], + "type": "text", + "content": "(ii) There is a nonstandard element " + }, + { + "bbox": [ + 77, + 666, + 539, + 693 + ], + "type": "inline_equation", + "content": "a \\in M^*" + }, + { + "bbox": [ + 77, + 666, + 539, + 693 + ], + "type": "text", + "content": " of length " + }, + { + "bbox": [ + 77, + 666, + 539, + 693 + ], + "type": "inline_equation", + "content": "n \\triangleq |a|" + }, + { + "bbox": [ + 77, + 666, + 539, + 693 + ], + "type": "text", + "content": " such that for any element " + }, + { + "bbox": [ + 77, + 666, + 539, + 693 + ], + "type": "inline_equation", + "content": "b \\in M^*" + }, + { + "bbox": [ + 77, + 666, + 539, + 693 + ], + "type": "text", + "content": " there is a standard number " + }, + { + "bbox": [ + 77, + 666, + 539, + 693 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 77, + 666, + 539, + 693 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 77, + 666, + 539, + 693 + ], + "type": "inline_equation", + "content": "M^* \\models |b| \\leq n^k" + }, + { + "bbox": [ + 77, + 666, + 539, + 693 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 21 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 712, + 310, + 720 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 712, + 310, + 720 + ], + "spans": [ + { + "bbox": [ + 299, + 712, + 310, + 720 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 72, + 543, + 99 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 72, + 543, + 99 + ], + "spans": [ + { + "bbox": [ + 68, + 72, + 543, + 99 + ], + "type": "text", + "content": "Theorem 5.13 (Unprovability of super-polynomial size " + }, + { + "bbox": [ + 68, + 72, + 543, + 99 + ], + "type": "inline_equation", + "content": "e\\mathcal{F}" + }, + { + "bbox": [ + 68, + 72, + 543, + 99 + ], + "type": "text", + "content": " lower bounds in " + }, + { + "bbox": [ + 68, + 72, + 543, + 99 + ], + "type": "inline_equation", + "content": "\\mathrm{PV}_1" + }, + { + "bbox": [ + 68, + 72, + 543, + 99 + ], + "type": "text", + "content": " [KP89]). Consider the sentence" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 140, + 99, + 472, + 114 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 99, + 472, + 114 + ], + "spans": [ + { + "bbox": [ + 140, + 99, + 472, + 114 + ], + "type": "interline_equation", + "content": "\\Psi_ {e \\mathcal {F}} \\triangleq \\forall x \\exists \\phi \\geq x [ \\operatorname {T a u t} (\\phi) \\wedge \\forall \\pi (| \\pi | \\leq | \\phi | \\# | \\phi | \\rightarrow \\neg \\operatorname {P r o o f} _ {e \\mathcal {F}} (\\phi , \\pi)) ]. 
^ {1 2}", + "image_path": "0b7cc6ae545f43e15552a3e812577062636b59aaf9178ecc7da4e43b7e5c6e22.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 120, + 255, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 120, + 255, + 133 + ], + "spans": [ + { + "bbox": [ + 69, + 120, + 255, + 133 + ], + "type": "text", + "content": "The sentence " + }, + { + "bbox": [ + 69, + 120, + 255, + 133 + ], + "type": "inline_equation", + "content": "\\Psi_{e\\mathcal{F}}" + }, + { + "bbox": [ + 69, + 120, + 255, + 133 + ], + "type": "text", + "content": " is not provable in " + }, + { + "bbox": [ + 69, + 120, + 255, + 133 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 69, + 120, + 255, + 133 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "spans": [ + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "text", + "content": "Proof. Suppose " + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1 \\vdash \\Psi_{e\\mathcal{F}}" + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "text", + "content": ". Let " + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "inline_equation", + "content": "M^*" + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "inline_equation", + "content": "n \\triangleq |a|" + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "text", + "content": " be as in Lemma 5.12. 
Since " + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "inline_equation", + "content": "\\Psi_{e\\mathcal{F}}" + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "text", + "content": " holds in " + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "inline_equation", + "content": "M^*" + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "text", + "content": ", there is a tautology " + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "inline_equation", + "content": "\\phi \\in M^*" + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "inline_equation", + "content": "\\phi \\geq a" + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "text", + "content": " and consequently " + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "inline_equation", + "content": "|\\phi| \\geq n" + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "text", + "content": " does not have an " + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "inline_equation", + "content": "e\\mathcal{F}" + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "text", + "content": "-proof of size " + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "inline_equation", + "content": "|\\phi|\\# |\\phi|" + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "inline_equation", + "content": "M^*" + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "text", + "content": ". 
On the other hand, by the two properties of " + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "inline_equation", + "content": "M^*" + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "text", + "content": " given by Lemma 5.12, the formula " + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "text", + "content": " has an " + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "inline_equation", + "content": "e\\mathcal{F}" + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "text", + "content": "-proof of size at most " + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "inline_equation", + "content": "n^k" + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "text", + "content": " for some standard number " + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "text", + "content": ". Finally, since the element " + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "text", + "content": " is nonstandard, we have " + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "inline_equation", + "content": "n^k \\leq n\\# n \\leq |\\phi|\\# |\\phi|" + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "inline_equation", + "content": "M^\\star" + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "text", + "content": ". 
This contradiction implies that " + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "text", + "content": " does not prove " + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "inline_equation", + "content": "\\Psi_{e\\mathcal{F}}" + }, + { + "bbox": [ + 68, + 141, + 542, + 211 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 218, + 542, + 246 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 218, + 542, + 246 + ], + "spans": [ + { + "bbox": [ + 67, + 218, + 542, + 246 + ], + "type": "text", + "content": "Open Problem 5.14. Show that " + }, + { + "bbox": [ + 67, + 218, + 542, + 246 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 218, + 542, + 246 + ], + "type": "text", + "content": " cannot prove fixed-polynomial size lower bounds on the length of " + }, + { + "bbox": [ + 67, + 218, + 542, + 246 + ], + "type": "inline_equation", + "content": "e\\mathcal{F}" + }, + { + "bbox": [ + 67, + 218, + 542, + 246 + ], + "type": "text", + "content": " proofs." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 254, + 436, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 254, + 436, + 269 + ], + "spans": [ + { + "bbox": [ + 68, + 254, + 436, + 269 + ], + "type": "text", + "content": "Open Problem 5.15. Establish the unprovability of the sentence " + }, + { + "bbox": [ + 68, + 254, + 436, + 269 + ], + "type": "inline_equation", + "content": "\\Psi_{e\\mathcal{F}}" + }, + { + "bbox": [ + 68, + 254, + 436, + 269 + ], + "type": "text", + "content": " in theory " + }, + { + "bbox": [ + 68, + 254, + 436, + 269 + ], + "type": "inline_equation", + "content": "\\mathsf{S}_2^1" + }, + { + "bbox": [ + 68, + 254, + 436, + 269 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 283, + 381, + 298 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 283, + 381, + 298 + ], + "spans": [ + { + "bbox": [ + 68, + 283, + 381, + 298 + ], + "type": "text", + "content": "5.3 Connection Between Upper Bounds and Lower Bounds" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 68, + 305, + 542, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 305, + 542, + 346 + ], + "spans": [ + { + "bbox": [ + 68, + 305, + 542, + 346 + ], + "type": "text", + "content": "In this section, we explain a result from [BKO20] showing that the unprovability of " + }, + { + "bbox": [ + 68, + 305, + 542, + 346 + ], + "type": "inline_equation", + "content": "\\mathsf{P} = \\mathsf{NP}" + }, + { + "bbox": [ + 68, + 305, + 542, + 346 + ], + "type": "text", + "content": " (Open Problem 5.3) is related to the unprovability of circuit lower bounds. For a " + }, + { + "bbox": [ + 68, + 305, + 542, + 346 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 68, + 305, + 542, + 346 + ], + "type": "text", + "content": " function symbol " + }, + { + "bbox": [ + 68, + 305, + 542, + 346 + ], + "type": "inline_equation", + "content": "h" + }, + { + "bbox": [ + 68, + 305, + 542, + 346 + ], + "type": "text", + "content": " and a circuit size parameter " + }, + { + "bbox": [ + 68, + 305, + 542, + 346 + ], + "type": "inline_equation", + "content": "k\\in \\mathbb{N}" + }, + { + "bbox": [ + 68, + 305, + 542, + 346 + ], + "type": "text", + "content": ", consider the sentence" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 246, + 354, + 365, + 371 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 246, + 354, + 365, + 371 + ], + "spans": [ + { + "bbox": [ + 246, + 354, + 365, + 371 + ], + "type": "interline_equation", + "content": "\\mathsf {L B} _ {k} ^ {a. e.} (h) \\triangleq \\neg \\mathsf {U B} _ {k} ^ {i. 
o.} [ h ],", + "image_path": "6b4e6f119043345bb1f74729ca0f428dc004dced8f494f5cbf1d55ad04908e8b.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 68, + 380, + 541, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 380, + 541, + 407 + ], + "spans": [ + { + "bbox": [ + 68, + 380, + 541, + 407 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 68, + 380, + 541, + 407 + ], + "type": "inline_equation", + "content": "\\mathsf{UB}_k^{i.o.}[h]" + }, + { + "bbox": [ + 68, + 380, + 541, + 407 + ], + "type": "text", + "content": " is the sentence defined in Section 5.1.1. The sentence " + }, + { + "bbox": [ + 68, + 380, + 541, + 407 + ], + "type": "inline_equation", + "content": "\\mathsf{LB}_k^{a.e.}(h)" + }, + { + "bbox": [ + 68, + 380, + 541, + 407 + ], + "type": "text", + "content": " states that the language defined by " + }, + { + "bbox": [ + 68, + 380, + 541, + 407 + ], + "type": "inline_equation", + "content": "h" + }, + { + "bbox": [ + 68, + 380, + 541, + 407 + ], + "type": "text", + "content": " is hard on input length " + }, + { + "bbox": [ + 68, + 380, + 541, + 407 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 68, + 380, + 541, + 407 + ], + "type": "text", + "content": " for circuits of size " + }, + { + "bbox": [ + 68, + 380, + 541, + 407 + ], + "type": "inline_equation", + "content": "n^k" + }, + { + "bbox": [ + 68, + 380, + 541, + 407 + ], + "type": "text", + "content": " whenever " + }, + { + "bbox": [ + 68, + 380, + 541, + 407 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 68, + 380, + 541, + 407 + ], + "type": "text", + "content": " is sufficiently large." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 68, + 416, + 542, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 416, + 542, + 458 + ], + "spans": [ + { + "bbox": [ + 68, + 416, + 542, + 458 + ], + "type": "text", + "content": "Theorem 5.16 (Unprovability of " + }, + { + "bbox": [ + 68, + 416, + 542, + 458 + ], + "type": "inline_equation", + "content": "\\mathsf{P} = \\mathsf{NP}" + }, + { + "bbox": [ + 68, + 416, + 542, + 458 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 68, + 416, + 542, + 458 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 68, + 416, + 542, + 458 + ], + "type": "text", + "content": " from the unprovability of lower bounds in " + }, + { + "bbox": [ + 68, + 416, + 542, + 458 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 68, + 416, + 542, + 458 + ], + "type": "text", + "content": " [BKO20]). If there exists " + }, + { + "bbox": [ + 68, + 416, + 542, + 458 + ], + "type": "inline_equation", + "content": "k\\in \\mathbb{N}" + }, + { + "bbox": [ + 68, + 416, + 542, + 458 + ], + "type": "text", + "content": " such that for no function symbol " + }, + { + "bbox": [ + 68, + 416, + 542, + 458 + ], + "type": "inline_equation", + "content": "h" + }, + { + "bbox": [ + 68, + 416, + 542, + 458 + ], + "type": "text", + "content": " theory " + }, + { + "bbox": [ + 68, + 416, + 542, + 458 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 68, + 416, + 542, + 458 + ], + "type": "text", + "content": " proves the sentence " + }, + { + "bbox": [ + 68, + 416, + 542, + 458 + ], + "type": "inline_equation", + "content": "\\mathsf{LB}_k^{a.e.}(h)" + }, + { + "bbox": [ + 68, + 416, + 542, + 458 + ], + "type": "text", + "content": ", then for no function symbol " + }, + { + "bbox": [ + 68, + 416, + 542, + 458 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 68, + 416, + 542, + 
458 + ], + "type": "text", + "content": " theory " + }, + { + "bbox": [ + 68, + 416, + 542, + 458 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 68, + 416, + 542, + 458 + ], + "type": "text", + "content": " proves the sentence " + }, + { + "bbox": [ + 68, + 416, + 542, + 458 + ], + "type": "inline_equation", + "content": "\\varphi_{\\mathsf{P} = \\mathsf{NP}}(f)" + }, + { + "bbox": [ + 68, + 416, + 542, + 458 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 68, + 465, + 541, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 465, + 541, + 507 + ], + "spans": [ + { + "bbox": [ + 68, + 465, + 541, + 507 + ], + "type": "text", + "content": "Theorem 5.16 shows that if " + }, + { + "bbox": [ + 68, + 465, + 541, + 507 + ], + "type": "inline_equation", + "content": "\\mathrm{PV}_1" + }, + { + "bbox": [ + 68, + 465, + 541, + 507 + ], + "type": "text", + "content": " does not prove " + }, + { + "bbox": [ + 68, + 465, + 541, + 507 + ], + "type": "inline_equation", + "content": "n^k" + }, + { + "bbox": [ + 68, + 465, + 541, + 507 + ], + "type": "text", + "content": "-size lower bounds for a language in " + }, + { + "bbox": [ + 68, + 465, + 541, + 507 + ], + "type": "inline_equation", + "content": "\\mathsf{P}" + }, + { + "bbox": [ + 68, + 465, + 541, + 507 + ], + "type": "text", + "content": ", then " + }, + { + "bbox": [ + 68, + 465, + 541, + 507 + ], + "type": "inline_equation", + "content": "\\mathsf{P} \\neq \\mathsf{NP}" + }, + { + "bbox": [ + 68, + 465, + 541, + 507 + ], + "type": "text", + "content": " is consistent with " + }, + { + "bbox": [ + 68, + 465, + 541, + 507 + ], + "type": "inline_equation", + "content": "\\mathrm{PV}_1" + }, + { + "bbox": [ + 68, + 465, + 541, + 507 + ], + "type": "text", + "content": ". 
Note that the hypothesis of Theorem 5.16 is weaker than the assumption that " + }, + { + "bbox": [ + 68, + 465, + 541, + 507 + ], + "type": "inline_equation", + "content": "\\mathrm{PV}_1" + }, + { + "bbox": [ + 68, + 465, + 541, + 507 + ], + "type": "text", + "content": " does not prove that " + }, + { + "bbox": [ + 68, + 465, + 541, + 507 + ], + "type": "inline_equation", + "content": "\\mathsf{NP} \\not\\subsetneq \\mathsf{SIZE}[n^k]" + }, + { + "bbox": [ + 68, + 465, + 541, + 507 + ], + "type": "text", + "content": " for some " + }, + { + "bbox": [ + 68, + 465, + 541, + 507 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 68, + 465, + 541, + 507 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 68, + 514, + 542, + 595 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 514, + 542, + 595 + ], + "spans": [ + { + "bbox": [ + 68, + 514, + 542, + 595 + ], + "type": "text", + "content": "Sketch of the proof of Theorem 5.16. We proceed in the contrapositive. We formalize in " + }, + { + "bbox": [ + 68, + 514, + 542, + 595 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 68, + 514, + 542, + 595 + ], + "type": "text", + "content": " the result that if " + }, + { + "bbox": [ + 68, + 514, + 542, + 595 + ], + "type": "inline_equation", + "content": "\\mathsf{P} = \\mathsf{NP}" + }, + { + "bbox": [ + 68, + 514, + 542, + 595 + ], + "type": "text", + "content": ", then for any parameter " + }, + { + "bbox": [ + 68, + 514, + 542, + 595 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 68, + 514, + 542, + 595 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 68, + 514, + 542, + 595 + ], + "type": "inline_equation", + "content": "\\mathsf{P} \\not\\subsetneq" + }, + { + "bbox": [ + 68, + 514, + 542, + 595 + ], + "type": "text", + "content": " i.o. 
" + }, + { + "bbox": [ + 68, + 514, + 542, + 595 + ], + "type": "inline_equation", + "content": "\\mathsf{SIZE}[n^k]" + }, + { + "bbox": [ + 68, + 514, + 542, + 595 + ], + "type": "text", + "content": " (see, e.g., [Lip94, Theorem 3]). This result combines the collapse of " + }, + { + "bbox": [ + 68, + 514, + 542, + 595 + ], + "type": "inline_equation", + "content": "\\mathsf{PH}" + }, + { + "bbox": [ + 68, + 514, + 542, + 595 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 68, + 514, + 542, + 595 + ], + "type": "inline_equation", + "content": "\\mathsf{P}" + }, + { + "bbox": [ + 68, + 514, + 542, + 595 + ], + "type": "text", + "content": " with Kannan's argument [Kan82] that " + }, + { + "bbox": [ + 68, + 514, + 542, + 595 + ], + "type": "inline_equation", + "content": "\\mathsf{PH}" + }, + { + "bbox": [ + 68, + 514, + 542, + 595 + ], + "type": "text", + "content": " can define languages that are almost-everywhere hard against circuits of fixed-polynomial size. Typically, proving this claim requires showing the existence of a truth table of size " + }, + { + "bbox": [ + 68, + 514, + 542, + 595 + ], + "type": "inline_equation", + "content": "2^n" + }, + { + "bbox": [ + 68, + 514, + 542, + 595 + ], + "type": "text", + "content": " that is hard against circuits of size " + }, + { + "bbox": [ + 68, + 514, + 542, + 595 + ], + "type": "inline_equation", + "content": "n^k" + }, + { + "bbox": [ + 68, + 514, + 542, + 595 + ], + "type": "text", + "content": ". However, this result might not be provable in " + }, + { + "bbox": [ + 68, + 514, + 542, + 595 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 68, + 514, + 542, + 595 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 68, + 596, + 542, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 596, + 542, + 651 + ], + "spans": [ + { + "bbox": [ + 68, + 596, + 542, + 651 + ], + "type": "text", + "content": "We address this issue as follows. From the provability in " + }, + { + "bbox": [ + 68, + 596, + 542, + 651 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 68, + 596, + 542, + 651 + ], + "type": "text", + "content": " that " + }, + { + "bbox": [ + 68, + 596, + 542, + 651 + ], + "type": "inline_equation", + "content": "\\mathsf{P} = \\mathsf{NP}" + }, + { + "bbox": [ + 68, + 596, + 542, + 651 + ], + "type": "text", + "content": ", it follows that for each " + }, + { + "bbox": [ + 68, + 596, + 542, + 651 + ], + "type": "inline_equation", + "content": "i\\geq 1" + }, + { + "bbox": [ + 68, + 596, + 542, + 651 + ], + "type": "text", + "content": " theory " + }, + { + "bbox": [ + 68, + 596, + 542, + 651 + ], + "type": "inline_equation", + "content": "\\mathsf{T}_2^i" + }, + { + "bbox": [ + 68, + 596, + 542, + 651 + ], + "type": "text", + "content": " collapses to " + }, + { + "bbox": [ + 68, + 596, + 542, + 651 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 68, + 596, + 542, + 651 + ], + "type": "text", + "content": " [KPT91]. Recall that the dual weak pigeonhole principle (dWPHP) for " + }, + { + "bbox": [ + 68, + 596, + 542, + 651 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathsf{PV}}" + }, + { + "bbox": [ + 68, + 596, + 542, + 651 + ], + "type": "text", + "content": "-functions is provable in " + }, + { + "bbox": [ + 68, + 596, + 542, + 651 + ], + "type": "inline_equation", + "content": "\\mathsf{T}_2^2" + }, + { + "bbox": [ + 68, + 596, + 542, + 651 + ], + "type": "text", + "content": ". 
Define a " + }, + { + "bbox": [ + 68, + 596, + 542, + 651 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 68, + 596, + 542, + 651 + ], + "type": "text", + "content": " function symbol " + }, + { + "bbox": [ + 68, + 596, + 542, + 651 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 68, + 596, + 542, + 651 + ], + "type": "text", + "content": " that takes as input a circuit " + }, + { + "bbox": [ + 68, + 596, + 542, + 651 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 68, + 596, + 542, + 651 + ], + "type": "text", + "content": " of size " + }, + { + "bbox": [ + 68, + 596, + 542, + 651 + ], + "type": "inline_equation", + "content": "n^k" + }, + { + "bbox": [ + 68, + 596, + 542, + 651 + ], + "type": "text", + "content": " and outputs the lexicographic first " + }, + { + "bbox": [ + 68, + 596, + 542, + 651 + ], + "type": "inline_equation", + "content": "n^{k + 1}" + }, + { + "bbox": [ + 68, + 596, + 542, + 651 + ], + "type": "text", + "content": " bits of the truth table computed by " + }, + { + "bbox": [ + 68, + 596, + 542, + 651 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 68, + 596, + 542, + 651 + ], + "type": "text", + "content": ". 
From dWPHP" + }, + { + "bbox": [ + 68, + 596, + 542, + 651 + ], + "type": "inline_equation", + "content": "(g)" + }, + { + "bbox": [ + 68, + 596, + 542, + 651 + ], + "type": "text", + "content": ", we now" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 68, + 658, + 542, + 693 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 658, + 542, + 693 + ], + "spans": [ + { + "bbox": [ + 68, + 658, + 542, + 693 + ], + "type": "text", + "content": "12 Recall from Section 2.1 that " + }, + { + "bbox": [ + 68, + 658, + 542, + 693 + ], + "type": "inline_equation", + "content": "x \\# y \\triangleq 2^{|x| \\cdot |y|}" + }, + { + "bbox": [ + 68, + 658, + 542, + 693 + ], + "type": "text", + "content": ". Consequently, if we let " + }, + { + "bbox": [ + 68, + 658, + 542, + 693 + ], + "type": "inline_equation", + "content": "n = |\\phi|" + }, + { + "bbox": [ + 68, + 658, + 542, + 693 + ], + "type": "text", + "content": ", then the bound " + }, + { + "bbox": [ + 68, + 658, + 542, + 693 + ], + "type": "inline_equation", + "content": "|\\pi| \\leq |\\phi| \\# |\\phi|" + }, + { + "bbox": [ + 68, + 658, + 542, + 693 + ], + "type": "text", + "content": " translates to " + }, + { + "bbox": [ + 68, + 658, + 542, + 693 + ], + "type": "inline_equation", + "content": "|\\pi| \\leq n \\# n" + }, + { + "bbox": [ + 68, + 658, + 542, + 693 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 68, + 658, + 542, + 693 + ], + "type": "inline_equation", + "content": "n \\# n = 2^{|n| \\cdot |n|}" + }, + { + "bbox": [ + 68, + 658, + 542, + 693 + ], + "type": "text", + "content": " is of order " + }, + { + "bbox": [ + 68, + 658, + 542, + 693 + ], + "type": "inline_equation", + "content": "n^{\\log n}" + }, + { + "bbox": [ + 68, + 658, + 542, + 693 + ], + "type": "text", + "content": ". The proof of Theorem 5.13 works with any reasonable formalization that refers to a super-polynomial size bound." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 299, + 712, + 312, + 720 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 712, + 312, + 720 + ], + "spans": [ + { + "bbox": [ + 299, + 712, + 312, + 720 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 541, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 541, + 125 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 541, + 125 + ], + "type": "text", + "content": "derive in " + }, + { + "bbox": [ + 67, + 72, + 541, + 125 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 72, + 541, + 125 + ], + "type": "text", + "content": " that the prefix of some truth table is not computable by circuits of size " + }, + { + "bbox": [ + 67, + 72, + 541, + 125 + ], + "type": "inline_equation", + "content": "n^k" + }, + { + "bbox": [ + 67, + 72, + 541, + 125 + ], + "type": "text", + "content": ", if " + }, + { + "bbox": [ + 67, + 72, + 541, + 125 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 67, + 72, + 541, + 125 + ], + "type": "text", + "content": " is sufficiently large. 
We can implicitly extend this truth table prefix with zeroes and use the resulting truth table to define a " + }, + { + "bbox": [ + 67, + 72, + 541, + 125 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 72, + 541, + 125 + ], + "type": "text", + "content": "-formula " + }, + { + "bbox": [ + 67, + 72, + 541, + 125 + ], + "type": "inline_equation", + "content": "\\varphi(x)" + }, + { + "bbox": [ + 67, + 72, + 541, + 125 + ], + "type": "text", + "content": " with a constant number of bounded quantifiers that defines a language " + }, + { + "bbox": [ + 67, + 72, + 541, + 125 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 67, + 72, + 541, + 125 + ], + "type": "text", + "content": " that is hard against circuits of size " + }, + { + "bbox": [ + 67, + 72, + 541, + 125 + ], + "type": "inline_equation", + "content": "n^k" + }, + { + "bbox": [ + 67, + 72, + 541, + 125 + ], + "type": "text", + "content": ", where the hardness is provable in " + }, + { + "bbox": [ + 67, + 72, + 541, + 125 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 72, + 541, + 125 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 126, + 541, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 126, + 541, + 167 + ], + "spans": [ + { + "bbox": [ + 67, + 126, + 541, + 167 + ], + "type": "text", + "content": "Given that the provability in " + }, + { + "bbox": [ + 67, + 126, + 541, + 167 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 126, + 541, + 167 + ], + "type": "text", + "content": " that " + }, + { + "bbox": [ + 67, + 126, + 541, + 167 + ], + "type": "inline_equation", + "content": "\\mathsf{P} = \\mathsf{NP}" + }, + { + "bbox": [ + 67, + 126, + 541, + 167 + ], + "type": "text", + "content": " implies the provability in " + }, + { + "bbox": [ + 67, + 126, + 541, + 167 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 126, + 541, + 167 + ], + "type": "text", + "content": " that " + }, + { + "bbox": [ + 67, + 126, + 541, + 167 + ], + "type": "inline_equation", + "content": "\\mathsf{PH}" + }, + { + "bbox": [ + 67, + 126, + 541, + 167 + ], + "type": "text", + "content": " collapses to " + }, + { + "bbox": [ + 67, + 126, + 541, + 167 + ], + "type": "inline_equation", + "content": "\\mathsf{P}" + }, + { + "bbox": [ + 67, + 126, + 541, + 167 + ], + "type": "text", + "content": ", it follows that " + }, + { + "bbox": [ + 67, + 126, + 541, + 167 + ], + "type": "inline_equation", + "content": "\\varphi(x)" + }, + { + "bbox": [ + 67, + 126, + 541, + 167 + ], + "type": "text", + "content": " is equivalent in " + }, + { + "bbox": [ + 67, + 126, + 541, + 167 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 126, + 541, + 167 + ], + "type": "text", + "content": " to the language defined by some " + }, + { + "bbox": [ + 67, + 126, + 541, + 167 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathsf{PV}}" + }, + { + "bbox": [ + 67, + 126, + 541, + 167 + ], + "type": "text", + 
"content": "-function " + }, + { + "bbox": [ + 67, + 126, + 541, + 167 + ], + "type": "inline_equation", + "content": "h" + }, + { + "bbox": [ + 67, + 126, + 541, + 167 + ], + "type": "text", + "content": ". In other words, " + }, + { + "bbox": [ + 67, + 126, + 541, + 167 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1 \\vdash \\mathsf{LB}_k^{a.e.}(h)" + }, + { + "bbox": [ + 67, + 126, + 541, + 167 + ], + "type": "text", + "content": ", which completes the proof of Theorem 5.16." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 176, + 541, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 176, + 541, + 216 + ], + "spans": [ + { + "bbox": [ + 67, + 176, + 541, + 216 + ], + "type": "text", + "content": "[CLO24b] shows an example of a simple lower bound that is not provable in " + }, + { + "bbox": [ + 67, + 176, + 541, + 216 + ], + "type": "inline_equation", + "content": "\\mathrm{PV}_1" + }, + { + "bbox": [ + 67, + 176, + 541, + 216 + ], + "type": "text", + "content": ", under a plausible cryptographic assumption. This indicates that Theorem 5.16 might offer a viable approach towards a solution to Open Problem 5.3." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 236, + 294, + 253 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 236, + 294, + 253 + ], + "spans": [ + { + "bbox": [ + 67, + 236, + 294, + 253 + ], + "type": "text", + "content": "6 Additional Recent Developments" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 263, + 541, + 330 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 263, + 541, + 330 + ], + "spans": [ + { + "bbox": [ + 67, + 263, + 541, + 330 + ], + "type": "text", + "content": "The provability of the dual Weak Pigeonhole Principle (dWPHP) for polynomial-time functions is closely related to the provability of exponential circuit lower bounds for a language in deterministic exponential time [Jef07]. 
[Kra21] showed that dWPHP cannot be proved in " + }, + { + "bbox": [ + 67, + 263, + 541, + 330 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 263, + 541, + 330 + ], + "type": "text", + "content": " under the assumption that " + }, + { + "bbox": [ + 67, + 263, + 541, + 330 + ], + "type": "inline_equation", + "content": "\\mathsf{P} \\subseteq \\mathsf{SIZE}[n^k]" + }, + { + "bbox": [ + 67, + 263, + 541, + 330 + ], + "type": "text", + "content": " for some constant " + }, + { + "bbox": [ + 67, + 263, + 541, + 330 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 67, + 263, + 541, + 330 + ], + "type": "text", + "content": ". [ILW23] established the same unprovability result assuming subexponentially secure indistinguishability obfuscation and coNP " + }, + { + "bbox": [ + 67, + 263, + 541, + 330 + ], + "type": "inline_equation", + "content": "\\not\\subset" + }, + { + "bbox": [ + 67, + 263, + 541, + 330 + ], + "type": "text", + "content": " i.o.AM." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 331, + 541, + 385 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 331, + 541, + 385 + ], + "spans": [ + { + "bbox": [ + 67, + 331, + 541, + 385 + ], + "type": "text", + "content": "[ABM23] established the unprovability of NEXP " + }, + { + "bbox": [ + 67, + 331, + 541, + 385 + ], + "type": "inline_equation", + "content": "\\subseteq" + }, + { + "bbox": [ + 67, + 331, + 541, + 385 + ], + "type": "text", + "content": " SIZE[poly] in the theory of bounded arithmetic " + }, + { + "bbox": [ + 67, + 331, + 541, + 385 + ], + "type": "inline_equation", + "content": "V_2^0" + }, + { + "bbox": [ + 67, + 331, + 541, + 385 + ], + "type": "text", + "content": " (not covered in this survey). Interestingly, their approach does not employ a witnessing theorem. 
It proceeds instead by simulating a comprehension axiom scheme assuming the provability of the upper bound sentence, eventually relying on an existing lower bound on the provability of the pigeonhole principle." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 385, + 541, + 466 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 385, + 541, + 466 + ], + "spans": [ + { + "bbox": [ + 67, + 385, + 541, + 466 + ], + "type": "text", + "content": "[CLO24b] systematically investigates the reverse mathematics of complexity lower bounds. They demonstrated that various lower bound statements in communication complexity, error-correcting codes, and for Turing machines are equivalent to well-studied combinatorial principles, such as the weak pigeon-hole principle for polynomial-time functions and its variants. Consequently, complexity lower bounds can be regarded as fundamental axioms with significant implications. They use these equivalences to derive conditional results on the unprovability of simple lower bounds in " + }, + { + "bbox": [ + 67, + 385, + 541, + 466 + ], + "type": "inline_equation", + "content": "\\mathsf{APC}_1" + }, + { + "bbox": [ + 67, + 385, + 541, + 466 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 466, + 541, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 466, + 541, + 533 + ], + "spans": [ + { + "bbox": [ + 67, + 466, + 541, + 533 + ], + "type": "inline_equation", + "content": "\\left[\\mathrm{CKK}^{+}24\\right]" + }, + { + "bbox": [ + 67, + 466, + 541, + 533 + ], + "type": "text", + "content": " investigates the provability of the circuit size hierarchy in bounded arithmetic, captured by a sentence CSH stating that for each " + }, + { + "bbox": [ + 67, + 466, + 541, + 533 + ], + "type": "inline_equation", + "content": "n \\geq n_0" + }, + { + "bbox": [ + 67, + 466, + 541, + 533 + ], + "type": "text", + "content": ", there is a circuit of size " + }, + { + "bbox": [ + 67, + 466, + 541, + 533 + ], + "type": "inline_equation", + "content": "n^a" + }, + { + "bbox": [ + 67, + 466, + 541, + 533 + ], + "type": "text", + "content": " that does not admit an equivalent circuit of size " + }, + { + "bbox": [ + 67, + 466, + 541, + 533 + ], + "type": "inline_equation", + "content": "n^b" + }, + { + "bbox": [ + 67, + 466, + 541, + 533 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 67, + 466, + 541, + 533 + ], + "type": "inline_equation", + "content": "a > b > 1" + }, + { + "bbox": [ + 67, + 466, + 541, + 533 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 466, + 541, + 533 + ], + "type": "inline_equation", + "content": "n_0" + }, + { + "bbox": [ + 67, + 466, + 541, + 533 + ], + "type": "text", + "content": " are fixed. 
They showed that CSH is provable in " + }, + { + "bbox": [ + 67, + 466, + 541, + 533 + ], + "type": "inline_equation", + "content": "\\mathsf{T}_2^2" + }, + { + "bbox": [ + 67, + 466, + 541, + 533 + ], + "type": "text", + "content": ", while its provability in " + }, + { + "bbox": [ + 67, + 466, + 541, + 533 + ], + "type": "inline_equation", + "content": "\\mathsf{T}_2^1" + }, + { + "bbox": [ + 67, + 466, + 541, + 533 + ], + "type": "text", + "content": " implies that " + }, + { + "bbox": [ + 67, + 466, + 541, + 533 + ], + "type": "inline_equation", + "content": "\\mathsf{P}^{\\mathsf{NP}} \\not\\subsetneq \\mathsf{SIZE}[n^{1 + \\varepsilon}]" + }, + { + "bbox": [ + 67, + 466, + 541, + 533 + ], + "type": "text", + "content": " for some " + }, + { + "bbox": [ + 67, + 466, + 541, + 533 + ], + "type": "inline_equation", + "content": "\\varepsilon > 0" + }, + { + "bbox": [ + 67, + 466, + 541, + 533 + ], + "type": "text", + "content": ". Thus a better proof complexity upper bound for the circuit size hierarchy yields new circuit lower bounds." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 534, + 541, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 534, + 541, + 561 + ], + "spans": [ + { + "bbox": [ + 67, + 534, + 541, + 561 + ], + "type": "text", + "content": "[CRT24] establishes the unprovability of NP " + }, + { + "bbox": [ + 67, + 534, + 541, + 561 + ], + "type": "inline_equation", + "content": "\\neq" + }, + { + "bbox": [ + 67, + 534, + 541, + 561 + ], + "type": "text", + "content": " PSPACE in " + }, + { + "bbox": [ + 67, + 534, + 541, + 561 + ], + "type": "inline_equation", + "content": "\\mathsf{APC}_1" + }, + { + "bbox": [ + 67, + 534, + 541, + 561 + ], + "type": "text", + "content": " (with a LogLog formalization) under a strong average-case hardness assumption." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 562, + 541, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 562, + 541, + 615 + ], + "spans": [ + { + "bbox": [ + 67, + 562, + 541, + 615 + ], + "type": "text", + "content": "[Kra24] offers a comprehensive reference on proof complexity generators, whose investigation is closely related to dWPHP and its provability in bounded arithmetic. The theory of proof complexity generators offers tautologies that serve as potential candidates for demonstrating super-polynomial extended Frege lower bounds and consequently the unprovability of " + }, + { + "bbox": [ + 67, + 562, + 541, + 615 + ], + "type": "inline_equation", + "content": "\\mathsf{P} = \\mathsf{NP}" + }, + { + "bbox": [ + 67, + 562, + 541, + 615 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 67, + 562, + 541, + 615 + ], + "type": "inline_equation", + "content": "\\mathsf{PV}_1" + }, + { + "bbox": [ + 67, + 562, + 541, + 615 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 616, + 541, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 616, + 541, + 684 + ], + "spans": [ + { + "bbox": [ + 67, + 616, + 541, + 684 + ], + "type": "text", + "content": "We have not covered a number of results connected to the meta-mathematics of complexity lower bounds developed in the context of propositional proof complexity (see, e.g., [Raz15, Kra19, AR23, Kra24] and references therein). It is worth noting that results on the non-automatability of weak proof systems such as [AM20, dRGN" + }, + { + "bbox": [ + 67, + 616, + 541, + 684 + ], + "type": "inline_equation", + "content": "^{+}" + }, + { + "bbox": [ + 67, + 616, + 541, + 684 + ], + "type": "text", + "content": "21] were made possible thanks to the investigation of the meta-mathematics of proof complexity." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 712, + 311, + 720 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 712, + 311, + 720 + ], + "spans": [ + { + "bbox": [ + 299, + 712, + 311, + 720 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 543, + 114 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 543, + 114 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 543, + 114 + ], + "type": "text", + "content": "Finally, several other recent papers have investigated directions connected to bounded arithmetic and the meta-mathematics of complexity theory, e.g., [PS22, Kha22, PS23, AKPS24, LLR24]. Due to space constraints, we are not able to cover all recent developments in this survey." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 124, + 541, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 124, + 541, + 185 + ], + "spans": [ + { + "bbox": [ + 67, + 124, + 541, + 185 + ], + "type": "text", + "content": "Acknowledgements. I would like to thank Noel Arteche, Jinqiao Hu, Jan Krajicek, Moritz Müller, Mykyta Narusevych, Ján Pich, and Dimitrios Tsintsilidas for their valuable comments and feedback on an earlier version of this survey. This work received support from the Royal Society University Research Fellowship URF\\R1\\191059; the UKRI Frontier Research Guarantee EP/Y007999/1; and the Centre for Discrete Mathematics and its Applications (DIMAP) at the University of Warwick." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 202, + 138, + 217 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 202, + 138, + 217 + ], + "spans": [ + { + "bbox": [ + 70, + 202, + 138, + 217 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 74, + 227, + 541, + 680 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 88, + 227, + 541, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 227, + 541, + 251 + ], + "spans": [ + { + "bbox": [ + 88, + 227, + 541, + 251 + ], + "type": "text", + "content": "[AB09] Sanjeev Arora and Boaz Barak. Computational Complexity - A Modern Approach. Cambridge University Press, 2009." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 79, + 259, + 541, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 259, + 541, + 285 + ], + "spans": [ + { + "bbox": [ + 79, + 259, + 541, + 285 + ], + "type": "text", + "content": "[ABM23] Albert Atserias, Samuel R. Buss, and Moritz Müller. On the consistency of circuit lower bounds for non-deterministic time. In Symposium on Theory of Computing (STOC), pages 1257-1270, 2023." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 77, + 293, + 540, + 329 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 293, + 540, + 329 + ], + "spans": [ + { + "bbox": [ + 77, + 293, + 540, + 329 + ], + "type": "text", + "content": "[AKPS24] Noel Arteche, Erfan Khaniki, Jan Pich, and Rahul Santhanam. From proof complexity to circuit complexity via interactive protocols. In International Colloquium on Automata, Languages, and Programming (ICALP), 2024." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 86, + 337, + 541, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 337, + 541, + 352 + ], + "spans": [ + { + "bbox": [ + 86, + 337, + 541, + 352 + ], + "type": "text", + "content": "[AM20] Albert Atserias and Moritz Müller. Automating resolution is NP-hard. J. ACM, 67(5):31:1-31:17, 2020." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 88, + 359, + 541, + 384 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 359, + 541, + 384 + ], + "spans": [ + { + "bbox": [ + 88, + 359, + 541, + 384 + ], + "type": "text", + "content": "[AR23] Per Austrin and Kilian Risse. Sum-of-squares lower bounds for the minimum circuit size problem. In Computational Complexity Conference (CCC), pages 31:1-31:21, 2023." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 86, + 392, + 541, + 417 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 392, + 541, + 417 + ], + "spans": [ + { + "bbox": [ + 86, + 392, + 541, + 417 + ], + "type": "text", + "content": "[AW09] Scott Aaronson and Avi Wigderson. Algebraization: A new barrier in complexity theory. Transactions on Computation Theory (TOCT), 1(1), 2009." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 86, + 425, + 541, + 450 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 425, + 541, + 450 + ], + "spans": [ + { + "bbox": [ + 86, + 425, + 541, + 450 + ], + "type": "text", + "content": "[Bey09] Olaf Beyersdorff. On the correspondence between arithmetic theories and propositional proof systems – a survey. Mathematical Logic Quarterly, 55(2):116–137, 2009." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 83, + 457, + 541, + 482 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 457, + 541, + 482 + ], + "spans": [ + { + "bbox": [ + 83, + 457, + 541, + 482 + ], + "type": "text", + "content": "[BGS75] Theodore P. 
Baker, John Gill, and Robert Solovay. Relativizatons of the " + }, + { + "bbox": [ + 83, + 457, + 541, + 482 + ], + "type": "inline_equation", + "content": "\\mathsf{P} = ?" + }, + { + "bbox": [ + 83, + 457, + 541, + 482 + ], + "type": "text", + "content": " NP Question. SIAM J. Comput., 4(4):431-442, 1975." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 74, + 490, + 541, + 515 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 490, + 541, + 515 + ], + "spans": [ + { + "bbox": [ + 74, + 490, + 541, + 515 + ], + "type": "text", + "content": "[BKKK20] Sam R. Buss, Valentine Kabanets, Antonina Kolokolova, and Michal Koucký. Expander construction in VNC1. Annals of Pure and Applied Logic, 171(7):102796, 2020." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 82, + 523, + 541, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 523, + 541, + 548 + ], + "spans": [ + { + "bbox": [ + 82, + 523, + 541, + 548 + ], + "type": "text", + "content": "[BKO20] Jan Bydzovsky, Jan Krajíček, and Igor C. Oliveira. Consistency of circuit lower bounds with bounded theories. *Logical Methods in Computer Science*, 16(2), 2020." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 82, + 555, + 541, + 581 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 555, + 541, + 581 + ], + "spans": [ + { + "bbox": [ + 82, + 555, + 541, + 581 + ], + "type": "text", + "content": "[BKT14] Samuel R. Buss, Leszek A. Kołodziejczyk, and Neil Thapen. Fragments of approximate counting. Journal of Symbolic Logic, 79(2):496-525, 2014." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 86, + 589, + 541, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 589, + 541, + 613 + ], + "spans": [ + { + "bbox": [ + 86, + 589, + 541, + 613 + ], + "type": "text", + "content": "[BM20] Jan Bydzovsky and Moritz Müller. Polynomial time ultrapowers and the consistency of circuit lower bounds. 
Arch. Math. Log., 59(1-2):127-147, 2020." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 87, + 621, + 355, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 621, + 355, + 635 + ], + "spans": [ + { + "bbox": [ + 87, + 621, + 355, + 635 + ], + "type": "text", + "content": "[Bus86] Samuel R. Buss. Bounded Arithmetic. Bibliopolis, 1986." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 87, + 643, + 541, + 680 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 643, + 541, + 680 + ], + "spans": [ + { + "bbox": [ + 87, + 643, + 541, + 680 + ], + "type": "text", + "content": "[Bus90] Samuel R. Buss. On model theory for intuitionistic bounded arithmetic with applications to independence results. In *Feasible Mathematics: A Mathematical Sciences Institute Workshop, Ithaca, New York, June 1989*, pages 27-47. Springer, 1990." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 712, + 311, + 720 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 712, + 311, + 720 + ], + "spans": [ + { + "bbox": [ + 299, + 712, + 311, + 720 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "bbox": [ + 75, + 72, + 541, + 693 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 86, + 72, + 541, + 98 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 72, + 541, + 98 + ], + "spans": [ + { + "bbox": [ + 86, + 72, + 541, + 98 + ], + "type": "text", + "content": "[Bus94] Samuel R. Buss. On herbrand's theorem. In Selected Papers from the Logic and Computational Complexity International Workshop (LCC), pages 195-209, 1994." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 87, + 106, + 540, + 131 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 106, + 540, + 131 + ], + "spans": [ + { + "bbox": [ + 87, + 106, + 540, + 131 + ], + "type": "text", + "content": "[Bus97] Samuel R. Buss. Bounded arithmetic and propositional proof complexity. In Logic of Computation, pages 67-121. Springer Berlin Heidelberg, 1997." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 76, + 137, + 540, + 163 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 137, + 540, + 163 + ], + "spans": [ + { + "bbox": [ + 76, + 137, + 540, + 163 + ], + "type": "inline_equation", + "content": "\\left[\\mathrm{CHO}^{+}22\\right]" + }, + { + "bbox": [ + 76, + 137, + 540, + 163 + ], + "type": "text", + "content": " Lijie Chen, Shuichi Hirahara, Igor C. Oliveira, Jan Pich, Ninad Rajgopal, and Rahul Santhanam. Beyond natural proofs: Hardness magnification and locality. J. ACM, 69(4):25:1-25:49, 2022." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 78, + 170, + 540, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 170, + 540, + 206 + ], + "spans": [ + { + "bbox": [ + 78, + 170, + 540, + 206 + ], + "type": "text", + "content": "[CIKK16] Marco L. Carmosino, Russell Impagliazzo, Valentine Kabanets, and Antonina Kolokolova. Learning algorithms from natural proofs. In Conference on Computational Complexity (CCC), pages 10:1-10:24, 2016." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 77, + 214, + 539, + 241 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 214, + 539, + 241 + ], + "spans": [ + { + "bbox": [ + 77, + 214, + 539, + 241 + ], + "type": "text", + "content": "[CJsw21] Lijie Chen, Ce Jin, Rahul Santhanam, and Ryan Williams. Constructive separations and their consequences. In Symposium on Foundations of Computer Science (FOCS), 2021." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 89, + 246, + 541, + 272 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 246, + 541, + 272 + ], + "spans": [ + { + "bbox": [ + 89, + 246, + 541, + 272 + ], + "type": "text", + "content": "[CK07] Stephen A. Cook and Jan Krajček. Consequences of the provability of NP " + }, + { + "bbox": [ + 89, + 246, + 541, + 272 + ], + "type": "inline_equation", + "content": "\\subseteq" + }, + { + "bbox": [ + 89, + 246, + 541, + 272 + ], + "type": "text", + "content": " P/poly. Journal of Symbolic Logic, 72(4):1353-1371, 2007." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 75, + 279, + 540, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 279, + 540, + 304 + ], + "spans": [ + { + "bbox": [ + 75, + 279, + 540, + 304 + ], + "type": "inline_equation", + "content": "\\left[\\mathrm{CKK}^{+}24\\right]" + }, + { + "bbox": [ + 75, + 279, + 540, + 304 + ], + "type": "text", + "content": " Marco Carmosino, Valentine Kabanets, Antonina Kolokolova, Igor C. Oliveira, and Dimitrios Tsintsili-das. Provability of the circuit size hierarchy and its consequences. Preprint, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 75, + 311, + 541, + 349 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 311, + 541, + 349 + ], + "spans": [ + { + "bbox": [ + 75, + 311, + 541, + 349 + ], + "type": "text", + "content": "[CKKO21] Marco Carmosino, Valentine Kabanets, Antonina Kolokolova, and Igor C. Oliveira. Learn-uniform circuit lower bounds and provability in bounded arithmetic. In Symposium on Foundations of Computer Science (FOCS), 2021." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 78, + 355, + 539, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 355, + 539, + 381 + ], + "spans": [ + { + "bbox": [ + 78, + 355, + 539, + 381 + ], + "type": "text", + "content": "[CLO24a] Lijie Chen, Jiatu Li, and Igor C. 
Oliveira. On the unprovability of circuit size bounds in intuitionistic " + }, + { + "bbox": [ + 78, + 355, + 539, + 381 + ], + "type": "inline_equation", + "content": "S_2^1" + }, + { + "bbox": [ + 78, + 355, + 539, + 381 + ], + "type": "text", + "content": ". Preprint: arXiv:2404.11841, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 78, + 388, + 539, + 414 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 388, + 539, + 414 + ], + "spans": [ + { + "bbox": [ + 78, + 388, + 539, + 414 + ], + "type": "text", + "content": "[CLO24b] Lijie Chen, Jiatu Li, and Igor C. Oliveira. Reverse mathematics of complexity lower bounds. In Symposium on Foundations of Computer Science (FOCS), 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 89, + 420, + 540, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 420, + 540, + 445 + ], + "spans": [ + { + "bbox": [ + 89, + 420, + 540, + 445 + ], + "type": "text", + "content": "[CN10] Stephen A. Cook and Phuong Nguyen. Logical Foundations of Proof Complexity. Cambridge University Press, 2010." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 86, + 453, + 539, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 453, + 539, + 479 + ], + "spans": [ + { + "bbox": [ + 86, + 453, + 539, + 479 + ], + "type": "text", + "content": "[Bcob65] Alan Cobham. The intrinsic computational difficulty of functions. Proc. Logic, Methodology and Philosophy of Science, pages 24-30, 1965." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 86, + 486, + 539, + 511 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 486, + 539, + 511 + ], + "spans": [ + { + "bbox": [ + 86, + 486, + 539, + 511 + ], + "type": "text", + "content": "[Co075] Stephen A. Cook. Feasibly constructive proofs and the propositional calculus (preliminary version). In Symposium on Theory of Computing (STOC), pages 83-97, 1975." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 84, + 518, + 539, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 518, + 539, + 544 + ], + "spans": [ + { + "bbox": [ + 84, + 518, + 539, + 544 + ], + "type": "text", + "content": "[CRT24] Lijie Chen, Ron D. Rothblum, and Roei Tell. Fiat-Shamir in the plain model from derandomization. Electron. Colloquium Comput. Complex., TR24-116, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 89, + 550, + 539, + 576 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 550, + 539, + 576 + ], + "spans": [ + { + "bbox": [ + 89, + 550, + 539, + 576 + ], + "type": "text", + "content": "[CU93] Stephen Cook and Alasdair Urquhart. Functional interpretations of feasibly constructive arithmetic. Annals of Pure and Applied Logic, 63(2):103-200, 1993." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 88, + 582, + 428, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 582, + 428, + 597 + ], + "spans": [ + { + "bbox": [ + 88, + 582, + 428, + 597 + ], + "type": "text", + "content": "[Din07] Irit Dinur. The PCP theorem by gap amplification. J. ACM, 54(3):12, 2007." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 71, + 603, + 541, + 640 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 603, + 541, + 640 + ], + "spans": [ + { + "bbox": [ + 71, + 603, + 541, + 640 + ], + "type": "text", + "content": "[dRGN+21] Susanna F. de Rezende, Mika Göös, Jakob Nordström, Toniann Pitassi, Robert Robere, and Dmitry Sokolov. Automating algebraic proof systems is NP-hard. In Symposium on Theory of Computing (STOC), pages 209-222, 2021." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 86, + 647, + 448, + 661 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 647, + 448, + 661 + ], + "spans": [ + { + "bbox": [ + 86, + 647, + 448, + 661 + ], + "type": "text", + "content": "[Gay23] Azza Gaysin. Proof complexity of CSP. ArXiv e-Print arXiv:2201.00913, 2023." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 86, + 668, + 541, + 693 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 668, + 541, + 693 + ], + "spans": [ + { + "bbox": [ + 86, + 668, + 541, + 693 + ], + "type": "text", + "content": "[Gay24] Azza Gaysin. Proof complexity of universal algebra in a CSP dichotomy proof. ArXiv e-Print arXiv:2403.06704, 2024." + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 712, + 311, + 720 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 712, + 311, + 720 + ], + "spans": [ + { + "bbox": [ + 299, + 712, + 311, + 720 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "bbox": [ + 84, + 72, + 541, + 677 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 88, + 72, + 512, + 87 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 72, + 512, + 87 + ], + "spans": [ + { + "bbox": [ + 88, + 72, + 512, + 87 + ], + "type": "text", + "content": "[HP93] Petr Hajek and Pavel Pudlák. Metamathematics of first-order arithmetic. Springer-Verlag, 1993." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 84, + 93, + 541, + 118 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 93, + 541, + 118 + ], + "spans": [ + { + "bbox": [ + 84, + 93, + 541, + 118 + ], + "type": "text", + "content": "[ILW23] Rahul Ilango, Jiatu Li, and Ryan Williams. 
Indistinguishability obfuscation, range avoidance, and bounded arithmetic. In Symposium on Theory of Computing (STOC), pages 1076–1089. ACM, 2023." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 91, + 126, + 541, + 152 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 126, + 541, + 152 + ], + "spans": [ + { + "bbox": [ + 91, + 126, + 541, + 152 + ], + "type": "text", + "content": "[Jer04] Emil Jerabek. Dual weak pigeonhole principle, boolean complexity, and derandomization. Annals of Pure and Applied Logic, 129(1-3):1-37, 2004." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 91, + 159, + 541, + 185 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 159, + 541, + 185 + ], + "spans": [ + { + "bbox": [ + 91, + 159, + 541, + 185 + ], + "type": "text", + "content": "[Jef05] Emil Jerabek. Weak pigeonhole principle and randomized computation. PhD thesis, Charles University in Prague, 2005." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 91, + 193, + 541, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 193, + 541, + 217 + ], + "spans": [ + { + "bbox": [ + 91, + 193, + 541, + 217 + ], + "type": "text", + "content": "[Jer06] Emil Jerabek. The strength of sharply bounded induction. Mathematical Logic Quarterly, 52(6):613-624, 2006." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 91, + 225, + 541, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 225, + 541, + 250 + ], + "spans": [ + { + "bbox": [ + 91, + 225, + 541, + 250 + ], + "type": "text", + "content": "[Jer07] Emil Jerabek. Approximate counting in bounded arithmetic. Journal of Symbolic Logic, 72(3):959-993, 2007." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 89, + 258, + 473, + 272 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 258, + 473, + 272 + ], + "spans": [ + { + "bbox": [ + 89, + 258, + 473, + 272 + ], + "type": "text", + "content": "[Juk12] Stasys Jukna. Boolean Function Complexity: Advances and Frontiers. Springer, 2012." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 86, + 279, + 541, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 279, + 541, + 304 + ], + "spans": [ + { + "bbox": [ + 86, + 279, + 541, + 304 + ], + "type": "text", + "content": "[Kan82] Ravi Kannan. Circuit-size lower bounds and non-reducibility to sparse sets. Information and Control, 55(1-3):40-56, 1982." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 86, + 312, + 541, + 337 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 312, + 541, + 337 + ], + "spans": [ + { + "bbox": [ + 86, + 312, + 541, + 337 + ], + "type": "text", + "content": "[Kha22] Erfan Khaniki. Nisan-Wigderson generators in proof complexity: New lower bounds. In Computational Complexity Conference (CCC), pages 17:1-17:15, 2022." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 88, + 345, + 541, + 370 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 345, + 541, + 370 + ], + "spans": [ + { + "bbox": [ + 88, + 345, + 541, + 370 + ], + "type": "text", + "content": "[KO17] Jan Krajíček and Igor C. Oliveira. Unprovability of circuit upper bounds in Cook's theory PV. *Logical Methods in Computer Science*, 13(1), 2017." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 90, + 377, + 541, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 90, + 377, + 541, + 403 + ], + "spans": [ + { + "bbox": [ + 90, + 377, + 541, + 403 + ], + "type": "text", + "content": "[KP89] Jan Krajíček and Pavel Pudlák. Propositional provability and models of weak arithmetic. 
In CSL'89: Proceedings of the 3rd Workshop on Computer Science Logic, pages 193-210, 1989." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 84, + 411, + 541, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 411, + 541, + 446 + ], + "spans": [ + { + "bbox": [ + 84, + 411, + 541, + 446 + ], + "type": "text", + "content": "[KPS90] Jan Krajíček, Pavel Pudlák, and Jíří Sgall. Interactive computations of optimal solutions. In International Symposium on Mathematical Foundations of Computer Science (MFCS), volume 452, pages 48-60, 1990." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 84, + 455, + 541, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 455, + 541, + 480 + ], + "spans": [ + { + "bbox": [ + 84, + 455, + 541, + 480 + ], + "type": "text", + "content": "[KPT91] Jan Krajíček, Pavel Pudlák, and Gaisi Takeuti. Bounded arithmetic and the polynomial hierarchy. Annals of Pure and Applied Logic, 52(1-2):143-153, 1991." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 86, + 488, + 541, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 488, + 541, + 514 + ], + "spans": [ + { + "bbox": [ + 86, + 488, + 541, + 514 + ], + "type": "text", + "content": "[Kra95] Jan Krajíček. Bounded Arithmetic, Propositional Logic, and Complexity Theory. Encyclopedia of Mathematics and its Applications. Cambridge University Press, 1995." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 86, + 521, + 541, + 547 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 521, + 541, + 547 + ], + "spans": [ + { + "bbox": [ + 86, + 521, + 541, + 547 + ], + "type": "text", + "content": "[Kra97] Jan Krajicek. Interpolation theorems, lower bounds for proof systems, and independence results for bounded arithmetic. J. Symb. Log., 62(2):457-486, 1997." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 86, + 554, + 541, + 579 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 554, + 541, + 579 + ], + "spans": [ + { + "bbox": [ + 86, + 554, + 541, + 579 + ], + "type": "text", + "content": "[Kra11] Jan Krajicek. On the proof complexity of the Nisan-Wigderson generator based on a hard NP " + }, + { + "bbox": [ + 86, + 554, + 541, + 579 + ], + "type": "inline_equation", + "content": "\\cap" + }, + { + "bbox": [ + 86, + 554, + 541, + 579 + ], + "type": "text", + "content": " coNP function. Journal of Mathematical Logic, 11(1), 2011." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 86, + 586, + 541, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 586, + 541, + 612 + ], + "spans": [ + { + "bbox": [ + 86, + 586, + 541, + 612 + ], + "type": "text", + "content": "[Kra19] Jan Krajíček. Proof Complexity. Encyclopedia of Mathematics and its Applications. Cambridge University Press, 2019." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 86, + 620, + 541, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 620, + 541, + 645 + ], + "spans": [ + { + "bbox": [ + 86, + 620, + 541, + 645 + ], + "type": "text", + "content": "[Kra21] Jan Krajíček. Small circuits and dual weak PHP in the universal theory of p-time algorithms. ACM Transactions on Computational Logic (TOCL), 22(2):1-4, 2021." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 86, + 652, + 541, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 652, + 541, + 677 + ], + "spans": [ + { + "bbox": [ + 86, + 652, + 541, + 677 + ], + "type": "text", + "content": "[Kra24] Jan Krajicek. Proof Complexity Generators. Monograph available at https://www.karlin.mff.cuni.cz/~krajicek/gdraft.html, 2024." 
+ } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 712, + 311, + 720 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 712, + 311, + 720 + ], + "spans": [ + { + "bbox": [ + 299, + 712, + 311, + 720 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "bbox": [ + 79, + 72, + 541, + 677 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 91, + 72, + 541, + 97 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 72, + 541, + 97 + ], + "spans": [ + { + "bbox": [ + 91, + 72, + 541, + 97 + ], + "type": "text", + "content": "[Lê14] Dai Tri Man Lê. Bounded Arithmetic and Formalizing Probabilistic Proofs. PhD thesis, University of Toronto, 2014." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 89, + 106, + 540, + 131 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 106, + 540, + 131 + ], + "spans": [ + { + "bbox": [ + 89, + 106, + 540, + 131 + ], + "type": "text", + "content": "[LC11] Dai Tri Man Le and Stephen A. Cook. Formalizing randomized matching algorithms. Log. Methods Comput. Sci., 8(3), 2011." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 89, + 138, + 541, + 163 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 138, + 541, + 163 + ], + "spans": [ + { + "bbox": [ + 89, + 138, + 541, + 163 + ], + "type": "text", + "content": "[Lip94] Richard J. Lipton. Some consequences of our failure to prove non-linear lower bounds on explicit functions. In Structure in Complexity Theory Conference (CCC), pages 79-87, 1994." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 84, + 171, + 540, + 196 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 171, + 540, + 196 + ], + "spans": [ + { + "bbox": [ + 84, + 171, + 540, + 196 + ], + "type": "text", + "content": "[LLR24] Jiawei Li, Yuhao Li, and Hanlin Ren. Meta-mathematics of resolution lower bounds: A TFNP perspective. Preprint, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 89, + 205, + 539, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 205, + 539, + 229 + ], + "spans": [ + { + "bbox": [ + 89, + 205, + 539, + 229 + ], + "type": "text", + "content": "[LO23] Jiatu Li and Igor C. Oliveira. Unprovability of strong complexity lower bounds in bounded arithmetic. In Symposium on Theory of Computing (STOC), 2023." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 82, + 237, + 541, + 261 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 237, + 541, + 261 + ], + "spans": [ + { + "bbox": [ + 82, + 237, + 541, + 261 + ], + "type": "text", + "content": "[McK10] Richard McKinley. A sequent calculus demonstration of Herbrand's theorem. arXiv preprint arXiv:1007.3414, 2010." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 88, + 270, + 541, + 295 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 270, + 541, + 295 + ], + "spans": [ + { + "bbox": [ + 88, + 270, + 541, + 295 + ], + "type": "text", + "content": "[MP20] Moritz Müller and Ján Pich. Feasibly constructive proofs of succinct weak circuit lower bounds. Annals of Pure and Applied Logic, 171(2), 2020." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 79, + 303, + 539, + 327 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 303, + 539, + 327 + ], + "spans": [ + { + "bbox": [ + 79, + 303, + 539, + 327 + ], + "type": "text", + "content": "[MPW02] Alexis Maciel, Toniann Pitassi, and Alan R. Woods. 
A new proof of the weak pigeonhole principle. Journal of Computer and System Sciences, 64(4):843-872, 2002." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 88, + 335, + 535, + 349 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 335, + 535, + 349 + ], + "spans": [ + { + "bbox": [ + 88, + 335, + 535, + 349 + ], + "type": "text", + "content": "[Oja04] Kerry Ojakian. Combinatorics in Bounded Arithmetic. PhD thesis, Carnegie Mellon University, 2004." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 89, + 357, + 537, + 369 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 357, + 537, + 369 + ], + "spans": [ + { + "bbox": [ + 89, + 357, + 537, + 369 + ], + "type": "text", + "content": "[Par71] Rohit Parikh. Existence and feasibility in arithmetic. Journal of Symbolic Logic, 36(3):494-508, 1971." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 86, + 378, + 539, + 401 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 378, + 539, + 401 + ], + "spans": [ + { + "bbox": [ + 86, + 378, + 539, + 401 + ], + "type": "text", + "content": "[Pic15a] Jan Pich. Circuit lower bounds in bounded arithmetics. Annals of Pure and Applied Logic, 166(1):29-45, 2015." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 85, + 411, + 540, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 411, + 540, + 435 + ], + "spans": [ + { + "bbox": [ + 85, + 411, + 540, + 435 + ], + "type": "text", + "content": "[Pic15b] Jan Pich. Logical strength of complexity theory and a formalization of the PCP theorem in bounded arithmetic. *Logical Methods in Computer Science*, 11(2), 2015." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 91, + 443, + 541, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 443, + 541, + 468 + ], + "spans": [ + { + "bbox": [ + 91, + 443, + 541, + 468 + ], + "type": "text", + "content": "[PS21] Jan Pich and Rahul Santhanam. Strong co-nondeterministic lower bounds for NP cannot be proved feasibly. In Symposium on Theory of Computing (STOC), pages 223-233, 2021." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 92, + 476, + 541, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 476, + 541, + 502 + ], + "spans": [ + { + "bbox": [ + 92, + 476, + 541, + 502 + ], + "type": "text", + "content": "[PS22] Jan Pich and Rahul Santhanam. Learning algorithms versus automatability of Frege systems. In International Colloquium on Automata, Languages, and Programming (ICALP), pages 101:1-101:20, 2022." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 92, + 510, + 541, + 534 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 510, + 541, + 534 + ], + "spans": [ + { + "bbox": [ + 92, + 510, + 541, + 534 + ], + "type": "text", + "content": "[PS23] Ján Pich and Rahul Santhanam. Towards " + }, + { + "bbox": [ + 92, + 510, + 541, + 534 + ], + "type": "inline_equation", + "content": "\\mathrm{P} \\neq \\mathrm{NP}" + }, + { + "bbox": [ + 92, + 510, + 541, + 534 + ], + "type": "text", + "content": " from extended Frege lower bounds. *Electron. Colloquium Comput. Complex.*, TR23-199, 2023." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 88, + 542, + 541, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 542, + 541, + 578 + ], + "spans": [ + { + "bbox": [ + 88, + 542, + 541, + 578 + ], + "type": "text", + "content": "[Pud06] Pavel Pudlák. Consistency and games - in search of new combinatorial principles. In V. Stoltenberg-Hansen and J. 
Väätänen, editors, Logic Colloquium '03, volume 24 of Lecture Notes in Logic, pages 244-281. ASL, 2006." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 79, + 586, + 540, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 586, + 540, + 612 + ], + "spans": [ + { + "bbox": [ + 79, + 586, + 540, + 612 + ], + "type": "text", + "content": "[PWW88] Jeff B. Paris, A. J. Wilkie, and Alan R. Woods. Provability of the pigeonhole principle and the existence of infinitely many primes. J. Symb. Log., 53(4):1235-1244, 1988." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 83, + 620, + 541, + 644 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 620, + 541, + 644 + ], + "spans": [ + { + "bbox": [ + 83, + 620, + 541, + 644 + ], + "type": "text", + "content": "[Raz95a] Alexander A. Razborov. Bounded arithmetic and lower bounds in boolean complexity. In P. Clote and J. Remmel, editors, Feasible Mathematics II, pages 344-386. Birkhäuser, 1995." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 82, + 653, + 541, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 653, + 541, + 677 + ], + "spans": [ + { + "bbox": [ + 82, + 653, + 541, + 677 + ], + "type": "text", + "content": "[Raz95b] Alexander A Razborov. Unprovability of lower bounds on circuit size in certain fragments of bounded arithmetic. Izvestiya: mathematics, 59(1):205, 1995." 
+ } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 712, + 311, + 720 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 712, + 311, + 720 + ], + "spans": [ + { + "bbox": [ + 299, + 712, + 311, + 720 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + }, + { + "para_blocks": [ + { + "bbox": [ + 84, + 72, + 541, + 295 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 86, + 72, + 541, + 98 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 72, + 541, + 98 + ], + "spans": [ + { + "bbox": [ + 86, + 72, + 541, + 98 + ], + "type": "text", + "content": "[Raz15] Alexander A. Razborov. Pseudorandom generators hard for " + }, + { + "bbox": [ + 86, + 72, + 541, + 98 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 86, + 72, + 541, + 98 + ], + "type": "text", + "content": "-DNF resolution and polynomial calculus resolution. Annals of Mathematics, pages 415-472, 2015." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 88, + 106, + 541, + 131 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 106, + 541, + 131 + ], + "spans": [ + { + "bbox": [ + 88, + 106, + 541, + 131 + ], + "type": "text", + "content": "[RR97] Alexander A. Razborov and Steven Rudich. Natural proofs. Journal of Computer and System Sciences, 55(1):24-35, 1997." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 87, + 138, + 539, + 163 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 138, + 539, + 163 + ], + "spans": [ + { + "bbox": [ + 87, + 138, + 539, + 163 + ], + "type": "text", + "content": "[Sub61] Bella A. Subbotovskaya. 
Realization of linear functions by formulas using " + }, + { + "bbox": [ + 87, + 138, + 539, + 163 + ], + "type": "inline_equation", + "content": "+, \\cdot, -" + }, + { + "bbox": [ + 87, + 138, + 539, + 163 + ], + "type": "text", + "content": ". In Soviet Math. Dokl, 1961." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 88, + 171, + 540, + 196 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 171, + 540, + 196 + ], + "spans": [ + { + "bbox": [ + 88, + 171, + 540, + 196 + ], + "type": "text", + "content": "[SW14] Rahul Santhanam and Ryan Williams. On uniformity and circuit lower bounds. Computational Complexity, 23(2):177-205, 2014." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 89, + 205, + 541, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 205, + 541, + 228 + ], + "spans": [ + { + "bbox": [ + 89, + 205, + 541, + 228 + ], + "type": "text", + "content": "[TC21] Iddo Tzameret and Stephen A. Cook. Uniform, integral, and feasible proofs for the determinant identities. J. ACM, 68(2):12:1-12:80, 2021." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 84, + 237, + 541, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 237, + 541, + 262 + ], + "spans": [ + { + "bbox": [ + 84, + 237, + 541, + 262 + ], + "type": "text", + "content": "[Woo81] Alan R. Woods. Some problems in logic and number theory and their connections. PhD thesis, University of Manchester, 1981." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 88, + 270, + 541, + 295 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 270, + 541, + 295 + ], + "spans": [ + { + "bbox": [ + 88, + 270, + 541, + 295 + ], + "type": "text", + "content": "[WP87] Alex J. Wilkie and Jeff B. Paris. On the scheme of induction for bounded arithmetic formulas. Ann. Pure Appl. Log., 35:261-302, 1987." 
+ } + ] + } + ], + "index": 6 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 712, + 310, + 720 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 712, + 310, + 720 + ], + "spans": [ + { + "bbox": [ + 299, + 712, + 310, + 720 + ], + "type": "text", + "content": "28" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 27 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05339/506e32fa-2397-46f0-a31a-fdf0b6768185_content_list.json b/data/2025/2504_05xxx/2504.05339/506e32fa-2397-46f0-a31a-fdf0b6768185_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..35f21d66891b3ac9ab170b394956f9168f22d4c0 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05339/506e32fa-2397-46f0-a31a-fdf0b6768185_content_list.json @@ -0,0 +1,1003 @@ +[ + { + "type": "text", + "text": "Optimized Path Planning for Logistics Robots Using Ant Colony Algorithm under Multiple Constraints", + "text_level": 1, + "bbox": [ + 83, + 68, + 915, + 137 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Haopeng Zhao", + "bbox": [ + 223, + 143, + 316, + 157 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Independent Researcher", + "bbox": [ + 197, + 160, + 341, + 172 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Beijing, China", + "bbox": [ + 225, + 176, + 313, + 190 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "haopeng.zhao1894@gmail.com", + "bbox": [ + 174, + 191, + 364, + 205 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Lipeng Liu", + "bbox": [ + 233, + 222, + 305, + 236 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Peking University", + "bbox": [ + 215, + 238, + 323, + 252 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Beijing, China", + "bbox": [ + 225, + 253, + 315, + 268 + ], + "page_idx": 0 + }, + { + 
"type": "text", + "text": "pengpengpu@163.com", + "bbox": [ + 197, + 270, + 344, + 282 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zheyu Zhang", + "bbox": [ + 228, + 300, + 310, + 314 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Independent Researcher", + "bbox": [ + 197, + 316, + 341, + 329 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Beijing, China", + "bbox": [ + 225, + 330, + 315, + 345 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "zheyuz2980@gmail.com", + "bbox": [ + 194, + 348, + 344, + 361 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract—With the rapid development of the logistics industry, the path planning of logistics vehicles has become increasingly complex, requiring consideration of multiple constraints such as time windows, task sequencing, and motion smoothness. Traditional path planning methods often struggle to balance these competing demands efficiently. In this paper, we propose a path planning technique based on the Ant Colony Optimization (ACO) algorithm to address these challenges. The proposed method optimizes key performance metrics, including path length, task completion time, turning counts, and motion smoothness, to ensure efficient and practical route planning for logistics vehicles. Experimental results demonstrate that the ACO-based approach outperforms traditional methods in terms of both efficiency and adaptability. This study provides a robust solution for logistics vehicle path planning, offering significant potential for real-world applications in dynamic and constrained environments.", + "bbox": [ + 66, + 363, + 486, + 573 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Keywords- ant colony algorithm; path planning", + "text_level": 1, + "bbox": [ + 89, + 584, + 383, + 599 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "I. 
INTRODUCTION", + "text_level": 1, + "bbox": [ + 209, + 616, + 339, + 628 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The logistics industry plays an important role in the modern economy, and efficient transportation is the cornerstone of its success[1]. However, the path planning of logistics vehicles faces many challenges, especially in urban environments. Logistics vehicles must not only complete tasks within the specified time window[2][3], but also optimize the task sequence to ensure a smooth and energy-efficient route. Traditional path planning methods often fail to fully meet these multi-faceted requirements, resulting in the inability to obtain the optimal solution in real-world scenarios[4][5][6].", + "bbox": [ + 66, + 633, + 486, + 773 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In modern urban environments, the path planning problem of logistics vehicles becomes more complicated[7]. In addition to time windows and task sequencing constraints, factors such as traffic congestion, road restrictions, vehicle load, and environmental conditions (such as weather, road conditions, etc.) need to be considered[8]. These factors make it difficult for traditional path planning algorithms to provide effective solutions in dynamic and uncertain environments[9][10]. 
For example, although the Dijkstra algorithm and the $\\mathrm{A}^*$ algorithm", + "bbox": [ + 66, + 779, + 488, + 906 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zhichao Ma", + "bbox": [ + 687, + 143, + 764, + 157 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Independent Researcher", + "bbox": [ + 653, + 160, + 799, + 174 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Shanghai, China", + "bbox": [ + 674, + 176, + 777, + 190 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ma.zhi.chao.max@gmail.com", + "bbox": [ + 637, + 191, + 816, + 205 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yang Wang", + "bbox": [ + 689, + 222, + 764, + 236 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Independent Researcher", + "bbox": [ + 653, + 238, + 799, + 252 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Beijing, China", + "bbox": [ + 681, + 253, + 771, + 268 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "wangyrick102@gmail.com", + "bbox": [ + 645, + 268, + 808, + 282 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Hao Liu*", + "bbox": [ + 697, + 300, + 756, + 313 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Independent Researcher", + "bbox": [ + 653, + 316, + 799, + 329 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Beijing, China", + "bbox": [ + 681, + 330, + 771, + 345 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "modiy.lu@gmail.com", + "bbox": [ + 661, + 348, + 792, + 361 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "perform well in static environments, they often cannot quickly adapt to real-time traffic changes, resulting in low efficiency[11][12][13]. The ant colony optimization algorithm can effectively find the optimal path in a complex environment by simulating the behavior of ant colonies releasing pheromones during foraging[14][15]. 
The distributed computing characteristics of ACO enable it to process multiple path selection problems in parallel, so that high-quality solutions can be found in a short time[16][17][18]. In addition, the adaptability of ACO enables it to dynamically adjust the path planning strategy according to environmental changes, making it particularly effective in dealing with dynamic and uncertain problems[19][20][21].", + "bbox": [ + 506, + 362, + 928, + 542 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Logistics vehicle path planning usually involves multiple optimization objectives, such as minimizing driving distance, reducing task completion time, reducing energy consumption, and improving path smoothness. These objectives often conflict with each other[22][23]. For example, the shortest path may require more turns, resulting in increased energy consumption and longer driving time. Therefore, finding a balance between these objectives is an important research challenge. This paper adopts the ACO algorithm combined with a multi-objective optimization strategy to consider multiple optimization objectives and find the path with the best overall performance. In order to verify the effectiveness of the proposed method, we designed a series of experiments to simulate the logistics vehicle path planning problem in different urban environments. The experimental results show that the path planning method based on ACO outperforms the traditional algorithm in multiple performance indicators.", + "bbox": [ + 506, + 549, + 928, + 785 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "II. METHODOLOGY", + "text_level": 1, + "bbox": [ + 648, + 801, + 782, + 816 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "A. 
Modeling Methods for Complex Constraints in Path Planning", + "text_level": 1, + "bbox": [ + 506, + 829, + 926, + 859 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Logistics robots face stringent time window constraints when executing tasks. Each task is associated with a specific", + "bbox": [ + 506, + 862, + 928, + 891 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "time window $[t_{\\mathrm{start}}, t_{\\mathrm{end}}]$ , within which the robot must arrive at the designated location to complete the task. These time constraints introduce the following challenges in path planning:", + "bbox": [ + 66, + 69, + 486, + 119 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Time window conflict: When the time windows of multiple tasks overlap, the robot must intelligently adjust the order of task execution to ensure that all tasks are completed within the specified time. Such conflicts may cause task delays or reduced system efficiency. For example, in a high-density warehouse environment, overlapping time windows can significantly affect the overall throughput of the logistics system.", + "bbox": [ + 66, + 123, + 486, + 223 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Path efficiency optimization: While meeting time constraints, the robot must choose the optimal path to minimize the travel distance and time cost. This involves not only path planning in a static environment, but also dynamic obstacle avoidance. Efficient path planning is critical to reducing energy consumption and improving the operational efficiency of the robot fleet.", + "bbox": [ + 66, + 227, + 488, + 325 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Dynamic adjustment capability: In the actual operating environment, the robot must respond to unexpected situations in real time and dynamically adjust its path planning strategy. This capability places higher requirements on the robustness and flexibility of the system. 
For example, in a dynamic warehouse environment, the robot must quickly replan its route to avoid collisions or adapt to new tasks without affecting the overall plan.", + "bbox": [ + 66, + 332, + 488, + 431 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Minimization of Path Length:", + "text_level": 1, + "bbox": [ + 66, + 436, + 279, + 452 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\nf _ {1} = \\min \\sum_ {i = 1} ^ {n - 1} d \\left(p _ {i}, p _ {i + 1}\\right) \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 187, + 460, + 486, + 501 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "where $d(p_i, p_{i+1})$ represents the Euclidean distance between consecutive path points $p_i$ and $p_{i+1}$ . Optimizing path length directly impacts the robot's energy consumption and task completion efficiency. Shorter paths reduce travel time and energy usage, which is particularly important in large-scale logistics operations.", + "bbox": [ + 66, + 510, + 486, + 609 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Minimization of Task Completion Time:", + "text_level": 1, + "bbox": [ + 71, + 614, + 357, + 628 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\nf _ {2} = \\min \\max _ {1 \\leq i \\leq n} t _ {i} \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 200, + 638, + 485, + 656 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "where $t_i$ denotes the completion time of the $i - th$ task. This objective ensures that the robot can complete all tasks in the shortest possible time, thereby improving system throughput. 
Minimizing the maximum completion time is critical for meeting tight delivery schedules in time-sensitive applications.", + "bbox": [ + 66, + 667, + 486, + 744 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Minimization of Turning Counts:", + "text_level": 1, + "bbox": [ + 66, + 750, + 305, + 765 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\nf _ {3} = \\min \\sum_ {i = 2} ^ {n - 1} \\delta \\left(\\theta_ {i}\\right) \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 205, + 773, + 485, + 813 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "where $\\theta_{i}$ represents the turning angle at the $i - th$ path point, and $\\delta (\\cdot)$ is an indicator function that takes the value 0, and if the turning angle exceeds a threshold, the value is equal to 1. Reducing turning counts helps improve the robot's motion efficiency and reduces mechanical wear. This is especially", + "bbox": [ + 66, + 823, + 488, + 906 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "important in environments with narrow aisles or limited maneuvering space.", + "bbox": [ + 508, + 66, + 926, + 95 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Optimization of Motion Smoothness:", + "text_level": 1, + "bbox": [ + 509, + 102, + 769, + 117 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\nf _ {4} = \\min \\sum_ {i = 2} ^ {n - 1} \\left| \\theta_ {i} - \\theta_ {i - 1} \\right| \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 635, + 125, + 926, + 165 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "This objective function ensures smooth motion trajectories, reducing energy loss and mechanical wear caused by sharp turns, while enhancing operational stability. 
Smooth trajectories also contribute to safer and more predictable robot movements, which is essential in environments shared with human workers.", + "bbox": [ + 508, + 172, + 928, + 244 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "By establishing a multi-objective optimization model and employing appropriate optimization algorithm, an optimal or near-optimal path planning solution can be found under the constraints, thereby improving the overall efficiency of the logistics system. Additionally, this study considers the robot's dynamic constraints and environmental uncertainties, further enhancing the practicality and robustness of the algorithm. For instance, the algorithm incorporates real-time sensor data to adapt to changing environmental conditions, ensuring reliable performance in complex operational scenarios.", + "bbox": [ + 506, + 250, + 929, + 388 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "B. Multi-Constraint Path Planning Based on Ant Colony Algorithm", + "text_level": 1, + "bbox": [ + 508, + 402, + 928, + 433 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The ant colony optimization algorithm is a heuristic optimization algorithm that simulates the foraging behavior of ants. It gradually finds the optimal path by imitating the behavior of ants releasing pheromones in the process of looking for food. This paper combines the ant colony algorithm with the time window constraints and multi-objective optimization constraints in the path planning of logistics robots, and proposes an ant colony algorithm to solve the path planning problem in complex environments.", + "bbox": [ + 506, + 435, + 929, + 561 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Based on the traditional ant colony algorithm, this paper introduces time window constraints and multi-objective optimization constraints to improve the pheromone update mechanism and heuristic function design. 
Pheromone update not only considers the path length, but also combines factors such as task completion time, number of turns, and motion stability. The pheromone update formula is as follows:", + "bbox": [ + 506, + 566, + 928, + 666 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\\tau_ {i j} (t + 1) = (1 - \\rho) \\cdot \\tau_ {i j} (t) + \\Delta \\tau_ {i j} \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 594, + 674, + 926, + 694 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "where $\\tau_{ij}(t)$ represents the pheromone concentration on path $(i,j)$ , $\\rho$ is the pheromone evaporation coefficient, and $\\Delta \\tau_{ij}$ is the pheromone increment, which is calculated by comprehensively considering path length, task completion time, and motion smoothness.", + "bbox": [ + 508, + 707, + 928, + 792 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The heuristic function is used to guide ants in selecting the next path point. The heuristic function is defined as follows:", + "bbox": [ + 508, + 799, + 928, + 829 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\\eta_ {i j} = \\frac {1}{d _ {i j}} \\cdot \\frac {1}{1 + \\alpha \\cdot \\text {penalty} _ {i j}} \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 612, + 837, + 926, + 878 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "where $d_{ij}$ is the distance of path $(i,j)$ , penalty $_{ij}$ is the turning penalty term, and $\\alpha$ is a weight coefficient used to balance path length and turning counts. When selecting the next path point, ants use a probabilistic selection strategy that considers both pheromone concentration and the heuristic function. 
The path selection probability formula is as follows:", + "bbox": [ + 66, + 70, + 486, + 161 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\np _ {i j} ^ {k} = \\frac {\\left[ \\tau_ {i j} \\right] ^ {\\beta} \\left[ \\eta_ {i j} \\right] ^ {\\gamma}}{\\sum_ {l \\in \\text {allowed} _ {k}} \\left[ \\tau_ {i l} \\right] ^ {\\beta} \\left[ \\eta_ {i l} \\right] ^ {\\gamma}} \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 205, + 170, + 485, + 214 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\beta$ and $\\gamma$ are the weight coefficients for pheromone and heuristic function, respectively, and allowed $_k$ represents the set of nodes that ant $k$ can choose from at the current node.", + "bbox": [ + 66, + 222, + 486, + 279 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "During the path planning process, the ant needs to check the time window constraints of each task. If the selection of a certain path point will cause the task to be unable to be completed within the specified time, then the path point will be excluded from the optional path points. By dynamically adjusting the path selection strategy, all time window constraints are met. In addition, this paper adopts the weighted summation method to transform the multi-objective optimization problem into a single-objective optimization problem. By adjusting the weight coefficient of each objective function, a balance is achieved between the path length, task completion time, number of turns, and motion smoothness. 
The objective function is defined as follows:", + "bbox": [ + 66, + 287, + 486, + 454 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nF = w _ {1} \\cdot f _ {1} + w _ {2} \\cdot f _ {2} + w _ {3} \\cdot f _ {3} + w _ {4} \\cdot f _ {4} \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 138, + 462, + 485, + 481 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $w_{1}, w_{2}, w_{3}, w_{4}$ are the weight coefficients for each objective function, satisfying $w_{1} + w_{2} + w_{3} + w_{4} = 1$ .", + "bbox": [ + 66, + 493, + 486, + 532 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The algorithm flow mainly includes four steps: initialization, path construction, pheromone update and iterative optimization. First, the pheromone concentration, heuristic function parameters and starting position of the ants are initialized, and the maximum number of iterations and the number of ants are set. Then each ant gradually builds the path according to the path selection strategy, while checking the time window constraint and multi-objective optimization constraint. After all ants complete the path construction, the pheromone concentration is updated according to the path quality. The higher the path quality, the more pheromone increments. Finally, the path construction and pheromone update process are repeated until the maximum number of iterations is reached or the optimal path that meets the constraints is found.", + "bbox": [ + 66, + 541, + 486, + 734 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In order to verify the effectiveness of the ant colony algorithm, experiments were carried out in a simulated warehouse environment. The results show that compared with the traditional ant colony algorithm, the proposed algorithm significantly reduces the path length, the number of turns and the task completion time while meeting the time window constraint and improving the smoothness of the movement. 
In addition, the robustness of the algorithm in a dynamic environment has been verified, proving that it can effectively handle unexpected situations.", + "bbox": [ + 66, + 742, + 486, + 881 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "III. EXPERIMENTS", + "text_level": 1, + "bbox": [ + 651, + 68, + 774, + 80 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To validate the effectiveness of the proposed Ant Colony Optimization algorithm for path planning in logistics robots, simulation experiments were designed and compared with classical algorithms in various scenarios. The experimental environment was built on the Gazebo and ROS platforms, simulating real-world warehouse or factory settings with static obstacles (e.g., shelves, walls). The map size was set to", + "bbox": [ + 508, + 85, + 926, + 183 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "$20\\mathrm{m} \\times 20\\mathrm{m}$ with a resolution of $0.1\\mathrm{m}$ , and the number of task points ranged from 5 to 20, each associated with a time window $[t_{\\mathrm{start}}, t_{\\mathrm{end}}]$ randomly distributed between [5s,30s].", + "bbox": [ + 509, + 190, + 926, + 238 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The comparison methods included the Ant Colony Optimization, $\\mathbf{A}^*$ , and Genetic Algorithm (GA), Particle Swarm Optimization(PSO), RRT*, Hybrid $\\mathbf{A}^*$ . The evaluation metrics covered path length, task completion time, turning counts, and motion smoothness. 
Path length was measured by the total travel distance from the start to the end point; task completion time was the total time for the robot to complete all tasks; turning counts were the total number of turns in the path; and motion smoothness was evaluated by the standard deviation of path curvature, with smaller values indicating smoother paths.", + "bbox": [ + 508, + 247, + 928, + 387 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The experiments were conducted in a static environment, and each algorithm was run 50 times for each map complexity level, with the average values taken as the results.", + "bbox": [ + 508, + 393, + 926, + 435 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/1b6bbae033c75747881776da18cb58629caeae11583f07f2df418b6359ba6fc5.jpg", + "image_caption": [ + "Fig 1. The trajectory of the ACO algorithm." + ], + "image_footnote": [], + "bbox": [ + 521, + 444, + 916, + 542 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/61c7b1f73f9d3371b40b5220c37c1a038601f62b2a688a7ffdca7eb7c36118a3.jpg", + "image_caption": [ + "Fig 2. Performance of different algorithms after 1000 iterations." + ], + "image_footnote": [], + "bbox": [ + 589, + 587, + 834, + 732 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Fig. 1 shows the performance of ACO algorithm. Since the vehicle turning restriction is added to the optimization target, the vehicle can avoid unnecessary turns. Moreover, when executing different mission objectives, although there are many obstacles between different mission locations, our method can always quickly reach different mission locations without collision. Fig. 2 shows the changes in path length of different intelligent optimization algorithms at 1000 iterations. 
It can be clearly seen that aco decreases faster and can obtain better results.", + "bbox": [ + 508, + 768, + 928, + 892 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/a25b7358b01014167c89cfc867e2d4289d78c680e78cc5cb9ac1bd136c6e5ce8.jpg", + "table_caption": [ + "TABLE I. PERFORMANCE OF PATH PLANNING" + ], + "table_footnote": [], + "table_body": "
MethodLength(m)Time(s)Truning(rad)Smoothness(rad)
A*13490.120.40140.2269
GA14553.780.36650.2967
PSO17342.370.59340.1571
RRT*16274.610.48870.2094
Hybrid A*12640.750.33160.192
ACO12010.640.27930.1222
", + "bbox": [ + 89, + 84, + 465, + 227 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Table I shows that ACO outperforms the comparison algorithms in terms of path length, task completion time, and number of turns. In both simple and complex maps, ACO achieves the shortest path length, the least path planning time, and significantly reduces the number of turns by optimizing the turn penalty. In addition, ACO has the smallest standard deviation of path curvature, indicating the smoothest path.", + "bbox": [ + 66, + 229, + 486, + 328 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/194254464f3c3d5fbd2efc2e837cdb9d816e725a388ef24bf5f8981525b1e3b4.jpg", + "table_caption": [ + "TABLE II. PERFORMANCE OF TASK COMPLETION" + ], + "table_footnote": [], + "table_body": "
MethodTask completion(%)Time(s)
PSO753.37
GA734.61
ACO913.64
", + "bbox": [ + 130, + 358, + 424, + 441 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Table II shows the task completion of ACO. The results show that, compared with PSO and other methods, ACO is significantly better than other methods in task completion and consumes less time.", + "bbox": [ + 66, + 441, + 486, + 497 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "IV. CONCLUSIONS", + "text_level": 1, + "bbox": [ + 207, + 516, + 334, + 529 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In this paper, we applied the ACO algorithm to path planning, focusing on optimizing four key performance metrics: Path Length, Task Completion Time, Turning Counts, and Motion Smoothness. The proposed method was extensively evaluated and compared with several existing path planning algorithms. Experimental results demonstrate that the ACO-based approach significantly outperforms the compared methods in terms of efficiency, smoothness, and overall performance. The optimized path planning solution not only reduces travel distance and task completion time but also minimizes unnecessary turns and enhances motion smoothness, making it particularly suitable for real-world applications such as logistics vehicles. Future work will focus on adapting the algorithm for dynamic environments and integrating it into larger-scale logistics systems.", + "bbox": [ + 66, + 535, + 488, + 729 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 233, + 747, + 320, + 758 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Li, G., Liu, C., Wu, L., & Xiao, W. (2023). A mixing algorithm of ACO and ABC for solving path planning of mobile robot. Applied Soft Computing, 148, 110868.", + "[2] Xing, J., Xing, R., Xue, C., & Luo, D. (2024). Enhancing Link Prediction with Fuzzy Graph Attention Networks and Dynamic Negative Sampling. 
arXiv preprint arXiv:2411.07482.", + "[3] Zhou, T., & Wei, W. (2024). Mobile robot path planning based on an improved ACO algorithm and path optimization. Multimedia Tools and Applications, 1-24." + ], + "bbox": [ + 68, + 779, + 485, + 888 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[4] Chen, L., Su, Y., Zhang, D., Leng, Z., Qi, Y., & Jiang, K. (2021, May). Research on path planning for mobile robots based on improved ACO. In 2021 36th Youth Academic Annual Conference of Chinese Association of Automation (YAC) (pp. 379-383). IEEE.", + "[5] Ke, Z., Zhou, S., Zhou, Y., Chang, C. H., & Zhang, R. (2025). Detection of ai deepfake and fraud in online payments using gan-based models. arXiv preprint arXiv:2501.07033.", + "[6] Ke, Z., & Yin, Y. (2024, November). Tail risk alert based on conditional autoregressive var by regression quantiles and machine learning algorithms. In 2024 5th International Conference on Artificial Intelligence and Computer Engineering (ICAICE) (pp. 527-532). IEEE.", + "[7] Xing, J., Luo, D., Cheng, Q., Xue, C., & Xing, R. (2024). Multi-view Fuzzy Graph Attention Networks for Enhanced Graph Learning. arXiv preprint arXiv:2412.17271.", + "[8] Zhao, J., & Penn, G. (2025, January). Inside-Outside Algorithm for Probabilistic Product-Free Lambek Categorical Grammar. In Proceedings of the 31st International Conference on Computational Linguistics (pp. 8295-8303).", + "[9] He, L., Ka, D. H., Ehtesham-Ul-Haque, M., Billah, S. M., & Tehranchi, F. (2023, December). Cognitive models for abacus gesture learning. In Proceedings of the Annual Meeting of the Cognitive Science Society (Vol. 46).", + "[10] Zhao, J., & Penn, G. (2024, November). LLM-supertagger: Categorical Grammar Supertagging via Large Language Models. In Findings of the Association for Computational Linguistics: EMNLP 2024 (pp. 697-705).", + "[11] Li, G. C., He, L., & Fleming, L. (2023). 
Philanthropic supported innovation: trends, areas, and impact. Scientometrics, 128(10), 5507-5520", + "[12] Weng, Y., & Wu, J. (2024). Fortifying the global data fortress: a multidimensional examination of cyber security indexes and data protection measures across 193 nations. International Journal of Frontiers in Engineering Technology, 6(2), 13-28.", + "[13] Ji, Y., Ma, W., Sivarajkumar, S., Zhang, H., Sadhu, E. M., Li, Z., ... & Wang, Y. (2024). Mitigating the risk of health inequity exacerbated by large language models. arXiv preprint arXiv:2410.05180.", + "[14] Hu, W., Hu, Y., Stas, M., & Farrell, J. A. (2024). Optimization-based outlier accommodation for tightly coupled rtk-aided inertial navigation systems in urban environments. arXiv preprint arXiv:2407.13912.", + "[15] Ji, Y., Li, Z., Meng, R., Sivarajkumar, S., Wang, Y., Yu, Z., ... & He, D. (2024). Rag-rlrc-laysum at biolaysum: Integrating retrieval-augmented generation and readability control for layman summarization of biomedical texts. arXiv preprint arXiv:2405.13179.", + "[16] Hu, W., Neupane, A., & Farrell, J. A. (2022). Using PPP information to implement a global real-time virtual network DGNSS approach. IEEE Transactions on Vehicular Technology, 71(10), 10337-10349.", + "[17] Dan, H. C., Huang, Z., Lu, B., & Li, M. (2024). Image-driven prediction system: Automatic extraction of aggregate gradation of pavement core samples integrating deep learning and interactive image processing framework. Construction and Building Materials, 453, 139056.", + "[18] Ding, T., & Xiang, D. (2024). Irregularity Inspection using Neural Radiance Field. arXiv preprint arXiv:2408.11251.", + "[19] Qiao, G., Liu, G., Poupart, P., & Xu, Z. (2023). Multi-modal inverse constrained reinforcement learning from a mixture of demonstrations. Advances in Neural Information Processing Systems, 36, 60384-60396.", + "[20] Li, Z., Wang, B., & Chen, Y. (2024). 
Knowledge Graph Embedding and Few-Shot Relational Learning Methods for Digital Assets in USA. Journal of Industrial Engineering and Applied Science, 2(5), 10-18.", + "[21] Dan, H. C., Lu, B., & Li, M. (2024). Evaluation of asphalt pavement texture using multiview stereo reconstruction based on deep learning. Construction and Building Materials, 412, 134837.", + "[22] Li, Z., Bookbinder, J. H., & Elhedhli, S. (2012). Optimal shipment decisions for an airfreight forwarder: Formulation and solution methods. Transportation Research Part C: Emerging Technologies, 21(1), 17-30.", + "[23] Qiao, G., Jiang, H., & Min, Y. (2022, May). Research on Vehicle Distance Recognition System Based on Machine Learning and OpenCV. In 2022 IEEE 2nd International Conference on Electronic Technology, Communication and Information (ICETCI) (pp. 334-337). IEEE." + ], + "bbox": [ + 511, + 66, + 926, + 877 + ], + "page_idx": 3 + } +] \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05339/506e32fa-2397-46f0-a31a-fdf0b6768185_model.json b/data/2025/2504_05xxx/2504.05339/506e32fa-2397-46f0-a31a-fdf0b6768185_model.json new file mode 100644 index 0000000000000000000000000000000000000000..2e6843ee872858decf8ce24c4c250673e410f544 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05339/506e32fa-2397-46f0-a31a-fdf0b6768185_model.json @@ -0,0 +1,1242 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.084, + 0.069, + 0.916, + 0.138 + ], + "angle": 0, + "content": "Optimized Path Planning for Logistics Robots Using Ant Colony Algorithm under Multiple Constraints" + }, + { + "type": "text", + "bbox": [ + 0.225, + 0.145, + 0.317, + 0.159 + ], + "angle": 0, + "content": "Haopeng Zhao" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.161, + 0.343, + 0.174 + ], + "angle": 0, + "content": "Independent Researcher" + }, + { + "type": "text", + "bbox": [ + 0.226, + 0.177, + 0.315, + 0.191 + ], + "angle": 0, + "content": "Beijing, China" + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.193, + 
0.366, + 0.206 + ], + "angle": 0, + "content": "haopeng.zhao1894@gmail.com" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.223, + 0.306, + 0.237 + ], + "angle": 0, + "content": "Lipeng Liu" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.239, + 0.325, + 0.253 + ], + "angle": 0, + "content": "Peking University" + }, + { + "type": "text", + "bbox": [ + 0.226, + 0.255, + 0.316, + 0.269 + ], + "angle": 0, + "content": "Beijing, China" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.271, + 0.345, + 0.284 + ], + "angle": 0, + "content": "pengpengpu@163.com" + }, + { + "type": "text", + "bbox": [ + 0.229, + 0.301, + 0.312, + 0.315 + ], + "angle": 0, + "content": "Zheyu Zhang" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.317, + 0.343, + 0.33 + ], + "angle": 0, + "content": "Independent Researcher" + }, + { + "type": "text", + "bbox": [ + 0.226, + 0.332, + 0.316, + 0.347 + ], + "angle": 0, + "content": "Beijing, China" + }, + { + "type": "text", + "bbox": [ + 0.196, + 0.349, + 0.346, + 0.362 + ], + "angle": 0, + "content": "zheyuz2980@gmail.com" + }, + { + "type": "text", + "bbox": [ + 0.067, + 0.364, + 0.487, + 0.574 + ], + "angle": 0, + "content": "Abstract—With the rapid development of the logistics industry, the path planning of logistics vehicles has become increasingly complex, requiring consideration of multiple constraints such as time windows, task sequencing, and motion smoothness. Traditional path planning methods often struggle to balance these competing demands efficiently. In this paper, we propose a path planning technique based on the Ant Colony Optimization (ACO) algorithm to address these challenges. The proposed method optimizes key performance metrics, including path length, task completion time, turning counts, and motion smoothness, to ensure efficient and practical route planning for logistics vehicles. 
Experimental results demonstrate that the ACO-based approach outperforms traditional methods in terms of both efficiency and adaptability. This study provides a robust solution for logistics vehicle path planning, offering significant potential for real-world applications in dynamic and constrained environments." + }, + { + "type": "title", + "bbox": [ + 0.09, + 0.585, + 0.385, + 0.6 + ], + "angle": 0, + "content": "Keywords- ant colony algorithm; path planning" + }, + { + "type": "title", + "bbox": [ + 0.21, + 0.617, + 0.341, + 0.63 + ], + "angle": 0, + "content": "I. INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.067, + 0.635, + 0.487, + 0.775 + ], + "angle": 0, + "content": "The logistics industry plays an important role in the modern economy, and efficient transportation is the cornerstone of its success[1]. However, the path planning of logistics vehicles faces many challenges, especially in urban environments. Logistics vehicles must not only complete tasks within the specified time window[2][3], but also optimize the task sequence to ensure a smooth and energy-efficient route. Traditional path planning methods often fail to fully meet these multi-faceted requirements, resulting in the inability to obtain the optimal solution in real-world scenarios[4][5][6]." + }, + { + "type": "text", + "bbox": [ + 0.067, + 0.78, + 0.49, + 0.907 + ], + "angle": 0, + "content": "In modern urban environments, the path planning problem of logistics vehicles becomes more complicated[7]. In addition to time windows and task sequencing constraints, factors such as traffic congestion, road restrictions, vehicle load, and environmental conditions (such as weather, road conditions, etc.) need to be considered[8]. These factors make it difficult for traditional path planning algorithms to provide effective solutions in dynamic and uncertain environments[9][10]. 
For example, although the Dijkstra algorithm and the \\( \\mathrm{A}^* \\) algorithm" + }, + { + "type": "text", + "bbox": [ + 0.689, + 0.145, + 0.766, + 0.158 + ], + "angle": 0, + "content": "Zhichao Ma" + }, + { + "type": "text", + "bbox": [ + 0.655, + 0.161, + 0.8, + 0.175 + ], + "angle": 0, + "content": "Independent Researcher" + }, + { + "type": "text", + "bbox": [ + 0.676, + 0.177, + 0.778, + 0.191 + ], + "angle": 0, + "content": "Shanghai, China" + }, + { + "type": "text", + "bbox": [ + 0.638, + 0.193, + 0.817, + 0.206 + ], + "angle": 0, + "content": "ma.zhi.chao.max@gmail.com" + }, + { + "type": "text", + "bbox": [ + 0.69, + 0.223, + 0.765, + 0.237 + ], + "angle": 0, + "content": "Yang Wang" + }, + { + "type": "text", + "bbox": [ + 0.655, + 0.239, + 0.8, + 0.253 + ], + "angle": 0, + "content": "Independent Researcher" + }, + { + "type": "text", + "bbox": [ + 0.683, + 0.255, + 0.772, + 0.269 + ], + "angle": 0, + "content": "Beijing, China" + }, + { + "type": "text", + "bbox": [ + 0.646, + 0.27, + 0.81, + 0.284 + ], + "angle": 0, + "content": "wangyrick102@gmail.com" + }, + { + "type": "text", + "bbox": [ + 0.698, + 0.301, + 0.757, + 0.314 + ], + "angle": 0, + "content": "Hao Liu*" + }, + { + "type": "text", + "bbox": [ + 0.655, + 0.317, + 0.8, + 0.33 + ], + "angle": 0, + "content": "Independent Researcher" + }, + { + "type": "text", + "bbox": [ + 0.683, + 0.332, + 0.772, + 0.347 + ], + "angle": 0, + "content": "Beijing, China" + }, + { + "type": "text", + "bbox": [ + 0.662, + 0.349, + 0.794, + 0.362 + ], + "angle": 0, + "content": "modiy.lu@gmail.com" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.363, + 0.929, + 0.544 + ], + "angle": 0, + "content": "perform well in static environments, they often cannot quickly adapt to real-time traffic changes, resulting in low efficiency[11][12][13]. 
The ant colony optimization algorithm can effectively find the optimal path in a complex environment by simulating the behavior of ant colonies releasing pheromones during foraging[14][15]. The distributed computing characteristics of ACO enable it to process multiple path selection problems in parallel, so that high-quality solutions can be found in a short time[16][17][18]. In addition, the adaptability of ACO enables it to dynamically adjust the path planning strategy according to environmental changes, making it particularly effective in dealing with dynamic and uncertain problems[19][20][21]." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.55, + 0.929, + 0.786 + ], + "angle": 0, + "content": "Logistics vehicle path planning usually involves multiple optimization objectives, such as minimizing driving distance, reducing task completion time, reducing energy consumption, and improving path smoothness. These objectives often conflict with each other[22][23]. For example, the shortest path may require more turns, resulting in increased energy consumption and longer driving time. Therefore, finding a balance between these objectives is an important research challenge. This paper adopts the ACO algorithm combined with a multi-objective optimization strategy to consider multiple optimization objectives and find the path with the best overall performance. In order to verify the effectiveness of the proposed method, we designed a series of experiments to simulate the logistics vehicle path planning problem in different urban environments. The experimental results show that the path planning method based on ACO outperforms the traditional algorithm in multiple performance indicators." + }, + { + "type": "title", + "bbox": [ + 0.649, + 0.803, + 0.783, + 0.817 + ], + "angle": 0, + "content": "II. METHODOLOGY" + }, + { + "type": "title", + "bbox": [ + 0.508, + 0.83, + 0.928, + 0.86 + ], + "angle": 0, + "content": "A. 
Modeling Methods for Complex Constraints in Path Planning" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.863, + 0.929, + 0.892 + ], + "angle": 0, + "content": "Logistics robots face stringent time window constraints when executing tasks. Each task is associated with a specific" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.067, + 0.07, + 0.488, + 0.12 + ], + "angle": 0, + "content": "time window \\([t_{\\mathrm{start}}, t_{\\mathrm{end}}]\\), within which the robot must arrive at the designated location to complete the task. These time constraints introduce the following challenges in path planning:" + }, + { + "type": "text", + "bbox": [ + 0.067, + 0.124, + 0.487, + 0.224 + ], + "angle": 0, + "content": "Time window conflict: When the time windows of multiple tasks overlap, the robot must intelligently adjust the order of task execution to ensure that all tasks are completed within the specified time. Such conflicts may cause task delays or reduced system efficiency. For example, in a high-density warehouse environment, overlapping time windows can significantly affect the overall throughput of the logistics system." + }, + { + "type": "text", + "bbox": [ + 0.067, + 0.228, + 0.49, + 0.327 + ], + "angle": 0, + "content": "Path efficiency optimization: While meeting time constraints, the robot must choose the optimal path to minimize the travel distance and time cost. This involves not only path planning in a static environment, but also dynamic obstacle avoidance. Efficient path planning is critical to reducing energy consumption and improving the operational efficiency of the robot fleet." + }, + { + "type": "text", + "bbox": [ + 0.067, + 0.333, + 0.49, + 0.432 + ], + "angle": 0, + "content": "Dynamic adjustment capability: In the actual operating environment, the robot must respond to unexpected situations in real time and dynamically adjust its path planning strategy. 
This capability places higher requirements on the robustness and flexibility of the system. For example, in a dynamic warehouse environment, the robot must quickly replan its route to avoid collisions or adapt to new tasks without affecting the overall plan." + }, + { + "type": "title", + "bbox": [ + 0.068, + 0.437, + 0.281, + 0.453 + ], + "angle": 0, + "content": "Minimization of Path Length:" + }, + { + "type": "equation", + "bbox": [ + 0.188, + 0.461, + 0.487, + 0.502 + ], + "angle": 0, + "content": "\\[\nf _ {1} = \\min \\sum_ {i = 1} ^ {n - 1} d \\left(p _ {i}, p _ {i + 1}\\right) \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.067, + 0.511, + 0.488, + 0.61 + ], + "angle": 0, + "content": "where \\( d(p_i, p_{i+1}) \\) represents the Euclidean distance between consecutive path points \\( p_i \\) and \\( p_{i+1} \\). Optimizing path length directly impacts the robot's energy consumption and task completion efficiency. Shorter paths reduce travel time and energy usage, which is particularly important in large-scale logistics operations." + }, + { + "type": "title", + "bbox": [ + 0.072, + 0.615, + 0.358, + 0.63 + ], + "angle": 0, + "content": "Minimization of Task Completion Time:" + }, + { + "type": "equation", + "bbox": [ + 0.201, + 0.639, + 0.486, + 0.657 + ], + "angle": 0, + "content": "\\[\nf _ {2} = \\min \\max _ {1 \\leq i \\leq n} t _ {i} \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.067, + 0.669, + 0.488, + 0.745 + ], + "angle": 0, + "content": "where \\( t_i \\) denotes the completion time of the \\( i - th \\) task. This objective ensures that the robot can complete all tasks in the shortest possible time, thereby improving system throughput. Minimizing the maximum completion time is critical for meeting tight delivery schedules in time-sensitive applications." 
+ }, + { + "type": "title", + "bbox": [ + 0.068, + 0.751, + 0.307, + 0.766 + ], + "angle": 0, + "content": "Minimization of Turning Counts:" + }, + { + "type": "equation", + "bbox": [ + 0.207, + 0.774, + 0.486, + 0.814 + ], + "angle": 0, + "content": "\\[\nf _ {3} = \\min \\sum_ {i = 2} ^ {n - 1} \\delta \\left(\\theta_ {i}\\right) \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.067, + 0.824, + 0.489, + 0.907 + ], + "angle": 0, + "content": "where \\(\\theta_{i}\\) represents the turning angle at the \\(i - th\\) path point, and \\(\\delta (\\cdot)\\) is an indicator function that takes the value 0, and if the turning angle exceeds a threshold, the value is equal to 1. Reducing turning counts helps improve the robot's motion efficiency and reduces mechanical wear. This is especially" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.068, + 0.928, + 0.097 + ], + "angle": 0, + "content": "important in environments with narrow aisles or limited maneuvering space." + }, + { + "type": "title", + "bbox": [ + 0.51, + 0.103, + 0.771, + 0.118 + ], + "angle": 0, + "content": "Optimization of Motion Smoothness:" + }, + { + "type": "equation", + "bbox": [ + 0.636, + 0.126, + 0.928, + 0.166 + ], + "angle": 0, + "content": "\\[\nf _ {4} = \\min \\sum_ {i = 2} ^ {n - 1} \\left| \\theta_ {i} - \\theta_ {i - 1} \\right| \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.174, + 0.929, + 0.245 + ], + "angle": 0, + "content": "This objective function ensures smooth motion trajectories, reducing energy loss and mechanical wear caused by sharp turns, while enhancing operational stability. Smooth trajectories also contribute to safer and more predictable robot movements, which is essential in environments shared with human workers." 
+ }, + { + "type": "text", + "bbox": [ + 0.508, + 0.25, + 0.931, + 0.39 + ], + "angle": 0, + "content": "By establishing a multi-objective optimization model and employing appropriate optimization algorithm, an optimal or near-optimal path planning solution can be found under the constraints, thereby improving the overall efficiency of the logistics system. Additionally, this study considers the robot's dynamic constraints and environmental uncertainties, further enhancing the practicality and robustness of the algorithm. For instance, the algorithm incorporates real-time sensor data to adapt to changing environmental conditions, ensuring reliable performance in complex operational scenarios." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.404, + 0.929, + 0.434 + ], + "angle": 0, + "content": "B. Multi-Constraint Path Planning Based on Ant Colony Algorithm" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.436, + 0.93, + 0.562 + ], + "angle": 0, + "content": "The ant colony optimization algorithm is a heuristic optimization algorithm that simulates the foraging behavior of ants. It gradually finds the optimal path by imitating the behavior of ants releasing pheromones in the process of looking for food. This paper combines the ant colony algorithm with the time window constraints and multi-objective optimization constraints in the path planning of logistics robots, and proposes an ant colony algorithm to solve the path planning problem in complex environments." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.568, + 0.929, + 0.667 + ], + "angle": 0, + "content": "Based on the traditional ant colony algorithm, this paper introduces time window constraints and multi-objective optimization constraints to improve the pheromone update mechanism and heuristic function design. Pheromone update not only considers the path length, but also combines factors such as task completion time, number of turns, and motion stability. 
The pheromone update formula is as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.595, + 0.675, + 0.928, + 0.695 + ], + "angle": 0, + "content": "\\[\nt a u _ {i j} (t + 1) = (1 - \\rho) \\cdot \\tau_ {i j} (t) + \\Delta \\tau_ {i j} \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.708, + 0.929, + 0.794 + ], + "angle": 0, + "content": "where \\(\\tau_{ij}(t)\\) represents the pheromone concentration on path \\((i,j)\\), \\(\\rho\\) is the pheromone evaporation coefficient, and \\(\\Delta \\tau_{ij}\\) is the pheromone increment, which is calculated by comprehensively considering path length, task completion time, and motion smoothness." + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.8, + 0.929, + 0.83 + ], + "angle": 0, + "content": "The heuristic function is used to guide ants in selecting the next path point. The heuristic function is defined as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.614, + 0.838, + 0.928, + 0.88 + ], + "angle": 0, + "content": "\\[\ne t a _ {i j} = \\frac {1}{d _ {i j}} \\cdot \\frac {1}{1 + \\alpha \\cdot \\text {p e n a l t y} _ {i j}} \\tag {6}\n\\]" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.068, + 0.071, + 0.487, + 0.162 + ], + "angle": 0, + "content": "where \\( d_{ij} \\) is the distance of path \\( (i,j) \\), penalty \\( _{ij} \\) is the turning penalty term, and \\( \\alpha \\) is a weight coefficient used to balance path length and turning counts. When selecting the next path point, ants use a probabilistic selection strategy that considers both pheromone concentration and the heuristic function. 
The path selection probability formula is as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.207, + 0.171, + 0.486, + 0.215 + ], + "angle": 0, + "content": "\\[\np _ {i j} ^ {k} = \\frac {\\left[ \\tau_ {i j} \\right] ^ {\\beta} \\left[ \\eta_ {i j} \\right] ^ {\\gamma}}{\\sum_ {l \\in \\text {a l l o w e d} _ {k}} \\left[ \\tau_ {i l} \\right] ^ {\\beta} \\left[ \\eta_ {i l} \\right] ^ {\\gamma}} \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.068, + 0.223, + 0.487, + 0.28 + ], + "angle": 0, + "content": "where \\(\\beta\\) and \\(\\gamma\\) are the weight coefficients for pheromone and heuristic function, respectively, and allowed\\(_k\\) represents the set of nodes that ant \\(k\\) can choose from at the current node." + }, + { + "type": "text", + "bbox": [ + 0.068, + 0.288, + 0.487, + 0.455 + ], + "angle": 0, + "content": "During the path planning process, the ant needs to check the time window constraints of each task. If the selection of a certain path point will cause the task to be unable to be completed within the specified time, then the path point will be excluded from the optional path points. By dynamically adjusting the path selection strategy, all time window constraints are met. In addition, this paper adopts the weighted summation method to transform the multi-objective optimization problem into a single-objective optimization problem. By adjusting the weight coefficient of each objective function, a balance is achieved between the path length, task completion time, number of turns, and motion smoothness. 
The objective function is defined as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.14, + 0.463, + 0.486, + 0.482 + ], + "angle": 0, + "content": "\\[\nF = w _ {1} \\cdot f _ {1} + w _ {2} \\cdot f _ {2} + w _ {3} \\cdot f _ {3} + w _ {4} \\cdot f _ {4} \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.068, + 0.494, + 0.487, + 0.533 + ], + "angle": 0, + "content": "where \\( w_{1}, w_{2}, w_{3}, w_{4} \\) are the weight coefficients for each objective function, satisfying \\( w_{1} + w_{2} + w_{3} + w_{4} = 1 \\)." + }, + { + "type": "text", + "bbox": [ + 0.068, + 0.542, + 0.488, + 0.736 + ], + "angle": 0, + "content": "The algorithm flow mainly includes four steps: initialization, path construction, pheromone update and iterative optimization. First, the pheromone concentration, heuristic function parameters and starting position of the ants are initialized, and the maximum number of iterations and the number of ants are set. Then each ant gradually builds the path according to the path selection strategy, while checking the time window constraint and multi-objective optimization constraint. After all ants complete the path construction, the pheromone concentration is updated according to the path quality. The higher the path quality, the more pheromone increments. Finally, the path construction and pheromone update process are repeated until the maximum number of iterations is reached or the optimal path that meets the constraints is found." + }, + { + "type": "text", + "bbox": [ + 0.068, + 0.743, + 0.487, + 0.882 + ], + "angle": 0, + "content": "In order to verify the effectiveness of the ant colony algorithm, experiments were carried out in a simulated warehouse environment. The results show that compared with the traditional ant colony algorithm, the proposed algorithm significantly reduces the path length, the number of turns and the task completion time while meeting the time window constraint and improving the smoothness of the movement. 
In addition, the robustness of the algorithm in a dynamic environment has been verified, proving that it can effectively handle unexpected situations." + }, + { + "type": "title", + "bbox": [ + 0.652, + 0.069, + 0.776, + 0.081 + ], + "angle": 0, + "content": "III. EXPERIMENTS" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.087, + 0.928, + 0.184 + ], + "angle": 0, + "content": "To validate the effectiveness of the proposed Ant Colony Optimization algorithm for path planning in logistics robots, simulation experiments were designed and compared with classical algorithms in various scenarios. The experimental environment was built on the Gazebo and ROS platforms, simulating real-world warehouse or factory settings with static obstacles (e.g., shelves, walls). The map size was set to" + }, + { + "type": "text", + "bbox": [ + 0.51, + 0.191, + 0.928, + 0.239 + ], + "angle": 0, + "content": "\\(20\\mathrm{m} \\times 20\\mathrm{m}\\) with a resolution of \\(0.1\\mathrm{m}\\), and the number of task points ranged from 5 to 20, each associated with a time window \\([t_{\\mathrm{start}}, t_{\\mathrm{end}}]\\) randomly distributed between [5s,30s]." + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.248, + 0.929, + 0.388 + ], + "angle": 0, + "content": "The comparison methods included the Ant Colony Optimization, \\(\\mathbf{A}^*\\), and Genetic Algorithm (GA), Particle Swarm Optimization(PSO), RRT*, Hybrid \\(\\mathbf{A}^*\\). The evaluation metrics covered path length, task completion time, turning counts, and motion smoothness. Path length was measured by the total travel distance from the start to the end point; task completion time was the total time for the robot to complete all tasks; turning counts were the total number of turns in the path; and motion smoothness was evaluated by the standard deviation of path curvature, with smaller values indicating smoother paths." 
+ }, + { + "type": "text", + "bbox": [ + 0.509, + 0.394, + 0.928, + 0.436 + ], + "angle": 0, + "content": "The experiments were conducted in a static environment, and each algorithm was run 50 times for each map complexity level, with the average values taken as the results." + }, + { + "type": "image", + "bbox": [ + 0.522, + 0.445, + 0.918, + 0.543 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.604, + 0.557, + 0.834, + 0.571 + ], + "angle": 0, + "content": "Fig 1. The trajectory of the ACO algorithm." + }, + { + "type": "image", + "bbox": [ + 0.591, + 0.588, + 0.836, + 0.733 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.551, + 0.745, + 0.885, + 0.758 + ], + "angle": 0, + "content": "Fig 2. Performance of different algorithms after 1000 iterations." + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.769, + 0.929, + 0.893 + ], + "angle": 0, + "content": "Fig. 1 shows the performance of ACO algorithm. Since the vehicle turning restriction is added to the optimization target, the vehicle can avoid unnecessary turns. Moreover, when executing different mission objectives, although there are many obstacles between different mission locations, our method can always quickly reach different mission locations without collision. Fig. 2 shows the changes in path length of different intelligent optimization algorithms at 1000 iterations. It can be clearly seen that aco decreases faster and can obtain better results." + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.139, + 0.068, + 0.417, + 0.079 + ], + "angle": 0, + "content": "TABLE I. PERFORMANCE OF PATH PLANNING" + }, + { + "type": "table", + "bbox": [ + 0.09, + 0.085, + 0.467, + 0.228 + ], + "angle": 0, + "content": "
MethodLength(m)Time(s)Turning(rad)Smoothness(rad)
A*13490.120.40140.2269
GA14553.780.36650.2967
PSO17342.370.59340.1571
RRT*16274.610.48870.2094
Hybrid A*12640.750.33160.192
ACO12010.640.27930.1222
" + }, + { + "type": "text", + "bbox": [ + 0.067, + 0.23, + 0.487, + 0.329 + ], + "angle": 0, + "content": "Table I shows that ACO outperforms the comparison algorithms in terms of path length, task completion time, and number of turns. In both simple and complex maps, ACO achieves the shortest path length, the least path planning time, and significantly reduces the number of turns by optimizing the turn penalty. In addition, ACO has the smallest standard deviation of path curvature, indicating the smoothest path." + }, + { + "type": "table_caption", + "bbox": [ + 0.131, + 0.343, + 0.424, + 0.354 + ], + "angle": 0, + "content": "TABLE II. PERFORMANCE OF TASK COMPLETION" + }, + { + "type": "table", + "bbox": [ + 0.131, + 0.359, + 0.425, + 0.442 + ], + "angle": 0, + "content": "
MethodTask completion(%)Time(s)
PSO753.37
GA734.61
ACO913.64
" + }, + { + "type": "text", + "bbox": [ + 0.067, + 0.443, + 0.487, + 0.498 + ], + "angle": 0, + "content": "Table II shows the task completion of ACO. The results show that, compared with PSO and other methods, ACO is significantly better than other methods in task completion and consumes less time." + }, + { + "type": "title", + "bbox": [ + 0.209, + 0.517, + 0.335, + 0.53 + ], + "angle": 0, + "content": "IV. CONCLUSIONS" + }, + { + "type": "text", + "bbox": [ + 0.067, + 0.536, + 0.489, + 0.73 + ], + "angle": 0, + "content": "In this paper, we applied the ACO algorithm to path planning, focusing on optimizing four key performance metrics: Path Length, Task Completion Time, Turning Counts, and Motion Smoothness. The proposed method was extensively evaluated and compared with several existing path planning algorithms. Experimental results demonstrate that the ACO-based approach significantly outperforms the compared methods in terms of efficiency, smoothness, and overall performance. The optimized path planning solution not only reduces travel distance and task completion time but also minimizes unnecessary turns and enhances motion smoothness, making it particularly suitable for real-world applications such as logistics vehicles. Future work will focus on adapting the algorithm for dynamic environments and integrating it into larger-scale logistics systems." + }, + { + "type": "title", + "bbox": [ + 0.234, + 0.748, + 0.321, + 0.76 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.07, + 0.78, + 0.486, + 0.815 + ], + "angle": 0, + "content": "[1] Li, G., Liu, C., Wu, L., & Xiao, W. (2023). A mixing algorithm of ACO and ABC for solving path planning of mobile robot. Applied Soft Computing, 148, 110868." + }, + { + "type": "ref_text", + "bbox": [ + 0.07, + 0.818, + 0.485, + 0.852 + ], + "angle": 0, + "content": "[2] Xing, J., Xing, R., Xue, C., & Luo, D. (2024). 
Enhancing Link Prediction with Fuzzy Graph Attention Networks and Dynamic Negative Sampling. arXiv preprint arXiv:2411.07482." + }, + { + "type": "ref_text", + "bbox": [ + 0.07, + 0.855, + 0.485, + 0.89 + ], + "angle": 0, + "content": "[3] Zhou, T., & Wei, W. (2024). Mobile robot path planning based on an improved ACO algorithm and path optimization. Multimedia Tools and Applications, 1-24." + }, + { + "type": "list", + "bbox": [ + 0.07, + 0.78, + 0.486, + 0.89 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.068, + 0.928, + 0.114 + ], + "angle": 0, + "content": "[4] Chen, L., Su, Y., Zhang, D., Leng, Z., Qi, Y., & Jiang, K. (2021, May). Research on path planning for mobile robots based on improved ACO. In 2021 36th Youth Academic Annual Conference of Chinese Association of Automation (YAC) (pp. 379-383). IEEE." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.117, + 0.928, + 0.151 + ], + "angle": 0, + "content": "[5] Ke, Z., Zhou, S., Zhou, Y., Chang, C. H., & Zhang, R. (2025). Detection of ai deepfake and fraud in online payments using gan-based models. arXiv preprint arXiv:2501.07033." + }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.154, + 0.928, + 0.199 + ], + "angle": 0, + "content": "[6] Ke, Z., & Yin, Y. (2024, November). Tail risk alert based on conditional autoregressive var by regression quantiles and machine learning algorithms. In 2024 5th International Conference on Artificial Intelligence and Computer Engineering (ICAICE) (pp. 527-532). IEEE." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.202, + 0.928, + 0.236 + ], + "angle": 0, + "content": "[7] Xing, J., Luo, D., Cheng, Q., Xue, C., & Xing, R. (2024). Multi-view Fuzzy Graph Attention Networks for Enhanced Graph Learning. arXiv preprint arXiv:2412.17271." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.239, + 0.928, + 0.284 + ], + "angle": 0, + "content": "[8] Zhao, J., & Penn, G. (2025, January). 
Inside-Outside Algorithm for Probabilistic Product-Free Lambek Categorical Grammar. In Proceedings of the 31st International Conference on Computational Linguistics (pp. 8295-8303)." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.287, + 0.928, + 0.333 + ], + "angle": 0, + "content": "[9] He, L., Ka, D. H., Ehtesham-Ul-Haque, M., Billah, S. M., & Tehranchi, F. (2023, December). Cognitive models for abacus gesture learning. In Proceedings of the Annual Meeting of the Cognitive Science Society (Vol. 46)." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.336, + 0.928, + 0.371 + ], + "angle": 0, + "content": "[10] Zhao, J., & Penn, G. (2024, November). LLM-supertagger: Categorical Grammar Supertagging via Large Language Models. In Findings of the Association for Computational Linguistics: EMNLP 2024 (pp. 697-705)." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.374, + 0.928, + 0.397 + ], + "angle": 0, + "content": "[11] Li, G. C., He, L., & Fleming, L. (2023). Philanthropic supported innovation: trends, areas, and impact. Scientometrics, 128(10), 5507-5520" + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.4, + 0.928, + 0.445 + ], + "angle": 0, + "content": "[12] Weng, Y., & Wu, J. (2024). Fortifying the global data fortress: a multidimensional examination of cyber security indexes and data protection measures across 193 nations. International Journal of Frontiers in Engineering Technology, 6(2), 13-28." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.448, + 0.928, + 0.483 + ], + "angle": 0, + "content": "[13] Ji, Y., Ma, W., Sivarajkumar, S., Zhang, H., Sadhu, E. M., Li, Z., ... & Wang, Y. (2024). Mitigating the risk of health inequity exacerbated by large language models. arXiv preprint arXiv:2410.05180." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.486, + 0.928, + 0.52 + ], + "angle": 0, + "content": "[14] Hu, W., Hu, Y., Stas, M., & Farrell, J. A. (2024). 
Optimization-based outlier accommodation for tightly coupled rtk-aided inertial navigation systems in urban environments. arXiv preprint arXiv:2407.13912." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.523, + 0.928, + 0.569 + ], + "angle": 0, + "content": "[15] Ji, Y., Li, Z., Meng, R., Sivarajkumar, S., Wang, Y., Yu, Z., ... & He, D. (2024). Rag-rlrc-laysum at biolaysum: Integrating retrieval-augmented generation and readability control for layman summarization of biomedical texts. arXiv preprint arXiv:2405.13179." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.572, + 0.928, + 0.606 + ], + "angle": 0, + "content": "[16] Hu, W., Neupane, A., & Farrell, J. A. (2022). Using PPP information to implement a global real-time virtual network DGNSS approach. IEEE Transactions on Vehicular Technology, 71(10), 10337-10349." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.609, + 0.928, + 0.654 + ], + "angle": 0, + "content": "[17] Dan, H. C., Huang, Z., Lu, B., & Li, M. (2024). Image-driven prediction system: Automatic extraction of aggregate gradation of pavement core samples integrating deep learning and interactive image processing framework. Construction and Building Materials, 453, 139056." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.657, + 0.928, + 0.68 + ], + "angle": 0, + "content": "[18] Ding, T., & Xiang, D. (2024). Irregularity Inspection using Neural Radiance Field. arXiv preprint arXiv:2408.11251." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.684, + 0.928, + 0.718 + ], + "angle": 0, + "content": "[19] Qiao, G., Liu, G., Poupart, P., & Xu, Z. (2023). Multi-modal inverse constrained reinforcement learning from a mixture of demonstrations. Advances in Neural Information Processing Systems, 36, 60384-60396." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.721, + 0.928, + 0.755 + ], + "angle": 0, + "content": "[20] Li, Z., Wang, B., & Chen, Y. (2024). 
Knowledge Graph Embedding and Few-Shot Relational Learning Methods for Digital Assets in USA. Journal of Industrial Engineering and Applied Science, 2(5), 10-18." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.758, + 0.928, + 0.792 + ], + "angle": 0, + "content": "[21] Dan, H. C., Lu, B., & Li, M. (2024). Evaluation of asphalt pavement texture using multiview stereo reconstruction based on deep learning. Construction and Building Materials, 412, 134837." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.796, + 0.928, + 0.83 + ], + "angle": 0, + "content": "[22] Li, Z., Bookbinder, J. H., & Elhedhli, S. (2012). Optimal shipment decisions for an airfreight forwarder: Formulation and solution methods. Transportation Research Part C: Emerging Technologies, 21(1), 17-30." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.833, + 0.928, + 0.878 + ], + "angle": 0, + "content": "[23] Qiao, G., Jiang, H., & Min, Y. (2022, May). Research on Vehicle Distance Recognition System Based on Machine Learning and OpenCV. In 2022 IEEE 2nd International Conference on Electronic Technology, Communication and Information (ICETCI) (pp. 334-337). IEEE." 
+ }, + { + "type": "list", + "bbox": [ + 0.513, + 0.068, + 0.928, + 0.878 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05339/506e32fa-2397-46f0-a31a-fdf0b6768185_origin.pdf b/data/2025/2504_05xxx/2504.05339/506e32fa-2397-46f0-a31a-fdf0b6768185_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..32c5bc152ef393602c80652736391494aeb08690 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05339/506e32fa-2397-46f0-a31a-fdf0b6768185_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8257c6b897c546ca327569782a962d50c408e6c5bf2240acd7723721e44ee3c6 +size 187733 diff --git a/data/2025/2504_05xxx/2504.05339/full.md b/data/2025/2504_05xxx/2504.05339/full.md new file mode 100644 index 0000000000000000000000000000000000000000..3b7b19e36a5f2b2e29b86f886c4cc7ca1a45f845 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05339/full.md @@ -0,0 +1,212 @@ +# Optimized Path Planning for Logistics Robots Using Ant Colony Algorithm under Multiple Constraints + +Haopeng Zhao + +Independent Researcher + +Beijing, China + +haopeng.zhao1894@gmail.com + +Lipeng Liu + +Peking University + +Beijing, China + +pengpengpu@163.com + +Zheyu Zhang + +Independent Researcher + +Beijing, China + +zheyuz2980@gmail.com + +Abstract—With the rapid development of the logistics industry, the path planning of logistics vehicles has become increasingly complex, requiring consideration of multiple constraints such as time windows, task sequencing, and motion smoothness. Traditional path planning methods often struggle to balance these competing demands efficiently. In this paper, we propose a path planning technique based on the Ant Colony Optimization (ACO) algorithm to address these challenges. 
The proposed method optimizes key performance metrics, including path length, task completion time, turning counts, and motion smoothness, to ensure efficient and practical route planning for logistics vehicles. Experimental results demonstrate that the ACO-based approach outperforms traditional methods in terms of both efficiency and adaptability. This study provides a robust solution for logistics vehicle path planning, offering significant potential for real-world applications in dynamic and constrained environments. + +# Keywords- ant colony algorithm; path planning + +# I. INTRODUCTION + +The logistics industry plays an important role in the modern economy, and efficient transportation is the cornerstone of its success[1]. However, the path planning of logistics vehicles faces many challenges, especially in urban environments. Logistics vehicles must not only complete tasks within the specified time window[2][3], but also optimize the task sequence to ensure a smooth and energy-efficient route. Traditional path planning methods often fail to fully meet these multi-faceted requirements, resulting in the inability to obtain the optimal solution in real-world scenarios[4][5][6]. + +In modern urban environments, the path planning problem of logistics vehicles becomes more complicated[7]. In addition to time windows and task sequencing constraints, factors such as traffic congestion, road restrictions, vehicle load, and environmental conditions (such as weather, road conditions, etc.) need to be considered[8]. These factors make it difficult for traditional path planning algorithms to provide effective solutions in dynamic and uncertain environments[9][10]. 
For example, although the Dijkstra algorithm and the $\mathrm{A}^*$ algorithm + +Zhichao Ma + +Independent Researcher + +Shanghai, China + +ma.zhi.chao.max@gmail.com + +Yang Wang + +Independent Researcher + +Beijing, China + +wangyrick102@gmail.com + +Hao Liu* + +Independent Researcher + +Beijing, China + +modiy.lu@gmail.com + +perform well in static environments, they often cannot quickly adapt to real-time traffic changes, resulting in low efficiency[11][12][13]. The ant colony optimization algorithm can effectively find the optimal path in a complex environment by simulating the behavior of ant colonies releasing pheromones during foraging[14][15]. The distributed computing characteristics of ACO enable it to process multiple path selection problems in parallel, so that high-quality solutions can be found in a short time[16][17][18]. In addition, the adaptability of ACO enables it to dynamically adjust the path planning strategy according to environmental changes, making it particularly effective in dealing with dynamic and uncertain problems[19][20][21]. + +Logistics vehicle path planning usually involves multiple optimization objectives, such as minimizing driving distance, reducing task completion time, reducing energy consumption, and improving path smoothness. These objectives often conflict with each other[22][23]. For example, the shortest path may require more turns, resulting in increased energy consumption and longer driving time. Therefore, finding a balance between these objectives is an important research challenge. This paper adopts the ACO algorithm combined with a multi-objective optimization strategy to consider multiple optimization objectives and find the path with the best overall performance. In order to verify the effectiveness of the proposed method, we designed a series of experiments to simulate the logistics vehicle path planning problem in different urban environments. 
The experimental results show that the path planning method based on ACO outperforms the traditional algorithm in multiple performance indicators. + +# II. METHODOLOGY + +# A. Modeling Methods for Complex Constraints in Path Planning + +Logistics robots face stringent time window constraints when executing tasks. Each task is associated with a specific + +time window $[t_{\mathrm{start}}, t_{\mathrm{end}}]$ , within which the robot must arrive at the designated location to complete the task. These time constraints introduce the following challenges in path planning: + +Time window conflict: When the time windows of multiple tasks overlap, the robot must intelligently adjust the order of task execution to ensure that all tasks are completed within the specified time. Such conflicts may cause task delays or reduced system efficiency. For example, in a high-density warehouse environment, overlapping time windows can significantly affect the overall throughput of the logistics system. + +Path efficiency optimization: While meeting time constraints, the robot must choose the optimal path to minimize the travel distance and time cost. This involves not only path planning in a static environment, but also dynamic obstacle avoidance. Efficient path planning is critical to reducing energy consumption and improving the operational efficiency of the robot fleet. + +Dynamic adjustment capability: In the actual operating environment, the robot must respond to unexpected situations in real time and dynamically adjust its path planning strategy. This capability places higher requirements on the robustness and flexibility of the system. For example, in a dynamic warehouse environment, the robot must quickly replan its route to avoid collisions or adapt to new tasks without affecting the overall plan. 
+ +# Minimization of Path Length: + +$$ +f _ {1} = \min \sum_ {i = 1} ^ {n - 1} d \left(p _ {i}, p _ {i + 1}\right) \tag {1} +$$ + +where $d(p_i, p_{i+1})$ represents the Euclidean distance between consecutive path points $p_i$ and $p_{i+1}$ . Optimizing path length directly impacts the robot's energy consumption and task completion efficiency. Shorter paths reduce travel time and energy usage, which is particularly important in large-scale logistics operations. + +# Minimization of Task Completion Time: + +$$ +f _ {2} = \min \max _ {1 \leq i \leq n} t _ {i} \tag {2} +$$ + +where $t_i$ denotes the completion time of the $i - th$ task. This objective ensures that the robot can complete all tasks in the shortest possible time, thereby improving system throughput. Minimizing the maximum completion time is critical for meeting tight delivery schedules in time-sensitive applications. + +# Minimization of Turning Counts: + +$$ +f _ {3} = \min \sum_ {i = 2} ^ {n - 1} \delta \left(\theta_ {i}\right) \tag {3} +$$ + +where $\theta_{i}$ represents the turning angle at the $i - th$ path point, and $\delta (\cdot)$ is an indicator function that takes the value 0, and if the turning angle exceeds a threshold, the value is equal to 1. Reducing turning counts helps improve the robot's motion efficiency and reduces mechanical wear. This is especially + +important in environments with narrow aisles or limited maneuvering space. + +# Optimization of Motion Smoothness: + +$$ +f _ {4} = \min \sum_ {i = 2} ^ {n - 1} \left| \theta_ {i} - \theta_ {i - 1} \right| \tag {4} +$$ + +This objective function ensures smooth motion trajectories, reducing energy loss and mechanical wear caused by sharp turns, while enhancing operational stability. Smooth trajectories also contribute to safer and more predictable robot movements, which is essential in environments shared with human workers. 
+ +By establishing a multi-objective optimization model and employing appropriate optimization algorithm, an optimal or near-optimal path planning solution can be found under the constraints, thereby improving the overall efficiency of the logistics system. Additionally, this study considers the robot's dynamic constraints and environmental uncertainties, further enhancing the practicality and robustness of the algorithm. For instance, the algorithm incorporates real-time sensor data to adapt to changing environmental conditions, ensuring reliable performance in complex operational scenarios. + +# B. Multi-Constraint Path Planning Based on Ant Colony Algorithm + +The ant colony optimization algorithm is a heuristic optimization algorithm that simulates the foraging behavior of ants. It gradually finds the optimal path by imitating the behavior of ants releasing pheromones in the process of looking for food. This paper combines the ant colony algorithm with the time window constraints and multi-objective optimization constraints in the path planning of logistics robots, and proposes an ant colony algorithm to solve the path planning problem in complex environments. + +Based on the traditional ant colony algorithm, this paper introduces time window constraints and multi-objective optimization constraints to improve the pheromone update mechanism and heuristic function design. Pheromone update not only considers the path length, but also combines factors such as task completion time, number of turns, and motion stability. The pheromone update formula is as follows: + +$$ +t a u _ {i j} (t + 1) = (1 - \rho) \cdot \tau_ {i j} (t) + \Delta \tau_ {i j} \tag {5} +$$ + +where $\tau_{ij}(t)$ represents the pheromone concentration on path $(i,j)$ , $\rho$ is the pheromone evaporation coefficient, and $\Delta \tau_{ij}$ is the pheromone increment, which is calculated by comprehensively considering path length, task completion time, and motion smoothness. 
+ +The heuristic function is used to guide ants in selecting the next path point. The heuristic function is defined as follows: + +$$ +e t a _ {i j} = \frac {1}{d _ {i j}} \cdot \frac {1}{1 + \alpha \cdot \text {p e n a l t y} _ {i j}} \tag {6} +$$ + +where $d_{ij}$ is the distance of path $(i,j)$ , penalty $_{ij}$ is the turning penalty term, and $\alpha$ is a weight coefficient used to balance path length and turning counts. When selecting the next path point, ants use a probabilistic selection strategy that considers both pheromone concentration and the heuristic function. The path selection probability formula is as follows: + +$$ +p _ {i j} ^ {k} = \frac {\left[ \tau_ {i j} \right] ^ {\beta} \left[ \eta_ {i j} \right] ^ {\gamma}}{\sum_ {l \in \text {a l l o w e d} _ {k}} \left[ \tau_ {i l} \right] ^ {\beta} \left[ \eta_ {i l} \right] ^ {\gamma}} \tag {7} +$$ + +where $\beta$ and $\gamma$ are the weight coefficients for pheromone and heuristic function, respectively, and allowed $_k$ represents the set of nodes that ant $k$ can choose from at the current node. + +During the path planning process, the ant needs to check the time window constraints of each task. If the selection of a certain path point will cause the task to be unable to be completed within the specified time, then the path point will be excluded from the optional path points. By dynamically adjusting the path selection strategy, all time window constraints are met. In addition, this paper adopts the weighted summation method to transform the multi-objective optimization problem into a single-objective optimization problem. By adjusting the weight coefficient of each objective function, a balance is achieved between the path length, task completion time, number of turns, and motion smoothness. 
The objective function is defined as follows: + +$$ +F = w _ {1} \cdot f _ {1} + w _ {2} \cdot f _ {2} + w _ {3} \cdot f _ {3} + w _ {4} \cdot f _ {4} \tag {8} +$$ + +where $w_{1}, w_{2}, w_{3}, w_{4}$ are the weight coefficients for each objective function, satisfying $w_{1} + w_{2} + w_{3} + w_{4} = 1$ . + +The algorithm flow mainly includes four steps: initialization, path construction, pheromone update and iterative optimization. First, the pheromone concentration, heuristic function parameters and starting position of the ants are initialized, and the maximum number of iterations and the number of ants are set. Then each ant gradually builds the path according to the path selection strategy, while checking the time window constraint and multi-objective optimization constraint. After all ants complete the path construction, the pheromone concentration is updated according to the path quality. The higher the path quality, the more pheromone increments. Finally, the path construction and pheromone update process are repeated until the maximum number of iterations is reached or the optimal path that meets the constraints is found. + +In order to verify the effectiveness of the ant colony algorithm, experiments were carried out in a simulated warehouse environment. The results show that compared with the traditional ant colony algorithm, the proposed algorithm significantly reduces the path length, the number of turns and the task completion time while meeting the time window constraint and improving the smoothness of the movement. In addition, the robustness of the algorithm in a dynamic environment has been verified, proving that it can effectively handle unexpected situations. + +# III. EXPERIMENTS + +To validate the effectiveness of the proposed Ant Colony Optimization algorithm for path planning in logistics robots, simulation experiments were designed and compared with classical algorithms in various scenarios. 
The experimental environment was built on the Gazebo and ROS platforms, simulating real-world warehouse or factory settings with static obstacles (e.g., shelves, walls). The map size was set to + +$20\mathrm{m} \times 20\mathrm{m}$ with a resolution of $0.1\mathrm{m}$ , and the number of task points ranged from 5 to 20, each associated with a time window $[t_{\mathrm{start}}, t_{\mathrm{end}}]$ randomly distributed between [5s,30s]. + +The comparison methods included the Ant Colony Optimization, $\mathbf{A}^*$ , and Genetic Algorithm (GA), Particle Swarm Optimization(PSO), RRT*, Hybrid $\mathbf{A}^*$ . The evaluation metrics covered path length, task completion time, turning counts, and motion smoothness. Path length was measured by the total travel distance from the start to the end point; task completion time was the total time for the robot to complete all tasks; turning counts were the total number of turns in the path; and motion smoothness was evaluated by the standard deviation of path curvature, with smaller values indicating smoother paths. + +The experiments were conducted in a static environment, and each algorithm was run 50 times for each map complexity level, with the average values taken as the results. + +![](images/1b6bbae033c75747881776da18cb58629caeae11583f07f2df418b6359ba6fc5.jpg) +Fig 1. The trajectory of the ACO algorithm. + +![](images/61c7b1f73f9d3371b40b5220c37c1a038601f62b2a688a7ffdca7eb7c36118a3.jpg) +Fig 2. Performance of different algorithms after 1000 iterations. + +Fig. 1 shows the performance of ACO algorithm. Since the vehicle turning restriction is added to the optimization target, the vehicle can avoid unnecessary turns. Moreover, when executing different mission objectives, although there are many obstacles between different mission locations, our method can always quickly reach different mission locations without collision. Fig. 
2 shows the changes in path length of different intelligent optimization algorithms over 1000 iterations. It can be clearly seen that ACO decreases faster and can obtain better results.
MethodLength(m)Time(s)Truning(rad)Smoothness(rad)
A*13490.120.40140.2269
GA14553.780.36650.2967
PSO17342.370.59340.1571
RRT*16274.610.48870.2094
Hybrid A*12640.750.33160.192
ACO12010.640.27930.1222
+ +Table I shows that ACO outperforms the comparison algorithms in terms of path length, task completion time, and number of turns. In both simple and complex maps, ACO achieves the shortest path length, the least path planning time, and significantly reduces the number of turns by optimizing the turn penalty. In addition, ACO has the smallest standard deviation of path curvature, indicating the smoothest path. + +TABLE II. PERFORMANCE OF TASK COMPLETION + +
MethodTask completion(%)Time(s)
PSO753.37
GA734.61
ACO913.64
+ +Table II shows the task completion of ACO. The results show that, compared with PSO and other methods, ACO is significantly better than other methods in task completion and consumes less time. + +# IV. CONCLUSIONS + +In this paper, we applied the ACO algorithm to path planning, focusing on optimizing four key performance metrics: Path Length, Task Completion Time, Turning Counts, and Motion Smoothness. The proposed method was extensively evaluated and compared with several existing path planning algorithms. Experimental results demonstrate that the ACO-based approach significantly outperforms the compared methods in terms of efficiency, smoothness, and overall performance. The optimized path planning solution not only reduces travel distance and task completion time but also minimizes unnecessary turns and enhances motion smoothness, making it particularly suitable for real-world applications such as logistics vehicles. Future work will focus on adapting the algorithm for dynamic environments and integrating it into larger-scale logistics systems. + +# REFERENCES + +[1] Li, G., Liu, C., Wu, L., & Xiao, W. (2023). A mixing algorithm of ACO and ABC for solving path planning of mobile robot. Applied Soft Computing, 148, 110868. +[2] Xing, J., Xing, R., Xue, C., & Luo, D. (2024). Enhancing Link Prediction with Fuzzy Graph Attention Networks and Dynamic Negative Sampling. arXiv preprint arXiv:2411.07482. +[3] Zhou, T., & Wei, W. (2024). Mobile robot path planning based on an improved ACO algorithm and path optimization. Multimedia Tools and Applications, 1-24. + +[4] Chen, L., Su, Y., Zhang, D., Leng, Z., Qi, Y., & Jiang, K. (2021, May). Research on path planning for mobile robots based on improved ACO. In 2021 36th Youth Academic Annual Conference of Chinese Association of Automation (YAC) (pp. 379-383). IEEE. +[5] Ke, Z., Zhou, S., Zhou, Y., Chang, C. H., & Zhang, R. (2025). Detection of ai deepfake and fraud in online payments using gan-based models. 
arXiv preprint arXiv:2501.07033. +[6] Ke, Z., & Yin, Y. (2024, November). Tail risk alert based on conditional autoregressive var by regression quantiles and machine learning algorithms. In 2024 5th International Conference on Artificial Intelligence and Computer Engineering (ICAICE) (pp. 527-532). IEEE. +[7] Xing, J., Luo, D., Cheng, Q., Xue, C., & Xing, R. (2024). Multi-view Fuzzy Graph Attention Networks for Enhanced Graph Learning. arXiv preprint arXiv:2412.17271. +[8] Zhao, J., & Penn, G. (2025, January). Inside-Outside Algorithm for Probabilistic Product-Free Lambek Categorical Grammar. In Proceedings of the 31st International Conference on Computational Linguistics (pp. 8295-8303). +[9] He, L., Ka, D. H., Ehtesham-Ul-Haque, M., Billah, S. M., & Tehranchi, F. (2023, December). Cognitive models for abacus gesture learning. In Proceedings of the Annual Meeting of the Cognitive Science Society (Vol. 46). +[10] Zhao, J., & Penn, G. (2024, November). LLM-supertagger: Categorical Grammar Supertagging via Large Language Models. In Findings of the Association for Computational Linguistics: EMNLP 2024 (pp. 697-705). +[11] Li, G. C., He, L., & Fleming, L. (2023). Philanthropic supported innovation: trends, areas, and impact. Scientometrics, 128(10), 5507-5520 +[12] Weng, Y., & Wu, J. (2024). Fortifying the global data fortress: a multidimensional examination of cyber security indexes and data protection measures across 193 nations. International Journal of Frontiers in Engineering Technology, 6(2), 13-28. +[13] Ji, Y., Ma, W., Sivarajkumar, S., Zhang, H., Sadhu, E. M., Li, Z., ... & Wang, Y. (2024). Mitigating the risk of health inequity exacerbated by large language models. arXiv preprint arXiv:2410.05180. +[14] Hu, W., Hu, Y., Stas, M., & Farrell, J. A. (2024). Optimization-based outlier accommodation for tightly coupled rtk-aided inertial navigation systems in urban environments. arXiv preprint arXiv:2407.13912. 
+[15] Ji, Y., Li, Z., Meng, R., Sivarajkumar, S., Wang, Y., Yu, Z., ... & He, D. (2024). Rag-rlrc-laysum at biolaysum: Integrating retrieval-augmented generation and readability control for layman summarization of biomedical texts. arXiv preprint arXiv:2405.13179. +[16] Hu, W., Neupane, A., & Farrell, J. A. (2022). Using PPP information to implement a global real-time virtual network DGNSS approach. IEEE Transactions on Vehicular Technology, 71(10), 10337-10349. +[17] Dan, H. C., Huang, Z., Lu, B., & Li, M. (2024). Image-driven prediction system: Automatic extraction of aggregate gradation of pavement core samples integrating deep learning and interactive image processing framework. Construction and Building Materials, 453, 139056. +[18] Ding, T., & Xiang, D. (2024). Irregularity Inspection using Neural Radiance Field. arXiv preprint arXiv:2408.11251. +[19] Qiao, G., Liu, G., Poupart, P., & Xu, Z. (2023). Multi-modal inverse constrained reinforcement learning from a mixture of demonstrations. Advances in Neural Information Processing Systems, 36, 60384-60396. +[20] Li, Z., Wang, B., & Chen, Y. (2024). Knowledge Graph Embedding and Few-Shot Relational Learning Methods for Digital Assets in USA. Journal of Industrial Engineering and Applied Science, 2(5), 10-18. +[21] Dan, H. C., Lu, B., & Li, M. (2024). Evaluation of asphalt pavement texture using multiview stereo reconstruction based on deep learning. Construction and Building Materials, 412, 134837. +[22] Li, Z., Bookbinder, J. H., & Elhedhli, S. (2012). Optimal shipment decisions for an airfreight forwarder: Formulation and solution methods. Transportation Research Part C: Emerging Technologies, 21(1), 17-30. +[23] Qiao, G., Jiang, H., & Min, Y. (2022, May). Research on Vehicle Distance Recognition System Based on Machine Learning and OpenCV. In 2022 IEEE 2nd International Conference on Electronic Technology, Communication and Information (ICETCI) (pp. 334-337). IEEE. 
\ No newline at end of file diff --git a/data/2025/2504_05xxx/2504.05339/images/06db0b754c0e3d955b435fc5d94c8c6acc67a34b023e56c7bd20ffc2f60aab54.jpg b/data/2025/2504_05xxx/2504.05339/images/06db0b754c0e3d955b435fc5d94c8c6acc67a34b023e56c7bd20ffc2f60aab54.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8706a2e0fa7dd0320b4653bd90e2a1526166eb59 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05339/images/06db0b754c0e3d955b435fc5d94c8c6acc67a34b023e56c7bd20ffc2f60aab54.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d86dde2944c05a55779fbc5bb82e5ddce9ce18831caf339af81cde575b44a28a +size 5211 diff --git a/data/2025/2504_05xxx/2504.05339/images/10e47bfb2c227813d339013b62ab3eb771ac9cae898fdc47aa99d9a7ffcc9aae.jpg b/data/2025/2504_05xxx/2504.05339/images/10e47bfb2c227813d339013b62ab3eb771ac9cae898fdc47aa99d9a7ffcc9aae.jpg new file mode 100644 index 0000000000000000000000000000000000000000..15c447e6ee49c195535308c53d20d10b1a85fcb8 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05339/images/10e47bfb2c227813d339013b62ab3eb771ac9cae898fdc47aa99d9a7ffcc9aae.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:798a7940b7a475c1667592d0ac2f1b449d864a478b486338327fa0def850153d +size 3602 diff --git a/data/2025/2504_05xxx/2504.05339/images/194254464f3c3d5fbd2efc2e837cdb9d816e725a388ef24bf5f8981525b1e3b4.jpg b/data/2025/2504_05xxx/2504.05339/images/194254464f3c3d5fbd2efc2e837cdb9d816e725a388ef24bf5f8981525b1e3b4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9080621e28956a8139d3bb139cda144c1f7f8234 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05339/images/194254464f3c3d5fbd2efc2e837cdb9d816e725a388ef24bf5f8981525b1e3b4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d8f9bc337af8e2bdcfdeed7b55b2433bbad31f2a05d98b8a21f337232f38bc3d +size 11625 diff --git 
a/data/2025/2504_05xxx/2504.05339/images/1b6bbae033c75747881776da18cb58629caeae11583f07f2df418b6359ba6fc5.jpg b/data/2025/2504_05xxx/2504.05339/images/1b6bbae033c75747881776da18cb58629caeae11583f07f2df418b6359ba6fc5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0dd666936e3e54049601bd61fb49302bb65ad5c0 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05339/images/1b6bbae033c75747881776da18cb58629caeae11583f07f2df418b6359ba6fc5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:907cc47759cb82d2a4cacd0489384cf5210ef9276d4c2f979ece439510c24108 +size 16810 diff --git a/data/2025/2504_05xxx/2504.05339/images/32141b04e5de1ff896ce630995cfc426a79e854a71360e81a1a6339b0d7f0132.jpg b/data/2025/2504_05xxx/2504.05339/images/32141b04e5de1ff896ce630995cfc426a79e854a71360e81a1a6339b0d7f0132.jpg new file mode 100644 index 0000000000000000000000000000000000000000..44b792d49e7e5b47d19abfce7357def0b64e0b3c --- /dev/null +++ b/data/2025/2504_05xxx/2504.05339/images/32141b04e5de1ff896ce630995cfc426a79e854a71360e81a1a6339b0d7f0132.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a1c98bb61e6161649ae40ff94d0bf8a3fc050beceb1fa5d96d50bd9a69194119 +size 5361 diff --git a/data/2025/2504_05xxx/2504.05339/images/4798f76c22bcf6d9e1450c7b98e132063881eb2b0819aeaae4957f77887b0215.jpg b/data/2025/2504_05xxx/2504.05339/images/4798f76c22bcf6d9e1450c7b98e132063881eb2b0819aeaae4957f77887b0215.jpg new file mode 100644 index 0000000000000000000000000000000000000000..59b0911966b05a3208f0884db5ee176e544d383a --- /dev/null +++ b/data/2025/2504_05xxx/2504.05339/images/4798f76c22bcf6d9e1450c7b98e132063881eb2b0819aeaae4957f77887b0215.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eac5334274d0127297c9758564e7262c13bf245798f161b11e51beb32e78a026 +size 4742 diff --git a/data/2025/2504_05xxx/2504.05339/images/5af334bbacb9d3a53f34ac0e1814e6d054418adc62feb88f156c53218d7ddc34.jpg 
b/data/2025/2504_05xxx/2504.05339/images/5af334bbacb9d3a53f34ac0e1814e6d054418adc62feb88f156c53218d7ddc34.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a4b353159764ba3ca1f9d14740d9342fe57645ca --- /dev/null +++ b/data/2025/2504_05xxx/2504.05339/images/5af334bbacb9d3a53f34ac0e1814e6d054418adc62feb88f156c53218d7ddc34.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:39e65b0235bc300950d4f0e8db713abf49f3843f628406cb164162496051e7bf +size 5026 diff --git a/data/2025/2504_05xxx/2504.05339/images/61c7b1f73f9d3371b40b5220c37c1a038601f62b2a688a7ffdca7eb7c36118a3.jpg b/data/2025/2504_05xxx/2504.05339/images/61c7b1f73f9d3371b40b5220c37c1a038601f62b2a688a7ffdca7eb7c36118a3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c931437c28668d251d73a2350bd3c5a5daa7fdc1 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05339/images/61c7b1f73f9d3371b40b5220c37c1a038601f62b2a688a7ffdca7eb7c36118a3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a89e66fbe2a3b3beda242a16e411e5ac32fa1220010b86201ba4cc9147755269 +size 26021 diff --git a/data/2025/2504_05xxx/2504.05339/images/834b21f488f8304309c6870724cbf1faf3d62eaf266a137e2dcaa8703366e5b2.jpg b/data/2025/2504_05xxx/2504.05339/images/834b21f488f8304309c6870724cbf1faf3d62eaf266a137e2dcaa8703366e5b2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b6c09ce14f4d0e7095e2a8f132f6e66cd0849e2f --- /dev/null +++ b/data/2025/2504_05xxx/2504.05339/images/834b21f488f8304309c6870724cbf1faf3d62eaf266a137e2dcaa8703366e5b2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:02013091456f71da84d6490b4e91368b00af120f888fe9515876341fcc309098 +size 5161 diff --git a/data/2025/2504_05xxx/2504.05339/images/a25b7358b01014167c89cfc867e2d4289d78c680e78cc5cb9ac1bd136c6e5ce8.jpg b/data/2025/2504_05xxx/2504.05339/images/a25b7358b01014167c89cfc867e2d4289d78c680e78cc5cb9ac1bd136c6e5ce8.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..cb95f1ed79d705e067160b0b20cfd3eb14119f74 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05339/images/a25b7358b01014167c89cfc867e2d4289d78c680e78cc5cb9ac1bd136c6e5ce8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8b20e4f9a4e97306ed0f33b407b94b82d84ba157fdc821b4bf7da5ac305de9f0 +size 26301 diff --git a/data/2025/2504_05xxx/2504.05339/images/f1ef57b1037badfa3b429c5aa21f8fad05ff8b453a30efde2602f6f9fcd4a0b0.jpg b/data/2025/2504_05xxx/2504.05339/images/f1ef57b1037badfa3b429c5aa21f8fad05ff8b453a30efde2602f6f9fcd4a0b0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b1fa204aafce78757f45c9756e500420dd477177 --- /dev/null +++ b/data/2025/2504_05xxx/2504.05339/images/f1ef57b1037badfa3b429c5aa21f8fad05ff8b453a30efde2602f6f9fcd4a0b0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c01f93ea1680b2571557200dc16e35bd133284655622dca096107fb9fe0dccc +size 4708 diff --git a/data/2025/2504_05xxx/2504.05339/images/f2ce133434e5e640a901032cce996c8f0f20be60e5c89441d1394cc80923fb1f.jpg b/data/2025/2504_05xxx/2504.05339/images/f2ce133434e5e640a901032cce996c8f0f20be60e5c89441d1394cc80923fb1f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ca7b4aa2e1fbba450afb77dd2c6b7c9e13e9c72a --- /dev/null +++ b/data/2025/2504_05xxx/2504.05339/images/f2ce133434e5e640a901032cce996c8f0f20be60e5c89441d1394cc80923fb1f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ca553cca7a13d481d9c69dbf178f4fd11fb4a02c09ac11e24de2890626f5ffa4 +size 5654 diff --git a/data/2025/2504_05xxx/2504.05339/layout.json b/data/2025/2504_05xxx/2504.05339/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..8639c2c8fcc76aa5ede77cf1c4aa3fddc596877d --- /dev/null +++ b/data/2025/2504_05xxx/2504.05339/layout.json @@ -0,0 +1,4332 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 51, + 54, + 560, + 109 + ], + "type": "title", + 
"angle": 0, + "lines": [ + { + "bbox": [ + 51, + 54, + 560, + 109 + ], + "spans": [ + { + "bbox": [ + 51, + 54, + 560, + 109 + ], + "type": "text", + "content": "Optimized Path Planning for Logistics Robots Using Ant Colony Algorithm under Multiple Constraints" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 137, + 114, + 194, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 114, + 194, + 125 + ], + "spans": [ + { + "bbox": [ + 137, + 114, + 194, + 125 + ], + "type": "text", + "content": "Haopeng Zhao" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 121, + 127, + 209, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 127, + 209, + 137 + ], + "spans": [ + { + "bbox": [ + 121, + 127, + 209, + 137 + ], + "type": "text", + "content": "Independent Researcher" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 138, + 140, + 192, + 151 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 140, + 192, + 151 + ], + "spans": [ + { + "bbox": [ + 138, + 140, + 192, + 151 + ], + "type": "text", + "content": "Beijing, China" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 152, + 223, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 152, + 223, + 163 + ], + "spans": [ + { + "bbox": [ + 107, + 152, + 223, + 163 + ], + "type": "text", + "content": "haopeng.zhao1894@gmail.com" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 143, + 176, + 187, + 187 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 176, + 187, + 187 + ], + "spans": [ + { + "bbox": [ + 143, + 176, + 187, + 187 + ], + "type": "text", + "content": "Lipeng Liu" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 189, + 198, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 189, + 198, + 200 + ], + "spans": [ + { + "bbox": [ + 132, + 189, + 198, + 200 + ], + "type": "text", + "content": "Peking University" + } + ] + 
} + ], + "index": 6 + }, + { + "bbox": [ + 138, + 201, + 193, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 201, + 193, + 213 + ], + "spans": [ + { + "bbox": [ + 138, + 201, + 193, + 213 + ], + "type": "text", + "content": "Beijing, China" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 121, + 214, + 211, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 214, + 211, + 224 + ], + "spans": [ + { + "bbox": [ + 121, + 214, + 211, + 224 + ], + "type": "text", + "content": "pengpengpu@163.com" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 140, + 238, + 190, + 249 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 238, + 190, + 249 + ], + "spans": [ + { + "bbox": [ + 140, + 238, + 190, + 249 + ], + "type": "text", + "content": "Zheyu Zhang" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 121, + 251, + 209, + 261 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 251, + 209, + 261 + ], + "spans": [ + { + "bbox": [ + 121, + 251, + 209, + 261 + ], + "type": "text", + "content": "Independent Researcher" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 138, + 262, + 193, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 262, + 193, + 274 + ], + "spans": [ + { + "bbox": [ + 138, + 262, + 193, + 274 + ], + "type": "text", + "content": "Beijing, China" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 119, + 276, + 211, + 286 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 276, + 211, + 286 + ], + "spans": [ + { + "bbox": [ + 119, + 276, + 211, + 286 + ], + "type": "text", + "content": "zheyuz2980@gmail.com" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 41, + 288, + 298, + 454 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 288, + 298, + 454 + ], + "spans": [ + { + "bbox": [ + 41, + 288, + 298, + 454 + ], + "type": "text", + "content": "Abstract—With 
the rapid development of the logistics industry, the path planning of logistics vehicles has become increasingly complex, requiring consideration of multiple constraints such as time windows, task sequencing, and motion smoothness. Traditional path planning methods often struggle to balance these competing demands efficiently. In this paper, we propose a path planning technique based on the Ant Colony Optimization (ACO) algorithm to address these challenges. The proposed method optimizes key performance metrics, including path length, task completion time, turning counts, and motion smoothness, to ensure efficient and practical route planning for logistics vehicles. Experimental results demonstrate that the ACO-based approach outperforms traditional methods in terms of both efficiency and adaptability. This study provides a robust solution for logistics vehicle path planning, offering significant potential for real-world applications in dynamic and constrained environments." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 55, + 463, + 235, + 475 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 463, + 235, + 475 + ], + "spans": [ + { + "bbox": [ + 55, + 463, + 235, + 475 + ], + "type": "text", + "content": "Keywords- ant colony algorithm; path planning" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 128, + 488, + 208, + 498 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 128, + 488, + 208, + 498 + ], + "spans": [ + { + "bbox": [ + 128, + 488, + 208, + 498 + ], + "type": "text", + "content": "I. INTRODUCTION" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 41, + 502, + 298, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 502, + 298, + 613 + ], + "spans": [ + { + "bbox": [ + 41, + 502, + 298, + 613 + ], + "type": "text", + "content": "The logistics industry plays an important role in the modern economy, and efficient transportation is the cornerstone of its success[1]. 
However, the path planning of logistics vehicles faces many challenges, especially in urban environments. Logistics vehicles must not only complete tasks within the specified time window[2][3], but also optimize the task sequence to ensure a smooth and energy-efficient route. Traditional path planning methods often fail to fully meet these multi-faceted requirements, resulting in the inability to obtain the optimal solution in real-world scenarios[4][5][6]." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 41, + 617, + 299, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 617, + 299, + 718 + ], + "spans": [ + { + "bbox": [ + 41, + 617, + 299, + 718 + ], + "type": "text", + "content": "In modern urban environments, the path planning problem of logistics vehicles becomes more complicated[7]. In addition to time windows and task sequencing constraints, factors such as traffic congestion, road restrictions, vehicle load, and environmental conditions (such as weather, road conditions, etc.) need to be considered[8]. These factors make it difficult for traditional path planning algorithms to provide effective solutions in dynamic and uncertain environments[9][10]. 
For example, although the Dijkstra algorithm and the " + }, + { + "bbox": [ + 41, + 617, + 299, + 718 + ], + "type": "inline_equation", + "content": "\\mathrm{A}^*" + }, + { + "bbox": [ + 41, + 617, + 299, + 718 + ], + "type": "text", + "content": " algorithm" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 421, + 114, + 468, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 421, + 114, + 468, + 125 + ], + "spans": [ + { + "bbox": [ + 421, + 114, + 468, + 125 + ], + "type": "text", + "content": "Zhichao Ma" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 400, + 127, + 489, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 400, + 127, + 489, + 138 + ], + "spans": [ + { + "bbox": [ + 400, + 127, + 489, + 138 + ], + "type": "text", + "content": "Independent Researcher" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 413, + 140, + 476, + 151 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 413, + 140, + 476, + 151 + ], + "spans": [ + { + "bbox": [ + 413, + 140, + 476, + 151 + ], + "type": "text", + "content": "Shanghai, China" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 390, + 152, + 500, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 390, + 152, + 500, + 163 + ], + "spans": [ + { + "bbox": [ + 390, + 152, + 500, + 163 + ], + "type": "text", + "content": "ma.zhi.chao.max@gmail.com" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 422, + 176, + 468, + 187 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 422, + 176, + 468, + 187 + ], + "spans": [ + { + "bbox": [ + 422, + 176, + 468, + 187 + ], + "type": "text", + "content": "Yang Wang" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 400, + 189, + 489, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 400, + 189, + 489, + 200 + ], + "spans": [ + { + "bbox": [ + 400, + 189, + 489, + 200 + ], + "type": "text", + "content": "Independent Researcher" + } 
+ ] + } + ], + "index": 23 + }, + { + "bbox": [ + 417, + 201, + 472, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 417, + 201, + 472, + 213 + ], + "spans": [ + { + "bbox": [ + 417, + 201, + 472, + 213 + ], + "type": "text", + "content": "Beijing, China" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 395, + 213, + 495, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 395, + 213, + 495, + 224 + ], + "spans": [ + { + "bbox": [ + 395, + 213, + 495, + 224 + ], + "type": "text", + "content": "wangyrick102@gmail.com" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 427, + 238, + 463, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 427, + 238, + 463, + 248 + ], + "spans": [ + { + "bbox": [ + 427, + 238, + 463, + 248 + ], + "type": "text", + "content": "Hao Liu*" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 400, + 251, + 489, + 261 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 400, + 251, + 489, + 261 + ], + "spans": [ + { + "bbox": [ + 400, + 251, + 489, + 261 + ], + "type": "text", + "content": "Independent Researcher" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 417, + 262, + 472, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 417, + 262, + 472, + 274 + ], + "spans": [ + { + "bbox": [ + 417, + 262, + 472, + 274 + ], + "type": "text", + "content": "Beijing, China" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 405, + 276, + 485, + 286 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 405, + 276, + 485, + 286 + ], + "spans": [ + { + "bbox": [ + 405, + 276, + 485, + 286 + ], + "type": "text", + "content": "modiy.lu@gmail.com" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 310, + 287, + 568, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 287, + 568, + 430 + ], + "spans": [ + { + "bbox": [ + 310, + 287, + 568, + 430 + ], + "type": "text", + "content": 
"perform well in static environments, they often cannot quickly adapt to real-time traffic changes, resulting in low efficiency[11][12][13]. The ant colony optimization algorithm can effectively find the optimal path in a complex environment by simulating the behavior of ant colonies releasing pheromones during foraging[14][15]. The distributed computing characteristics of ACO enable it to process multiple path selection problems in parallel, so that high-quality solutions can be found in a short time[16][17][18]. In addition, the adaptability of ACO enables it to dynamically adjust the path planning strategy according to environmental changes, making it particularly effective in dealing with dynamic and uncertain problems[19][20][21]." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 310, + 435, + 568, + 622 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 435, + 568, + 622 + ], + "spans": [ + { + "bbox": [ + 310, + 435, + 568, + 622 + ], + "type": "text", + "content": "Logistics vehicle path planning usually involves multiple optimization objectives, such as minimizing driving distance, reducing task completion time, reducing energy consumption, and improving path smoothness. These objectives often conflict with each other[22][23]. For example, the shortest path may require more turns, resulting in increased energy consumption and longer driving time. Therefore, finding a balance between these objectives is an important research challenge. This paper adopts the ACO algorithm combined with a multi-objective optimization strategy to consider multiple optimization objectives and find the path with the best overall performance. In order to verify the effectiveness of the proposed method, we designed a series of experiments to simulate the logistics vehicle path planning problem in different urban environments. 
The experimental results show that the path planning method based on ACO outperforms the traditional algorithm in multiple performance indicators." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 397, + 635, + 479, + 647 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 397, + 635, + 479, + 647 + ], + "spans": [ + { + "bbox": [ + 397, + 635, + 479, + 647 + ], + "type": "text", + "content": "II. METHODOLOGY" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 310, + 657, + 567, + 681 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 657, + 567, + 681 + ], + "spans": [ + { + "bbox": [ + 310, + 657, + 567, + 681 + ], + "type": "text", + "content": "A. Modeling Methods for Complex Constraints in Path Planning" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 310, + 683, + 568, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 683, + 568, + 706 + ], + "spans": [ + { + "bbox": [ + 310, + 683, + 568, + 706 + ], + "type": "text", + "content": "Logistics robots face stringent time window constraints when executing tasks. Each task is associated with a specific" + } + ] + } + ], + "index": 34 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 41, + 55, + 298, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 55, + 298, + 95 + ], + "spans": [ + { + "bbox": [ + 41, + 55, + 298, + 95 + ], + "type": "text", + "content": "time window " + }, + { + "bbox": [ + 41, + 55, + 298, + 95 + ], + "type": "inline_equation", + "content": "[t_{\\mathrm{start}}, t_{\\mathrm{end}}]" + }, + { + "bbox": [ + 41, + 55, + 298, + 95 + ], + "type": "text", + "content": ", within which the robot must arrive at the designated location to complete the task. 
These time constraints introduce the following challenges in path planning:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 41, + 98, + 298, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 98, + 298, + 177 + ], + "spans": [ + { + "bbox": [ + 41, + 98, + 298, + 177 + ], + "type": "text", + "content": "Time window conflict: When the time windows of multiple tasks overlap, the robot must intelligently adjust the order of task execution to ensure that all tasks are completed within the specified time. Such conflicts may cause task delays or reduced system efficiency. For example, in a high-density warehouse environment, overlapping time windows can significantly affect the overall throughput of the logistics system." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 41, + 180, + 299, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 180, + 299, + 258 + ], + "spans": [ + { + "bbox": [ + 41, + 180, + 299, + 258 + ], + "type": "text", + "content": "Path efficiency optimization: While meeting time constraints, the robot must choose the optimal path to minimize the travel distance and time cost. This involves not only path planning in a static environment, but also dynamic obstacle avoidance. Efficient path planning is critical to reducing energy consumption and improving the operational efficiency of the robot fleet." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 41, + 263, + 299, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 263, + 299, + 342 + ], + "spans": [ + { + "bbox": [ + 41, + 263, + 299, + 342 + ], + "type": "text", + "content": "Dynamic adjustment capability: In the actual operating environment, the robot must respond to unexpected situations in real time and dynamically adjust its path planning strategy. This capability places higher requirements on the robustness and flexibility of the system. 
For example, in a dynamic warehouse environment, the robot must quickly replan its route to avoid collisions or adapt to new tasks without affecting the overall plan." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 41, + 346, + 171, + 358 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 346, + 171, + 358 + ], + "spans": [ + { + "bbox": [ + 41, + 346, + 171, + 358 + ], + "type": "text", + "content": "Minimization of Path Length:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 115, + 365, + 298, + 397 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 365, + 298, + 397 + ], + "spans": [ + { + "bbox": [ + 115, + 365, + 298, + 397 + ], + "type": "interline_equation", + "content": "f _ {1} = \\min \\sum_ {i = 1} ^ {n - 1} d \\left(p _ {i}, p _ {i + 1}\\right) \\tag {1}", + "image_path": "06db0b754c0e3d955b435fc5d94c8c6acc67a34b023e56c7bd20ffc2f60aab54.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 41, + 404, + 298, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 404, + 298, + 483 + ], + "spans": [ + { + "bbox": [ + 41, + 404, + 298, + 483 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 41, + 404, + 298, + 483 + ], + "type": "inline_equation", + "content": "d(p_i, p_{i+1})" + }, + { + "bbox": [ + 41, + 404, + 298, + 483 + ], + "type": "text", + "content": " represents the Euclidean distance between consecutive path points " + }, + { + "bbox": [ + 41, + 404, + 298, + 483 + ], + "type": "inline_equation", + "content": "p_i" + }, + { + "bbox": [ + 41, + 404, + 298, + 483 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 41, + 404, + 298, + 483 + ], + "type": "inline_equation", + "content": "p_{i+1}" + }, + { + "bbox": [ + 41, + 404, + 298, + 483 + ], + "type": "text", + "content": ". Optimizing path length directly impacts the robot's energy consumption and task completion efficiency. 
Shorter paths reduce travel time and energy usage, which is particularly important in large-scale logistics operations." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 487, + 219, + 498 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 487, + 219, + 498 + ], + "spans": [ + { + "bbox": [ + 44, + 487, + 219, + 498 + ], + "type": "text", + "content": "Minimization of Task Completion Time:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 123, + 506, + 297, + 520 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 506, + 297, + 520 + ], + "spans": [ + { + "bbox": [ + 123, + 506, + 297, + 520 + ], + "type": "interline_equation", + "content": "f _ {2} = \\min \\max _ {1 \\leq i \\leq n} t _ {i} \\tag {2}", + "image_path": "10e47bfb2c227813d339013b62ab3eb771ac9cae898fdc47aa99d9a7ffcc9aae.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 41, + 529, + 298, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 529, + 298, + 590 + ], + "spans": [ + { + "bbox": [ + 41, + 529, + 298, + 590 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 41, + 529, + 298, + 590 + ], + "type": "inline_equation", + "content": "t_i" + }, + { + "bbox": [ + 41, + 529, + 298, + 590 + ], + "type": "text", + "content": " denotes the completion time of the " + }, + { + "bbox": [ + 41, + 529, + 298, + 590 + ], + "type": "inline_equation", + "content": "i - th" + }, + { + "bbox": [ + 41, + 529, + 298, + 590 + ], + "type": "text", + "content": " task. This objective ensures that the robot can complete all tasks in the shortest possible time, thereby improving system throughput. Minimizing the maximum completion time is critical for meeting tight delivery schedules in time-sensitive applications." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 41, + 594, + 187, + 606 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 594, + 187, + 606 + ], + "spans": [ + { + "bbox": [ + 41, + 594, + 187, + 606 + ], + "type": "text", + "content": "Minimization of Turning Counts:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 126, + 613, + 297, + 644 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 126, + 613, + 297, + 644 + ], + "spans": [ + { + "bbox": [ + 126, + 613, + 297, + 644 + ], + "type": "interline_equation", + "content": "f _ {3} = \\min \\sum_ {i = 2} ^ {n - 1} \\delta \\left(\\theta_ {i}\\right) \\tag {3}", + "image_path": "4798f76c22bcf6d9e1450c7b98e132063881eb2b0819aeaae4957f77887b0215.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 41, + 652, + 299, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 652, + 299, + 718 + ], + "spans": [ + { + "bbox": [ + 41, + 652, + 299, + 718 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 41, + 652, + 299, + 718 + ], + "type": "inline_equation", + "content": "\\theta_{i}" + }, + { + "bbox": [ + 41, + 652, + 299, + 718 + ], + "type": "text", + "content": " represents the turning angle at the " + }, + { + "bbox": [ + 41, + 652, + 299, + 718 + ], + "type": "inline_equation", + "content": "i - th" + }, + { + "bbox": [ + 41, + 652, + 299, + 718 + ], + "type": "text", + "content": " path point, and " + }, + { + "bbox": [ + 41, + 652, + 299, + 718 + ], + "type": "inline_equation", + "content": "\\delta (\\cdot)" + }, + { + "bbox": [ + 41, + 652, + 299, + 718 + ], + "type": "text", + "content": " is an indicator function that takes the value 0, and if the turning angle exceeds a threshold, the value is equal to 1. Reducing turning counts helps improve the robot's motion efficiency and reduces mechanical wear. 
This is especially" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 311, + 53, + 567, + 76 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 53, + 567, + 76 + ], + "spans": [ + { + "bbox": [ + 311, + 53, + 567, + 76 + ], + "type": "text", + "content": "important in environments with narrow aisles or limited maneuvering space." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 312, + 81, + 471, + 93 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 81, + 471, + 93 + ], + "spans": [ + { + "bbox": [ + 312, + 81, + 471, + 93 + ], + "type": "text", + "content": "Optimization of Motion Smoothness:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 389, + 99, + 567, + 131 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 389, + 99, + 567, + 131 + ], + "spans": [ + { + "bbox": [ + 389, + 99, + 567, + 131 + ], + "type": "interline_equation", + "content": "f _ {4} = \\min \\sum_ {i = 2} ^ {n - 1} \\left| \\theta_ {i} - \\theta_ {i - 1} \\right| \\tag {4}", + "image_path": "f1ef57b1037badfa3b429c5aa21f8fad05ff8b453a30efde2602f6f9fcd4a0b0.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 311, + 137, + 568, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 137, + 568, + 194 + ], + "spans": [ + { + "bbox": [ + 311, + 137, + 568, + 194 + ], + "type": "text", + "content": "This objective function ensures smooth motion trajectories, reducing energy loss and mechanical wear caused by sharp turns, while enhancing operational stability. Smooth trajectories also contribute to safer and more predictable robot movements, which is essential in environments shared with human workers." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 310, + 198, + 569, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 198, + 569, + 308 + ], + "spans": [ + { + "bbox": [ + 310, + 198, + 569, + 308 + ], + "type": "text", + "content": "By establishing a multi-objective optimization model and employing appropriate optimization algorithm, an optimal or near-optimal path planning solution can be found under the constraints, thereby improving the overall efficiency of the logistics system. Additionally, this study considers the robot's dynamic constraints and environmental uncertainties, further enhancing the practicality and robustness of the algorithm. For instance, the algorithm incorporates real-time sensor data to adapt to changing environmental conditions, ensuring reliable performance in complex operational scenarios." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 311, + 319, + 568, + 343 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 319, + 568, + 343 + ], + "spans": [ + { + "bbox": [ + 311, + 319, + 568, + 343 + ], + "type": "text", + "content": "B. Multi-Constraint Path Planning Based on Ant Colony Algorithm" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 310, + 345, + 569, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 345, + 569, + 445 + ], + "spans": [ + { + "bbox": [ + 310, + 345, + 569, + 445 + ], + "type": "text", + "content": "The ant colony optimization algorithm is a heuristic optimization algorithm that simulates the foraging behavior of ants. It gradually finds the optimal path by imitating the behavior of ants releasing pheromones in the process of looking for food. This paper combines the ant colony algorithm with the time window constraints and multi-objective optimization constraints in the path planning of logistics robots, and proposes an ant colony algorithm to solve the path planning problem in complex environments." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 310, + 449, + 568, + 528 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 449, + 568, + 528 + ], + "spans": [ + { + "bbox": [ + 310, + 449, + 568, + 528 + ], + "type": "text", + "content": "Based on the traditional ant colony algorithm, this paper introduces time window constraints and multi-objective optimization constraints to improve the pheromone update mechanism and heuristic function design. Pheromone update not only considers the path length, but also combines factors such as task completion time, number of turns, and motion stability. The pheromone update formula is as follows:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 364, + 534, + 567, + 550 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 364, + 534, + 567, + 550 + ], + "spans": [ + { + "bbox": [ + 364, + 534, + 567, + 550 + ], + "type": "interline_equation", + "content": "\\tau_ {i j} (t + 1) = (1 - \\rho) \\cdot \\tau_ {i j} (t) + \\Delta \\tau_ {i j} \\tag {5}", + "image_path": "834b21f488f8304309c6870724cbf1faf3d62eaf266a137e2dcaa8703366e5b2.jpg" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 311, + 560, + 568, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 560, + 568, + 628 + ], + "spans": [ + { + "bbox": [ + 311, + 560, + 568, + 628 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 311, + 560, + 568, + 628 + ], + "type": "inline_equation", + "content": "\\tau_{ij}(t)" + }, + { + "bbox": [ + 311, + 560, + 568, + 628 + ], + "type": "text", + "content": " represents the pheromone concentration on path " + }, + { + "bbox": [ + 311, + 560, + 568, + 628 + ], + "type": "inline_equation", + "content": "(i,j)" + }, + { + "bbox": [ + 311, + 560, + 568, + 628 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 311, + 560, + 568, + 628 + ], + "type": "inline_equation", + "content": "\\rho" + }, + { + "bbox": [ + 
311, + 560, + 568, + 628 + ], + "type": "text", + "content": " is the pheromone evaporation coefficient, and " + }, + { + "bbox": [ + 311, + 560, + 568, + 628 + ], + "type": "inline_equation", + "content": "\\Delta \\tau_{ij}" + }, + { + "bbox": [ + 311, + 560, + 568, + 628 + ], + "type": "text", + "content": " is the pheromone increment, which is calculated by comprehensively considering path length, task completion time, and motion smoothness." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 311, + 633, + 568, + 657 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 633, + 568, + 657 + ], + "spans": [ + { + "bbox": [ + 311, + 633, + 568, + 657 + ], + "type": "text", + "content": "The heuristic function is used to guide ants in selecting the next path point. The heuristic function is defined as follows:" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 375, + 663, + 567, + 696 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 375, + 663, + 567, + 696 + ], + "spans": [ + { + "bbox": [ + 375, + 663, + 567, + 696 + ], + "type": "interline_equation", + "content": "\\eta_ {i j} = \\frac {1}{d _ {i j}} \\cdot \\frac {1}{1 + \\alpha \\cdot \\text {penalty} _ {i j}} \\tag {6}", + "image_path": "f2ce133434e5e640a901032cce996c8f0f20be60e5c89441d1394cc80923fb1f.jpg" + } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 41, + 56, + 298, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 56, + 298, + 128 + ], + "spans": [ + { + "bbox": [ + 41, + 56, + 298, + 128 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 41, + 56, + 298, + 128 + ], + "type": "inline_equation", + "content": "d_{ij}" + }, + { + "bbox": [ + 41, + 56, + 298, + 128 + ], + "type": "text", + "content": " is the distance of path " + }, + { + "bbox": [ + 41, + 56, + 298, + 128 + ], + 
"type": "inline_equation", + "content": "(i,j)" + }, + { + "bbox": [ + 41, + 56, + 298, + 128 + ], + "type": "text", + "content": ", penalty " + }, + { + "bbox": [ + 41, + 56, + 298, + 128 + ], + "type": "inline_equation", + "content": "_{ij}" + }, + { + "bbox": [ + 41, + 56, + 298, + 128 + ], + "type": "text", + "content": " is the turning penalty term, and " + }, + { + "bbox": [ + 41, + 56, + 298, + 128 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 41, + 56, + 298, + 128 + ], + "type": "text", + "content": " is a weight coefficient used to balance path length and turning counts. When selecting the next path point, ants use a probabilistic selection strategy that considers both pheromone concentration and the heuristic function. The path selection probability formula is as follows:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 126, + 135, + 297, + 170 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 126, + 135, + 297, + 170 + ], + "spans": [ + { + "bbox": [ + 126, + 135, + 297, + 170 + ], + "type": "interline_equation", + "content": "p _ {i j} ^ {k} = \\frac {\\left[ \\tau_ {i j} \\right] ^ {\\beta} \\left[ \\eta_ {i j} \\right] ^ {\\gamma}}{\\sum_ {l \\in \\text {a l l o w e d} _ {k}} \\left[ \\tau_ {i l} \\right] ^ {\\beta} \\left[ \\eta_ {i l} \\right] ^ {\\gamma}} \\tag {7}", + "image_path": "32141b04e5de1ff896ce630995cfc426a79e854a71360e81a1a6339b0d7f0132.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 41, + 176, + 298, + 221 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 176, + 298, + 221 + ], + "spans": [ + { + "bbox": [ + 41, + 176, + 298, + 221 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 41, + 176, + 298, + 221 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 41, + 176, + 298, + 221 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 41, + 176, + 298, + 221 + ], + "type": 
"inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 41, + 176, + 298, + 221 + ], + "type": "text", + "content": " are the weight coefficients for pheromone and heuristic function, respectively, and allowed" + }, + { + "bbox": [ + 41, + 176, + 298, + 221 + ], + "type": "inline_equation", + "content": "_k" + }, + { + "bbox": [ + 41, + 176, + 298, + 221 + ], + "type": "text", + "content": " represents the set of nodes that ant " + }, + { + "bbox": [ + 41, + 176, + 298, + 221 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 41, + 176, + 298, + 221 + ], + "type": "text", + "content": " can choose from at the current node." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 41, + 228, + 298, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 228, + 298, + 360 + ], + "spans": [ + { + "bbox": [ + 41, + 228, + 298, + 360 + ], + "type": "text", + "content": "During the path planning process, the ant needs to check the time window constraints of each task. If the selection of a certain path point will cause the task to be unable to be completed within the specified time, then the path point will be excluded from the optional path points. By dynamically adjusting the path selection strategy, all time window constraints are met. In addition, this paper adopts the weighted summation method to transform the multi-objective optimization problem into a single-objective optimization problem. By adjusting the weight coefficient of each objective function, a balance is achieved between the path length, task completion time, number of turns, and motion smoothness. 
The objective function is defined as follows:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 85, + 366, + 297, + 381 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 366, + 297, + 381 + ], + "spans": [ + { + "bbox": [ + 85, + 366, + 297, + 381 + ], + "type": "interline_equation", + "content": "F = w _ {1} \\cdot f _ {1} + w _ {2} \\cdot f _ {2} + w _ {3} \\cdot f _ {3} + w _ {4} \\cdot f _ {4} \\tag {8}", + "image_path": "5af334bbacb9d3a53f34ac0e1814e6d054418adc62feb88f156c53218d7ddc34.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 41, + 391, + 298, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 391, + 298, + 422 + ], + "spans": [ + { + "bbox": [ + 41, + 391, + 298, + 422 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 41, + 391, + 298, + 422 + ], + "type": "inline_equation", + "content": "w_{1}, w_{2}, w_{3}, w_{4}" + }, + { + "bbox": [ + 41, + 391, + 298, + 422 + ], + "type": "text", + "content": " are the weight coefficients for each objective function, satisfying " + }, + { + "bbox": [ + 41, + 391, + 298, + 422 + ], + "type": "inline_equation", + "content": "w_{1} + w_{2} + w_{3} + w_{4} = 1" + }, + { + "bbox": [ + 41, + 391, + 298, + 422 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 41, + 429, + 298, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 429, + 298, + 582 + ], + "spans": [ + { + "bbox": [ + 41, + 429, + 298, + 582 + ], + "type": "text", + "content": "The algorithm flow mainly includes four steps: initialization, path construction, pheromone update and iterative optimization. First, the pheromone concentration, heuristic function parameters and starting position of the ants are initialized, and the maximum number of iterations and the number of ants are set. 
Then each ant gradually builds the path according to the path selection strategy, while checking the time window constraint and multi-objective optimization constraint. After all ants complete the path construction, the pheromone concentration is updated according to the path quality. The higher the path quality, the more pheromone increments. Finally, the path construction and pheromone update process are repeated until the maximum number of iterations is reached or the optimal path that meets the constraints is found." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 41, + 588, + 298, + 698 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 588, + 298, + 698 + ], + "spans": [ + { + "bbox": [ + 41, + 588, + 298, + 698 + ], + "type": "text", + "content": "In order to verify the effectiveness of the ant colony algorithm, experiments were carried out in a simulated warehouse environment. The results show that compared with the traditional ant colony algorithm, the proposed algorithm significantly reduces the path length, the number of turns and the task completion time while meeting the time window constraint and improving the smoothness of the movement. In addition, the robustness of the algorithm in a dynamic environment has been verified, proving that it can effectively handle unexpected situations." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 399, + 54, + 474, + 64 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 399, + 54, + 474, + 64 + ], + "spans": [ + { + "bbox": [ + 399, + 54, + 474, + 64 + ], + "type": "text", + "content": "III. 
EXPERIMENTS" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 311, + 68, + 567, + 145 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 68, + 567, + 145 + ], + "spans": [ + { + "bbox": [ + 311, + 68, + 567, + 145 + ], + "type": "text", + "content": "To validate the effectiveness of the proposed Ant Colony Optimization algorithm for path planning in logistics robots, simulation experiments were designed and compared with classical algorithms in various scenarios. The experimental environment was built on the Gazebo and ROS platforms, simulating real-world warehouse or factory settings with static obstacles (e.g., shelves, walls). The map size was set to" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 312, + 151, + 567, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 151, + 567, + 189 + ], + "spans": [ + { + "bbox": [ + 312, + 151, + 567, + 189 + ], + "type": "inline_equation", + "content": "20\\mathrm{m} \\times 20\\mathrm{m}" + }, + { + "bbox": [ + 312, + 151, + 567, + 189 + ], + "type": "text", + "content": " with a resolution of " + }, + { + "bbox": [ + 312, + 151, + 567, + 189 + ], + "type": "inline_equation", + "content": "0.1\\mathrm{m}" + }, + { + "bbox": [ + 312, + 151, + 567, + 189 + ], + "type": "text", + "content": ", and the number of task points ranged from 5 to 20, each associated with a time window " + }, + { + "bbox": [ + 312, + 151, + 567, + 189 + ], + "type": "inline_equation", + "content": "[t_{\\mathrm{start}}, t_{\\mathrm{end}}]" + }, + { + "bbox": [ + 312, + 151, + 567, + 189 + ], + "type": "text", + "content": " randomly distributed between [5s,30s]." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 311, + 196, + 568, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 196, + 568, + 307 + ], + "spans": [ + { + "bbox": [ + 311, + 196, + 568, + 307 + ], + "type": "text", + "content": "The comparison methods included the Ant Colony Optimization, " + }, + { + "bbox": [ + 311, + 196, + 568, + 307 + ], + "type": "inline_equation", + "content": "\\mathbf{A}^*" + }, + { + "bbox": [ + 311, + 196, + 568, + 307 + ], + "type": "text", + "content": ", and Genetic Algorithm (GA), Particle Swarm Optimization(PSO), RRT*, Hybrid " + }, + { + "bbox": [ + 311, + 196, + 568, + 307 + ], + "type": "inline_equation", + "content": "\\mathbf{A}^*" + }, + { + "bbox": [ + 311, + 196, + 568, + 307 + ], + "type": "text", + "content": ". The evaluation metrics covered path length, task completion time, turning counts, and motion smoothness. Path length was measured by the total travel distance from the start to the end point; task completion time was the total time for the robot to complete all tasks; turning counts were the total number of turns in the path; and motion smoothness was evaluated by the standard deviation of path curvature, with smaller values indicating smoother paths." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 311, + 312, + 567, + 345 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 312, + 567, + 345 + ], + "spans": [ + { + "bbox": [ + 311, + 312, + 567, + 345 + ], + "type": "text", + "content": "The experiments were conducted in a static environment, and each algorithm was run 50 times for each map complexity level, with the average values taken as the results." 
+ } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 319, + 352, + 561, + 430 + ], + "blocks": [ + { + "bbox": [ + 319, + 352, + 561, + 430 + ], + "lines": [ + { + "bbox": [ + 319, + 352, + 561, + 430 + ], + "spans": [ + { + "bbox": [ + 319, + 352, + 561, + 430 + ], + "type": "image", + "image_path": "1b6bbae033c75747881776da18cb58629caeae11583f07f2df418b6359ba6fc5.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 369, + 441, + 510, + 452 + ], + "lines": [ + { + "bbox": [ + 369, + 441, + 510, + 452 + ], + "spans": [ + { + "bbox": [ + 369, + 441, + 510, + 452 + ], + "type": "text", + "content": "Fig 1. The trajectory of the ACO algorithm." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 361, + 465, + 511, + 580 + ], + "blocks": [ + { + "bbox": [ + 361, + 465, + 511, + 580 + ], + "lines": [ + { + "bbox": [ + 361, + 465, + 511, + 580 + ], + "spans": [ + { + "bbox": [ + 361, + 465, + 511, + 580 + ], + "type": "image", + "image_path": "61c7b1f73f9d3371b40b5220c37c1a038601f62b2a688a7ffdca7eb7c36118a3.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 337, + 590, + 541, + 600 + ], + "lines": [ + { + "bbox": [ + 337, + 590, + 541, + 600 + ], + "spans": [ + { + "bbox": [ + 337, + 590, + 541, + 600 + ], + "type": "text", + "content": "Fig 2. Performance of different algorithms after 1000 iterations." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "bbox": [ + 311, + 609, + 568, + 707 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 609, + 568, + 707 + ], + "spans": [ + { + "bbox": [ + 311, + 609, + 568, + 707 + ], + "type": "text", + "content": "Fig. 1 shows the performance of ACO algorithm. 
Since the vehicle turning restriction is added to the optimization target, the vehicle can avoid unnecessary turns. Moreover, when executing different mission objectives, although there are many obstacles between different mission locations, our method can always quickly reach different mission locations without collision. Fig. 2 shows the changes in path length of different intelligent optimization algorithms at 1000 iterations. It can be clearly seen that aco decreases faster and can obtain better results." + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 55, + 67, + 285, + 180 + ], + "blocks": [ + { + "bbox": [ + 85, + 53, + 255, + 62 + ], + "lines": [ + { + "bbox": [ + 85, + 53, + 255, + 62 + ], + "spans": [ + { + "bbox": [ + 85, + 53, + 255, + 62 + ], + "type": "text", + "content": "TABLE I. PERFORMANCE OF PATH PLANNING" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 55, + 67, + 285, + 180 + ], + "lines": [ + { + "bbox": [ + 55, + 67, + 285, + 180 + ], + "spans": [ + { + "bbox": [ + 55, + 67, + 285, + 180 + ], + "type": "table", + "html": "
MethodLength(m)Time(s)Truning(rad)Smoothness(rad)
A*13490.120.40140.2269
GA14553.780.36650.2967
PSO17342.370.59340.1571
RRT*16274.610.48870.2094
Hybrid A*12640.750.33160.192
ACO12010.640.27930.1222
", + "image_path": "a25b7358b01014167c89cfc867e2d4289d78c680e78cc5cb9ac1bd136c6e5ce8.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 41, + 182, + 298, + 260 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 182, + 298, + 260 + ], + "spans": [ + { + "bbox": [ + 41, + 182, + 298, + 260 + ], + "type": "text", + "content": "Table I shows that ACO outperforms the comparison algorithms in terms of path length, task completion time, and number of turns. In both simple and complex maps, ACO achieves the shortest path length, the least path planning time, and significantly reduces the number of turns by optimizing the turn penalty. In addition, ACO has the smallest standard deviation of path curvature, indicating the smoothest path." + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 80, + 284, + 260, + 350 + ], + "blocks": [ + { + "bbox": [ + 80, + 271, + 259, + 280 + ], + "lines": [ + { + "bbox": [ + 80, + 271, + 259, + 280 + ], + "spans": [ + { + "bbox": [ + 80, + 271, + 259, + 280 + ], + "type": "text", + "content": "TABLE II. PERFORMANCE OF TASK COMPLETION" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 80, + 284, + 260, + 350 + ], + "lines": [ + { + "bbox": [ + 80, + 284, + 260, + 350 + ], + "spans": [ + { + "bbox": [ + 80, + 284, + 260, + 350 + ], + "type": "table", + "html": "
MethodTask completion(%)Time(s)
PSO753.37
GA734.61
ACO913.64
", + "image_path": "194254464f3c3d5fbd2efc2e837cdb9d816e725a388ef24bf5f8981525b1e3b4.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 41, + 350, + 298, + 394 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 350, + 298, + 394 + ], + "spans": [ + { + "bbox": [ + 41, + 350, + 298, + 394 + ], + "type": "text", + "content": "Table II shows the task completion of ACO. The results show that, compared with PSO and other methods, ACO is significantly better than other methods in task completion and consumes less time." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 127, + 409, + 205, + 419 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 409, + 205, + 419 + ], + "spans": [ + { + "bbox": [ + 127, + 409, + 205, + 419 + ], + "type": "text", + "content": "IV. CONCLUSIONS" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 41, + 424, + 299, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 424, + 299, + 578 + ], + "spans": [ + { + "bbox": [ + 41, + 424, + 299, + 578 + ], + "type": "text", + "content": "In this paper, we applied the ACO algorithm to path planning, focusing on optimizing four key performance metrics: Path Length, Task Completion Time, Turning Counts, and Motion Smoothness. The proposed method was extensively evaluated and compared with several existing path planning algorithms. Experimental results demonstrate that the ACO-based approach significantly outperforms the compared methods in terms of efficiency, smoothness, and overall performance. The optimized path planning solution not only reduces travel distance and task completion time but also minimizes unnecessary turns and enhances motion smoothness, making it particularly suitable for real-world applications such as logistics vehicles. 
Future work will focus on adapting the algorithm for dynamic environments and integrating it into larger-scale logistics systems." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 143, + 592, + 196, + 601 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 592, + 196, + 601 + ], + "spans": [ + { + "bbox": [ + 143, + 592, + 196, + 601 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 42, + 617, + 297, + 704 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 42, + 617, + 297, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 617, + 297, + 645 + ], + "spans": [ + { + "bbox": [ + 42, + 617, + 297, + 645 + ], + "type": "text", + "content": "[1] Li, G., Liu, C., Wu, L., & Xiao, W. (2023). A mixing algorithm of ACO and ABC for solving path planning of mobile robot. Applied Soft Computing, 148, 110868." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 42, + 647, + 296, + 674 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 647, + 296, + 674 + ], + "spans": [ + { + "bbox": [ + 42, + 647, + 296, + 674 + ], + "type": "text", + "content": "[2] Xing, J., Xing, R., Xue, C., & Luo, D. (2024). Enhancing Link Prediction with Fuzzy Graph Attention Networks and Dynamic Negative Sampling. arXiv preprint arXiv:2411.07482." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 42, + 677, + 296, + 704 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 677, + 296, + 704 + ], + "spans": [ + { + "bbox": [ + 42, + 677, + 296, + 704 + ], + "type": "text", + "content": "[3] Zhou, T., & Wei, W. (2024). Mobile robot path planning based on an improved ACO algorithm and path optimization. Multimedia Tools and Applications, 1-24." 
+ } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 313, + 53, + 567, + 695 + ], + "type": "list", + "angle": 0, + "index": 33, + "blocks": [ + { + "bbox": [ + 313, + 53, + 567, + 90 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 53, + 567, + 90 + ], + "spans": [ + { + "bbox": [ + 313, + 53, + 567, + 90 + ], + "type": "text", + "content": "[4] Chen, L., Su, Y., Zhang, D., Leng, Z., Qi, Y., & Jiang, K. (2021, May). Research on path planning for mobile robots based on improved ACO. In 2021 36th Youth Academic Annual Conference of Chinese Association of Automation (YAC) (pp. 379-383). IEEE." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 314, + 92, + 567, + 119 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 92, + 567, + 119 + ], + "spans": [ + { + "bbox": [ + 314, + 92, + 567, + 119 + ], + "type": "text", + "content": "[5] Ke, Z., Zhou, S., Zhou, Y., Chang, C. H., & Zhang, R. (2025). Detection of ai deepfake and fraud in online payments using gan-based models. arXiv preprint arXiv:2501.07033." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 121, + 567, + 157 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 121, + 567, + 157 + ], + "spans": [ + { + "bbox": [ + 313, + 121, + 567, + 157 + ], + "type": "text", + "content": "[6] Ke, Z., & Yin, Y. (2024, November). Tail risk alert based on conditional autoregressive var by regression quantiles and machine learning algorithms. In 2024 5th International Conference on Artificial Intelligence and Computer Engineering (ICAICE) (pp. 527-532). IEEE." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 314, + 159, + 567, + 186 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 159, + 567, + 186 + ], + "spans": [ + { + "bbox": [ + 314, + 159, + 567, + 186 + ], + "type": "text", + "content": "[7] Xing, J., Luo, D., Cheng, Q., Xue, C., & Xing, R. (2024). 
Multi-view Fuzzy Graph Attention Networks for Enhanced Graph Learning. arXiv preprint arXiv:2412.17271." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 314, + 189, + 567, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 189, + 567, + 224 + ], + "spans": [ + { + "bbox": [ + 314, + 189, + 567, + 224 + ], + "type": "text", + "content": "[8] Zhao, J., & Penn, G. (2025, January). Inside-Outside Algorithm for Probabilistic Product-Free Lambek Categorical Grammar. In Proceedings of the 31st International Conference on Computational Linguistics (pp. 8295-8303)." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 314, + 227, + 567, + 263 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 227, + 567, + 263 + ], + "spans": [ + { + "bbox": [ + 314, + 227, + 567, + 263 + ], + "type": "text", + "content": "[9] He, L., Ka, D. H., Ehtesham-Ul-Haque, M., Billah, S. M., & Tehranchi, F. (2023, December). Cognitive models for abacus gesture learning. In Proceedings of the Annual Meeting of the Cognitive Science Society (Vol. 46)." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 314, + 266, + 567, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 266, + 567, + 293 + ], + "spans": [ + { + "bbox": [ + 314, + 266, + 567, + 293 + ], + "type": "text", + "content": "[10] Zhao, J., & Penn, G. (2024, November). LLM-supertagger: Categorical Grammar Supertagging via Large Language Models. In Findings of the Association for Computational Linguistics: EMNLP 2024 (pp. 697-705)." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 314, + 296, + 567, + 314 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 296, + 567, + 314 + ], + "spans": [ + { + "bbox": [ + 314, + 296, + 567, + 314 + ], + "type": "text", + "content": "[11] Li, G. C., He, L., & Fleming, L. (2023). Philanthropic supported innovation: trends, areas, and impact. 
Scientometrics, 128(10), 5507-5520" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 314, + 316, + 567, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 316, + 567, + 352 + ], + "spans": [ + { + "bbox": [ + 314, + 316, + 567, + 352 + ], + "type": "text", + "content": "[12] Weng, Y., & Wu, J. (2024). Fortifying the global data fortress: a multidimensional examination of cyber security indexes and data protection measures across 193 nations. International Journal of Frontiers in Engineering Technology, 6(2), 13-28." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 314, + 354, + 567, + 382 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 354, + 567, + 382 + ], + "spans": [ + { + "bbox": [ + 314, + 354, + 567, + 382 + ], + "type": "text", + "content": "[13] Ji, Y., Ma, W., Sivarajkumar, S., Zhang, H., Sadhu, E. M., Li, Z., ... & Wang, Y. (2024). Mitigating the risk of health inequity exacerbated by large language models. arXiv preprint arXiv:2410.05180." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 314, + 384, + 567, + 411 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 384, + 567, + 411 + ], + "spans": [ + { + "bbox": [ + 314, + 384, + 567, + 411 + ], + "type": "text", + "content": "[14] Hu, W., Hu, Y., Stas, M., & Farrell, J. A. (2024). Optimization-based outlier accommodation for tightly coupled rtk-aided inertial navigation systems in urban environments. arXiv preprint arXiv:2407.13912." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 314, + 414, + 567, + 450 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 414, + 567, + 450 + ], + "spans": [ + { + "bbox": [ + 314, + 414, + 567, + 450 + ], + "type": "text", + "content": "[15] Ji, Y., Li, Z., Meng, R., Sivarajkumar, S., Wang, Y., Yu, Z., ... & He, D. (2024). 
Rag-rlrc-laysum at biolaysum: Integrating retrieval-augmented generation and readability control for layman summarization of biomedical texts. arXiv preprint arXiv:2405.13179." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 314, + 453, + 567, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 453, + 567, + 479 + ], + "spans": [ + { + "bbox": [ + 314, + 453, + 567, + 479 + ], + "type": "text", + "content": "[16] Hu, W., Neupane, A., & Farrell, J. A. (2022). Using PPP information to implement a global real-time virtual network DGNSS approach. IEEE Transactions on Vehicular Technology, 71(10), 10337-10349." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 314, + 482, + 567, + 517 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 482, + 567, + 517 + ], + "spans": [ + { + "bbox": [ + 314, + 482, + 567, + 517 + ], + "type": "text", + "content": "[17] Dan, H. C., Huang, Z., Lu, B., & Li, M. (2024). Image-driven prediction system: Automatic extraction of aggregate gradation of pavement core samples integrating deep learning and interactive image processing framework. Construction and Building Materials, 453, 139056." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 314, + 520, + 567, + 538 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 520, + 567, + 538 + ], + "spans": [ + { + "bbox": [ + 314, + 520, + 567, + 538 + ], + "type": "text", + "content": "[18] Ding, T., & Xiang, D. (2024). Irregularity Inspection using Neural Radiance Field. arXiv preprint arXiv:2408.11251." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 314, + 541, + 567, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 541, + 567, + 568 + ], + "spans": [ + { + "bbox": [ + 314, + 541, + 567, + 568 + ], + "type": "text", + "content": "[19] Qiao, G., Liu, G., Poupart, P., & Xu, Z. (2023). 
Multi-modal inverse constrained reinforcement learning from a mixture of demonstrations. Advances in Neural Information Processing Systems, 36, 60384-60396." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 314, + 571, + 567, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 571, + 567, + 597 + ], + "spans": [ + { + "bbox": [ + 314, + 571, + 567, + 597 + ], + "type": "text", + "content": "[20] Li, Z., Wang, B., & Chen, Y. (2024). Knowledge Graph Embedding and Few-Shot Relational Learning Methods for Digital Assets in USA. Journal of Industrial Engineering and Applied Science, 2(5), 10-18." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 314, + 600, + 567, + 627 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 600, + 567, + 627 + ], + "spans": [ + { + "bbox": [ + 314, + 600, + 567, + 627 + ], + "type": "text", + "content": "[21] Dan, H. C., Lu, B., & Li, M. (2024). Evaluation of asphalt pavement texture using multiview stereo reconstruction based on deep learning. Construction and Building Materials, 412, 134837." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 314, + 630, + 567, + 657 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 630, + 567, + 657 + ], + "spans": [ + { + "bbox": [ + 314, + 630, + 567, + 657 + ], + "type": "text", + "content": "[22] Li, Z., Bookbinder, J. H., & Elhedhli, S. (2012). Optimal shipment decisions for an airfreight forwarder: Formulation and solution methods. Transportation Research Part C: Emerging Technologies, 21(1), 17-30." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 314, + 659, + 567, + 695 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 659, + 567, + 695 + ], + "spans": [ + { + "bbox": [ + 314, + 659, + 567, + 695 + ], + "type": "text", + "content": "[23] Qiao, G., Jiang, H., & Min, Y. (2022, May). 
Research on Vehicle Distance Recognition System Based on Machine Learning and OpenCV. In 2022 IEEE 2nd International Conference on Electronic Technology, Communication and Information (ICETCI) (pp. 334-337). IEEE." + } + ] + } + ], + "index": 32 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07983/c79e9722-5ec3-4bab-a91d-f1c817c5af43_content_list.json b/data/2025/2504_07xxx/2504.07983/c79e9722-5ec3-4bab-a91d-f1c817c5af43_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..e634a6e3f9954bd25e0bbf553d28a361347fd6d6 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07983/c79e9722-5ec3-4bab-a91d-f1c817c5af43_content_list.json @@ -0,0 +1,988 @@ +[ + { + "type": "text", + "text": "Psychological Health Knowledge-Enhanced LLM-based Social Network Crisis Intervention Text Transfer Recognition Method", + "text_level": 1, + "bbox": [ + 112, + 114, + 736, + 156 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Shurui Wu *", + "bbox": [ + 112, + 165, + 191, + 178 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Weill Cornell Medicine, New York City, NY, USA, shuruiwu215@gmail.com", + "bbox": [ + 112, + 189, + 583, + 205 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Xinyi Huang", + "bbox": [ + 112, + 214, + 189, + 227 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "University of Chicago, Chicago, IL, USA, bellaxinyihuang@gmail.com", + "bbox": [ + 112, + 238, + 542, + 252 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Dingxin Lu", + "bbox": [ + 112, + 260, + 181, + 273 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Icahn School of Medicine at Mount Sinai, New York, NY, USA, sydneylu1998@gmail.com", + "bbox": [ + 112, + 287, + 666, + 301 + ], + "page_idx": 0 + }, + { + "type": "text", + 
"text": "Abstract", + "text_level": 1, + "bbox": [ + 112, + 316, + 176, + 329 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "As the prevalence of mental health crises increases on social media platforms, how to effectively identify and deter potential harms has emerged as an urgent problem. To improve the detection ability of crisis-related content in social networks, this study proposes a large language model (LLM) text transfer recognition method for social network crisis intervention based on the enhancement of mental health knowledge that integrates mental health professional knowledge and transfer learning technology. We introduce a multi-level framework that employs transfer learning on a large language model BERT and integrates domain mental health knowledge, sentiment analysis as well as behavior prediction modeling techniques. This approach proposes a mental health annotation tool trained on social media datasets from crisis events, helping a large language model find potential language cues and then determine the presence of a psychological crisis and crisis acts. 
Experimental results indicate that the proposed model is superior to the traditional method in crisis detection accuracy, and demonstrate a greater sensitivity to underlying differences in context and emotion.", + "bbox": [ + 111, + 344, + 838, + 511 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "CCS CONCEPTS", + "bbox": [ + 112, + 521, + 215, + 534 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "- Applied computing $\\sim$ Life and medical sciences $\\sim$ Health care information systems", + "bbox": [ + 114, + 547, + 666, + 561 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Keywords", + "text_level": 1, + "bbox": [ + 112, + 573, + 187, + 585 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Psychological Health Knowledge, Large Language Models (LLMs), Crisis Intervention, Text Transfer Recognition, Transfer Learning", + "bbox": [ + 111, + 599, + 836, + 631 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 112, + 651, + 235, + 662 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "At present, mental health issues are on the rise worldwide due to the proliferation of social media and internet-based platforms, a phenomenon extensively documented in recent studies on online emotional and psychological support during public health crises [18]. Over the last decade the explosive growth of social platforms like Facebook, Twitter, Instagram and TikTok has resulted in increased sharing of personal lives and emotional devastation on those platforms. It has helped many people not just find psychological comfort through social support, but also made mental health issues a public issue. 
According to the World Health Organization (WHO), mental health disorders have been one of the most important public health problems in the world, especially among young people and social media users, and the increase rate of mental health problems are significantly greater than", + "bbox": [ + 111, + 671, + 838, + 806 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "ACM-1", + "bbox": [ + 429, + 47, + 517, + 68 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "other groups. Such as mental health problems, including depression, anxiety, suicidal tendency, etc [1]. Overall discussion in the social platform is higher, and the early symptoms are more likely to be recessive and hidden, which also increases the difficulty of timely intervention and effective saving.", + "bbox": [ + 111, + 113, + 833, + 162 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Feelings expressed in public, and especially in social networking, are still part of the changing trend of emotion in modern society. On an online social network, users can share their thoughts with any audience from the anonymity of his or her room, while in conventional face-to-face communication, users will be confronted with the expression of his or her counterpart and the tone of voice, etc., the core of which can be ignored, and the psychological distress of the victim. But this anonymity and virtuality is also what makes people so concealed yet complicating their experiences of psychological pain [2]. 
Many requests for help, crisis signals are camouflaged within confusing textual and nonverbal cues (mood swings, tone changes, rupture and contradiction in self-presentation, and other) That makes it especially challenging for the automation of potential crisis identification.", + "bbox": [ + 111, + 165, + 834, + 297 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Expressions in social networks cover broader range of data such as text, images, videos, audio, and other forms of media. Yet text is by far the primary medium of communication throughout all of this, and in textual content including blogs, tweets, comments, and etc. Users can express their emotion, life challenges, and beliefs detailed. However, texts from social networks tend to be noisy and ambiguous. In many of these cases emotions are expressed indirectly, through subtle wording or humor, irony other expressions which makes it hard to gain the polarity, intensity, and shifting of emotions [3]. Furthermore, the emotions that are reflected on social networks by users on the platform are not necessarily consistent with the emotions in reality, and are susceptible to multifactor influences such as socio-cultural background, personal expression habits and platform use preferences, which increase the difficulty and complexity of emotion recognition and understanding [4].", + "bbox": [ + 111, + 301, + 834, + 450 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "As a result, social network crisis identification mode has many challenges in existing text analysis technology. There are existing methods for sentiment analysis and crisis recognition which mainly focus on the use of simple keyword matching or basic sentiment classification. These techniques largely miss the richness of emotional traits encoded in language and do not capture the nuances of implicit and complex mental health signals. 
For instance, some crisis behaviors like depression or suicidal tendencies might appear as slight changes in affect, subtle changes in language, or the user shutting down and avoiding interaction with them [5]. In traditional text analytics approaches, these nuances are often lost, which means we don't detect some signals of a crisis.", + "bbox": [ + 111, + 454, + 834, + 571 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Moreover, despite certain advances in the current crisis intervention system to some degree, most of them were still based on shallow sentiment analysis or keyword-matching methods. And while sentiment analysis techniques can determine the emotional orientation (positive and negative emotions) of text, they generally do not take into account the temporal variation of emotions, contextual differences, and individual variations (situational expressions) in emotional expression. For example, while users pretend to be happy when they are actually sad and inject humor or sarcasm into their expressions, traditional sentiment analysis algorithms very often have a poor accuracy in this case [6]. Existing methods of sentiment analysis are usually far from sufficient when it comes to more complex scenarios like the interlacing of many different emotional states. 
In judging complex and dynamic emotional and psychological crises in the social network, the existing system cannot be better understood, and there is a lack of sufficient in-depth understanding and situational judgment ability which leads to their effect of intervention can only fit into, the effect is far less than expected [7].", + "bbox": [ + 111, + 574, + 834, + 758 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Based on all the above consideration, through the integration of mental health knowledge for the social network text transfer recognition method of crisis intervention, this paper puts forward a transfer recognition method for crisis intervention text based on large language model (LLM). It is based on a combination of domain knowledge", + "bbox": [ + 111, + 761, + 834, + 810 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "ACM-2", + "bbox": [ + 429, + 47, + 517, + 68 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "in the field of mental health and transfer learning capabilities to improve early detection of a crisis signal in social networks using a multi-level framework. It can not only realize the sensitive mining of emotional fluctuation in social network text, but also through transfer learning technology, transfer the model to the language dynamic environment, to better realize the subtle emotional fluctuation in environmental perception text, language implicit representation, tongue difference in individual social network in the crisis recognition process.", + "bbox": [ + 111, + 113, + 834, + 196 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 RELATED WORK", + "text_level": 1, + "bbox": [ + 112, + 215, + 235, + 229 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Oktavianus and Lin [8] add to this literature by examining migrant domestic workers who seek social support through social networks during a public health crisis. 
Studies on the emotional support and coping strategies of temporary migrants in crises such as the pandemic. The study explores how immigrant populations use social media to create emotional support and increase social connections, by analysing storytelling and community interactions across the social media landscape. Indeed, the research points out that, during times of crisis, social media serves to provide mental health support to vulnerable populations, as cyberspace can serve as a cauldron to find belongingness and security.", + "bbox": [ + 111, + 237, + 836, + 354 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The study by Bolhuis and Wijk [9] explores the use of social media and mobile devices to assist with asylum processes in five European countries, including the review of immigration applications. The research highlights how migrants and asylum seekers seek to reach the outside world through social media platforms and mobile devices in the digital age, and examines how immigration authorities screen – by checking social media activity and content. In the case of migration management and crisis response, the study draws attention to the critical need for a media tool to be integrated into government life as well as highlight how social media be utilized as a bi-directional information shipper during times of panic or crisis — not only within the public domain of health crises or emergencies, but also in the wider context of social connection and interest.", + "bbox": [ + 111, + 356, + 836, + 489 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Furthermore, Lv et al. [10] said that big data has the potential to help in the crisis management of the COVID-19 pandemic. They explain that their study shows the use of social media as a source of big data during the pandemic, which will help with identifying the source of the infection and examining the emotional response surrounding events. 
The researchers emphasize the importance of social media as an essential tool to shape public health in terms of social media, text mining, predicting analytics, and social network analytics. The study demonstrates that big data technology can be applied in crisis management particularly in global health crises (epidemics) to ensure predictive outcomes of population behaviour, emotional trends and health response.", + "bbox": [ + 111, + 492, + 836, + 609 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Jin and Spence analyzed Twitter [11] tweets using the CERC (Crisis and Emergency Risk Communication) model. Using thematic modeling, the study explores social media's dissemination of information as well as its organization of crisis communication and public reactions to crisis management in the wake of a disaster. Through this analysis of tweets surrounding Harvey Maria, it showcases the various affairs of social media users on how people voice their emotions, fears, anger and confusion, all the while noting the different matters that platforms serve during crises, including information dissemination, emotional expression and public emotion management.", + "bbox": [ + 111, + 612, + 836, + 712 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Wildemann et al. [12] applied large-scale social media text analytics to discover movement changes by applying sentiment analysis techniques to unveil the intricacy of emotional standpoints on migration-related narratives presented on social media. This is very consistent with research into interventions for mental health crises as immigrant groups have their mental health affected in public crises disproportionately. Changes in public emotion and attitudes on social media might indicate potential mental health risks. 
That is, negative emotions such as anxiety, fear, and anger may be related to a negative attitude toward refugees expressed by social media users, which may", + "bbox": [ + 111, + 714, + 836, + 814 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "ACM-3", + "bbox": [ + 429, + 47, + 517, + 68 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "indicate an emotional crisis in social platforms. This emotional fluctuation can hinder in performing timely mental health intervention which is worth considering, so that we can apply sentiment analysis techniques to capture these signals of hiding mental crises. Recent advancements in machine learning algorithms, particularly deep reinforcement learning methods, have provided promising directions for enhancing social media crisis recognition and intervention methods [19].", + "bbox": [ + 111, + 113, + 834, + 196 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "At the same time, recent works have highlighted the potential of Large Language Models (LLMs) in enhancing user intent modeling and adaptive recommendation, particularly in high-noise and emotionally charged environments like social media. 
Studies have shown that LLM-based frameworks can dynamically model user intent and effectively process unstructured data such as comments and posts—capabilities that are especially relevant for understanding psychological distress signals in crisis intervention tasks [20-21].", + "bbox": [ + 111, + 199, + 836, + 282 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3 METHODOLOGIES", + "text_level": 1, + "bbox": [ + 112, + 301, + 246, + 313 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1 Sentiment Analysis", + "text_level": 1, + "bbox": [ + 112, + 330, + 264, + 344 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The core purpose of the Mental Health Knowledge module is to effectively integrate expertise in the field of mental health into large language models in order to enhance the sensitivity of the models to crisis-related emotions and behaviors. We complement the generic semantic representation of the BERT model by introducing mental health embedding vectors, for which we innovatively propose the following Equation 1.", + "bbox": [ + 111, + 351, + 834, + 416 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nE _ {t o t a l} \\left(x _ {i}\\right) = E _ {B E R T} \\left(x _ {i}\\right) + \\lambda_ {1} \\cdot \\text {s o f t m a x} \\left(W _ {p h} \\cdot E _ {p h} \\left(x _ {i}\\right)\\right), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 295, + 417, + 831, + 436 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Where $E_{BERT}(x_i)$ is the word vector generated by BERT, $E_{ph}(x_i)$ is the word embedding of mental health knowledge, $W_{ph}$ is the mapping matrix, and $\\lambda_1$ is the weight hyperparameter of adjusting the embedding of mental health knowledge. 
The softmax operation in the formula aims to normalize the mental health knowledge vector in order to better integrate it with the original BERT embedding vector and ensure that the knowledge in the mental health domain occupies an appropriate proportion in the enhanced vector representation. The innovation of this method is that we not only combine mental health knowledge into BERT through linear mapping, but also normalize it through softmax operation, which can more precisely control the influence of knowledge embedding, and make the identification of sentiment analysis and crisis behavior more sensitive and accurate.", + "bbox": [ + 111, + 439, + 834, + 573 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The goal of the sentiment analysis module is to identify potential crisis sentiments through an in-depth analysis of sentiment fluctuations in the text. In order to enhance the performance of sentiment analysis, we propose a Multidimensional sentiment Convolutional Network (MSCN), which can not only identify the polarity of sentiment, but also capture the amplitude and frequency of sentiment changes. We use the combination of Convolutional Neural Network (CNN) and LSTM to propose the following Equation 2:", + "bbox": [ + 111, + 577, + 834, + 657 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nS (X) = L S T M \\left(C N N (X)\\right) = \\sum_ {t = 1} ^ {n} C _ {t} \\cdot R e L U \\left(W _ {s} \\cdot E _ {t}\\right), \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 313, + 657, + 833, + 696 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $C_t$ is the affective convolutional kernel, $W_s$ is the weight matrix of the convolutional layer, and $ReLU(\\cdot)$ is the activation function, and $E_t$ is the word vector. Here, we extract local sentiment features through convolution operations, and then model the sentiment information globally through LSTM to capture the temporal changes of sentiment. 
The innovation of using convolutional layers lies in its ability to effectively identify local features of emotions (such as emotional fluctuations between words), which is especially important for crisis recognition.", + "bbox": [ + 111, + 699, + 834, + 782 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In order to further improve the accuracy of sentiment analysis, we add an emotion adaptive module to the output sentiment representation, which weights the sentiment intensity according to the context, as shown in Equation 3:", + "bbox": [ + 111, + 785, + 834, + 816 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "ACM-4", + "bbox": [ + 429, + 47, + 517, + 66 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nS _ {\\text {a d a p t i v e}} = S (X) \\odot A (X), \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 388, + 112, + 833, + 128 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\odot$ represents element-by-element multiplication, and $A(X)$ is the adaptive weighted vector of affective intensity, which is calculated as shown in Equation 4:", + "bbox": [ + 111, + 131, + 833, + 161 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nA (X) = \\operatorname {s o f t m a x} \\left(W _ {a} \\cdot S (X)\\right). 
\\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 377, + 162, + 833, + 179 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The innovation of the adaptive weighting mechanism is that it dynamically adjusts the weight of emotion intensity through the softmax function, so that the emotion intensity can be more reasonably explained in different contexts, so as to improve the sensitivity of crisis emotion.", + "bbox": [ + 111, + 181, + 834, + 229 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2 Behavior Prediction and Transfer Learning", + "text_level": 1, + "bbox": [ + 112, + 244, + 405, + 258 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The behavior prediction module is used to predict potential crisis behaviors (such as suicide, violence, etc.) based on the user's social network behavior. To this end, we propose a behavior prediction model based on Graph Neural Network (GNN). Different from the traditional Graph Convolution Network (GCN), we introduce a Hierarchical Graph Convolution (HGC) strategy, which enables the network to capture the relationship between nodes (users) in the social network at different levels.", + "bbox": [ + 111, + 266, + 834, + 348 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "First, we define the adjacency matrix of the social network as $A$ , and construct the propagation formula of the hierarchical graph convolution, as shown in Equation 5:", + "bbox": [ + 111, + 351, + 833, + 382 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nH _ {v} ^ {(k + 1)} = \\sigma \\left(A _ {v} ^ {(k)} \\cdot H _ {v} ^ {(k)} \\cdot W _ {v} ^ {(k)} + B _ {v} ^ {(k)}\\right), \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 349, + 382, + 833, + 402 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $H_{v}^{(k)}$ represents the node of the $k$ -th layer, $A_{v}^{(k)}$ is the adjacency matrix of the $k$ -th layer, $W_{v}^{(k)}$ is the weight matrix of the 
convolutional layer, $B_{v}^{(k)}$ is the bias term, and $\\sigma$ is the activation function. Different from traditional GCN, we can capture more precise behavior patterns in different social network layers by introducing a hierarchical propagation mechanism to control the range of information transmission in each layer. Prior research has demonstrated the effectiveness of hierarchical propagation mechanisms in capturing complex patterns of social behaviors [13-14].", + "bbox": [ + 111, + 404, + 834, + 505 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We further propose a Behavior Prediction Reinforcement Module (BPRM) to adjust the weight of behavior prediction through reinforcement learning strategies. Specifically, we set up a reward function to optimize the accuracy of behavioral predictions, as shown in Equation 6:", + "bbox": [ + 111, + 508, + 834, + 556 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nR \\left(H _ {v} ^ {(k)}\\right) = \\lambda_ {1} \\cdot \\text {P r e c i s i o n} + \\lambda_ {2} \\cdot \\text {R e c a l l} + \\lambda_ {3} \\cdot F 1 - S c o r e. \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 287, + 556, + 833, + 575 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The reinforcement learning module maximizes the overall prediction accuracy by dynamically adjusting the weights of the graph convolutional layer, thereby improving the prediction ability of crisis behavior. Similar dynamic adjustment methods have shown effectiveness in recent literature [15].", + "bbox": [ + 111, + 579, + 834, + 628 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In the framework of transfer learning, we jointly train the pre-trained model of BERT with the above modules of mental health knowledge, sentiment analysis, and behavior prediction. By fine-tuning the network parameters, we were able to adapt the model to the linguistic dynamics in a particular social network. 
To this end, we propose a multi-task loss function, which combines the categorical loss, emotion-predicted loss, and behavior-predicted loss of crisis content, as shown in Equation 7:", + "bbox": [ + 111, + 631, + 834, + 713 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = \\lambda_ {1} \\cdot \\mathcal {L} _ {\\text {c l a s s i f i c a t i o n}} + \\lambda_ {2} \\cdot \\mathcal {L} _ {\\text {e m o t i o n}} + \\lambda_ {3} \\cdot \\mathcal {L} _ {\\text {b e h a v i o r}} + \\lambda_ {4} \\cdot \\mathcal {L} _ {\\text {r e i n f o r c e m e n t}}, \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 236, + 715, + 833, + 729 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "A multi-level framework is proposed based on the existing model of BERT, which integrates sentiment analysis, behavior prediction, and crisis intervention techniques, effectively identifying crisis signals from noisy social network data. Recent studies have validated the effectiveness of transfer learning for recognizing crisis signals in social networks [16][17]. Other relevant works have also explored the fusion of structured and unstructured EHR", + "bbox": [ + 111, + 733, + 834, + 799 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "ACM-5", + "bbox": [ + 429, + 47, + 517, + 66 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "data for psychological prediction [24], real-time optimization in recommendation and intervention settings [22-23], and AI-based risk assessment frameworks with high adaptability to emotional shifts [25].", + "bbox": [ + 109, + 113, + 831, + 143 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Our transfer learning approach significantly enhances model robustness and adaptability to noisy environments, which has been similarly demonstrated in other applications [15]. 
The innovation of this loss function is the introduction of $\\mathcal{L}_{\\text {reinforcement }}$ , the loss term of reinforcement learning, to optimize the training process of the behavior prediction module. Through the strategy of multi-task learning, the model is able to balance the losses of different tasks during the training process, so as to achieve more accurate crisis identification.", + "bbox": [ + 109, + 147, + 834, + 229 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4 EXPERIMENTS", + "text_level": 1, + "bbox": [ + 112, + 250, + 223, + 263 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1 Experimental Setup", + "text_level": 1, + "bbox": [ + 112, + 280, + 267, + 292 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In this experiment, we employed the Crisis Text Line dataset which was modeled after a real mental health hotline and included tens of thousands of conversations between users and counselors that encompassed different psychological crisis events. Data has the characteristics of diversity and complexity of emotional expression, real-time and dynamic changes, text length difference, and hidden crisis signal. The data was preprocessed including text cleaning, sentiment annotation, and segmentation before fitting into the experimental model. 
The dataset offers essential emotional data for the automatic recognition of and intervention in mental health crises, and it also poses significant challenges, including how to identify implicit emotions and crisis behaviours and how to extract effective emotional cues from long texts and multi-round conversations.
4) MML (Multimodal Learning): uses multimodal learning methods, combined using multi-source data such as text and images to improve the recognition accuracy, but the requirements for computing resource and data are higher.", + "bbox": [ + 109, + 436, + 834, + 623 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2 Experimental Analysis", + "text_level": 1, + "bbox": [ + 112, + 637, + 282, + 651 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The performance of various methods in successfully discovering potential psychological crises in the social network circumstance can be evaluated by one feature index, namely the Crisis Detection Rate (CDR). The above dataset is trained from 0 to 10,000 and the memory is set to 7 days. As the results in Figure 1 show, with the increase of the training period, the recognition ability of VADER and Bi-LSTM is improved, but overall the performance remains relatively flat, and finally tends to stabilize, and the performance is limited. The recognition rate of BERT and MML models is effective, and training is continuously improved to gradually enhance the ability to identify the crisis. Since you are based ons datasets, you can not train on data after October 2023.", + "bbox": [ + 109, + 657, + 834, + 776 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "ACM-6", + "bbox": [ + 431, + 47, + 517, + 68 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/6bc7ad91c558637c38c18c9ed4ccddb64b153c243b341c8563c758d9ef1c23c8.jpg", + "image_caption": [ + "Figure 1. Crisis Detection Rate Comparison." + ], + "image_footnote": [], + "bbox": [ + 240, + 109, + 704, + 324 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Emotional Stability quantified to what extent the model fluctuates while processing social network text. 
The higher the emotional stability, the better the model stabilizes emotional fluctuations arising from external factors or occasions, so that changes in user emotions can be represented more accurately and explained more effectively as stable patterns of emotion.
+ ], + "image_footnote": [], + "bbox": [ + 241, + 111, + 704, + 324 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Model performance in terms of each affective intensity (mild, moderate and strong affective ranges) is evaluated using the emotion depth distribution.", + "bbox": [ + 112, + 366, + 834, + 397 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/0a1b97c4a657b21140220d7ce2614e20cdf8fced85bb72b5d0055cd0863388e1.jpg", + "image_caption": [ + "Figure 3. Emotional Depth Distribution Comparison Across Models." + ], + "image_footnote": [], + "bbox": [ + 261, + 406, + 687, + 599 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Through above Figure 3, we can intuitively see the differences in methods for different emotion recognition capabilities in the base of sentiment depth. The experimental results indicate that the Ours method performs significantly better than the other methods in the recognition of slight emotion intervals, which may be attributed to the use of a more fine-grained sentiment analysis mechanism that can capture the potential slight emotion signals in the social networks better.", + "bbox": [ + 111, + 642, + 834, + 726 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5 CONCLUSION", + "text_level": 1, + "bbox": [ + 112, + 744, + 220, + 758 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this paper, we focus on the intervention of crisis on social networks, and propose a method of text transfer recognition based on social network crisis intervention, based on the knowledge enhancement of mental health which driven by large language model and this is significantly improved the detection ability of the potential psychological crisis on social network by the combination of the advanced technologies of transfer learning and", + "bbox": [ + 111, + 763, + 834, + 821 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "ACM-8", + "bbox": [ + 429, + 47, + 517, + 66 + ], + 
"page_idx": 7 + }, + { + "type": "text", + "text": "combining with the mental health field of special knowledge. A multi-level framework is proposed based on the existing model of BERT, which integrates sentiment analysis, behavior prediction and crisis intervention techniques, and effectively identifies the mild, moderate and strong emotional depth of potential crisis signals in the social media. The experimental results indicate that the Ours method outperformed traditional sentiment analysis models in critical indicators and performed well on the recognition of minor emotions, reflecting that it has flexibility and effectiveness under the flexible variation of emotions. They can also be introduced with various knowledge in the fields of mental health and multimodal information to complete the model, and will also optimize the modeling structure of the model.", + "bbox": [ + 109, + 109, + 834, + 224 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 114, + 244, + 194, + 256 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Bosco, Cristina, et al. \"Detecting racial stereotypes: An Italian social media corpus where psychology meets NLP.\" Information Processing & Management 60.1 (2023): 103118.", + "[2] Pang, Patrick Cheong-Iao, et al. \"Engagement of government social media on Facebook during the COVID-19 pandemic in Macao.\" International Journal of Environmental Research and Public Health 18.7 (2021): 3508.", + "[3] Muhammed T, Sadiq, and Saji K. Mathew. \"The disaster of misinformation: a review of research in social media.\" International journal of data science and analytics 13.4 (2022): 271-285.", + "[4] Balaji, T. K., Chandra Sekhara Rao Annavarapu, and Annushree Bablani. \"Machine learning algorithms for social media analysis: A survey.\" Computer Science Review 40 (2021): 100395.", + "[5] Weyrich, Philippe, et al. 
\"Using serious games to evaluate the potential of social media information in early warning disaster management.\" International journal of disaster risk reduction 56 (2021): 102053.", + "[6] Chen, Long, Jianguo Chen, and Chunhe Xia. \"Social network behavior and public opinion manipulation.\" Journal of Information Security and Applications 64 (2022): 103060.", + "[7] Drouhot, Lucas G., et al. \"Computational approaches to migration and integration research: promises and challenges.\" Journal of Ethnic and Migration Studies 49.2 (2023): 389-407.", + "[8] Oktavianus, Jeffry, and Wan-Ying Lin. \"Soliciting social support from migrant domestic workers' connections to storytelling networks during a public health crisis.\" Health Communication 38.6 (2023): 1179-1188.", + "[9] Bolhuis, Maarten P., and Joris Van Wijk. \"Seeking asylum in the digital era: Social-media and mobile-device vetting in asylum procedures in five European countries.\" Journal of refugee studies 34.2 (2021): 1595-1617.", + "[10] Lv, Yang, et al. \"Big data driven COVID-19 pandemic crisis management: potential approach for global health.\" Archives of Medical Science: AMS 17.3 (2021): 829.", + "[11] Jin, Xianlin, and Patric R. Spence. \"Understanding crisis communication on social media with CERC: Topic model analysis of tweets about Hurricane Maria.\" Journal of Risk Research 24.10 (2021): 1266-1287.", + "[12] Wildemann, Sergej, Claudia Niederée, and Erick Elejalde. \"Migration Reframed? A multilingual analysis on the stance shift in Europe during the Ukrainian crisis.\" Proceedings of the ACM Web Conference 2023. 2023.", + "[13] Li, K., Wang, J., Wu, X., Peng, X., Chang, R., Deng, X., Kang, Y., Yang, Y., Ni, F., & Hong, B. \"Optimizing automated picking systems in warehouse robots using machine learning.\" arXiv preprint arXiv:2408.16633 (2024).", + "[14] Li, K., Chen, J., Yu, D., Tao, D., Qiu, X., Lian, J., Ji, R., Zhang, S., Wan, Z., Sun, B., et al. 
\"Deep reinforcement learning-based obstacle avoidance for robot movement in warehouse environments.\" Proceedings of the 2024 IEEE 6th International Conference on Civil Aviation Safety and Information Technology (ICCASIT), (2024): 342-348.", + "[15] Li, K., Liu, L., Chen, J., Yu, D., Zhou, X., Li, M., Wang, C., & Li, Z. \"Research on reinforcement learning based warehouse robot navigation algorithm in complex warehouse layout.\" Proceedings of the 2024 6th International Conference on Artificial Intelligence and Computer Applications (ICAICA) (2024): 296-301.", + "[16] Yu, D., Liu, L., Wu, S., Li, K., Wang, C., Xie, J., Chang, R., Wang, Y., Wang, Z., & Ji, R. \"Machine learning optimizes the efficiency of picking and packing in automated warehouse robot systems.\" Proceedings of the 2024 International Conference on Computer Engineering, Network and Digital Communication (CENDC 2024) (2024).", + "[17] Sun, J., Zhang, S., Lian, J., Fu, L., Zhou, Z., & Fan, Z. \"Multimodal Deep Learning for Crisis Intervention.\"" + ], + "bbox": [ + 114, + 261, + 836, + 816 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "ACM-9", + "bbox": [ + 429, + 47, + 517, + 66 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Proceedings of the 2024 IEEE 7th International Conference (2024): 996-1004.", + "[18] Lyu, S. \"Machine Vision-Based Automatic Detection for Electromechanical Equipment.\" Journal of Computer Technology and Applied Mathematics 1.4 (2024): 12-20.", + "[19] Lin, Weikun. \"A Review of Multimodal Interaction Technologies in Virtual Meetings.\" Journal of Computer Technology and Applied Mathematics 1.4 (2024): 60-68.", + "[20] Wildemann, S., Niederée, C., & Elejalde, E. (2023, April). Migration Reframed? A multilingual analysis on the stance shift in Europe during the Ukrainian crisis. In Proceedings of the ACM Web Conference 2023 (pp. 2754-2764).", + "[21] Xu, X., Xu, Z., Yu, P., & Wang, J. (2025). 
Enhancing user intent for recommendation systems via large language models. arXiv preprint arXiv:2501.10871.", + "[22] Yu, P., Xu, Z., Wang, J., & Xu, X. (2025). The application of large language models in recommendation systems. arXiv preprint arXiv:2501.02178.", + "[23] Feng, H., & Gao, Y. (2025). Ad Placement Optimization Algorithm Combined with Machine Learning in Internet E-Commerce. Preprints. https://doi.org/10.20944/preprints202502.2167.v1", + "[24] Wu, S., & Huang, X. (2025). Psychological Health Prediction Based on the Fusion of Structured and Unstructured Data in EHR: a Case Study of Low-Income Populations. Preprints. https://doi.org/10.20944/preprints202502.2104.v1", + "[25] Wang, Z., Zhang, Q., & Cheng, Z. (2025). Application of AI in Real-time Credit Risk Detection. Preprints. https://doi.org/10.20944/preprints202502.1546.v1" + ], + "bbox": [ + 114, + 109, + 834, + 393 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "ACM-10", + "bbox": [ + 421, + 47, + 524, + 68 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07983/c79e9722-5ec3-4bab-a91d-f1c817c5af43_model.json b/data/2025/2504_07xxx/2504.07983/c79e9722-5ec3-4bab-a91d-f1c817c5af43_model.json new file mode 100644 index 0000000000000000000000000000000000000000..40f5e99edc5aef68f1ac5c1cc344636620a387ee --- /dev/null +++ b/data/2025/2504_07xxx/2504.07983/c79e9722-5ec3-4bab-a91d-f1c817c5af43_model.json @@ -0,0 +1,1265 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.431, + 0.049, + 0.518, + 0.069 + ], + "angle": 0, + "content": "ACM-1" + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.116, + 0.737, + 0.157 + ], + "angle": 0, + "content": "Psychological Health Knowledge-Enhanced LLM-based Social Network Crisis Intervention Text Transfer Recognition Method" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.166, + 0.192, + 0.179 + ], + "angle": 0, + "content": "Shurui Wu *" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.19, + 0.584, + 
0.206 + ], + "angle": 0, + "content": "Weill Cornell Medicine, New York City, NY, USA, shuruiwu215@gmail.com" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.215, + 0.19, + 0.228 + ], + "angle": 0, + "content": "Xinyi Huang" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.239, + 0.543, + 0.253 + ], + "angle": 0, + "content": "University of Chicago, Chicago, IL, USA, bellaxinyihuang@gmail.com" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.261, + 0.182, + 0.274 + ], + "angle": 0, + "content": "Dingxin Lu" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.289, + 0.668, + 0.303 + ], + "angle": 0, + "content": "Icahn School of Medicine at Mount Sinai, New York, NY, USA, sydneylu1998@gmail.com" + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.317, + 0.178, + 0.33 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.345, + 0.839, + 0.512 + ], + "angle": 0, + "content": "As the prevalence of mental health crises increases on social media platforms, how to effectively identify and deter potential harms has emerged as an urgent problem. To improve the detection ability of crisis-related content in social networks, this study proposes a large language model (LLM) text transfer recognition method for social network crisis intervention based on the enhancement of mental health knowledge that integrates mental health professional knowledge and transfer learning technology. We introduce a multi-level framework that employs transfer learning on a large language model BERT and integrates domain mental health knowledge, sentiment analysis as well as behavior prediction modeling techniques. This approach proposes a mental health annotation tool trained on social media datasets from crisis events, helping a large language model find potential language cues and then determine the presence of a psychological crisis and crisis acts. 
Experimental results indicate that the proposed model is superior to the traditional method in crisis detection accuracy, and demonstrate a greater sensitivity to underlying differences in context and emotion." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.522, + 0.216, + 0.535 + ], + "angle": 0, + "content": "CCS CONCEPTS" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.548, + 0.668, + 0.563 + ], + "angle": 0, + "content": "- Applied computing \\(\\sim\\) Life and medical sciences \\(\\sim\\) Health care information systems" + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.574, + 0.188, + 0.587 + ], + "angle": 0, + "content": "Keywords" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.6, + 0.837, + 0.632 + ], + "angle": 0, + "content": "Psychological Health Knowledge, Large Language Models (LLMs), Crisis Intervention, Text Transfer Recognition, Transfer Learning" + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.652, + 0.236, + 0.664 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.672, + 0.839, + 0.807 + ], + "angle": 0, + "content": "At present, mental health issues are on the rise worldwide due to the proliferation of social media and internet-based platforms, a phenomenon extensively documented in recent studies on online emotional and psychological support during public health crises [18]. Over the last decade the explosive growth of social platforms like Facebook, Twitter, Instagram and TikTok has resulted in increased sharing of personal lives and emotional devastation on those platforms. It has helped many people not just find psychological comfort through social support, but also made mental health issues a public issue. 
According to the World Health Organization (WHO), mental health disorders have been one of the most important public health problems in the world, especially among young people and social media users, and the increase rate of mental health problems are significantly greater than" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.431, + 0.049, + 0.518, + 0.069 + ], + "angle": 0, + "content": "ACM-2" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.114, + 0.834, + 0.163 + ], + "angle": 0, + "content": "other groups. Such as mental health problems, including depression, anxiety, suicidal tendency, etc [1]. Overall discussion in the social platform is higher, and the early symptoms are more likely to be recessive and hidden, which also increases the difficulty of timely intervention and effective saving." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.166, + 0.836, + 0.299 + ], + "angle": 0, + "content": "Feelings expressed in public, and especially in social networking, are still part of the changing trend of emotion in modern society. On an online social network, users can share their thoughts with any audience from the anonymity of his or her room, while in conventional face-to-face communication, users will be confronted with the expression of his or her counterpart and the tone of voice, etc., the core of which can be ignored, and the psychological distress of the victim. But this anonymity and virtuality is also what makes people so concealed yet complicating their experiences of psychological pain [2]. Many requests for help, crisis signals are camouflaged within confusing textual and nonverbal cues (mood swings, tone changes, rupture and contradiction in self-presentation, and other) That makes it especially challenging for the automation of potential crisis identification." 
+ }, + { + "type": "text", + "bbox": [ + 0.112, + 0.302, + 0.836, + 0.452 + ], + "angle": 0, + "content": "Expressions in social networks cover broader range of data such as text, images, videos, audio, and other forms of media. Yet text is by far the primary medium of communication throughout all of this, and in textual content including blogs, tweets, comments, and etc. Users can express their emotion, life challenges, and beliefs detailed. However, texts from social networks tend to be noisy and ambiguous. In many of these cases emotions are expressed indirectly, through subtle wording or humor, irony other expressions which makes it hard to gain the polarity, intensity, and shifting of emotions [3]. Furthermore, the emotions that are reflected on social networks by users on the platform are not necessarily consistent with the emotions in reality, and are susceptible to multifactor influences such as socio-cultural background, personal expression habits and platform use preferences, which increase the difficulty and complexity of emotion recognition and understanding [4]." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.455, + 0.836, + 0.572 + ], + "angle": 0, + "content": "As a result, social network crisis identification mode has many challenges in existing text analysis technology. There are existing methods for sentiment analysis and crisis recognition which mainly focus on the use of simple keyword matching or basic sentiment classification. These techniques largely miss the richness of emotional traits encoded in language and do not capture the nuances of implicit and complex mental health signals. For instance, some crisis behaviors like depression or suicidal tendencies might appear as slight changes in affect, subtle changes in language, or the user shutting down and avoiding interaction with them [5]. In traditional text analytics approaches, these nuances are often lost, which means we don't detect some signals of a crisis." 
+ }, + { + "type": "text", + "bbox": [ + 0.112, + 0.575, + 0.836, + 0.76 + ], + "angle": 0, + "content": "Moreover, despite certain advances in the current crisis intervention system to some degree, most of them were still based on shallow sentiment analysis or keyword-matching methods. And while sentiment analysis techniques can determine the emotional orientation (positive and negative emotions) of text, they generally do not take into account the temporal variation of emotions, contextual differences, and individual variations (situational expressions) in emotional expression. For example, while users pretend to be happy when they are actually sad and inject humor or sarcasm into their expressions, traditional sentiment analysis algorithms very often have a poor accuracy in this case [6]. Existing methods of sentiment analysis are usually far from sufficient when it comes to more complex scenarios like the interlacing of many different emotional states. In judging complex and dynamic emotional and psychological crises in the social network, the existing system cannot be better understood, and there is a lack of sufficient in-depth understanding and situational judgment ability which leads to their effect of intervention can only fit into, the effect is far less than expected [7]." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.762, + 0.836, + 0.811 + ], + "angle": 0, + "content": "Based on all the above consideration, through the integration of mental health knowledge for the social network text transfer recognition method of crisis intervention, this paper puts forward a transfer recognition method for crisis intervention text based on large language model (LLM). 
It is based on a combination of domain knowledge" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.431, + 0.049, + 0.518, + 0.069 + ], + "angle": 0, + "content": "ACM-3" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.114, + 0.836, + 0.198 + ], + "angle": 0, + "content": "in the field of mental health and transfer learning capabilities to improve early detection of a crisis signal in social networks using a multi-level framework. It can not only realize the sensitive mining of emotional fluctuation in social network text, but also through transfer learning technology, transfer the model to the language dynamic environment, to better realize the subtle emotional fluctuation in environmental perception text, language implicit representation, tongue difference in individual social network in the crisis recognition process." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.217, + 0.236, + 0.23 + ], + "angle": 0, + "content": "2 RELATED WORK" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.238, + 0.838, + 0.355 + ], + "angle": 0, + "content": "Oktavianus and Lin [8] add to this literature by examining migrant domestic workers who seek social support through social networks during a public health crisis. Studies on the emotional support and coping strategies of temporary migrants in crises such as the pandemic. The study explores how immigrant populations use social media to create emotional support and increase social connections, by analysing storytelling and community interactions across the social media landscape. Indeed, the research points out that, during times of crisis, social media serves to provide mental health support to vulnerable populations, as cyberspace can serve as a cauldron to find belongingness and security." 
+ }, + { + "type": "text", + "bbox": [ + 0.112, + 0.357, + 0.838, + 0.491 + ], + "angle": 0, + "content": "The study by Bolhuis and Wijk [9] explores the use of social media and mobile devices to assist with asylum processes in five European countries, including the review of immigration applications. The research highlights how migrants and asylum seekers seek to reach the outside world through social media platforms and mobile devices in the digital age, and examines how immigration authorities screen – by checking social media activity and content. In the case of migration management and crisis response, the study draws attention to the critical need for a media tool to be integrated into government life as well as highlight how social media be utilized as a bi-directional information shipper during times of panic or crisis — not only within the public domain of health crises or emergencies, but also in the wider context of social connection and interest." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.493, + 0.838, + 0.611 + ], + "angle": 0, + "content": "Furthermore, Lv et al. [10] said that big data has the potential to help in the crisis management of the COVID-19 pandemic. They explain that their study shows the use of social media as a source of big data during the pandemic, which will help with identifying the source of the infection and examining the emotional response surrounding events. The researchers emphasize the importance of social media as an essential tool to shape public health in terms of social media, text mining, predicting analytics, and social network analytics. The study demonstrates that big data technology can be applied in crisis management particularly in global health crises (epidemics) to ensure predictive outcomes of population behaviour, emotional trends and health response." 
+ }, + { + "type": "text", + "bbox": [ + 0.112, + 0.613, + 0.838, + 0.713 + ], + "angle": 0, + "content": "Jin and Spence analyzed Twitter [11] tweets using the CERC (Crisis and Emergency Risk Communication) model. Using thematic modeling, the study explores social media's dissemination of information as well as its organization of crisis communication and public reactions to crisis management in the wake of a disaster. Through this analysis of tweets surrounding Harvey Maria, it showcases the various affairs of social media users on how people voice their emotions, fears, anger and confusion, all the while noting the different matters that platforms serve during crises, including information dissemination, emotional expression and public emotion management." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.715, + 0.838, + 0.815 + ], + "angle": 0, + "content": "Wildemann et al. [12] applied large-scale social media text analytics to discover movement changes by applying sentiment analysis techniques to unveil the intricacy of emotional standpoints on migration-related narratives presented on social media. This is very consistent with research into interventions for mental health crises as immigrant groups have their mental health affected in public crises disproportionately. Changes in public emotion and attitudes on social media might indicate potential mental health risks. That is, negative emotions such as anxiety, fear, and anger may be related to a negative attitude toward refugees expressed by social media users, which may" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.431, + 0.049, + 0.518, + 0.068 + ], + "angle": 0, + "content": "ACM-4" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.114, + 0.836, + 0.198 + ], + "angle": 0, + "content": "indicate an emotional crisis in social platforms. 
This emotional fluctuation can hinder in performing timely mental health intervention which is worth considering, so that we can apply sentiment analysis techniques to capture these signals of hiding mental crises. Recent advancements in machine learning algorithms, particularly deep reinforcement learning methods, have provided promising directions for enhancing social media crisis recognition and intervention methods [19]." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.2, + 0.837, + 0.284 + ], + "angle": 0, + "content": "At the same time, recent works have highlighted the potential of Large Language Models (LLMs) in enhancing user intent modeling and adaptive recommendation, particularly in high-noise and emotionally charged environments like social media. Studies have shown that LLM-based frameworks can dynamically model user intent and effectively process unstructured data such as comments and posts—capabilities that are especially relevant for understanding psychological distress signals in crisis intervention tasks [20-21]." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.302, + 0.248, + 0.314 + ], + "angle": 0, + "content": "3 METHODOLOGIES" + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.332, + 0.266, + 0.345 + ], + "angle": 0, + "content": "3.1 Sentiment Analysis" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.352, + 0.836, + 0.417 + ], + "angle": 0, + "content": "The core purpose of the Mental Health Knowledge module is to effectively integrate expertise in the field of mental health into large language models in order to enhance the sensitivity of the models to crisis-related emotions and behaviors. We complement the generic semantic representation of the BERT model by introducing mental health embedding vectors, for which we innovatively propose the following Equation 1." 
+ }, + { + "type": "equation", + "bbox": [ + 0.297, + 0.418, + 0.833, + 0.438 + ], + "angle": 0, + "content": "\\[\nE_{total}(x_i) = E_{BERT}(x_i) + \\lambda_1 \\cdot \\operatorname{softmax}\\left(W_{ph} \\cdot E_{ph}(x_i)\\right), \\tag{1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.44, + 0.836, + 0.574 + ], + "angle": 0, + "content": "Where \\( E_{BERT}(x_i) \\) is the word vector generated by BERT, \\( E_{ph}(x_i) \\) is the word embedding of mental health knowledge, \\( W_{ph} \\) is the mapping matrix, and \\( \\lambda_1 \\) is the weight hyperparameter of adjusting the embedding of mental health knowledge. The softmax operation in the formula aims to normalize the mental health knowledge vector in order to better integrate it with the original BERT embedding vector and ensure that the knowledge in the mental health domain occupies an appropriate proportion in the enhanced vector representation. The innovation of this method is that we not only combine mental health knowledge into BERT through linear mapping, but also normalize it through softmax operation, which can more precisely control the influence of knowledge embedding, and make the identification of sentiment analysis and crisis behavior more sensitive and accurate." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.578, + 0.836, + 0.659 + ], + "angle": 0, + "content": "The goal of the sentiment analysis module is to identify potential crisis sentiments through an in-depth analysis of sentiment fluctuations in the text. In order to enhance the performance of sentiment analysis, we propose a Multidimensional sentiment Convolutional Network (MSCN), which can not only identify the polarity of sentiment, but also capture the amplitude and frequency of sentiment changes. 
We use the combination of Convolutional Neural Network (CNN) and LSTM to propose the following Equation 2:" + }, + { + "type": "equation", + "bbox": [ + 0.314, + 0.659, + 0.834, + 0.698 + ], + "angle": 0, + "content": "\\[\nS(X) = \\mathrm{LSTM}\\left(\\mathrm{CNN}(X)\\right) = \\sum_{t=1}^{n} C_t \\cdot \\mathrm{ReLU}\\left(W_s \\cdot E_t\\right), \\tag{2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.7, + 0.836, + 0.783 + ], + "angle": 0, + "content": "where \\( C_t \\) is the affective convolutional kernel, \\( W_s \\) is the weight matrix of the convolutional layer, and \\( ReLU(\\cdot) \\) is the activation function, and \\( E_t \\) is the word vector. Here, we extract local sentiment features through convolution operations, and then model the sentiment information globally through LSTM to capture the temporal changes of sentiment. The innovation of using convolutional layers lies in its ability to effectively identify local features of emotions (such as emotional fluctuations between words), which is especially important for crisis recognition." 
+ }, + { + "type": "text", + "bbox": [ + 0.112, + 0.786, + 0.836, + 0.817 + ], + "angle": 0, + "content": "In order to further improve the accuracy of sentiment analysis, we add an emotion adaptive module to the output sentiment representation, which weights the sentiment intensity according to the context, as shown in Equation 3:" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.431, + 0.049, + 0.518, + 0.068 + ], + "angle": 0, + "content": "ACM-5" + }, + { + "type": "equation", + "bbox": [ + 0.39, + 0.113, + 0.834, + 0.129 + ], + "angle": 0, + "content": "\\[\nS_{\\text{adaptive}} = S(X) \\odot A(X), \\tag{3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.132, + 0.834, + 0.162 + ], + "angle": 0, + "content": "where \\(\\odot\\) represents element-by-element multiplication, and \\(A(X)\\) is the adaptive weighted vector of affective intensity, which is calculated as shown in Equation 4:" + }, + { + "type": "equation", + "bbox": [ + 0.378, + 0.164, + 0.834, + 0.18 + ], + "angle": 0, + "content": "\\[\nA(X) = \\operatorname{softmax}\\left(W_a \\cdot S(X)\\right). \\tag{4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.183, + 0.836, + 0.231 + ], + "angle": 0, + "content": "The innovation of the adaptive weighting mechanism is that it dynamically adjusts the weight of emotion intensity through the softmax function, so that the emotion intensity can be more reasonably explained in different contexts, so as to improve the sensitivity of crisis emotion." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.246, + 0.406, + 0.26 + ], + "angle": 0, + "content": "3.2 Behavior Prediction and Transfer Learning" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.267, + 0.836, + 0.349 + ], + "angle": 0, + "content": "The behavior prediction module is used to predict potential crisis behaviors (such as suicide, violence, etc.) based on the user's social network behavior. 
To this end, we propose a behavior prediction model based on Graph Neural Network (GNN). Different from the traditional Graph Convolution Network (GCN), we introduce a Hierarchical Graph Convolution (HGC) strategy, which enables the network to capture the relationship between nodes (users) in the social network at different levels." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.352, + 0.834, + 0.383 + ], + "angle": 0, + "content": "First, we define the adjacency matrix of the social network as \\( A \\), and construct the propagation formula of the hierarchical graph convolution, as shown in Equation 5:" + }, + { + "type": "equation", + "bbox": [ + 0.35, + 0.383, + 0.834, + 0.404 + ], + "angle": 0, + "content": "\\[\nH _ {v} ^ {(k + 1)} = \\sigma \\left(A _ {v} ^ {(k)} \\cdot H _ {v} ^ {(k)} \\cdot W _ {v} ^ {(k)} + B _ {v} ^ {(k)}\\right), \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.405, + 0.836, + 0.506 + ], + "angle": 0, + "content": "where \\( H_{v}^{(k)} \\) represents the node of the \\( k \\)-th layer, \\( A_{v}^{(k)} \\) is the adjacency matrix of the \\( k \\)-th layer, \\( W_{v}^{(k)} \\) is the weight matrix of the convolutional layer, \\( B_{v}^{(k)} \\) is the bias term, and \\( \\sigma \\) is the activation function. Different from traditional GCN, we can capture more precise behavior patterns in different social network layers by introducing a hierarchical propagation mechanism to control the range of information transmission in each layer. Prior research has demonstrated the effectiveness of hierarchical propagation mechanisms in capturing complex patterns of social behaviors [13-14]." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.509, + 0.836, + 0.557 + ], + "angle": 0, + "content": "We further propose a Behavior Prediction Reinforcement Module (BPRM) to adjust the weight of behavior prediction through reinforcement learning strategies. 
Specifically, we set up a reward function to optimize the accuracy of behavioral predictions, as shown in Equation 6:" + }, + { + "type": "equation", + "bbox": [ + 0.289, + 0.557, + 0.834, + 0.577 + ], + "angle": 0, + "content": "\\[\nR\\left(H_v^{(k)}\\right) = \\lambda_1 \\cdot \\text{Precision} + \\lambda_2 \\cdot \\text{Recall} + \\lambda_3 \\cdot \\text{F1-Score}. \\tag{6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.58, + 0.836, + 0.629 + ], + "angle": 0, + "content": "The reinforcement learning module maximizes the overall prediction accuracy by dynamically adjusting the weights of the graph convolutional layer, thereby improving the prediction ability of crisis behavior. Similar dynamic adjustment methods have shown effectiveness in recent literature [15]." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.632, + 0.836, + 0.714 + ], + "angle": 0, + "content": "In the framework of transfer learning, we jointly train the pre-trained model of BERT with the above modules of mental health knowledge, sentiment analysis, and behavior prediction. By fine-tuning the network parameters, we were able to adapt the model to the linguistic dynamics in a particular social network. 
To this end, we propose a multi-task loss function, which combines the categorical loss, emotion-predicted loss, and behavior-predicted loss of crisis content, as shown in Equation 7:" + }, + { + "type": "equation", + "bbox": [ + 0.237, + 0.716, + 0.834, + 0.731 + ], + "angle": 0, + "content": "\\[\n\\mathcal{L} = \\lambda_1 \\cdot \\mathcal{L}_{\\text{classification}} + \\lambda_2 \\cdot \\mathcal{L}_{\\text{emotion}} + \\lambda_3 \\cdot \\mathcal{L}_{\\text{behavior}} + \\lambda_4 \\cdot \\mathcal{L}_{\\text{reinforcement}}, \\tag{7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.734, + 0.836, + 0.8 + ], + "angle": 0, + "content": "A multi-level framework is proposed based on the existing model of BERT, which integrates sentiment analysis, behavior prediction, and crisis intervention techniques, effectively identifying crisis signals from noisy social network data. Recent studies have validated the effectiveness of transfer learning for recognizing crisis signals in social networks [16][17]. Other relevant works have also explored the fusion of structured and unstructured EHR" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.432, + 0.049, + 0.518, + 0.069 + ], + "angle": 0, + "content": "ACM-6" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.114, + 0.833, + 0.145 + ], + "angle": 0, + "content": "data for psychological prediction [24], real-time optimization in recommendation and intervention settings [22-23], and AI-based risk assessment frameworks with high adaptability to emotional shifts [25]." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.148, + 0.835, + 0.23 + ], + "angle": 0, + "content": "Our transfer learning approach significantly enhances model robustness and adaptability to noisy environments, which has been similarly demonstrated in other applications [15]. 
The innovation of this loss function is the introduction of \\(\\mathcal{L}_{\\text {reinforcement }}\\), the loss term of reinforcement learning, to optimize the training process of the behavior prediction module. Through the strategy of multi-task learning, the model is able to balance the losses of different tasks during the training process, so as to achieve more accurate crisis identification." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.251, + 0.225, + 0.264 + ], + "angle": 0, + "content": "4 EXPERIMENTS" + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.281, + 0.268, + 0.294 + ], + "angle": 0, + "content": "4.1 Experimental Setup" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.301, + 0.836, + 0.435 + ], + "angle": 0, + "content": "In this experiment, we employed the Crisis Text Line dataset which was modeled after a real mental health hotline and included tens of thousands of conversations between users and counselors that encompassed different psychological crisis events. Data has the characteristics of diversity and complexity of emotional expression, real-time and dynamic changes, text length difference, and hidden crisis signal. The data was preprocessed including text cleaning, sentiment annotation, and segmentation before fitting into the experimental model. The dataset offers essential emotional data for the automatic recognition and intervention of mental health crises, which spurred a big challenge, including the approach to identify implicit emotions and crisis-behaviours, and extract efficient emotional cues in long texts and multi-round conversations." 
+ }, + { + "type": "text", + "bbox": [ + 0.111, + 0.438, + 0.836, + 0.624 + ], + "angle": 0, + "content": "To verify the effectiveness of the text transfer recognition method proposed for social network crisis intervention based on improvement of mental health knowledge, we selected four existing mainstream methods for comparative experiments: 1) Valence Aware Dictionary and sEntiment Reasoner (VADER), a sentiment classification method based on sentiment dictionary, which is suitable for basic sentiment analysis, but has limitations in identifying complex or obscure emotions; 2) Bidirectional Long Short-Term Memory(Bi-LSTM), an emotion classification method based on deep learning, can capture text context information more accurately, but it is still insufficient for the recognition of hidden psychological crisis signals. 3) Bidirectional Encoder Representations from Transformers (BERT) is a sentiment recognition method based on transfer learning and has a strong ability to understand context, but the computational cost is relatively large. 4) MML (Multimodal Learning): uses multimodal learning methods, combined using multi-source data such as text and images to improve the recognition accuracy, but the requirements for computing resource and data are higher." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.638, + 0.283, + 0.652 + ], + "angle": 0, + "content": "4.2 Experimental Analysis" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.659, + 0.836, + 0.777 + ], + "angle": 0, + "content": "The performance of various methods in successfully discovering potential psychological crises in the social network circumstance can be evaluated by one feature index, namely the Crisis Detection Rate (CDR). The above dataset is trained from 0 to 10,000 and the memory is set to 7 days. 
As the results in Figure 1 show, with the increase of the training period, the recognition ability of VADER and Bi-LSTM is improved, but overall the performance remains relatively flat, and finally tends to stabilize, and the performance is limited. The recognition rate of BERT and MML models is effective, and training is continuously improved to gradually enhance the ability to identify the crisis. Since you are based ons datasets, you can not train on data after October 2023." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.432, + 0.049, + 0.518, + 0.068 + ], + "angle": 0, + "content": "ACM-7" + }, + { + "type": "image", + "bbox": [ + 0.241, + 0.111, + 0.705, + 0.325 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.343, + 0.339, + 0.605, + 0.354 + ], + "angle": 0, + "content": "Figure 1. Crisis Detection Rate Comparison." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.367, + 0.839, + 0.604 + ], + "angle": 0, + "content": "Emotional Stability quantified to what extent the model fluctuates while processing social network text. Physical stability: The higher the emotional stability, the better the model stabilizes emotional fluctuations arising from or driven by external factors or occasions, which can be more accurately represented as changes in user emotions, and can be more effectively explained as induced by stable patterns of emotion and emotion. The results shown in figure 2 reveal an increase in positive/negative emotion stability of all models with the growth of text size, notably in the case of longer texts, and a sharp decline in emotional fluctuation. In particular, VADER fails on short texts, slows sensitivity to affective stability, and stays low in long text. Compared to VADER, we can see that the Bi-LSTM model achieves better emotional stability, but is still limited by its simple context modeling ability. 
Specifically, the stability of the BERT model is greatly increased with the growth of input text length, at least for longer texts which can effectively consider the context of the given sentences and results in diminished emotional fluctuations. The MML model has good emotional stability, and multimodal data can also enhance its stability. The Ours model showed the best performance across all tested text lengths, especially in the case of long text emotional stability, and by merging mental health knowledge refinement and transfer learning, our model could more in-depth capture the long-term trends of emotion, so as to achieve more stable emotion recognition." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.431, + 0.049, + 0.518, + 0.068 + ], + "angle": 0, + "content": "ACM-8" + }, + { + "type": "image", + "bbox": [ + 0.242, + 0.112, + 0.705, + 0.325 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.287, + 0.339, + 0.66, + 0.354 + ], + "angle": 0, + "content": "Figure 2. Emotional Stability Comparison Across Text Lengths." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.367, + 0.836, + 0.398 + ], + "angle": 0, + "content": "Model performance in terms of each affective intensity (mild, moderate and strong affective ranges) is evaluated using the emotion depth distribution." + }, + { + "type": "image", + "bbox": [ + 0.262, + 0.407, + 0.688, + 0.6 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.271, + 0.615, + 0.677, + 0.63 + ], + "angle": 0, + "content": "Figure 3. Emotional Depth Distribution Comparison Across Models." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.643, + 0.836, + 0.727 + ], + "angle": 0, + "content": "Through above Figure 3, we can intuitively see the differences in methods for different emotion recognition capabilities in the base of sentiment depth. 
The experimental results indicate that the Ours method performs significantly better than the other methods in the recognition of slight emotion intervals, which may be attributed to the use of a more fine-grained sentiment analysis mechanism that can capture the potential slight emotion signals in the social networks better." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.746, + 0.221, + 0.759 + ], + "angle": 0, + "content": "5 CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.764, + 0.836, + 0.823 + ], + "angle": 0, + "content": "In this paper, we focus on the intervention of crisis on social networks, and propose a method of text transfer recognition based on social network crisis intervention, based on the knowledge enhancement of mental health which driven by large language model and this is significantly improved the detection ability of the potential psychological crisis on social network by the combination of the advanced technologies of transfer learning and" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.431, + 0.049, + 0.518, + 0.068 + ], + "angle": 0, + "content": "ACM-9" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.11, + 0.836, + 0.226 + ], + "angle": 0, + "content": "combining with the mental health field of special knowledge. A multi-level framework is proposed based on the existing model of BERT, which integrates sentiment analysis, behavior prediction and crisis intervention techniques, and effectively identifies the mild, moderate and strong emotional depth of potential crisis signals in the social media. The experimental results indicate that the Ours method outperformed traditional sentiment analysis models in critical indicators and performed well on the recognition of minor emotions, reflecting that it has flexibility and effectiveness under the flexible variation of emotions. 
They can also be introduced with various knowledge in the fields of mental health and multimodal information to complete the model, and will also optimize the modeling structure of the model." + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.246, + 0.195, + 0.257 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.262, + 0.836, + 0.29 + ], + "angle": 0, + "content": "[1] Bosco, Cristina, et al. \"Detecting racial stereotypes: An Italian social media corpus where psychology meets NLP.\" Information Processing & Management 60.1 (2023): 103118." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.293, + 0.836, + 0.321 + ], + "angle": 0, + "content": "[2] Pang, Patrick Cheong-Iao, et al. \"Engagement of government social media on Facebook during the COVID-19 pandemic in Macao.\" International Journal of Environmental Research and Public Health 18.7 (2021): 3508." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.324, + 0.837, + 0.351 + ], + "angle": 0, + "content": "[3] Muhammed T, Sadiq, and Saji K. Mathew. \"The disaster of misinformation: a review of research in social media.\" International journal of data science and analytics 13.4 (2022): 271-285." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.354, + 0.837, + 0.381 + ], + "angle": 0, + "content": "[4] Balaji, T. K., Chandra Sekhara Rao Annavarapu, and Annushree Bablani. \"Machine learning algorithms for social media analysis: A survey.\" Computer Science Review 40 (2021): 100395." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.384, + 0.837, + 0.412 + ], + "angle": 0, + "content": "[5] Weyrich, Philippe, et al. \"Using serious games to evaluate the potential of social media information in early warning disaster management.\" International journal of disaster risk reduction 56 (2021): 102053." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.415, + 0.837, + 0.442 + ], + "angle": 0, + "content": "[6] Chen, Long, Jianguo Chen, and Chunhe Xia. \"Social network behavior and public opinion manipulation.\" Journal of Information Security and Applications 64 (2022): 103060." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.445, + 0.837, + 0.472 + ], + "angle": 0, + "content": "[7] Drouhot, Lucas G., et al. \"Computational approaches to migration and integration research: promises and challenges.\" Journal of Ethnic and Migration Studies 49.2 (2023): 389-407." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.476, + 0.837, + 0.503 + ], + "angle": 0, + "content": "[8] Oktavianus, Jeffry, and Wan-Ying Lin. \"Soliciting social support from migrant domestic workers' connections to storytelling networks during a public health crisis.\" Health Communication 38.6 (2023): 1179-1188." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.506, + 0.837, + 0.534 + ], + "angle": 0, + "content": "[9] Bolhuis, Maarten P., and Joris Van Wijk. \"Seeking asylum in the digital era: Social-media and mobile-device vetting in asylum procedures in five European countries.\" Journal of refugee studies 34.2 (2021): 1595-1617." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.537, + 0.837, + 0.564 + ], + "angle": 0, + "content": "[10] Lv, Yang, et al. \"Big data driven COVID-19 pandemic crisis management: potential approach for global health.\" Archives of Medical Science: AMS 17.3 (2021): 829." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.567, + 0.837, + 0.595 + ], + "angle": 0, + "content": "[11] Jin, Xianlin, and Patric R. Spence. \"Understanding crisis communication on social media with CERC: Topic model analysis of tweets about Hurricane Maria.\" Journal of Risk Research 24.10 (2021): 1266-1287." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.598, + 0.837, + 0.625 + ], + "angle": 0, + "content": "[12] Wildemann, Sergej, Claudia Niederée, and Erick Elejalde. \"Migration Reframed? A multilingual analysis on the stance shift in Europe during the Ukrainian crisis.\" Proceedings of the ACM Web Conference 2023. 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.628, + 0.837, + 0.656 + ], + "angle": 0, + "content": "[13] Li, K., Wang, J., Wu, X., Peng, X., Chang, R., Deng, X., Kang, Y., Yang, Y., Ni, F., & Hong, B. \"Optimizing automated picking systems in warehouse robots using machine learning.\" arXiv preprint arXiv:2408.16633 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.659, + 0.837, + 0.699 + ], + "angle": 0, + "content": "[14] Li, K., Chen, J., Yu, D., Tao, D., Qiu, X., Lian, J., Ji, R., Zhang, S., Wan, Z., Sun, B., et al. \"Deep reinforcement learning-based obstacle avoidance for robot movement in warehouse environments.\" Proceedings of the 2024 IEEE 6th International Conference on Civil Aviation Safety and Information Technology (ICCASIT), (2024): 342-348." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.702, + 0.837, + 0.743 + ], + "angle": 0, + "content": "[15] Li, K., Liu, L., Chen, J., Yu, D., Zhou, X., Li, M., Wang, C., & Li, Z. \"Research on reinforcement learning based warehouse robot navigation algorithm in complex warehouse layout.\" Proceedings of the 2024 6th International Conference on Artificial Intelligence and Computer Applications (ICAICA) (2024): 296-301." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.746, + 0.837, + 0.8 + ], + "angle": 0, + "content": "[16] Yu, D., Liu, L., Wu, S., Li, K., Wang, C., Xie, J., Chang, R., Wang, Y., Wang, Z., & Ji, R. \"Machine learning optimizes the efficiency of picking and packing in automated warehouse robot systems.\" Proceedings of the 2024 International Conference on Computer Engineering, Network and Digital Communication (CENDC 2024) (2024)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.803, + 0.837, + 0.818 + ], + "angle": 0, + "content": "[17] Sun, J., Zhang, S., Lian, J., Fu, L., Zhou, Z., & Fan, Z. \"Multimodal Deep Learning for Crisis Intervention.\"" + }, + { + "type": "list", + "bbox": [ + 0.115, + 0.262, + 0.837, + 0.818 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.423, + 0.049, + 0.525, + 0.069 + ], + "angle": 0, + "content": "ACM-10" + }, + { + "type": "ref_text", + "bbox": [ + 0.145, + 0.11, + 0.638, + 0.125 + ], + "angle": 0, + "content": "Proceedings of the 2024 IEEE 7th International Conference (2024): 996-1004." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.128, + 0.835, + 0.155 + ], + "angle": 0, + "content": "[18] Lyu, S. \"Machine Vision-Based Automatic Detection for Electromechanical Equipment.\" Journal of Computer Technology and Applied Mathematics 1.4 (2024): 12-20." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.158, + 0.835, + 0.186 + ], + "angle": 0, + "content": "[19] Lin, Weikun. \"A Review of Multimodal Interaction Technologies in Virtual Meetings.\" Journal of Computer Technology and Applied Mathematics 1.4 (2024): 60-68." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.189, + 0.835, + 0.229 + ], + "angle": 0, + "content": "[20] Wildemann, S., Niederée, C., & Elejalde, E. (2023, April). Migration Reframed? A multilingual analysis on the stance shift in Europe during the Ukrainian crisis. In Proceedings of the ACM Web Conference 2023 (pp. 2754-2764)." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.232, + 0.835, + 0.26 + ], + "angle": 0, + "content": "[21] Xu, X., Xu, Z., Yu, P., & Wang, J. (2025). Enhancing user intent for recommendation systems via large language models. arXiv preprint arXiv:2501.10871." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.262, + 0.835, + 0.29 + ], + "angle": 0, + "content": "[22] Yu, P., Xu, Z., Wang, J., & Xu, X. (2025). 
The application of large language models in recommendation systems. arXiv preprint arXiv:2501.02178." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.293, + 0.835, + 0.321 + ], + "angle": 0, + "content": "[23] Feng, H., & Gao, Y. (2025). Ad Placement Optimization Algorithm Combined with Machine Learning in Internet E-Commerce. Preprints. https://doi.org/10.20944/preprints202502.2167.v1" + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.324, + 0.835, + 0.365 + ], + "angle": 0, + "content": "[24] Wu, S., & Huang, X. (2025). Psychological Health Prediction Based on the Fusion of Structured and Unstructured Data in EHR: a Case Study of Low-Income Populations. Preprints. https://doi.org/10.20944/preprints202502.2104.v1" + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.368, + 0.835, + 0.395 + ], + "angle": 0, + "content": "[25] Wang, Z., Zhang, Q., & Cheng, Z. (2025). Application of AI in Real-time Credit Risk Detection. Preprints. https://doi.org/10.20944/preprints202502.1546.v1" + }, + { + "type": "list", + "bbox": [ + 0.116, + 0.11, + 0.835, + 0.395 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07983/c79e9722-5ec3-4bab-a91d-f1c817c5af43_origin.pdf b/data/2025/2504_07xxx/2504.07983/c79e9722-5ec3-4bab-a91d-f1c817c5af43_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..22bb9948c9065f05bde18d8d7a8d8baa085f8ae9 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07983/c79e9722-5ec3-4bab-a91d-f1c817c5af43_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d66b8f35cdf896250bb92a0a4c43ddcb5c498dd7c2a498016257be4bdfdbd21 +size 490899 diff --git a/data/2025/2504_07xxx/2504.07983/full.md b/data/2025/2504_07xxx/2504.07983/full.md new file mode 100644 index 0000000000000000000000000000000000000000..d023b3fc75f84fcd364c2c7ac279bb33ea18ae96 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07983/full.md @@ -0,0 +1,188 @@ +# Psychological 
Health Knowledge-Enhanced LLM-based Social Network Crisis Intervention Text Transfer Recognition Method + +Shurui Wu * + +Weill Cornell Medicine, New York City, NY, USA, shuruiwu215@gmail.com + +Xinyi Huang + +University of Chicago, Chicago, IL, USA, bellaxinyihuang@gmail.com + +Dingxin Lu + +Icahn School of Medicine at Mount Sinai, New York, NY, USA, sydneylu1998@gmail.com + +# Abstract + +As the prevalence of mental health crises increases on social media platforms, how to effectively identify and deter potential harms has emerged as an urgent problem. To improve the detection ability of crisis-related content in social networks, this study proposes a large language model (LLM) text transfer recognition method for social network crisis intervention based on the enhancement of mental health knowledge that integrates mental health professional knowledge and transfer learning technology. We introduce a multi-level framework that employs transfer learning on a large language model BERT and integrates domain mental health knowledge, sentiment analysis as well as behavior prediction modeling techniques. This approach proposes a mental health annotation tool trained on social media datasets from crisis events, helping a large language model find potential language cues and then determine the presence of a psychological crisis and crisis acts. Experimental results indicate that the proposed model is superior to the traditional method in crisis detection accuracy, and demonstrate a greater sensitivity to underlying differences in context and emotion. 
+ +CCS CONCEPTS + +- Applied computing $\sim$ Life and medical sciences $\sim$ Health care information systems + +# Keywords + +Psychological Health Knowledge, Large Language Models (LLMs), Crisis Intervention, Text Transfer Recognition, Transfer Learning + +# 1 INTRODUCTION + +At present, mental health issues are on the rise worldwide due to the proliferation of social media and internet-based platforms, a phenomenon extensively documented in recent studies on online emotional and psychological support during public health crises [18]. Over the last decade the explosive growth of social platforms like Facebook, Twitter, Instagram and TikTok has resulted in increased sharing of personal lives and emotional devastation on those platforms. It has helped many people not just find psychological comfort through social support, but also made mental health issues a public issue. According to the World Health Organization (WHO), mental health disorders have been one of the most important public health problems in the world, especially among young people and social media users, and the increase rate of mental health problems are significantly greater than + +other groups. Such as mental health problems, including depression, anxiety, suicidal tendency, etc [1]. Overall discussion in the social platform is higher, and the early symptoms are more likely to be recessive and hidden, which also increases the difficulty of timely intervention and effective saving. + +Feelings expressed in public, and especially in social networking, are still part of the changing trend of emotion in modern society. On an online social network, users can share their thoughts with any audience from the anonymity of his or her room, while in conventional face-to-face communication, users will be confronted with the expression of his or her counterpart and the tone of voice, etc., the core of which can be ignored, and the psychological distress of the victim. 
But this anonymity and virtuality is also what makes people so concealed yet complicating their experiences of psychological pain [2]. Many requests for help, crisis signals are camouflaged within confusing textual and nonverbal cues (mood swings, tone changes, rupture and contradiction in self-presentation, and other) That makes it especially challenging for the automation of potential crisis identification. + +Expressions in social networks cover broader range of data such as text, images, videos, audio, and other forms of media. Yet text is by far the primary medium of communication throughout all of this, and in textual content including blogs, tweets, comments, and etc. Users can express their emotion, life challenges, and beliefs detailed. However, texts from social networks tend to be noisy and ambiguous. In many of these cases emotions are expressed indirectly, through subtle wording or humor, irony other expressions which makes it hard to gain the polarity, intensity, and shifting of emotions [3]. Furthermore, the emotions that are reflected on social networks by users on the platform are not necessarily consistent with the emotions in reality, and are susceptible to multifactor influences such as socio-cultural background, personal expression habits and platform use preferences, which increase the difficulty and complexity of emotion recognition and understanding [4]. + +As a result, social network crisis identification mode has many challenges in existing text analysis technology. There are existing methods for sentiment analysis and crisis recognition which mainly focus on the use of simple keyword matching or basic sentiment classification. These techniques largely miss the richness of emotional traits encoded in language and do not capture the nuances of implicit and complex mental health signals. 
For instance, some crisis behaviors like depression or suicidal tendencies might appear as slight changes in affect, subtle changes in language, or the user shutting down and avoiding interaction with them [5]. In traditional text analytics approaches, these nuances are often lost, which means we don't detect some signals of a crisis. + +Moreover, despite certain advances in the current crisis intervention system to some degree, most of them were still based on shallow sentiment analysis or keyword-matching methods. And while sentiment analysis techniques can determine the emotional orientation (positive and negative emotions) of text, they generally do not take into account the temporal variation of emotions, contextual differences, and individual variations (situational expressions) in emotional expression. For example, while users pretend to be happy when they are actually sad and inject humor or sarcasm into their expressions, traditional sentiment analysis algorithms very often have a poor accuracy in this case [6]. Existing methods of sentiment analysis are usually far from sufficient when it comes to more complex scenarios like the interlacing of many different emotional states. In judging complex and dynamic emotional and psychological crises in the social network, the existing system cannot be better understood, and there is a lack of sufficient in-depth understanding and situational judgment ability which leads to their effect of intervention can only fit into, the effect is far less than expected [7]. + +Based on all the above consideration, through the integration of mental health knowledge for the social network text transfer recognition method of crisis intervention, this paper puts forward a transfer recognition method for crisis intervention text based on large language model (LLM). 
It is based on a combination of domain knowledge + +in the field of mental health and transfer learning capabilities to improve early detection of a crisis signal in social networks using a multi-level framework. It can not only realize the sensitive mining of emotional fluctuation in social network text, but also through transfer learning technology, transfer the model to the language dynamic environment, to better realize the subtle emotional fluctuation in environmental perception text, language implicit representation, tongue difference in individual social network in the crisis recognition process. + +# 2 RELATED WORK + +Oktavianus and Lin [8] add to this literature by examining migrant domestic workers who seek social support through social networks during a public health crisis. Studies on the emotional support and coping strategies of temporary migrants in crises such as the pandemic. The study explores how immigrant populations use social media to create emotional support and increase social connections, by analysing storytelling and community interactions across the social media landscape. Indeed, the research points out that, during times of crisis, social media serves to provide mental health support to vulnerable populations, as cyberspace can serve as a cauldron to find belongingness and security. + +The study by Bolhuis and Wijk [9] explores the use of social media and mobile devices to assist with asylum processes in five European countries, including the review of immigration applications. The research highlights how migrants and asylum seekers seek to reach the outside world through social media platforms and mobile devices in the digital age, and examines how immigration authorities screen – by checking social media activity and content. 
In the case of migration management and crisis response, the study draws attention to the critical need for a media tool to be integrated into government life as well as highlight how social media be utilized as a bi-directional information shipper during times of panic or crisis — not only within the public domain of health crises or emergencies, but also in the wider context of social connection and interest. + +Furthermore, Lv et al. [10] said that big data has the potential to help in the crisis management of the COVID-19 pandemic. They explain that their study shows the use of social media as a source of big data during the pandemic, which will help with identifying the source of the infection and examining the emotional response surrounding events. The researchers emphasize the importance of social media as an essential tool to shape public health in terms of social media, text mining, predicting analytics, and social network analytics. The study demonstrates that big data technology can be applied in crisis management particularly in global health crises (epidemics) to ensure predictive outcomes of population behaviour, emotional trends and health response. + +Jin and Spence analyzed Twitter [11] tweets using the CERC (Crisis and Emergency Risk Communication) model. Using thematic modeling, the study explores social media's dissemination of information as well as its organization of crisis communication and public reactions to crisis management in the wake of a disaster. Through this analysis of tweets surrounding Harvey Maria, it showcases the various affairs of social media users on how people voice their emotions, fears, anger and confusion, all the while noting the different matters that platforms serve during crises, including information dissemination, emotional expression and public emotion management. + +Wildemann et al. 
[12] applied large-scale social media text analytics to discover movement changes by applying sentiment analysis techniques to unveil the intricacy of emotional standpoints on migration-related narratives presented on social media. This is very consistent with research into interventions for mental health crises as immigrant groups have their mental health affected in public crises disproportionately. Changes in public emotion and attitudes on social media might indicate potential mental health risks. That is, negative emotions such as anxiety, fear, and anger may be related to a negative attitude toward refugees expressed by social media users, which may + +indicate an emotional crisis in social platforms. This emotional fluctuation can hinder in performing timely mental health intervention which is worth considering, so that we can apply sentiment analysis techniques to capture these signals of hiding mental crises. Recent advancements in machine learning algorithms, particularly deep reinforcement learning methods, have provided promising directions for enhancing social media crisis recognition and intervention methods [19]. + +At the same time, recent works have highlighted the potential of Large Language Models (LLMs) in enhancing user intent modeling and adaptive recommendation, particularly in high-noise and emotionally charged environments like social media. Studies have shown that LLM-based frameworks can dynamically model user intent and effectively process unstructured data such as comments and posts—capabilities that are especially relevant for understanding psychological distress signals in crisis intervention tasks [20-21]. + +# 3 METHODOLOGIES + +# 3.1 Sentiment Analysis + +The core purpose of the Mental Health Knowledge module is to effectively integrate expertise in the field of mental health into large language models in order to enhance the sensitivity of the models to crisis-related emotions and behaviors. 
We complement the generic semantic representation of the BERT model by introducing mental health embedding vectors, for which we innovatively propose the following Equation 1. + +$$ +E _ {t o t a l} \left(x _ {i}\right) = E _ {B E R T} \left(x _ {i}\right) + \lambda_ {1} \cdot \text {s o f t m a x} \left(W _ {p h} \cdot E _ {p h} \left(x _ {i}\right)\right), \tag {1} +$$ + +Where $E_{BERT}(x_i)$ is the word vector generated by BERT, $E_{ph}(x_i)$ is the word embedding of mental health knowledge, $W_{ph}$ is the mapping matrix, and $\lambda_1$ is the weight hyperparameter of adjusting the embedding of mental health knowledge. The softmax operation in the formula aims to normalize the mental health knowledge vector in order to better integrate it with the original BERT embedding vector and ensure that the knowledge in the mental health domain occupies an appropriate proportion in the enhanced vector representation. The innovation of this method is that we not only combine mental health knowledge into BERT through linear mapping, but also normalize it through softmax operation, which can more precisely control the influence of knowledge embedding, and make the identification of sentiment analysis and crisis behavior more sensitive and accurate. + +The goal of the sentiment analysis module is to identify potential crisis sentiments through an in-depth analysis of sentiment fluctuations in the text. In order to enhance the performance of sentiment analysis, we propose a Multidimensional sentiment Convolutional Network (MSCN), which can not only identify the polarity of sentiment, but also capture the amplitude and frequency of sentiment changes. 
We use the combination of Convolutional Neural Network (CNN) and LSTM to propose the following Equation 2: + +$$ +S (X) = L S T M \left(C N N (X)\right) = \sum_ {t = 1} ^ {n} C _ {t} \cdot R e L U \left(W _ {s} \cdot E _ {t}\right), \tag {2} +$$ + +where $C_t$ is the affective convolutional kernel, $W_s$ is the weight matrix of the convolutional layer, and $ReLU(\cdot)$ is the activation function, and $E_t$ is the word vector. Here, we extract local sentiment features through convolution operations, and then model the sentiment information globally through LSTM to capture the temporal changes of sentiment. The innovation of using convolutional layers lies in its ability to effectively identify local features of emotions (such as emotional fluctuations between words), which is especially important for crisis recognition. + +In order to further improve the accuracy of sentiment analysis, we add an emotion adaptive module to the output sentiment representation, which weights the sentiment intensity according to the context, as shown in Equation 3: + +$$ +S _ {\text {a d a p t i v e}} = S (X) \odot A (X), \tag {3} +$$ + +where $\odot$ represents element-by-element multiplication, and $A(X)$ is the adaptive weighted vector of affective intensity, which is calculated as shown in Equation 4: + +$$ +A (X) = \operatorname {s o f t m a x} \left(W _ {a} \cdot S (X)\right). \tag {4} +$$ + +The innovation of the adaptive weighting mechanism is that it dynamically adjusts the weight of emotion intensity through the softmax function, so that the emotion intensity can be more reasonably explained in different contexts, so as to improve the sensitivity of crisis emotion. + +# 3.2 Behavior Prediction and Transfer Learning + +The behavior prediction module is used to predict potential crisis behaviors (such as suicide, violence, etc.) based on the user's social network behavior. To this end, we propose a behavior prediction model based on Graph Neural Network (GNN). 
Different from the traditional Graph Convolution Network (GCN), we introduce a Hierarchical Graph Convolution (HGC) strategy, which enables the network to capture the relationship between nodes (users) in the social network at different levels. + +First, we define the adjacency matrix of the social network as $A$ , and construct the propagation formula of the hierarchical graph convolution, as shown in Equation 5: + +$$ +H _ {v} ^ {(k + 1)} = \sigma \left(A _ {v} ^ {(k)} \cdot H _ {v} ^ {(k)} \cdot W _ {v} ^ {(k)} + B _ {v} ^ {(k)}\right), \tag {5} +$$ + +where $H_{v}^{(k)}$ represents the node of the $k$ -th layer, $A_{v}^{(k)}$ is the adjacency matrix of the $k$ -th layer, $W_{v}^{(k)}$ is the weight matrix of the convolutional layer, $B_{v}^{(k)}$ is the bias term, and $\sigma$ is the activation function. Different from traditional GCN, we can capture more precise behavior patterns in different social network layers by introducing a hierarchical propagation mechanism to control the range of information transmission in each layer. Prior research has demonstrated the effectiveness of hierarchical propagation mechanisms in capturing complex patterns of social behaviors [13-14]. + +We further propose a Behavior Prediction Reinforcement Module (BPRM) to adjust the weight of behavior prediction through reinforcement learning strategies. Specifically, we set up a reward function to optimize the accuracy of behavioral predictions, as shown in Equation 6: + +$$ +R \left(H _ {v} ^ {(k)}\right) = \lambda_ {1} \cdot \text {P r e c i s i o n} + \lambda_ {2} \cdot \text {R e c a l l} + \lambda_ {3} \cdot F 1 - S c o r e. \tag {6} +$$ + +The reinforcement learning module maximizes the overall prediction accuracy by dynamically adjusting the weights of the graph convolutional layer, thereby improving the prediction ability of crisis behavior. Similar dynamic adjustment methods have shown effectiveness in recent literature [15]. 
+ +In the framework of transfer learning, we jointly train the pre-trained model of BERT with the above modules of mental health knowledge, sentiment analysis, and behavior prediction. By fine-tuning the network parameters, we were able to adapt the model to the linguistic dynamics in a particular social network. To this end, we propose a multi-task loss function, which combines the categorical loss, emotion-predicted loss, and behavior-predicted loss of crisis content, as shown in Equation 7: + +$$ +\mathcal {L} = \lambda_ {1} \cdot \mathcal {L} _ {\text {c l a s s i f i c a t i o n}} + \lambda_ {2} \cdot \mathcal {L} _ {\text {e m o t i o n}} + \lambda_ {3} \cdot \mathcal {L} _ {\text {b e h a v i o r}} + \lambda_ {4} \cdot \mathcal {L} _ {\text {r e i n f o r c e m e n t}}, \tag {7} +$$ + +A multi-level framework is proposed based on the existing model of BERT, which integrates sentiment analysis, behavior prediction, and crisis intervention techniques, effectively identifying crisis signals from noisy social network data. Recent studies have validated the effectiveness of transfer learning for recognizing crisis signals in social networks [16][17]. Other relevant works have also explored the fusion of structured and unstructured EHR + +data for psychological prediction [24], real-time optimization in recommendation and intervention settings [22-23], and AI-based risk assessment frameworks with high adaptability to emotional shifts [25]. + +Our transfer learning approach significantly enhances model robustness and adaptability to noisy environments, which has been similarly demonstrated in other applications [15]. The innovation of this loss function is the introduction of $\mathcal{L}_{\text {reinforcement }}$ , the loss term of reinforcement learning, to optimize the training process of the behavior prediction module. 
Through the strategy of multi-task learning, the model is able to balance the losses of different tasks during the training process, so as to achieve more accurate crisis identification. + +# 4 EXPERIMENTS + +# 4.1 Experimental Setup + +In this experiment, we employed the Crisis Text Line dataset which was modeled after a real mental health hotline and included tens of thousands of conversations between users and counselors that encompassed different psychological crisis events. Data has the characteristics of diversity and complexity of emotional expression, real-time and dynamic changes, text length difference, and hidden crisis signal. The data was preprocessed including text cleaning, sentiment annotation, and segmentation before fitting into the experimental model. The dataset offers essential emotional data for the automatic recognition and intervention of mental health crises, which spurred a big challenge, including the approach to identify implicit emotions and crisis-behaviours, and extract efficient emotional cues in long texts and multi-round conversations. + +To verify the effectiveness of the text transfer recognition method proposed for social network crisis intervention based on improvement of mental health knowledge, we selected four existing mainstream methods for comparative experiments: 1) Valence Aware Dictionary and sEntiment Reasoner (VADER), a sentiment classification method based on sentiment dictionary, which is suitable for basic sentiment analysis, but has limitations in identifying complex or obscure emotions; 2) Bidirectional Long Short-Term Memory(Bi-LSTM), an emotion classification method based on deep learning, can capture text context information more accurately, but it is still insufficient for the recognition of hidden psychological crisis signals. 
3) Bidirectional Encoder Representations from Transformers (BERT) is a sentiment recognition method based on transfer learning and has a strong ability to understand context, but the computational cost is relatively large. 4) MML (Multimodal Learning): uses multimodal learning methods, combined using multi-source data such as text and images to improve the recognition accuracy, but the requirements for computing resource and data are higher. + +# 4.2 Experimental Analysis + +The performance of various methods in successfully discovering potential psychological crises in the social network circumstance can be evaluated by one feature index, namely the Crisis Detection Rate (CDR). The above dataset is trained from 0 to 10,000 and the memory is set to 7 days. As the results in Figure 1 show, with the increase of the training period, the recognition ability of VADER and Bi-LSTM is improved, but overall the performance remains relatively flat, and finally tends to stabilize, and the performance is limited. The recognition rate of BERT and MML models is effective, and training is continuously improved to gradually enhance the ability to identify the crisis. Since you are based ons datasets, you can not train on data after October 2023. + +![](images/6bc7ad91c558637c38c18c9ed4ccddb64b153c243b341c8563c758d9ef1c23c8.jpg) +Figure 1. Crisis Detection Rate Comparison. + +Emotional Stability quantified to what extent the model fluctuates while processing social network text. Physical stability: The higher the emotional stability, the better the model stabilizes emotional fluctuations arising from or driven by external factors or occasions, which can be more accurately represented as changes in user emotions, and can be more effectively explained as induced by stable patterns of emotion and emotion. 
The results shown in figure 2 reveal an increase in positive/negative emotion stability of all models with the growth of text size, notably in the case of longer texts, and a sharp decline in emotional fluctuation. In particular, VADER fails on short texts, slows sensitivity to affective stability, and stays low in long text. Compared to VADER, we can see that the Bi-LSTM model achieves better emotional stability, but is still limited by its simple context modeling ability. Specifically, the stability of the BERT model is greatly increased with the growth of input text length, at least for longer texts which can effectively consider the context of the given sentences and results in diminished emotional fluctuations. The MML model has good emotional stability, and multimodal data can also enhance its stability. The Ours model showed the best performance across all tested text lengths, especially in the case of long text emotional stability, and by merging mental health knowledge refinement and transfer learning, our model could more in-depth capture the long-term trends of emotion, so as to achieve more stable emotion recognition. + +![](images/25b0b5cee37690373138f7619ecad3e60108c21e4cf1ae9a6d9534dca3151610.jpg) +Figure 2. Emotional Stability Comparison Across Text Lengths. + +Model performance in terms of each affective intensity (mild, moderate and strong affective ranges) is evaluated using the emotion depth distribution. + +![](images/0a1b97c4a657b21140220d7ce2614e20cdf8fced85bb72b5d0055cd0863388e1.jpg) +Figure 3. Emotional Depth Distribution Comparison Across Models. + +Through above Figure 3, we can intuitively see the differences in methods for different emotion recognition capabilities in the base of sentiment depth. 
The experimental results indicate that the Ours method performs significantly better than the other methods in the recognition of slight emotion intervals, which may be attributed to the use of a more fine-grained sentiment analysis mechanism that can capture the potential slight emotion signals in the social networks better. + +# 5 CONCLUSION + +In this paper, we focus on the intervention of crisis on social networks, and propose a method of text transfer recognition based on social network crisis intervention, based on the knowledge enhancement of mental health which driven by large language model and this is significantly improved the detection ability of the potential psychological crisis on social network by the combination of the advanced technologies of transfer learning and + +combining with the mental health field of special knowledge. A multi-level framework is proposed based on the existing model of BERT, which integrates sentiment analysis, behavior prediction and crisis intervention techniques, and effectively identifies the mild, moderate and strong emotional depth of potential crisis signals in the social media. The experimental results indicate that the Ours method outperformed traditional sentiment analysis models in critical indicators and performed well on the recognition of minor emotions, reflecting that it has flexibility and effectiveness under the flexible variation of emotions. They can also be introduced with various knowledge in the fields of mental health and multimodal information to complete the model, and will also optimize the modeling structure of the model. + +# REFERENCES + +[1] Bosco, Cristina, et al. "Detecting racial stereotypes: An Italian social media corpus where psychology meets NLP." Information Processing & Management 60.1 (2023): 103118. +[2] Pang, Patrick Cheong-Iao, et al. "Engagement of government social media on Facebook during the COVID-19 pandemic in Macao." 
International Journal of Environmental Research and Public Health 18.7 (2021): 3508. +[3] Muhammed T, Sadiq, and Saji K. Mathew. "The disaster of misinformation: a review of research in social media." International journal of data science and analytics 13.4 (2022): 271-285. +[4] Balaji, T. K., Chandra Sekhara Rao Annavarapu, and Annushree Bablani. "Machine learning algorithms for social media analysis: A survey." Computer Science Review 40 (2021): 100395. +[5] Weyrich, Philippe, et al. "Using serious games to evaluate the potential of social media information in early warning disaster management." International journal of disaster risk reduction 56 (2021): 102053. +[6] Chen, Long, Jianguo Chen, and Chunhe Xia. "Social network behavior and public opinion manipulation." Journal of Information Security and Applications 64 (2022): 103060. +[7] Drouhot, Lucas G., et al. "Computational approaches to migration and integration research: promises and challenges." Journal of Ethnic and Migration Studies 49.2 (2023): 389-407. +[8] Oktavianus, Jeffry, and Wan-Ying Lin. "Soliciting social support from migrant domestic workers' connections to storytelling networks during a public health crisis." Health Communication 38.6 (2023): 1179-1188. +[9] Bolhuis, Maarten P., and Joris Van Wijk. "Seeking asylum in the digital era: Social-media and mobile-device vetting in asylum procedures in five European countries." Journal of refugee studies 34.2 (2021): 1595-1617. +[10] Lv, Yang, et al. "Big data driven COVID-19 pandemic crisis management: potential approach for global health." Archives of Medical Science: AMS 17.3 (2021): 829. +[11] Jin, Xianlin, and Patric R. Spence. "Understanding crisis communication on social media with CERC: Topic model analysis of tweets about Hurricane Maria." Journal of Risk Research 24.10 (2021): 1266-1287. +[12] Wildemann, Sergej, Claudia Niederée, and Erick Elejalde. "Migration Reframed? 
A multilingual analysis on the stance shift in Europe during the Ukrainian crisis." Proceedings of the ACM Web Conference 2023. 2023. +[13] Li, K., Wang, J., Wu, X., Peng, X., Chang, R., Deng, X., Kang, Y., Yang, Y., Ni, F., & Hong, B. "Optimizing automated picking systems in warehouse robots using machine learning." arXiv preprint arXiv:2408.16633 (2024). +[14] Li, K., Chen, J., Yu, D., Tao, D., Qiu, X., Lian, J., Ji, R., Zhang, S., Wan, Z., Sun, B., et al. "Deep reinforcement learning-based obstacle avoidance for robot movement in warehouse environments." Proceedings of the 2024 IEEE 6th International Conference on Civil Aviation Safety and Information Technology (ICCASIT), (2024): 342-348. +[15] Li, K., Liu, L., Chen, J., Yu, D., Zhou, X., Li, M., Wang, C., & Li, Z. "Research on reinforcement learning based warehouse robot navigation algorithm in complex warehouse layout." Proceedings of the 2024 6th International Conference on Artificial Intelligence and Computer Applications (ICAICA) (2024): 296-301. +[16] Yu, D., Liu, L., Wu, S., Li, K., Wang, C., Xie, J., Chang, R., Wang, Y., Wang, Z., & Ji, R. "Machine learning optimizes the efficiency of picking and packing in automated warehouse robot systems." Proceedings of the 2024 International Conference on Computer Engineering, Network and Digital Communication (CENDC 2024) (2024). +[17] Sun, J., Zhang, S., Lian, J., Fu, L., Zhou, Z., & Fan, Z. "Multimodal Deep Learning for Crisis Intervention." + +Proceedings of the 2024 IEEE 7th International Conference (2024): 996-1004. +[18] Lyu, S. "Machine Vision-Based Automatic Detection for Electromechanical Equipment." Journal of Computer Technology and Applied Mathematics 1.4 (2024): 12-20. +[19] Lin, Weikun. "A Review of Multimodal Interaction Technologies in Virtual Meetings." Journal of Computer Technology and Applied Mathematics 1.4 (2024): 60-68. +[20] Wildemann, S., Niederée, C., & Elejalde, E. (2023, April). Migration Reframed? 
A multilingual analysis on the stance shift in Europe during the Ukrainian crisis. In Proceedings of the ACM Web Conference 2023 (pp. 2754-2764). +[21] Xu, X., Xu, Z., Yu, P., & Wang, J. (2025). Enhancing user intent for recommendation systems via large language models. arXiv preprint arXiv:2501.10871. +[22] Yu, P., Xu, Z., Wang, J., & Xu, X. (2025). The application of large language models in recommendation systems. arXiv preprint arXiv:2501.02178. +[23] Feng, H., & Gao, Y. (2025). Ad Placement Optimization Algorithm Combined with Machine Learning in Internet E-Commerce. Preprints. https://doi.org/10.20944/preprints202502.2167.v1 +[24] Wu, S., & Huang, X. (2025). Psychological Health Prediction Based on the Fusion of Structured and Unstructured Data in EHR: a Case Study of Low-Income Populations. Preprints. https://doi.org/10.20944/preprints202502.2104.v1 +[25] Wang, Z., Zhang, Q., & Cheng, Z. (2025). Application of AI in Real-time Credit Risk Detection. Preprints. https://doi.org/10.20944/preprints202502.1546.v1 \ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07983/images/0a1b97c4a657b21140220d7ce2614e20cdf8fced85bb72b5d0055cd0863388e1.jpg b/data/2025/2504_07xxx/2504.07983/images/0a1b97c4a657b21140220d7ce2614e20cdf8fced85bb72b5d0055cd0863388e1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2632dabea065e35e3ef024bfe39db19da2124751 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07983/images/0a1b97c4a657b21140220d7ce2614e20cdf8fced85bb72b5d0055cd0863388e1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4c7147b649b85273c5b40132b1cf771fe2cad99b6e11b232db8d62abfde448b2 +size 35229 diff --git a/data/2025/2504_07xxx/2504.07983/images/20e55ddcf0c43e08915fa09cfabd7348a1bfb3f40b1e697a504a191c129432fa.jpg b/data/2025/2504_07xxx/2504.07983/images/20e55ddcf0c43e08915fa09cfabd7348a1bfb3f40b1e697a504a191c129432fa.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..fb25f12f03501200fb039ddca1ec8f9ef67b9cba --- /dev/null +++ b/data/2025/2504_07xxx/2504.07983/images/20e55ddcf0c43e08915fa09cfabd7348a1bfb3f40b1e697a504a191c129432fa.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b486647fbe0480ef832968afb8cc90296ade1f916827c3b4c8b5ff7d09ac3cb3 +size 6953 diff --git a/data/2025/2504_07xxx/2504.07983/images/25b0b5cee37690373138f7619ecad3e60108c21e4cf1ae9a6d9534dca3151610.jpg b/data/2025/2504_07xxx/2504.07983/images/25b0b5cee37690373138f7619ecad3e60108c21e4cf1ae9a6d9534dca3151610.jpg new file mode 100644 index 0000000000000000000000000000000000000000..048ff42776c7c740f0c268d3243ce4579ca2ce76 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07983/images/25b0b5cee37690373138f7619ecad3e60108c21e4cf1ae9a6d9534dca3151610.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:88b009616cff5f13469e2f7eda73d4ff6a2674e692b0648a7bf4003b76ba93ef +size 37383 diff --git a/data/2025/2504_07xxx/2504.07983/images/5e30d7ecda98a559cdb00d482a749ff24b5be9df57d28c435cc3073388b950c3.jpg b/data/2025/2504_07xxx/2504.07983/images/5e30d7ecda98a559cdb00d482a749ff24b5be9df57d28c435cc3073388b950c3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dd2d125128e9929cced03ac8b4170254efd2d52c --- /dev/null +++ b/data/2025/2504_07xxx/2504.07983/images/5e30d7ecda98a559cdb00d482a749ff24b5be9df57d28c435cc3073388b950c3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c6a53608e3e0e5e479d0175f02d73471cfc282e2e27509d7cbd5df0404dc6a88 +size 7759 diff --git a/data/2025/2504_07xxx/2504.07983/images/66e7b77e78664f99ffebefdb861289fa0d3d61ae070f70da86afd07e7e89e666.jpg b/data/2025/2504_07xxx/2504.07983/images/66e7b77e78664f99ffebefdb861289fa0d3d61ae070f70da86afd07e7e89e666.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c4a25d633cf247c02b5475c8e47c0d62746f2057 --- /dev/null +++ 
b/data/2025/2504_07xxx/2504.07983/images/66e7b77e78664f99ffebefdb861289fa0d3d61ae070f70da86afd07e7e89e666.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3aa72498714b38478977f0ef35f909a804ce6a63a9366a9e47b93fe9c61583e3 +size 7180 diff --git a/data/2025/2504_07xxx/2504.07983/images/6bc7ad91c558637c38c18c9ed4ccddb64b153c243b341c8563c758d9ef1c23c8.jpg b/data/2025/2504_07xxx/2504.07983/images/6bc7ad91c558637c38c18c9ed4ccddb64b153c243b341c8563c758d9ef1c23c8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..64a8b5c35f594df876c5106ac629e1a2439bae29 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07983/images/6bc7ad91c558637c38c18c9ed4ccddb64b153c243b341c8563c758d9ef1c23c8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b932894ba86dcc627bc5b252eab349de6cec18010fb272fa1ee04b53d7f32cb +size 41341 diff --git a/data/2025/2504_07xxx/2504.07983/images/8fd28c5c3f8558b1df56525bef66d59d74bd15e9ad0e04152992e5db8065da2b.jpg b/data/2025/2504_07xxx/2504.07983/images/8fd28c5c3f8558b1df56525bef66d59d74bd15e9ad0e04152992e5db8065da2b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9957693b621050c45b865c9bd155f2ea19229764 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07983/images/8fd28c5c3f8558b1df56525bef66d59d74bd15e9ad0e04152992e5db8065da2b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:178d4de80cbdc10403b2b28c188653efc8b0a03dddc3a12eb54514cb05e5aef5 +size 4530 diff --git a/data/2025/2504_07xxx/2504.07983/images/a4c604ca19131d359c11d823f63a7b9c7661c4e0aebea950976b001e07dcd328.jpg b/data/2025/2504_07xxx/2504.07983/images/a4c604ca19131d359c11d823f63a7b9c7661c4e0aebea950976b001e07dcd328.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a79e722375f47da3263d7b5736e3f4f42ff2eff2 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07983/images/a4c604ca19131d359c11d823f63a7b9c7661c4e0aebea950976b001e07dcd328.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:8cd4ffee2cf3c9ec31298347a610ffab717f35289f86374701e6cb8cfd334bcc +size 5665 diff --git a/data/2025/2504_07xxx/2504.07983/images/d412b616b3265d71028916685ba2049c9ea4af47fdf20416f40db6cbde195563.jpg b/data/2025/2504_07xxx/2504.07983/images/d412b616b3265d71028916685ba2049c9ea4af47fdf20416f40db6cbde195563.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f6b8acd102d191d3cce00f756e834740ba6926e5 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07983/images/d412b616b3265d71028916685ba2049c9ea4af47fdf20416f40db6cbde195563.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b79d4320547f2f7e16396a6d7b00876deca1aeb6fa5a3c15cacce5269306c3fb +size 8309 diff --git a/data/2025/2504_07xxx/2504.07983/images/e376c93013f8ef8521c7e86c96c3966dd601dac23a67df546e4659939609b703.jpg b/data/2025/2504_07xxx/2504.07983/images/e376c93013f8ef8521c7e86c96c3966dd601dac23a67df546e4659939609b703.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e24e1078f23ecc4169eede1133de8c52cc96c717 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07983/images/e376c93013f8ef8521c7e86c96c3966dd601dac23a67df546e4659939609b703.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3eeffe84f310eff5295f481393cef4b7879bdf8884c69053b9bac3cc1bcb24cd +size 4024 diff --git a/data/2025/2504_07xxx/2504.07983/layout.json b/data/2025/2504_07xxx/2504.07983/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..ac527463a1df77b8d1ee4b5d94950c7e11487db2 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07983/layout.json @@ -0,0 +1,4270 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 69, + 91, + 451, + 124 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 91, + 451, + 124 + ], + "spans": [ + { + "bbox": [ + 69, + 91, + 451, + 124 + ], + "type": "text", + "content": "Psychological Health Knowledge-Enhanced LLM-based Social Network Crisis 
Intervention Text Transfer Recognition Method" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 131, + 117, + 141 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 131, + 117, + 141 + ], + "spans": [ + { + "bbox": [ + 69, + 131, + 117, + 141 + ], + "type": "text", + "content": "Shurui Wu *" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 150, + 357, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 150, + 357, + 163 + ], + "spans": [ + { + "bbox": [ + 69, + 150, + 357, + 163 + ], + "type": "text", + "content": "Weill Cornell Medicine, New York City, NY, USA, shuruiwu215@gmail.com" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 170, + 116, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 170, + 116, + 180 + ], + "spans": [ + { + "bbox": [ + 69, + 170, + 116, + 180 + ], + "type": "text", + "content": "Xinyi Huang" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 189, + 332, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 189, + 332, + 200 + ], + "spans": [ + { + "bbox": [ + 69, + 189, + 332, + 200 + ], + "type": "text", + "content": "University of Chicago, Chicago, IL, USA, bellaxinyihuang@gmail.com" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 206, + 111, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 206, + 111, + 217 + ], + "spans": [ + { + "bbox": [ + 69, + 206, + 111, + 217 + ], + "type": "text", + "content": "Dingxin Lu" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 228, + 408, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 228, + 408, + 239 + ], + "spans": [ + { + "bbox": [ + 69, + 228, + 408, + 239 + ], + "type": "text", + "content": "Icahn School of Medicine at Mount Sinai, New York, NY, USA, sydneylu1998@gmail.com" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 251, + 108, + 261 + ], + "type": 
"title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 251, + 108, + 261 + ], + "spans": [ + { + "bbox": [ + 69, + 251, + 108, + 261 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 68, + 273, + 513, + 405 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 273, + 513, + 405 + ], + "spans": [ + { + "bbox": [ + 68, + 273, + 513, + 405 + ], + "type": "text", + "content": "As the prevalence of mental health crises increases on social media platforms, how to effectively identify and deter potential harms has emerged as an urgent problem. To improve the detection ability of crisis-related content in social networks, this study proposes a large language model (LLM) text transfer recognition method for social network crisis intervention based on the enhancement of mental health knowledge that integrates mental health professional knowledge and transfer learning technology. We introduce a multi-level framework that employs transfer learning on a large language model BERT and integrates domain mental health knowledge, sentiment analysis as well as behavior prediction modeling techniques. This approach proposes a mental health annotation tool trained on social media datasets from crisis events, helping a large language model find potential language cues and then determine the presence of a psychological crisis and crisis acts. Experimental results indicate that the proposed model is superior to the traditional method in crisis detection accuracy, and demonstrate a greater sensitivity to underlying differences in context and emotion." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 413, + 132, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 413, + 132, + 423 + ], + "spans": [ + { + "bbox": [ + 69, + 413, + 132, + 423 + ], + "type": "text", + "content": "CCS CONCEPTS" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 434, + 408, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 434, + 408, + 445 + ], + "spans": [ + { + "bbox": [ + 70, + 434, + 408, + 445 + ], + "type": "text", + "content": "- Applied computing " + }, + { + "bbox": [ + 70, + 434, + 408, + 445 + ], + "type": "inline_equation", + "content": "\\sim" + }, + { + "bbox": [ + 70, + 434, + 408, + 445 + ], + "type": "text", + "content": " Life and medical sciences " + }, + { + "bbox": [ + 70, + 434, + 408, + 445 + ], + "type": "inline_equation", + "content": "\\sim" + }, + { + "bbox": [ + 70, + 434, + 408, + 445 + ], + "type": "text", + "content": " Health care information systems" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 454, + 115, + 464 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 454, + 115, + 464 + ], + "spans": [ + { + "bbox": [ + 69, + 454, + 115, + 464 + ], + "type": "text", + "content": "Keywords" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 68, + 475, + 512, + 500 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 475, + 512, + 500 + ], + "spans": [ + { + "bbox": [ + 68, + 475, + 512, + 500 + ], + "type": "text", + "content": "Psychological Health Knowledge, Large Language Models (LLMs), Crisis Intervention, Text Transfer Recognition, Transfer Learning" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 69, + 516, + 144, + 525 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 516, + 144, + 525 + ], + "spans": [ + { + "bbox": [ + 69, + 516, + 144, + 525 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 14 + }, + 
{ + "bbox": [ + 68, + 532, + 513, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 532, + 513, + 639 + ], + "spans": [ + { + "bbox": [ + 68, + 532, + 513, + 639 + ], + "type": "text", + "content": "At present, mental health issues are on the rise worldwide due to the proliferation of social media and internet-based platforms, a phenomenon extensively documented in recent studies on online emotional and psychological support during public health crises [18]. Over the last decade the explosive growth of social platforms like Facebook, Twitter, Instagram and TikTok has resulted in increased sharing of personal lives and emotional devastation on those platforms. It has helped many people not just find psychological comfort through social support, but also made mental health issues a public issue. According to the World Health Organization (WHO), mental health disorders have been one of the most important public health problems in the world, especially among young people and social media users, and the increase rate of mental health problems are significantly greater than" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 263, + 38, + 317, + 54 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 263, + 38, + 317, + 54 + ], + "spans": [ + { + "bbox": [ + 263, + 38, + 317, + 54 + ], + "type": "text", + "content": "ACM-1" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 90, + 510, + 129 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 90, + 510, + 129 + ], + "spans": [ + { + "bbox": [ + 68, + 90, + 510, + 129 + ], + "type": "text", + "content": "other groups. Such as mental health problems, including depression, anxiety, suicidal tendency, etc [1]. 
Overall discussion in the social platform is higher, and the early symptoms are more likely to be recessive and hidden, which also increases the difficulty of timely intervention and effective saving." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 131, + 511, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 131, + 511, + 236 + ], + "spans": [ + { + "bbox": [ + 68, + 131, + 511, + 236 + ], + "type": "text", + "content": "Feelings expressed in public, and especially in social networking, are still part of the changing trend of emotion in modern society. On an online social network, users can share their thoughts with any audience from the anonymity of his or her room, while in conventional face-to-face communication, users will be confronted with the expression of his or her counterpart and the tone of voice, etc., the core of which can be ignored, and the psychological distress of the victim. But this anonymity and virtuality is also what makes people so concealed yet complicating their experiences of psychological pain [2]. Many requests for help, crisis signals are camouflaged within confusing textual and nonverbal cues (mood swings, tone changes, rupture and contradiction in self-presentation, and other) That makes it especially challenging for the automation of potential crisis identification." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 239, + 511, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 239, + 511, + 357 + ], + "spans": [ + { + "bbox": [ + 68, + 239, + 511, + 357 + ], + "type": "text", + "content": "Expressions in social networks cover broader range of data such as text, images, videos, audio, and other forms of media. Yet text is by far the primary medium of communication throughout all of this, and in textual content including blogs, tweets, comments, and etc. Users can express their emotion, life challenges, and beliefs detailed. 
However, texts from social networks tend to be noisy and ambiguous. In many of these cases emotions are expressed indirectly, through subtle wording or humor, irony other expressions which makes it hard to gain the polarity, intensity, and shifting of emotions [3]. Furthermore, the emotions that are reflected on social networks by users on the platform are not necessarily consistent with the emotions in reality, and are susceptible to multifactor influences such as socio-cultural background, personal expression habits and platform use preferences, which increase the difficulty and complexity of emotion recognition and understanding [4]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 68, + 360, + 511, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 360, + 511, + 453 + ], + "spans": [ + { + "bbox": [ + 68, + 360, + 511, + 453 + ], + "type": "text", + "content": "As a result, social network crisis identification mode has many challenges in existing text analysis technology. There are existing methods for sentiment analysis and crisis recognition which mainly focus on the use of simple keyword matching or basic sentiment classification. These techniques largely miss the richness of emotional traits encoded in language and do not capture the nuances of implicit and complex mental health signals. For instance, some crisis behaviors like depression or suicidal tendencies might appear as slight changes in affect, subtle changes in language, or the user shutting down and avoiding interaction with them [5]. In traditional text analytics approaches, these nuances are often lost, which means we don't detect some signals of a crisis." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 455, + 511, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 455, + 511, + 601 + ], + "spans": [ + { + "bbox": [ + 68, + 455, + 511, + 601 + ], + "type": "text", + "content": "Moreover, despite certain advances in the current crisis intervention system to some degree, most of them were still based on shallow sentiment analysis or keyword-matching methods. And while sentiment analysis techniques can determine the emotional orientation (positive and negative emotions) of text, they generally do not take into account the temporal variation of emotions, contextual differences, and individual variations (situational expressions) in emotional expression. For example, while users pretend to be happy when they are actually sad and inject humor or sarcasm into their expressions, traditional sentiment analysis algorithms very often have a poor accuracy in this case [6]. Existing methods of sentiment analysis are usually far from sufficient when it comes to more complex scenarios like the interlacing of many different emotional states. In judging complex and dynamic emotional and psychological crises in the social network, the existing system cannot be better understood, and there is a lack of sufficient in-depth understanding and situational judgment ability which leads to their effect of intervention can only fit into, the effect is far less than expected [7]." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 603, + 511, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 603, + 511, + 642 + ], + "spans": [ + { + "bbox": [ + 68, + 603, + 511, + 642 + ], + "type": "text", + "content": "Based on all the above consideration, through the integration of mental health knowledge for the social network text transfer recognition method of crisis intervention, this paper puts forward a transfer recognition method for crisis intervention text based on large language model (LLM). It is based on a combination of domain knowledge" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 263, + 38, + 317, + 54 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 263, + 38, + 317, + 54 + ], + "spans": [ + { + "bbox": [ + 263, + 38, + 317, + 54 + ], + "type": "text", + "content": "ACM-2" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 90, + 511, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 90, + 511, + 156 + ], + "spans": [ + { + "bbox": [ + 68, + 90, + 511, + 156 + ], + "type": "text", + "content": "in the field of mental health and transfer learning capabilities to improve early detection of a crisis signal in social networks using a multi-level framework. It can not only realize the sensitive mining of emotional fluctuation in social network text, but also through transfer learning technology, transfer the model to the language dynamic environment, to better realize the subtle emotional fluctuation in environmental perception text, language implicit representation, tongue difference in individual social network in the crisis recognition process." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 171, + 144, + 182 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 171, + 144, + 182 + ], + "spans": [ + { + "bbox": [ + 69, + 171, + 144, + 182 + ], + "type": "text", + "content": "2 RELATED WORK" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 188, + 512, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 188, + 512, + 281 + ], + "spans": [ + { + "bbox": [ + 68, + 188, + 512, + 281 + ], + "type": "text", + "content": "Oktavianus and Lin [8] add to this literature by examining migrant domestic workers who seek social support through social networks during a public health crisis. Studies on the emotional support and coping strategies of temporary migrants in crises such as the pandemic. The study explores how immigrant populations use social media to create emotional support and increase social connections, by analysing storytelling and community interactions across the social media landscape. Indeed, the research points out that, during times of crisis, social media serves to provide mental health support to vulnerable populations, as cyberspace can serve as a cauldron to find belongingness and security." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 68, + 282, + 512, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 282, + 512, + 388 + ], + "spans": [ + { + "bbox": [ + 68, + 282, + 512, + 388 + ], + "type": "text", + "content": "The study by Bolhuis and Wijk [9] explores the use of social media and mobile devices to assist with asylum processes in five European countries, including the review of immigration applications. The research highlights how migrants and asylum seekers seek to reach the outside world through social media platforms and mobile devices in the digital age, and examines how immigration authorities screen – by checking social media activity and content. 
In the case of migration management and crisis response, the study draws attention to the critical need for a media tool to be integrated into government life as well as highlight how social media be utilized as a bi-directional information shipper during times of panic or crisis — not only within the public domain of health crises or emergencies, but also in the wider context of social connection and interest." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 390, + 512, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 390, + 512, + 483 + ], + "spans": [ + { + "bbox": [ + 68, + 390, + 512, + 483 + ], + "type": "text", + "content": "Furthermore, Lv et al. [10] said that big data has the potential to help in the crisis management of the COVID-19 pandemic. They explain that their study shows the use of social media as a source of big data during the pandemic, which will help with identifying the source of the infection and examining the emotional response surrounding events. The researchers emphasize the importance of social media as an essential tool to shape public health in terms of social media, text mining, predicting analytics, and social network analytics. The study demonstrates that big data technology can be applied in crisis management particularly in global health crises (epidemics) to ensure predictive outcomes of population behaviour, emotional trends and health response." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 485, + 512, + 564 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 485, + 512, + 564 + ], + "spans": [ + { + "bbox": [ + 68, + 485, + 512, + 564 + ], + "type": "text", + "content": "Jin and Spence analyzed Twitter [11] tweets using the CERC (Crisis and Emergency Risk Communication) model. 
Using thematic modeling, the study explores social media's dissemination of information as well as its organization of crisis communication and public reactions to crisis management in the wake of a disaster. Through this analysis of tweets surrounding Harvey Maria, it showcases the various affairs of social media users on how people voice their emotions, fears, anger and confusion, all the while noting the different matters that platforms serve during crises, including information dissemination, emotional expression and public emotion management." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 68, + 566, + 512, + 645 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 566, + 512, + 645 + ], + "spans": [ + { + "bbox": [ + 68, + 566, + 512, + 645 + ], + "type": "text", + "content": "Wildemann et al. [12] applied large-scale social media text analytics to discover movement changes by applying sentiment analysis techniques to unveil the intricacy of emotional standpoints on migration-related narratives presented on social media. This is very consistent with research into interventions for mental health crises as immigrant groups have their mental health affected in public crises disproportionately. Changes in public emotion and attitudes on social media might indicate potential mental health risks. 
That is, negative emotions such as anxiety, fear, and anger may be related to a negative attitude toward refugees expressed by social media users, which may" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 263, + 38, + 317, + 54 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 263, + 38, + 317, + 54 + ], + "spans": [ + { + "bbox": [ + 263, + 38, + 317, + 54 + ], + "type": "text", + "content": "ACM-3" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 90, + 511, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 90, + 511, + 156 + ], + "spans": [ + { + "bbox": [ + 68, + 90, + 511, + 156 + ], + "type": "text", + "content": "indicate an emotional crisis in social platforms. This emotional fluctuation can hinder in performing timely mental health intervention which is worth considering, so that we can apply sentiment analysis techniques to capture these signals of hiding mental crises. Recent advancements in machine learning algorithms, particularly deep reinforcement learning methods, have provided promising directions for enhancing social media crisis recognition and intervention methods [19]." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 158, + 512, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 158, + 512, + 224 + ], + "spans": [ + { + "bbox": [ + 68, + 158, + 512, + 224 + ], + "type": "text", + "content": "At the same time, recent works have highlighted the potential of Large Language Models (LLMs) in enhancing user intent modeling and adaptive recommendation, particularly in high-noise and emotionally charged environments like social media. 
Studies have shown that LLM-based frameworks can dynamically model user intent and effectively process unstructured data such as comments and posts—capabilities that are especially relevant for understanding psychological distress signals in crisis intervention tasks [20-21]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 239, + 151, + 248 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 239, + 151, + 248 + ], + "spans": [ + { + "bbox": [ + 69, + 239, + 151, + 248 + ], + "type": "text", + "content": "3 METHODOLOGIES" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 262, + 162, + 273 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 262, + 162, + 273 + ], + "spans": [ + { + "bbox": [ + 69, + 262, + 162, + 273 + ], + "type": "text", + "content": "3.1 Sentiment Analysis" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 278, + 511, + 330 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 278, + 511, + 330 + ], + "spans": [ + { + "bbox": [ + 68, + 278, + 511, + 330 + ], + "type": "text", + "content": "The core purpose of the Mental Health Knowledge module is to effectively integrate expertise in the field of mental health into large language models in order to enhance the sensitivity of the models to crisis-related emotions and behaviors. We complement the generic semantic representation of the BERT model by introducing mental health embedding vectors, for which we innovatively propose the following Equation 1." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 181, + 331, + 509, + 346 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 181, + 331, + 509, + 346 + ], + "spans": [ + { + "bbox": [ + 181, + 331, + 509, + 346 + ], + "type": "interline_equation", + "content": "E _ {t o t a l} \\left(x _ {i}\\right) = E _ {B E R T} \\left(x _ {i}\\right) + \\lambda_ {1} \\cdot \\text {s o f t m a x} \\left(W _ {p h} \\cdot E _ {p h} \\left(x _ {i}\\right)\\right), \\tag {1}", + "image_path": "66e7b77e78664f99ffebefdb861289fa0d3d61ae070f70da86afd07e7e89e666.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 68, + 348, + 511, + 454 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 348, + 511, + 454 + ], + "spans": [ + { + "bbox": [ + 68, + 348, + 511, + 454 + ], + "type": "text", + "content": "Where " + }, + { + "bbox": [ + 68, + 348, + 511, + 454 + ], + "type": "inline_equation", + "content": "E_{BERT}(x_i)" + }, + { + "bbox": [ + 68, + 348, + 511, + 454 + ], + "type": "text", + "content": " is the word vector generated by BERT, " + }, + { + "bbox": [ + 68, + 348, + 511, + 454 + ], + "type": "inline_equation", + "content": "E_{ph}(x_i)" + }, + { + "bbox": [ + 68, + 348, + 511, + 454 + ], + "type": "text", + "content": " is the word embedding of mental health knowledge, " + }, + { + "bbox": [ + 68, + 348, + 511, + 454 + ], + "type": "inline_equation", + "content": "W_{ph}" + }, + { + "bbox": [ + 68, + 348, + 511, + 454 + ], + "type": "text", + "content": " is the mapping matrix, and " + }, + { + "bbox": [ + 68, + 348, + 511, + 454 + ], + "type": "inline_equation", + "content": "\\lambda_1" + }, + { + "bbox": [ + 68, + 348, + 511, + 454 + ], + "type": "text", + "content": " is the weight hyperparameter of adjusting the embedding of mental health knowledge. 
The softmax operation in the formula aims to normalize the mental health knowledge vector in order to better integrate it with the original BERT embedding vector and ensure that the knowledge in the mental health domain occupies an appropriate proportion in the enhanced vector representation. The innovation of this method is that we not only combine mental health knowledge into BERT through linear mapping, but also normalize it through softmax operation, which can more precisely control the influence of knowledge embedding, and make the identification of sentiment analysis and crisis behavior more sensitive and accurate." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 68, + 457, + 511, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 457, + 511, + 521 + ], + "spans": [ + { + "bbox": [ + 68, + 457, + 511, + 521 + ], + "type": "text", + "content": "The goal of the sentiment analysis module is to identify potential crisis sentiments through an in-depth analysis of sentiment fluctuations in the text. In order to enhance the performance of sentiment analysis, we propose a Multidimensional sentiment Convolutional Network (MSCN), which can not only identify the polarity of sentiment, but also capture the amplitude and frequency of sentiment changes. 
We use the combination of Convolutional Neural Network (CNN) and LSTM to propose the following Equation 2:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 192, + 521, + 510, + 552 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 192, + 521, + 510, + 552 + ], + "spans": [ + { + "bbox": [ + 192, + 521, + 510, + 552 + ], + "type": "interline_equation", + "content": "S (X) = L S T M \\left(C N N (X)\\right) = \\sum_ {t = 1} ^ {n} C _ {t} \\cdot R e L U \\left(W _ {s} \\cdot E _ {t}\\right), \\tag {2}", + "image_path": "d412b616b3265d71028916685ba2049c9ea4af47fdf20416f40db6cbde195563.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 68, + 554, + 511, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 554, + 511, + 620 + ], + "spans": [ + { + "bbox": [ + 68, + 554, + 511, + 620 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 68, + 554, + 511, + 620 + ], + "type": "inline_equation", + "content": "C_t" + }, + { + "bbox": [ + 68, + 554, + 511, + 620 + ], + "type": "text", + "content": " is the affective convolutional kernel, " + }, + { + "bbox": [ + 68, + 554, + 511, + 620 + ], + "type": "inline_equation", + "content": "W_s" + }, + { + "bbox": [ + 68, + 554, + 511, + 620 + ], + "type": "text", + "content": " is the weight matrix of the convolutional layer, and " + }, + { + "bbox": [ + 68, + 554, + 511, + 620 + ], + "type": "inline_equation", + "content": "ReLU(\\cdot)" + }, + { + "bbox": [ + 68, + 554, + 511, + 620 + ], + "type": "text", + "content": " is the activation function, and " + }, + { + "bbox": [ + 68, + 554, + 511, + 620 + ], + "type": "inline_equation", + "content": "E_t" + }, + { + "bbox": [ + 68, + 554, + 511, + 620 + ], + "type": "text", + "content": " is the word vector. Here, we extract local sentiment features through convolution operations, and then model the sentiment information globally through LSTM to capture the temporal changes of sentiment. 
The innovation of using convolutional layers lies in its ability to effectively identify local features of emotions (such as emotional fluctuations between words), which is especially important for crisis recognition." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 68, + 622, + 511, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 622, + 511, + 647 + ], + "spans": [ + { + "bbox": [ + 68, + 622, + 511, + 647 + ], + "type": "text", + "content": "In order to further improve the accuracy of sentiment analysis, we add an emotion adaptive module to the output sentiment representation, which weights the sentiment intensity according to the context, as shown in Equation 3:" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 263, + 38, + 317, + 53 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 263, + 38, + 317, + 53 + ], + "spans": [ + { + "bbox": [ + 263, + 38, + 317, + 53 + ], + "type": "text", + "content": "ACM-4" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 238, + 89, + 510, + 102 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 238, + 89, + 510, + 102 + ], + "spans": [ + { + "bbox": [ + 238, + 89, + 510, + 102 + ], + "type": "interline_equation", + "content": "S _ {\\text {a d a p t i v e}} = S (X) \\odot A (X), \\tag {3}", + "image_path": "e376c93013f8ef8521c7e86c96c3966dd601dac23a67df546e4659939609b703.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 104, + 510, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 104, + 510, + 128 + ], + "spans": [ + { + "bbox": [ + 68, + 104, + 510, + 128 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 68, + 104, + 510, + 128 + ], + "type": "inline_equation", + "content": "\\odot" + }, + { + "bbox": [ + 68, + 104, + 510, + 128 + ], + "type": "text", + 
"content": " represents element-by-element multiplication, and " + }, + { + "bbox": [ + 68, + 104, + 510, + 128 + ], + "type": "inline_equation", + "content": "A(X)" + }, + { + "bbox": [ + 68, + 104, + 510, + 128 + ], + "type": "text", + "content": " is the adaptive weighted vector of affective intensity, which is calculated as shown in Equation 4:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 231, + 129, + 510, + 142 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 231, + 129, + 510, + 142 + ], + "spans": [ + { + "bbox": [ + 231, + 129, + 510, + 142 + ], + "type": "interline_equation", + "content": "A (X) = \\operatorname {s o f t m a x} \\left(W _ {a} \\cdot S (X)\\right). \\tag {4}", + "image_path": "8fd28c5c3f8558b1df56525bef66d59d74bd15e9ad0e04152992e5db8065da2b.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 68, + 144, + 511, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 144, + 511, + 182 + ], + "spans": [ + { + "bbox": [ + 68, + 144, + 511, + 182 + ], + "type": "text", + "content": "The innovation of the adaptive weighting mechanism is that it dynamically adjusts the weight of emotion intensity through the softmax function, so that the emotion intensity can be more reasonably explained in different contexts, so as to improve the sensitivity of crisis emotion." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 194, + 248, + 205 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 194, + 248, + 205 + ], + "spans": [ + { + "bbox": [ + 69, + 194, + 248, + 205 + ], + "type": "text", + "content": "3.2 Behavior Prediction and Transfer Learning" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 211, + 511, + 276 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 211, + 511, + 276 + ], + "spans": [ + { + "bbox": [ + 68, + 211, + 511, + 276 + ], + "type": "text", + "content": "The behavior prediction module is used to predict potential crisis behaviors (such as suicide, violence, etc.) based on the user's social network behavior. To this end, we propose a behavior prediction model based on Graph Neural Network (GNN). Different from the traditional Graph Convolution Network (GCN), we introduce a Hierarchical Graph Convolution (HGC) strategy, which enables the network to capture the relationship between nodes (users) in the social network at different levels." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 68, + 278, + 510, + 303 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 278, + 510, + 303 + ], + "spans": [ + { + "bbox": [ + 68, + 278, + 510, + 303 + ], + "type": "text", + "content": "First, we define the adjacency matrix of the social network as " + }, + { + "bbox": [ + 68, + 278, + 510, + 303 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 68, + 278, + 510, + 303 + ], + "type": "text", + "content": ", and construct the propagation formula of the hierarchical graph convolution, as shown in Equation 5:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 214, + 303, + 510, + 319 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 214, + 303, + 510, + 319 + ], + "spans": [ + { + "bbox": [ + 214, + 303, + 510, + 319 + ], + "type": "interline_equation", + "content": "H _ {v} ^ {(k + 1)} = \\sigma \\left(A _ {v} ^ {(k)} \\cdot H _ {v} ^ {(k)} \\cdot W _ {v} ^ {(k)} + B _ {v} ^ {(k)}\\right), \\tag {5}", + "image_path": "a4c604ca19131d359c11d823f63a7b9c7661c4e0aebea950976b001e07dcd328.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 68, + 320, + 511, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 320, + 511, + 400 + ], + "spans": [ + { + "bbox": [ + 68, + 320, + 511, + 400 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 68, + 320, + 511, + 400 + ], + "type": "inline_equation", + "content": "H_{v}^{(k)}" + }, + { + "bbox": [ + 68, + 320, + 511, + 400 + ], + "type": "text", + "content": " represents the node of the " + }, + { + "bbox": [ + 68, + 320, + 511, + 400 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 68, + 320, + 511, + 400 + ], + "type": "text", + "content": "-th layer, " + }, + { + "bbox": [ + 68, + 320, + 511, + 400 + ], + "type": "inline_equation", + "content": "A_{v}^{(k)}" + }, + { + "bbox": [ + 68, + 320, + 511, + 400 + ], + "type": 
"text", + "content": " is the adjacency matrix of the " + }, + { + "bbox": [ + 68, + 320, + 511, + 400 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 68, + 320, + 511, + 400 + ], + "type": "text", + "content": "-th layer, " + }, + { + "bbox": [ + 68, + 320, + 511, + 400 + ], + "type": "inline_equation", + "content": "W_{v}^{(k)}" + }, + { + "bbox": [ + 68, + 320, + 511, + 400 + ], + "type": "text", + "content": " is the weight matrix of the convolutional layer, " + }, + { + "bbox": [ + 68, + 320, + 511, + 400 + ], + "type": "inline_equation", + "content": "B_{v}^{(k)}" + }, + { + "bbox": [ + 68, + 320, + 511, + 400 + ], + "type": "text", + "content": " is the bias term, and " + }, + { + "bbox": [ + 68, + 320, + 511, + 400 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 68, + 320, + 511, + 400 + ], + "type": "text", + "content": " is the activation function. Different from traditional GCN, we can capture more precise behavior patterns in different social network layers by introducing a hierarchical propagation mechanism to control the range of information transmission in each layer. Prior research has demonstrated the effectiveness of hierarchical propagation mechanisms in capturing complex patterns of social behaviors [13-14]." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 68, + 403, + 511, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 403, + 511, + 441 + ], + "spans": [ + { + "bbox": [ + 68, + 403, + 511, + 441 + ], + "type": "text", + "content": "We further propose a Behavior Prediction Reinforcement Module (BPRM) to adjust the weight of behavior prediction through reinforcement learning strategies. 
Specifically, we set up a reward function to optimize the accuracy of behavioral predictions, as shown in Equation 6:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 176, + 441, + 510, + 456 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 176, + 441, + 510, + 456 + ], + "spans": [ + { + "bbox": [ + 176, + 441, + 510, + 456 + ], + "type": "interline_equation", + "content": "R \\left(H _ {v} ^ {(k)}\\right) = \\lambda_ {1} \\cdot \\text {P r e c i s i o n} + \\lambda_ {2} \\cdot \\text {R e c a l l} + \\lambda_ {3} \\cdot F 1 - S c o r e. \\tag {6}", + "image_path": "20e55ddcf0c43e08915fa09cfabd7348a1bfb3f40b1e697a504a191c129432fa.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 68, + 459, + 511, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 459, + 511, + 498 + ], + "spans": [ + { + "bbox": [ + 68, + 459, + 511, + 498 + ], + "type": "text", + "content": "The reinforcement learning module maximizes the overall prediction accuracy by dynamically adjusting the weights of the graph convolutional layer, thereby improving the prediction ability of crisis behavior. Similar dynamic adjustment methods have shown effectiveness in recent literature [15]." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 68, + 500, + 511, + 565 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 500, + 511, + 565 + ], + "spans": [ + { + "bbox": [ + 68, + 500, + 511, + 565 + ], + "type": "text", + "content": "In the framework of transfer learning, we jointly train the pre-trained model of BERT with the above modules of mental health knowledge, sentiment analysis, and behavior prediction. By fine-tuning the network parameters, we were able to adapt the model to the linguistic dynamics in a particular social network. 
To this end, we propose a multi-task loss function, which combines the categorical loss, emotion-predicted loss, and behavior-predicted loss of crisis content, as shown in Equation 7:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 145, + 567, + 510, + 578 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 567, + 510, + 578 + ], + "spans": [ + { + "bbox": [ + 145, + 567, + 510, + 578 + ], + "type": "interline_equation", + "content": "\\mathcal {L} = \\lambda_ {1} \\cdot \\mathcal {L} _ {\\text {c l a s s i f i c a t i o n}} + \\lambda_ {2} \\cdot \\mathcal {L} _ {\\text {e m o t i o n}} + \\lambda_ {3} \\cdot \\mathcal {L} _ {\\text {b e h a v i o r}} + \\lambda_ {4} \\cdot \\mathcal {L} _ {\\text {r e i n f o r c e m e n t}}, \\tag {7}", + "image_path": "5e30d7ecda98a559cdb00d482a749ff24b5be9df57d28c435cc3073388b950c3.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 68, + 581, + 511, + 633 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 581, + 511, + 633 + ], + "spans": [ + { + "bbox": [ + 68, + 581, + 511, + 633 + ], + "type": "text", + "content": "A multi-level framework is proposed based on the existing model of BERT, which integrates sentiment analysis, behavior prediction, and crisis intervention techniques, effectively identifying crisis signals from noisy social network data. Recent studies have validated the effectiveness of transfer learning for recognizing crisis signals in social networks [16][17]. 
Other relevant works have also explored the fusion of structured and unstructured EHR" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 263, + 38, + 317, + 53 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 263, + 38, + 317, + 53 + ], + "spans": [ + { + "bbox": [ + 263, + 38, + 317, + 53 + ], + "type": "text", + "content": "ACM-5" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 90, + 509, + 114 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 90, + 509, + 114 + ], + "spans": [ + { + "bbox": [ + 67, + 90, + 509, + 114 + ], + "type": "text", + "content": "data for psychological prediction [24], real-time optimization in recommendation and intervention settings [22-23], and AI-based risk assessment frameworks with high adaptability to emotional shifts [25]." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 117, + 511, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 117, + 511, + 182 + ], + "spans": [ + { + "bbox": [ + 67, + 117, + 511, + 182 + ], + "type": "text", + "content": "Our transfer learning approach significantly enhances model robustness and adaptability to noisy environments, which has been similarly demonstrated in other applications [15]. The innovation of this loss function is the introduction of " + }, + { + "bbox": [ + 67, + 117, + 511, + 182 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\text {reinforcement }}" + }, + { + "bbox": [ + 67, + 117, + 511, + 182 + ], + "type": "text", + "content": ", the loss term of reinforcement learning, to optimize the training process of the behavior prediction module. Through the strategy of multi-task learning, the model is able to balance the losses of different tasks during the training process, so as to achieve more accurate crisis identification." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 198, + 137, + 209 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 198, + 137, + 209 + ], + "spans": [ + { + "bbox": [ + 69, + 198, + 137, + 209 + ], + "type": "text", + "content": "4 EXPERIMENTS" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 222, + 164, + 232 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 222, + 164, + 232 + ], + "spans": [ + { + "bbox": [ + 69, + 222, + 164, + 232 + ], + "type": "text", + "content": "4.1 Experimental Setup" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 238, + 511, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 238, + 511, + 344 + ], + "spans": [ + { + "bbox": [ + 67, + 238, + 511, + 344 + ], + "type": "text", + "content": "In this experiment, we employed the Crisis Text Line dataset which was modeled after a real mental health hotline and included tens of thousands of conversations between users and counselors that encompassed different psychological crisis events. Data has the characteristics of diversity and complexity of emotional expression, real-time and dynamic changes, text length difference, and hidden crisis signal. The data was preprocessed including text cleaning, sentiment annotation, and segmentation before fitting into the experimental model. The dataset offers essential emotional data for the automatic recognition and intervention of mental health crises, which spurred a big challenge, including the approach to identify implicit emotions and crisis-behaviours, and extract efficient emotional cues in long texts and multi-round conversations." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 346, + 511, + 494 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 346, + 511, + 494 + ], + "spans": [ + { + "bbox": [ + 67, + 346, + 511, + 494 + ], + "type": "text", + "content": "To verify the effectiveness of the text transfer recognition method proposed for social network crisis intervention based on improvement of mental health knowledge, we selected four existing mainstream methods for comparative experiments: 1) Valence Aware Dictionary and sEntiment Reasoner (VADER), a sentiment classification method based on sentiment dictionary, which is suitable for basic sentiment analysis, but has limitations in identifying complex or obscure emotions; 2) Bidirectional Long Short-Term Memory(Bi-LSTM), an emotion classification method based on deep learning, can capture text context information more accurately, but it is still insufficient for the recognition of hidden psychological crisis signals. 3) Bidirectional Encoder Representations from Transformers (BERT) is a sentiment recognition method based on transfer learning and has a strong ability to understand context, but the computational cost is relatively large. 4) MML (Multimodal Learning): uses multimodal learning methods, combined using multi-source data such as text and images to improve the recognition accuracy, but the requirements for computing resource and data are higher." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 505, + 173, + 516 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 505, + 173, + 516 + ], + "spans": [ + { + "bbox": [ + 69, + 505, + 173, + 516 + ], + "type": "text", + "content": "4.2 Experimental Analysis" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 521, + 511, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 521, + 511, + 615 + ], + "spans": [ + { + "bbox": [ + 67, + 521, + 511, + 615 + ], + "type": "text", + "content": "The performance of various methods in successfully discovering potential psychological crises in the social network circumstance can be evaluated by one feature index, namely the Crisis Detection Rate (CDR). The above dataset is trained from 0 to 10,000 and the memory is set to 7 days. As the results in Figure 1 show, with the increase of the training period, the recognition ability of VADER and Bi-LSTM is improved, but overall the performance remains relatively flat, and finally tends to stabilize, and the performance is limited. The recognition rate of BERT and MML models is effective, and training is continuously improved to gradually enhance the ability to identify the crisis. Since the models are trained on these datasets, they cannot be trained on data after October 2023."
+ } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 264, + 38, + 317, + 54 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 264, + 38, + 317, + 54 + ], + "spans": [ + { + "bbox": [ + 264, + 38, + 317, + 54 + ], + "type": "text", + "content": "ACM-6" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 147, + 87, + 431, + 257 + ], + "blocks": [ + { + "bbox": [ + 147, + 87, + 431, + 257 + ], + "lines": [ + { + "bbox": [ + 147, + 87, + 431, + 257 + ], + "spans": [ + { + "bbox": [ + 147, + 87, + 431, + 257 + ], + "type": "image", + "image_path": "6bc7ad91c558637c38c18c9ed4ccddb64b153c243b341c8563c758d9ef1c23c8.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 209, + 268, + 370, + 280 + ], + "lines": [ + { + "bbox": [ + 209, + 268, + 370, + 280 + ], + "spans": [ + { + "bbox": [ + 209, + 268, + 370, + 280 + ], + "type": "text", + "content": "Figure 1. Crisis Detection Rate Comparison." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 290, + 513, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 290, + 513, + 478 + ], + "spans": [ + { + "bbox": [ + 68, + 290, + 513, + 478 + ], + "type": "text", + "content": "Emotional Stability quantified to what extent the model fluctuates while processing social network text. Physical stability: The higher the emotional stability, the better the model stabilizes emotional fluctuations arising from or driven by external factors or occasions, which can be more accurately represented as changes in user emotions, and can be more effectively explained as induced by stable patterns of emotion and emotion. 
The results shown in figure 2 reveal an increase in positive/negative emotion stability of all models with the growth of text size, notably in the case of longer texts, and a sharp decline in emotional fluctuation. In particular, VADER fails on short texts, slows sensitivity to affective stability, and stays low in long text. Compared to VADER, we can see that the Bi-LSTM model achieves better emotional stability, but is still limited by its simple context modeling ability. Specifically, the stability of the BERT model is greatly increased with the growth of input text length, at least for longer texts which can effectively consider the context of the given sentences and results in diminished emotional fluctuations. The MML model has good emotional stability, and multimodal data can also enhance its stability. The Ours model showed the best performance across all tested text lengths, especially in the case of long text emotional stability, and by merging mental health knowledge refinement and transfer learning, our model could more in-depth capture the long-term trends of emotion, so as to achieve more stable emotion recognition." 
+ } + ] + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 264, + 38, + 317, + 53 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 264, + 38, + 317, + 53 + ], + "spans": [ + { + "bbox": [ + 264, + 38, + 317, + 53 + ], + "type": "text", + "content": "ACM-7" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 148, + 88, + 431, + 257 + ], + "blocks": [ + { + "bbox": [ + 148, + 88, + 431, + 257 + ], + "lines": [ + { + "bbox": [ + 148, + 88, + 431, + 257 + ], + "spans": [ + { + "bbox": [ + 148, + 88, + 431, + 257 + ], + "type": "image", + "image_path": "25b0b5cee37690373138f7619ecad3e60108c21e4cf1ae9a6d9534dca3151610.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 175, + 268, + 403, + 280 + ], + "lines": [ + { + "bbox": [ + 175, + 268, + 403, + 280 + ], + "spans": [ + { + "bbox": [ + 175, + 268, + 403, + 280 + ], + "type": "text", + "content": "Figure 2. Emotional Stability Comparison Across Text Lengths." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 290, + 511, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 290, + 511, + 315 + ], + "spans": [ + { + "bbox": [ + 69, + 290, + 511, + 315 + ], + "type": "text", + "content": "Model performance in terms of each affective intensity (mild, moderate and strong affective ranges) is evaluated using the emotion depth distribution." 
+ } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 160, + 322, + 421, + 475 + ], + "blocks": [ + { + "bbox": [ + 160, + 322, + 421, + 475 + ], + "lines": [ + { + "bbox": [ + 160, + 322, + 421, + 475 + ], + "spans": [ + { + "bbox": [ + 160, + 322, + 421, + 475 + ], + "type": "image", + "image_path": "0a1b97c4a657b21140220d7ce2614e20cdf8fced85bb72b5d0055cd0863388e1.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 165, + 487, + 414, + 498 + ], + "lines": [ + { + "bbox": [ + 165, + 487, + 414, + 498 + ], + "spans": [ + { + "bbox": [ + 165, + 487, + 414, + 498 + ], + "type": "text", + "content": "Figure 3. Emotional Depth Distribution Comparison Across Models." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 509, + 511, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 509, + 511, + 575 + ], + "spans": [ + { + "bbox": [ + 68, + 509, + 511, + 575 + ], + "type": "text", + "content": "Through above Figure 3, we can intuitively see the differences in methods for different emotion recognition capabilities in the base of sentiment depth. The experimental results indicate that the Ours method performs significantly better than the other methods in the recognition of slight emotion intervals, which may be attributed to the use of a more fine-grained sentiment analysis mechanism that can capture the potential slight emotion signals in the social networks better." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 590, + 135, + 601 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 590, + 135, + 601 + ], + "spans": [ + { + "bbox": [ + 69, + 590, + 135, + 601 + ], + "type": "text", + "content": "5 CONCLUSION" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 68, + 605, + 511, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 605, + 511, + 651 + ], + "spans": [ + { + "bbox": [ + 68, + 605, + 511, + 651 + ], + "type": "text", + "content": "In this paper, we focus on the intervention of crisis on social networks, and propose a method of text transfer recognition based on social network crisis intervention, based on the knowledge enhancement of mental health which driven by large language model and this is significantly improved the detection ability of the potential psychological crisis on social network by the combination of the advanced technologies of transfer learning and" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 263, + 38, + 317, + 53 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 263, + 38, + 317, + 53 + ], + "spans": [ + { + "bbox": [ + 263, + 38, + 317, + 53 + ], + "type": "text", + "content": "ACM-8" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 87, + 511, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 87, + 511, + 178 + ], + "spans": [ + { + "bbox": [ + 67, + 87, + 511, + 178 + ], + "type": "text", + "content": "combining with the mental health field of special knowledge. A multi-level framework is proposed based on the existing model of BERT, which integrates sentiment analysis, behavior prediction and crisis intervention techniques, and effectively identifies the mild, moderate and strong emotional depth of potential crisis signals in the social media. 
The experimental results indicate that the Ours method outperformed traditional sentiment analysis models in critical indicators and performed well on the recognition of minor emotions, reflecting that it has flexibility and effectiveness under the flexible variation of emotions. They can also be introduced with various knowledge in the fields of mental health and multimodal information to complete the model, and will also optimize the modeling structure of the model." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 194, + 119, + 203 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 194, + 119, + 203 + ], + "spans": [ + { + "bbox": [ + 70, + 194, + 119, + 203 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 207, + 512, + 647 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 70, + 207, + 511, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 207, + 511, + 229 + ], + "spans": [ + { + "bbox": [ + 70, + 207, + 511, + 229 + ], + "type": "text", + "content": "[1] Bosco, Cristina, et al. \"Detecting racial stereotypes: An Italian social media corpus where psychology meets NLP.\" Information Processing & Management 60.1 (2023): 103118." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 232, + 511, + 254 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 232, + 511, + 254 + ], + "spans": [ + { + "bbox": [ + 70, + 232, + 511, + 254 + ], + "type": "text", + "content": "[2] Pang, Patrick Cheong-Iao, et al. \"Engagement of government social media on Facebook during the COVID-19 pandemic in Macao.\" International Journal of Environmental Research and Public Health 18.7 (2021): 3508." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 256, + 512, + 277 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 256, + 512, + 277 + ], + "spans": [ + { + "bbox": [ + 70, + 256, + 512, + 277 + ], + "type": "text", + "content": "[3] Muhammed T, Sadiq, and Saji K. Mathew. \"The disaster of misinformation: a review of research in social media.\" International journal of data science and analytics 13.4 (2022): 271-285." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 280, + 512, + 301 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 280, + 512, + 301 + ], + "spans": [ + { + "bbox": [ + 70, + 280, + 512, + 301 + ], + "type": "text", + "content": "[4] Balaji, T. K., Chandra Sekhara Rao Annavarapu, and Annushree Bablani. \"Machine learning algorithms for social media analysis: A survey.\" Computer Science Review 40 (2021): 100395." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 304, + 512, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 304, + 512, + 326 + ], + "spans": [ + { + "bbox": [ + 70, + 304, + 512, + 326 + ], + "type": "text", + "content": "[5] Weyrich, Philippe, et al. \"Using serious games to evaluate the potential of social media information in early warning disaster management.\" International journal of disaster risk reduction 56 (2021): 102053." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 328, + 512, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 328, + 512, + 350 + ], + "spans": [ + { + "bbox": [ + 70, + 328, + 512, + 350 + ], + "type": "text", + "content": "[6] Chen, Long, Jianguo Chen, and Chunhe Xia. \"Social network behavior and public opinion manipulation.\" Journal of Information Security and Applications 64 (2022): 103060." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 352, + 512, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 352, + 512, + 373 + ], + "spans": [ + { + "bbox": [ + 70, + 352, + 512, + 373 + ], + "type": "text", + "content": "[7] Drouhot, Lucas G., et al. \"Computational approaches to migration and integration research: promises and challenges.\" Journal of Ethnic and Migration Studies 49.2 (2023): 389-407." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 376, + 512, + 398 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 376, + 512, + 398 + ], + "spans": [ + { + "bbox": [ + 70, + 376, + 512, + 398 + ], + "type": "text", + "content": "[8] Oktavianus, Jeffry, and Wan-Ying Lin. \"Soliciting social support from migrant domestic workers' connections to storytelling networks during a public health crisis.\" Health Communication 38.6 (2023): 1179-1188." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 400, + 512, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 400, + 512, + 422 + ], + "spans": [ + { + "bbox": [ + 70, + 400, + 512, + 422 + ], + "type": "text", + "content": "[9] Bolhuis, Maarten P., and Joris Van Wijk. \"Seeking asylum in the digital era: Social-media and mobile-device vetting in asylum procedures in five European countries.\" Journal of refugee studies 34.2 (2021): 1595-1617." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 425, + 512, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 425, + 512, + 446 + ], + "spans": [ + { + "bbox": [ + 70, + 425, + 512, + 446 + ], + "type": "text", + "content": "[10] Lv, Yang, et al. \"Big data driven COVID-19 pandemic crisis management: potential approach for global health.\" Archives of Medical Science: AMS 17.3 (2021): 829." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 70, + 449, + 512, + 471 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 449, + 512, + 471 + ], + "spans": [ + { + "bbox": [ + 70, + 449, + 512, + 471 + ], + "type": "text", + "content": "[11] Jin, Xianlin, and Patric R. Spence. \"Understanding crisis communication on social media with CERC: Topic model analysis of tweets about Hurricane Maria.\" Journal of Risk Research 24.10 (2021): 1266-1287." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 70, + 473, + 512, + 495 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 473, + 512, + 495 + ], + "spans": [ + { + "bbox": [ + 70, + 473, + 512, + 495 + ], + "type": "text", + "content": "[12] Wildemann, Sergej, Claudia Niederée, and Erick Elejalde. \"Migration Reframed? A multilingual analysis on the stance shift in Europe during the Ukrainian crisis.\" Proceedings of the ACM Web Conference 2023. 2023." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 70, + 497, + 512, + 519 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 497, + 512, + 519 + ], + "spans": [ + { + "bbox": [ + 70, + 497, + 512, + 519 + ], + "type": "text", + "content": "[13] Li, K., Wang, J., Wu, X., Peng, X., Chang, R., Deng, X., Kang, Y., Yang, Y., Ni, F., & Hong, B. \"Optimizing automated picking systems in warehouse robots using machine learning.\" arXiv preprint arXiv:2408.16633 (2024)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 70, + 521, + 512, + 553 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 521, + 512, + 553 + ], + "spans": [ + { + "bbox": [ + 70, + 521, + 512, + 553 + ], + "type": "text", + "content": "[14] Li, K., Chen, J., Yu, D., Tao, D., Qiu, X., Lian, J., Ji, R., Zhang, S., Wan, Z., Sun, B., et al. 
\"Deep reinforcement learning-based obstacle avoidance for robot movement in warehouse environments.\" Proceedings of the 2024 IEEE 6th International Conference on Civil Aviation Safety and Information Technology (ICCASIT), (2024): 342-348." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 70, + 555, + 512, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 555, + 512, + 588 + ], + "spans": [ + { + "bbox": [ + 70, + 555, + 512, + 588 + ], + "type": "text", + "content": "[15] Li, K., Liu, L., Chen, J., Yu, D., Zhou, X., Li, M., Wang, C., & Li, Z. \"Research on reinforcement learning based warehouse robot navigation algorithm in complex warehouse layout.\" Proceedings of the 2024 6th International Conference on Artificial Intelligence and Computer Applications (ICAICA) (2024): 296-301." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 70, + 590, + 512, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 590, + 512, + 633 + ], + "spans": [ + { + "bbox": [ + 70, + 590, + 512, + 633 + ], + "type": "text", + "content": "[16] Yu, D., Liu, L., Wu, S., Li, K., Wang, C., Xie, J., Chang, R., Wang, Y., Wang, Z., & Ji, R. \"Machine learning optimizes the efficiency of picking and packing in automated warehouse robot systems.\" Proceedings of the 2024 International Conference on Computer Engineering, Network and Digital Communication (CENDC 2024) (2024)." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 70, + 635, + 512, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 635, + 512, + 647 + ], + "spans": [ + { + "bbox": [ + 70, + 635, + 512, + 647 + ], + "type": "text", + "content": "[17] Sun, J., Zhang, S., Lian, J., Fu, L., Zhou, Z., & Fan, Z. 
\"Multimodal Deep Learning for Crisis Intervention.\"" + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 263, + 38, + 317, + 53 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 263, + 38, + 317, + 53 + ], + "spans": [ + { + "bbox": [ + 263, + 38, + 317, + 53 + ], + "type": "text", + "content": "ACM-9" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 70, + 87, + 511, + 312 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 88, + 87, + 390, + 99 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 87, + 390, + 99 + ], + "spans": [ + { + "bbox": [ + 88, + 87, + 390, + 99 + ], + "type": "text", + "content": "Proceedings of the 2024 IEEE 7th International Conference (2024): 996-1004." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 71, + 101, + 511, + 122 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 101, + 511, + 122 + ], + "spans": [ + { + "bbox": [ + 71, + 101, + 511, + 122 + ], + "type": "text", + "content": "[18] Lyu, S. \"Machine Vision-Based Automatic Detection for Electromechanical Equipment.\" Journal of Computer Technology and Applied Mathematics 1.4 (2024): 12-20." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 125, + 511, + 147 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 125, + 511, + 147 + ], + "spans": [ + { + "bbox": [ + 70, + 125, + 511, + 147 + ], + "type": "text", + "content": "[19] Lin, Weikun. \"A Review of Multimodal Interaction Technologies in Virtual Meetings.\" Journal of Computer Technology and Applied Mathematics 1.4 (2024): 60-68." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 149, + 511, + 181 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 149, + 511, + 181 + ], + "spans": [ + { + "bbox": [ + 70, + 149, + 511, + 181 + ], + "type": "text", + "content": "[20] Wildemann, S., Niederée, C., & Elejalde, E. (2023, April). Migration Reframed? A multilingual analysis on the stance shift in Europe during the Ukrainian crisis. In Proceedings of the ACM Web Conference 2023 (pp. 2754-2764)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 183, + 511, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 183, + 511, + 205 + ], + "spans": [ + { + "bbox": [ + 70, + 183, + 511, + 205 + ], + "type": "text", + "content": "[21] Xu, X., Xu, Z., Yu, P., & Wang, J. (2025). Enhancing user intent for recommendation systems via large language models. arXiv preprint arXiv:2501.10871." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 207, + 511, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 207, + 511, + 229 + ], + "spans": [ + { + "bbox": [ + 70, + 207, + 511, + 229 + ], + "type": "text", + "content": "[22] Yu, P., Xu, Z., Wang, J., & Xu, X. (2025). The application of large language models in recommendation systems. arXiv preprint arXiv:2501.02178." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 232, + 511, + 254 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 232, + 511, + 254 + ], + "spans": [ + { + "bbox": [ + 70, + 232, + 511, + 254 + ], + "type": "text", + "content": "[23] Feng, H., & Gao, Y. (2025). Ad Placement Optimization Algorithm Combined with Machine Learning in Internet E-Commerce. Preprints. 
https://doi.org/10.20944/preprints202502.2167.v1" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 256, + 511, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 256, + 511, + 289 + ], + "spans": [ + { + "bbox": [ + 70, + 256, + 511, + 289 + ], + "type": "text", + "content": "[24] Wu, S., & Huang, X. (2025). Psychological Health Prediction Based on the Fusion of Structured and Unstructured Data in EHR: a Case Study of Low-Income Populations. Preprints. https://doi.org/10.20944/preprints202502.2104.v1" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 291, + 511, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 291, + 511, + 312 + ], + "spans": [ + { + "bbox": [ + 70, + 291, + 511, + 312 + ], + "type": "text", + "content": "[25] Wang, Z., Zhang, Q., & Cheng, Z. (2025). Application of AI in Real-time Credit Risk Detection. Preprints. https://doi.org/10.20944/preprints202502.1546.v1" + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 258, + 38, + 321, + 54 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 258, + 38, + 321, + 54 + ], + "spans": [ + { + "bbox": [ + 258, + 38, + 321, + 54 + ], + "type": "text", + "content": "ACM-10" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file