Add Batch fd9e6ba1-15f8-4cc4-8d0f-60948efa8fa7
This view is limited to 50 files because it contains too many changes. See raw diff.
- ICML/2025/Zero-Shot Cyclic Peptide Design via Composable Geometric Constraints/69aa88c6-fb8d-40f7-9781-6d3ee96fc40e_content_list.json +3 -0
- ICML/2025/Zero-Shot Cyclic Peptide Design via Composable Geometric Constraints/69aa88c6-fb8d-40f7-9781-6d3ee96fc40e_model.json +3 -0
- ICML/2025/Zero-Shot Cyclic Peptide Design via Composable Geometric Constraints/69aa88c6-fb8d-40f7-9781-6d3ee96fc40e_origin.pdf +3 -0
- ICML/2025/Zero-Shot Cyclic Peptide Design via Composable Geometric Constraints/full.md +526 -0
- ICML/2025/Zero-Shot Cyclic Peptide Design via Composable Geometric Constraints/images.zip +3 -0
- ICML/2025/Zero-Shot Cyclic Peptide Design via Composable Geometric Constraints/layout.json +3 -0
- ICML/2025/Zero-Shot Generalization of GNNs over Distinct Attribute Domains/da9fbf2a-a1c3-4f09-b587-cf7ab97be4b0_content_list.json +3 -0
- ICML/2025/Zero-Shot Generalization of GNNs over Distinct Attribute Domains/da9fbf2a-a1c3-4f09-b587-cf7ab97be4b0_model.json +3 -0
- ICML/2025/Zero-Shot Generalization of GNNs over Distinct Attribute Domains/da9fbf2a-a1c3-4f09-b587-cf7ab97be4b0_origin.pdf +3 -0
- ICML/2025/Zero-Shot Generalization of GNNs over Distinct Attribute Domains/full.md +0 -0
- ICML/2025/Zero-Shot Generalization of GNNs over Distinct Attribute Domains/images.zip +3 -0
- ICML/2025/Zero-Shot Generalization of GNNs over Distinct Attribute Domains/layout.json +3 -0
- ICML/2025/Zero-Shot Offline Imitation Learning via Optimal Transport/8aedc65a-0768-45dc-94d5-6be5947ddcd7_content_list.json +3 -0
- ICML/2025/Zero-Shot Offline Imitation Learning via Optimal Transport/8aedc65a-0768-45dc-94d5-6be5947ddcd7_model.json +3 -0
- ICML/2025/Zero-Shot Offline Imitation Learning via Optimal Transport/8aedc65a-0768-45dc-94d5-6be5947ddcd7_origin.pdf +3 -0
- ICML/2025/Zero-Shot Offline Imitation Learning via Optimal Transport/full.md +0 -0
- ICML/2025/Zero-Shot Offline Imitation Learning via Optimal Transport/images.zip +3 -0
- ICML/2025/Zero-Shot Offline Imitation Learning via Optimal Transport/layout.json +3 -0
- ICML/2025/Zero-shot Meta-learning for Tabular Prediction Tasks with Adversarially Pre-trained Transformer/cad58b3c-c8ca-4697-bc6d-85f4b57f8922_content_list.json +3 -0
- ICML/2025/Zero-shot Meta-learning for Tabular Prediction Tasks with Adversarially Pre-trained Transformer/cad58b3c-c8ca-4697-bc6d-85f4b57f8922_model.json +3 -0
- ICML/2025/Zero-shot Meta-learning for Tabular Prediction Tasks with Adversarially Pre-trained Transformer/cad58b3c-c8ca-4697-bc6d-85f4b57f8922_origin.pdf +3 -0
- ICML/2025/Zero-shot Meta-learning for Tabular Prediction Tasks with Adversarially Pre-trained Transformer/full.md +382 -0
- ICML/2025/Zero-shot Meta-learning for Tabular Prediction Tasks with Adversarially Pre-trained Transformer/images.zip +3 -0
- ICML/2025/Zero-shot Meta-learning for Tabular Prediction Tasks with Adversarially Pre-trained Transformer/layout.json +3 -0
- ICML/2025/ZeroFlow_ Overcoming Catastrophic Forgetting is Easier than You Think/607f6af7-947e-47ac-8e4d-68bda5925360_content_list.json +3 -0
- ICML/2025/ZeroFlow_ Overcoming Catastrophic Forgetting is Easier than You Think/607f6af7-947e-47ac-8e4d-68bda5925360_model.json +3 -0
- ICML/2025/ZeroFlow_ Overcoming Catastrophic Forgetting is Easier than You Think/607f6af7-947e-47ac-8e4d-68bda5925360_origin.pdf +3 -0
- ICML/2025/ZeroFlow_ Overcoming Catastrophic Forgetting is Easier than You Think/full.md +574 -0
- ICML/2025/ZeroFlow_ Overcoming Catastrophic Forgetting is Easier than You Think/images.zip +3 -0
- ICML/2025/ZeroFlow_ Overcoming Catastrophic Forgetting is Easier than You Think/layout.json +3 -0
- ICML/2025/ZipAR_ Parallel Autoregressive Image Generation through Spatial Locality/dc43e24f-9f75-429e-86a4-434685053d80_content_list.json +3 -0
- ICML/2025/ZipAR_ Parallel Autoregressive Image Generation through Spatial Locality/dc43e24f-9f75-429e-86a4-434685053d80_model.json +3 -0
- ICML/2025/ZipAR_ Parallel Autoregressive Image Generation through Spatial Locality/dc43e24f-9f75-429e-86a4-434685053d80_origin.pdf +3 -0
- ICML/2025/ZipAR_ Parallel Autoregressive Image Generation through Spatial Locality/full.md +282 -0
- ICML/2025/ZipAR_ Parallel Autoregressive Image Generation through Spatial Locality/images.zip +3 -0
- ICML/2025/ZipAR_ Parallel Autoregressive Image Generation through Spatial Locality/layout.json +3 -0
- ICML/2025/am-ELO_ A Stable Framework for Arena-based LLM Evaluation/485ae2a8-1fc5-402d-9e71-4d6939418c63_content_list.json +3 -0
- ICML/2025/am-ELO_ A Stable Framework for Arena-based LLM Evaluation/485ae2a8-1fc5-402d-9e71-4d6939418c63_model.json +3 -0
- ICML/2025/am-ELO_ A Stable Framework for Arena-based LLM Evaluation/485ae2a8-1fc5-402d-9e71-4d6939418c63_origin.pdf +3 -0
- ICML/2025/am-ELO_ A Stable Framework for Arena-based LLM Evaluation/full.md +403 -0
- ICML/2025/am-ELO_ A Stable Framework for Arena-based LLM Evaluation/images.zip +3 -0
- ICML/2025/am-ELO_ A Stable Framework for Arena-based LLM Evaluation/layout.json +3 -0
- ICML/2025/any4_ Learned 4-bit Numeric Representation for LLMs/aed8248d-b454-44ef-a338-4bc6daba1424_content_list.json +3 -0
- ICML/2025/any4_ Learned 4-bit Numeric Representation for LLMs/aed8248d-b454-44ef-a338-4bc6daba1424_model.json +3 -0
- ICML/2025/any4_ Learned 4-bit Numeric Representation for LLMs/aed8248d-b454-44ef-a338-4bc6daba1424_origin.pdf +3 -0
- ICML/2025/any4_ Learned 4-bit Numeric Representation for LLMs/full.md +483 -0
- ICML/2025/any4_ Learned 4-bit Numeric Representation for LLMs/images.zip +3 -0
- ICML/2025/any4_ Learned 4-bit Numeric Representation for LLMs/layout.json +3 -0
- ICML/2025/e-GAI_ e-value-based Generalized $α$-Investing for Online False Discovery Rate Control/dabdc8cf-f6a6-4355-a6eb-b0bb1e491482_content_list.json +3 -0
- ICML/2025/e-GAI_ e-value-based Generalized $α$-Investing for Online False Discovery Rate Control/dabdc8cf-f6a6-4355-a6eb-b0bb1e491482_model.json +3 -0
ICML/2025/Zero-Shot Cyclic Peptide Design via Composable Geometric Constraints/69aa88c6-fb8d-40f7-9781-6d3ee96fc40e_content_list.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:77c9b5a5ebdb00289c39363d62eaf6daef8699d6ba8dfb56368c32babb8f37d6
+size 121404

ICML/2025/Zero-Shot Cyclic Peptide Design via Composable Geometric Constraints/69aa88c6-fb8d-40f7-9781-6d3ee96fc40e_model.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:75f9af59aa9df10234a24153fda1a60e5dc1df70de40c3c1f4256b3579286a11
+size 147869

ICML/2025/Zero-Shot Cyclic Peptide Design via Composable Geometric Constraints/69aa88c6-fb8d-40f7-9781-6d3ee96fc40e_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d8d1b618ffca8f3255b3817ec8b643e25e6dae8ce685b790fa3643839c9e19c
+size 4751941

ICML/2025/Zero-Shot Cyclic Peptide Design via Composable Geometric Constraints/full.md
ADDED
@@ -0,0 +1,526 @@
# Zero-Shot Cyclic Peptide Design via Composable Geometric Constraints
|
| 2 |
+
|
| 3 |
+
Dapeng Jiang $^{12*}$ Xiangzhe Kong $^{13*}$ Jiaqi Han $^{4*}$ Mingyu Li $^{1}$ Rui Jiao $^{13}$ Wenbing Huang $^{56}$ Stefano Ermon $^{4}$ Jianzhu Ma $^{17}$ Yang Liu $^{13}$
|
| 4 |
+
|
| 5 |
+
# Abstract
|
| 6 |
+
|
| 7 |
+
Cyclic peptides, characterized by geometric constraints absent in linear peptides, offer enhanced biochemical properties, presenting new opportunities to address unmet medical needs. However, designing target-specific cyclic peptides remains underexplored due to limited training data. To bridge the gap, we propose CP-Composer, a novel generative framework that enables zero-shot cyclic peptide generation via composable geometric constraints. Our approach decomposes complex cyclization patterns into unit constraints, which are incorporated into a diffusion model through geometric conditioning on nodes and edges. During training, the model learns from unit constraints and their random combinations in linear peptides, while at inference, novel constraint combinations required for cyclization are imposed as input. Experiments show that our model, despite being trained only on linear peptides, is capable of generating diverse target-binding cyclic peptides, reaching success rates from $38\%$ to $84\%$ on different cyclization strategies.
|
| 8 |
+
|
| 9 |
+
# 1. Introduction
|
| 10 |
+
|
| 11 |
+
Peptides occupy an intermediate position between small molecules and antibodies, offering unique advantages over conventional drug formats, such as higher specificity and enhanced cell permeability (Fosgerau & Hoffmann, 2015; Lee et al., 2019). Among them, cyclic peptides, which introduce geometric constraints into linear peptides, have earned
|
| 12 |
+
|
| 13 |
+
*Equal contribution 1Institute for AI Industry Research (AIR), Tsinghua 2Xingjian College, Tsinghua University 3Department of Computer Science and Technology, Tsinghua University 4Stanford University 5Gaoling School of Artificial Intelligence, Renmin University of China 6Beijing Key Laboratory of Research on Large Models and Intelligent Governance 7Department of Electronic Engineering, Tsinghua University. Correspondence to: Yang Liu <liuyang2011@tsinghua.edu.cn>, Jianzhu Ma <majianzhu@tsinghua.edu.cn>.
|
| 14 |
+
|
| 15 |
+
Proceedings of the $42^{nd}$ International Conference on Machine Learning, Vancouver, Canada. PMLR 267, 2025. Copyright 2025 by the author(s).
|
| 16 |
+
|
| 17 |
+

|
| 18 |
+
|
| 19 |
+

|
| 20 |
+
Figure 1. Four common strategies to form cyclic peptides. (A) Stapled peptide, where a lysine (K) at position $i$ and an aspartic acid (D) at position $i + 3$ are connected via dehydration condensation on their side chains. The aspartic acid can also be replaced with a glutamic acid (E) at position $i + 4$. (B) Head-to-tail peptide, where the first residue and the last residue form an amide bond. (C) Disulfide peptide, where two cysteines (C) non-adjacent in sequence are spatially connected through a disulfide bond. (D) Bicycle peptide, which uses 1,3,5-trimethylbenzene to form a triangle between three cysteines (C) non-adjacent in sequence.
|
| 21 |
+
|
| 22 |
+

|
| 23 |
+
|
| 24 |
+

|
| 25 |
+
|
| 26 |
+
increasing attention (Zorzi et al., 2017). These constraints stabilize the peptide conformation, enhancing biochemical properties including binding affinity, in vivo stability, and oral bioavailability (Ji et al., 2024), which are essential for identifying desired drug candidates (Zhang & Chen, 2022).
|
| 27 |
+
|
| 28 |
+
Existing literature on target-specific peptide generation primarily focuses on linear peptides, utilizing autoregressive models (Li et al., 2024a), multi-modal flow matching (Li et al., 2024b; Lin et al., 2024), and geometric latent diffusion (Kong et al., 2024). However, these methods are not directly applicable to cyclic peptide design due to the scarcity of available data (Rettie et al., 2024). Other approaches either impose geometric constraints on linear peptides through post-filtering (Wang et al., 2024b), which typically results in low acceptance rates, or rely on hard-coded model design (Rettie et al., 2024), which lacks generalizability across different cyclization patterns. In contrast, we hypothesize that the complex geometric constraints of cyclic peptides can be decomposed into fundamental unit constraints, resembling how complex mathematical formulas are built from basic arithmetic operations. While existing datasets rarely contain peptides that satisfy intricate cyclic
|
| 29 |
+
|
| 30 |
+
constraints, they typically include abundant instances of single unit constraints and their random combinations, which serve as the building blocks for more complicated designs. Therefore, we reason that a framework could potentially be developed to learn these unit constraints from available linear peptide data, circumventing data limitations and enabling generalization to the diverse combined constraints required for cyclic peptide design.
|
| 31 |
+
|
| 32 |
+
In this paper, we present CP-Composer, a framework for zero-shot cyclic peptide generation, relying solely on available data for linear peptides. Our work is equipped with the following contributions. 1) Decomposing cyclization strategies into fundamental geometric constraints. We identify four common chemical cyclization strategies (Figure 1) and formalize cyclic peptide design as a geometrically constrained generation problem. By analyzing cyclization patterns, we derive two fundamental unit constraints, type constraints and distance constraints, allowing diverse cyclization strategies to be described as specific combinations of these units. 2) Encoding constraints with geometric conditioning. We incorporate unit constraints into the denoising network of a diffusion model (Kong et al., 2024) using additional vectorized embeddings of types and distances on geometric graphs, which enables flexible conditioning on compositions of constraints required for cyclic peptide generation. 3) Enabling zero-shot cyclic peptide design. We jointly train conditional and unconditional models on unit constraints and their random combinations found in linear peptide data. At inference, novel constraint combinations corresponding to desired cyclization strategies, which are unseen during training, are imposed as input conditions. The model is guided by the difference in score estimates between conditional and unconditional models, enabling zero-shot generalization to cyclic peptides. 4) Assessing generated cyclic peptides on comprehensive metrics. Experiments demonstrate that our CP-Composer generates cyclic peptides with complex geometric constraints effectively, achieving high success rates from $38\%$ to $84\%$, while maintaining realistic distributions of amino acid types and dihedral angles. Molecular dynamics simulations further confirm that the generated cyclic peptides exhibit the desired binding affinity while forming more stable binding conformations compared to the native linear peptide binders.
|
| 33 |
+
|
| 34 |
+
# 2. Related Work
|
| 35 |
+
|
| 36 |
+
Geometric diffusion models. Besides their success on applications like image (Rombach et al., 2021; Song et al., 2020; 2021a) and video (Ho et al., 2022) generation, diffusion models have become a preeminent tool in modeling the distribution of structured data in geometric domains. While early works have explored their applicability on tasks like molecule generation (Xu et al., 2022; 2023; Park & Shen,
|
| 37 |
+
|
| 38 |
+
2024), there has been growing interest in scaling these models to systems of larger scales, such as antibody (Luo et al., 2022), peptide (Kong et al., 2024), and protein (Yim et al., 2023; Watson et al., 2023; Anand & Achim, 2022) design in general, or to those with complex dynamics, such as molecular dynamics simulation (Han et al., 2024b). Despite fruitful achievements, how to impose diverse geometric constraints still remains under-explored for geometric diffusion models, which we aim to address in this work.
|
| 39 |
+
|
| 40 |
+
Diffusion guidance. Diffusion sampling can be flexibly controlled by progressively enforcing guidance through the reverse denoising process. Dhariwal & Nichol (2021) proposes classifier-guidance, which employs an additionally trained classifier to amplify the guidance signal. Classifier-free guidance (CFG) (Ho & Salimans, 2022) is a more widely adopted alternative that replaces the classifier with the difference of the conditional and unconditional score, which has been further generalized to the multi-constraint scenario by composing multiple scores in diffusion sampling (Liu et al., 2022; Huang et al., 2023). Diffusion guidance has also been explored for solving inverse problems on images (Song et al., 2024; Kawar et al., 2022; Song et al., 2021b), molecules (Bao et al., 2022), and PDEs (Jiang et al., 2024). Our approach instead extends CFG to compose geometric constraints with application to cyclic peptide design.
|
| 41 |
+
|
| 42 |
+
Peptide design. Target-specific peptide design initially relied on physical methods using statistical force fields and fragment libraries (Hosseinzadeh et al., 2021; Swanson et al., 2022). With the rise of equivariant neural networks (Satorras et al., 2021; Han et al., 2024a), geometric deep generative models have emerged. PepFlow (Li et al., 2024b) and PPFLOW (Lin et al., 2024) use multi-modal flow matching, while PepGLAD (Kong et al., 2024) applies geometric latent diffusion with a full-atom autoencoder. However, these methods struggle with cyclic peptide design due to limited data. Prior works introduce disulfide bonds via post-filtering (Wang et al., 2024b) or enforce head-to-tail cyclization through hard-coded model design (Rettie et al., 2024). In contrast, our approach decomposes cyclization into fundamental unit constraints, enabling zero-shot cyclic peptide generation with broad flexibility across diverse patterns.
|
| 43 |
+
|
| 44 |
+
# 3. Method
|
| 45 |
+
|
| 46 |
+
In this section, we detail our method, CP-Composer. We first introduce basic concepts of peptide modeling and cyclic strategies in Sec. 3.1 and specify these strategies as constraints in Sec. 3.2. We further present the guided generation framework and the encoding strategy for incorporating the constraints in Sec. 3.3 and Sec. 3.4, respectively. We finally describe the training and inference schemes in Sec. 3.5. The overall workflow is depicted in Fig. 2.
|
| 47 |
+
|
| 48 |
+

|
| 49 |
+
Figure 2. Overall training and inference design of CP-Composer. We define two unit constraints, type constraint and distance constraint (§ 3.2), which are incorporated into the diffusion model via geometric conditioning (§ 3.4). During training, the model learns from single unit constraints and their combinations observed in linear peptides. At inference, novel combinations corresponding to specific cyclization strategies are imposed with guidance signal amplified by classifier-free guidance, enabling zero-shot cyclic peptide design (§ 3.5).
|
| 50 |
+
|
| 51 |
+
# 3.1. Preliminaries
|
| 52 |
+
|
| 53 |
+
Representing peptide as geometric graph. We represent the binding site and peptide as a fully-connected geometric graph $\mathcal{G} = (\mathcal{V},\mathcal{E})$ where $\mathcal{V}$ is the set of nodes and $\mathcal{E}$ is the set of edges. Each node is a residue, bound with node features $(\pmb{h}_i,\vec{\pmb{X}}_i)$ with $h_i\in \mathbb{R}^m$ being the one-hot encoding of the amino acid type and $\vec{\pmb{X}}_i\in \mathbb{R}^{k_i\times 3}$ being the coordinate of the $k_{i}$ atoms.
|
| 54 |
+
|
| 55 |
+
Geometric latent diffusion model for peptide design. Our model is built on PepGLAD (Kong et al., 2024), a latent geometric diffusion model, but is adaptable to other diffusion-based frameworks. It employs a variational autoencoder to project peptide graphs $\mathcal{G}$ into residue-level latents $\mathcal{G}_z = \{(z_i,\vec{z}_i)\}_{i = 1}^N$ with an encoder $\mathcal{E}_{\phi}$ , and a corresponding decoder $\mathcal{D}_{\xi}$ for the inverse, where $z_{i}\in \mathbb{R}^{8}$ is the E(3)-invariant latent and $\vec{z}_i\in \mathbb{R}^3$ is the E(3)-equivariant counterpart. A diffusion model is learned in the compact latent space, with the denoiser $\epsilon_{\theta}(\mathcal{G}_z^{(t)},t)$ parameterized by an equivariant GNN (Kong et al., 2023). The sampling process initiates with latents $\mathcal{G}_z^{(T)} = \{(z_i^{(T)},\vec{z}_i^{(T)})\}_{i = 1}^N$ drawn from the prior and gradually denoises it using DDPM (Ho et al., 2020) sampler for a total of $T$ steps. The final latents $\mathcal{G}_z^{(0)}$ are decoded back to the data space using decoder $\mathcal{D}_{\xi}$ .
|
| 56 |
+
|
| 57 |
+
Cyclic peptide and cyclization strategies. Unlike common linear peptides, which are chain-like structures, a cyclic peptide is formed by amino acids connected in a ring structure. As shown in Fig. 1, we primarily focus on four types of cyclic peptides in this paper: stapled, head-to-tail, disulfide, and bicycle peptides. Each strategy applies constraints on
|
| 58 |
+
|
| 59 |
+
specific amino acid types and/or their pairwise distances. Taking the disulfide peptide as an example (Fig. 1C), to link two cysteines at indices $i, j$ with a disulfide bond of length $d_{S}$, a disulfide peptide is constrained by
|
| 60 |
+
|
| 61 |
+
$$
\mathbb{C}_{\text{Disulfide},i,j} = \Big(\big\{\arg\max(\boldsymbol{h}_i) = \arg\max(\boldsymbol{h}_j) = k_C\big\},\ \big\{\|\vec{\boldsymbol{X}}_i - \vec{\boldsymbol{X}}_j\|_2 = d_S\big\}\Big), \tag{1}
$$
|
| 68 |
+
|
| 69 |
+
where $k_{C}$ represents the index of cysteine (C) in the one-hot embeddings. This constraint can be decomposed into two node-level constraints on the amino acid types and one edge-level constraint on the distance. We refer to these as unit geometric constraints with further details on these constraints provided in Sec. 3.2. We demonstrate that all four cyclic strategies can be expressed as combinations of these unit geometric constraints in Appendix B.
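To make the decomposition concrete, the following minimal Python sketch (illustrative only, not the authors' implementation; the residue representation, the 2.05 Å S-S bond length, and the tolerance are assumptions) checks the two unit constraints of Eq. 1 for a candidate peptide:

```python
import numpy as np

AA_TYPES = "ACDEFGHIKLMNPQRSTVWY"      # 20 standard amino acids
K_C = AA_TYPES.index("C")              # index of cysteine in the one-hot encoding

def satisfies_disulfide(h, x_sg, i, j, d_s=2.05, tol=0.3):
    """Check Eq. 1 for residues i and j.

    h    : (N, 20) one-hot amino-acid types.
    x_sg : (N, 3) coordinates of the side-chain sulfur (SG) atoms.
    d_s  : target disulfide bond length in Angstrom (assumed value).
    tol  : tolerance, since generated structures are noisy (assumed value).
    """
    # Two node-level type constraints: both residues must be cysteine.
    if np.argmax(h[i]) != K_C or np.argmax(h[j]) != K_C:
        return False
    # One edge-level distance constraint on the sulfur-sulfur distance.
    return abs(np.linalg.norm(x_sg[i] - x_sg[j]) - d_s) <= tol
```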
|
| 70 |
+
|
| 71 |
+
# 3.2. Decomposing Cyclization Strategies as Geometric Constraints
|
| 72 |
+
|
| 73 |
+
In this work, we consider two types of unit geometric constraints, namely type constraint and distance constraint. In particular, type constraint operates on node-level by enforcing the node to be of certain type, while distance constraint takes place on edge-level, specifying a pair of nodes to reside at a certain distance.
|
| 74 |
+
|
| 75 |
+
Definition 3.1 (Type constraint). A type constraint is a set $\mathbb{C}_T\coloneqq \{(i,l_i)\}_{i\in \mathcal{V}_T}$ where each entry $(i,l_{i})$ represents that node $i$ should be of type $l_{i}$ , while $\mathcal{V}_T\subseteq \mathcal{V}$ is the set of nodes to enforce the type constraint.
|
| 76 |
+
|
| 77 |
+
Definition 3.2 (Distance constraint). A distance constraint is a set $\mathbb{C}_D\coloneqq \{(i,j,d_{ij})\}_{(i,j)\in \mathcal{E}_D}$ where each element $(i,j,d_{ij})$ represents that node $i$ and $j$ should be positioned at the distance of $d_{ij}$ , while $\mathcal{E}_D\subseteq \mathcal{E}$ is the set of edges to enforce the distance constraint.
|
| 78 |
+
|
| 79 |
+
Notably, our taxonomy of geometric constraints is complete, in the sense that each of the cyclization strategies $\mathbb{C}$ described in Sec. 3.1 can be decomposed into a combination of type constraints $\mathbb{C}_T$ and/or distance constraints $\mathbb{C}_D$. We defer the detailed explanations to Appendix B.
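As an illustration of how the unit constraints compose (a sketch with our own naming; the residue indices and bond lengths below are placeholders, and the exact decompositions of the four strategies are given in Appendix B), the strategies can be written as small bundles of type and distance constraints:

```python
from dataclasses import dataclass, field

@dataclass
class Constraints:
    """A composable bundle of unit constraints (Definitions 3.1 and 3.2)."""
    types: dict = field(default_factory=dict)       # node index -> required type
    distances: dict = field(default_factory=dict)   # (i, j) -> required distance (A)

    def __or__(self, other):
        # Composing constraints is simply taking the union of their units.
        return Constraints({**self.types, **other.types},
                           {**self.distances, **other.distances})

def disulfide(i, j, d_ss=2.05):
    return Constraints(types={i: "C", j: "C"}, distances={(i, j): d_ss})

def head_to_tail(n_res, d_amide=1.33):
    return Constraints(distances={(0, n_res - 1): d_amide})

def stapled(i, d_link=2.5):
    # Lysine at position i, aspartic acid at i+3, linked via their side chains.
    return Constraints(types={i: "K", i + 3: "D"}, distances={(i, i + 3): d_link})

# High-order combinations (Sec. 4.2) are unions of the same unit constraints:
two_disulfides = disulfide(1, 6) | disulfide(3, 9)
```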
|
| 80 |
+
|
| 81 |
+
Problem definition. We formulate the task of cyclic peptide design as finding the candidate peptides $\mathcal{G}$ that satisfy constraint $\mathbb{C}$ , where $\mathbb{C}$ is any one of the four cyclic constraints.
|
| 82 |
+
|
| 83 |
+
# 3.3. Inverse Design with Diffusion Guidance
|
| 84 |
+
|
| 85 |
+
To perform inverse design, a widely adopted approach is to progressively inject a guidance term into diffusion sampling to steer it towards the design target (Bao et al., 2022; Song et al., 2023), which shares a similar spirit with classifier guidance (Dhariwal & Nichol, 2021). Specifically, at each sampling step $t$, the conditional score is derived by Bayes' rule:
|
| 86 |
+
|
| 87 |
+
$$
|
| 88 |
+
\begin{array}{l} \nabla_ {\mathcal {G} _ {z} ^ {(t)}} \log p _ {t} (\mathcal {G} _ {z} ^ {(t)} | \mathbb {C}) = \nabla_ {\mathcal {G} _ {z} ^ {(t)}} \log p _ {t} (\mathcal {G} _ {z} ^ {(t)}) \\ + \nabla_ {\mathcal {G} _ {z} ^ {(t)}} \log p _ {t} (\mathbb {C} | \mathcal {G} _ {z} ^ {(t)}), \tag {2} \\ \end{array}
|
| 89 |
+
$$
|
| 90 |
+
|
| 91 |
+
where the last term $\nabla_{\mathcal{G}_{z}^{(t)}}\log p_t(\mathbb{C}|\mathcal{G}_z^{(t)})$ acts as the guidance, which is typically realized by a hand-crafted energy function (Kawar et al., 2022; Song et al., 2024) or a pretrained neural network (Dhariwal & Nichol, 2021; Bao et al., 2022).
|
| 92 |
+
|
| 93 |
+
However, this approach often proves unfavorable in practice, since the guidance term in Eq. 2 is the gradient of a neural network, which degrades sample quality through adversarial effects (Ho & Salimans, 2022). Distinct from the approach above, we propose an alternative that, inspired by classifier-free guidance, guides the sampling by directly composing the unconditional and conditional scores without additional gradient terms. In detail, we have
|
| 94 |
+
|
| 95 |
+
$$
|
| 96 |
+
\tilde {\epsilon} _ {\theta} \left(\mathcal {G} _ {z} ^ {(t)}, \mathbb {C}, t\right) = (w + 1) \epsilon_ {\theta} \left(\mathcal {G} _ {z} ^ {(t)}, \mathbb {C}, t\right) - w \epsilon_ {\theta} \left(\mathcal {G} _ {z} ^ {(t)}, t\right) \tag {3}
|
| 97 |
+
$$
|
| 98 |
+
|
| 99 |
+
where $w$ is the guidance weight and the guided score $\tilde{\epsilon}_{\theta}$ replaces $\epsilon_{\theta}$ in score computation. In particular, the rationales of Eq. 2 and Eq. 3 are linked by the following distribution
|
| 100 |
+
|
| 101 |
+
$$
|
| 102 |
+
\tilde {p} _ {t} \left(\mathcal {G} _ {z} ^ {(t)} | \mathbb {C}\right) \propto p _ {t} \left(\mathcal {G} _ {z} ^ {(t)}\right) p _ {t} \left(\mathbb {C} \mid \mathcal {G} _ {z} ^ {(t)}\right) ^ {w}, \tag {4}
|
| 103 |
+
$$
|
| 104 |
+
|
| 105 |
+
with the corresponding conditional score
|
| 106 |
+
|
| 107 |
+
$$
|
| 108 |
+
\begin{array}{l} \nabla_ {\mathcal {G} _ {z} ^ {(t)}} \log \tilde {p} _ {t} (\mathcal {G} _ {z} ^ {(t)} | \mathbb {C}) \\ = \nabla_ {\mathcal {G} _ {z} ^ {(t)}} \log p _ {t} (\mathcal {G} _ {z} ^ {(t)}) + w \nabla_ {\mathcal {G} _ {z} ^ {(t)}} \log p _ {t} (\mathbb {C} | \mathcal {G} _ {z} ^ {(t)}), \\ \approx \epsilon_ {\theta} \left(\mathcal {G} _ {z} ^ {(t)}, t\right) + w \nabla_ {\mathcal {G} _ {z} ^ {(t)}} \log p _ {t} \left(\mathbb {C} \mid \mathcal {G} _ {z} ^ {(t)}\right). \tag {5} \\ \end{array}
|
| 109 |
+
$$
|
| 110 |
+
|
| 111 |
+
By further leveraging the relation $\nabla_{\mathcal{G}_z^{(t)}}\log p_t(\mathbb{C}|\mathcal{G}_z^{(t)}) =$ $\nabla_{\mathcal{G}_z^{(t)}}\log p_t(\mathcal{G}_z^{(t)}|\mathbb{C}) - \nabla_{\mathcal{G}_z^{(t)}}\log p_t(\mathcal{G}_z^{(t)})\approx \epsilon_\theta (\mathcal{G}_z^{(t)},\mathbb{C},t) -$ $\epsilon_{\theta}(\mathcal{G}_{z}^{(t)},t)$ into Eq. 5, we obtain the expression in Eq. 3.
|
| 112 |
+
|
| 113 |
+
Conceptually, Eq. 2 adopts energy-guidance that directly models $\log p_t(\mathbb{C}|\mathcal{G}_z^{(t)})$ by an externally trained energy function. Eq. 3 instead follows the convention in classifier-free guidance by rewriting $\nabla_{\mathcal{G}_z^{(t)}}\log p_t(\mathbb{C}|\mathcal{G}_z^{(t)}) = \nabla_{\mathcal{G}_z^{(t)}}\log p_t(\mathcal{G}_z^{(t)}|\mathbb{C}) - \nabla_{\mathcal{G}_z^{(t)}}\log p_t(\mathcal{G}_z^{(t)})\approx \epsilon_\theta (\mathcal{G}_z^{(t)},\mathbb{C},t) - \epsilon_\theta (\mathcal{G}_z^{(t)},t)$ , which gives Eq. 3 after simplification.
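Once conditional and unconditional score estimates are available, the guidance of Eq. 3 reduces to a single linear combination. The sketch below is illustrative; `eps_theta` is a placeholder for the trained denoiser, and passing `None` for the constraint stands in for the unconditional branch:

```python
def guided_eps(eps_theta, g_t, constraints, t, w=2.0):
    """Eq. 3: classifier-free-style guidance over geometric constraints."""
    eps_cond = eps_theta(g_t, constraints, t)     # conditional noise prediction
    eps_uncond = eps_theta(g_t, None, t)          # unconditional noise prediction
    return (w + 1.0) * eps_cond - w * eps_uncond

# w = 0 recovers plain conditional sampling; larger w amplifies the guidance
# signal at some cost in distributional fidelity (cf. Table 1).
```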
|
| 114 |
+
|
| 115 |
+
In recent studies, how to obtain the conditional score $\epsilon_{\theta}(\mathcal{G}_{z}^{(t)},\mathbb{C},t)$ still remains unclear. Notably, $\mathbb{C}$ is a complicated geometric constraint, which is fundamentally different from a class label (Ho & Salimans, 2022) or a target value (Bao et al., 2022), where an embedding (e.g., one-hot for class label) can be readily adopted as the control signal to feed into the denoiser. In the following section, we will introduce our approach to encode type and distance constraint.
|
| 116 |
+
|
| 117 |
+
# 3.4. Encoding Constraints via Geometric Conditioning
|
| 118 |
+
|
| 119 |
+
To encode the constraints as control signals, we propose geometric conditioning that embeds the type and distance constraints into the denoiser through vectorization.
|
| 120 |
+
|
| 121 |
+
Conditioning type constraints. For type constraint $\mathbb{C}_T = \{(i,l_i)\}_{i\in \mathcal{V}_T}$ where $l_{i}\in \{0,1,\dots ,K - 1\}$ is the desired node type for node $i$ , we operate at node-level by augmenting the E(3)-invariant node feature $\pmb{h}_i$ with an additional vector $\pmb{l}_i\in \mathbb{R}^K$ which serves as the control signal. This corresponds to the encoding function $f_{T}(\mathbb{C}_{T}) = \{(i,l_{i})\}_{i\in \mathcal{V}_{T}}$ that lifts $l_{i}$ to the embedding space where
|
| 122 |
+
|
| 123 |
+
$$
|
| 124 |
+
\boldsymbol{l}_{i} = \begin{cases} \text{One-hot}(l_{i}) & i \in \mathcal{V}_{T}, \\ \boldsymbol{0} & i \in \mathcal{V} \setminus \mathcal{V}_{T}. \end{cases} \tag{6}
|
| 125 |
+
$$
|
| 126 |
+
|
| 127 |
+
Such design of the control signal is simple yet effective, since different type constraints will induce different signal $l_{i}$ , thus making the constraints distinguishable to the network. More importantly, for any type constraint, the conditional score $\epsilon_{\theta}(\mathcal{G}_{z}^{(t)},\mathbb{C},t)$ obtained by this means still enjoys E(3)-equivariance, since $l_{i}$ is E(3)-invariant.
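A minimal sketch of the control signal in Eq. 6 (ours; the number of amino-acid types is fixed to 20 here, while the actual feature dimension is a modeling choice):

```python
import numpy as np

def type_control_signal(n_nodes, type_constraint, n_types=20):
    """Eq. 6: one-hot rows for constrained nodes, zero rows elsewhere.

    type_constraint: dict mapping node index i -> desired type index l_i.
    Returns an (n_nodes, n_types) array appended to the invariant node features.
    """
    signal = np.zeros((n_nodes, n_types))
    for i, l_i in type_constraint.items():
        signal[i, l_i] = 1.0
    return signal
```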
|
| 128 |
+
|
| 129 |
+
Conditioning distance constraints. For distance constraint $\mathbb{C}_D\coloneqq \{(i,j,d_{ij})\}_{(i,j)\in \mathcal{E}_D}$ where $d_{ij}$ specifies the distance between node $i$ and $j$ , we instead design the encoding function as $f_{D}(\mathbb{C}_{D}) = \{(i,j,\pmb{d}_{ij})\}_{(i,j)\in \mathcal{E}_{D}}$ , where the control signal $\pmb{d}_{ij}$ is defined at edge-level:
|
| 130 |
+
|
| 131 |
+
$$
|
| 132 |
+
\boldsymbol{d}_{ij} = \begin{cases} \mathrm{RBF}(d_{ij}) & (i, j) \in \mathcal{E}_{D}, \\ \phi & (i, j) \in \mathcal{E} \setminus \mathcal{E}_{D}. \end{cases} \tag{7}
|
| 133 |
+
$$
|
| 134 |
+
|
| 135 |
+
Here $\mathrm{RBF}(\cdot)$ is the radial basis kernel that lifts the distance from a scalar to a high-dimensional vector (Schütt et al., 2018), and $\phi$ denotes that the edges not in the set $\mathcal{E}_D$ will not be featurized. The control signal $\pmb{d}_{ij}$ is then viewed as a special type of edge feature, which is further processed by an additional dyMEAN layer (Kong et al., 2023), whose input is the subgraph $(\mathcal{V},\mathcal{E}_D)$ with edge features $\{\pmb{d}_{ij}\}_{(i,j)\in \mathcal{E}_D}$. More details are deferred to Appendix C.2. Akin to the analysis for type constraints, our way of encoding distance constraints also preserves the E(3)-equivariance of the conditional score, with proof in Appendix A.2.
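Analogously, the edge-level control signal of Eq. 7 can be sketched as follows (illustrative; the number of RBF kernels, their centers, and the width `gamma` are assumptions, and the real model consumes these features through the extra dyMEAN layer mentioned above):

```python
import numpy as np

def rbf_expand(d, d_min=0.0, d_max=20.0, n_kernels=16, gamma=4.0):
    """Lift a scalar distance to a radial-basis feature vector."""
    centers = np.linspace(d_min, d_max, n_kernels)
    return np.exp(-gamma * (d - centers) ** 2)

def distance_control_signal(distance_constraint):
    """Eq. 7: RBF edge features for constrained edges only.

    distance_constraint: dict mapping (i, j) -> desired distance d_ij.
    Edges outside E_D are simply left without this feature.
    """
    return {(i, j): rbf_expand(d) for (i, j), d in distance_constraint.items()}
```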
|
| 136 |
+
|
| 137 |
+
Moreover, the encoding is also injective, as formally stated in Theorem 3.3. Such property is crucial for effective guidance since different constraints will be projected as different control signals, always making them distinguishable to the score network.
|
| 138 |
+
|
| 139 |
+
Theorem 3.3 (Injective). Both $f_{T}$ and $f_{D}$ are injective. That is, $f(\mathbb{C}^{1}) = f(\mathbb{C}^{2})$ if and only if $\mathbb{C}^{1} = \mathbb{C}^{2}$ , where $(f, \mathbb{C}^{1}, \mathbb{C}^{2})$ can be $(f_{T}, \mathbb{C}_{T}^{1}, \mathbb{C}_{T}^{2})$ or $(f_{D}, \mathbb{C}_{D}^{1}, \mathbb{C}_{D}^{2})$ . Furthermore, their product function $\tilde{f}(\mathbb{C}_T, \mathbb{C}_D) := (f_T(\mathbb{C}_T), f_D(\mathbb{C}_D))$ is also injective.
|
| 140 |
+
|
| 141 |
+
Composing type and distance constraints. Our approach of encoding the type and distance constraints at the node and edge level, respectively, also facilitates conveniently composing them together. In particular, we can easily devise $\epsilon_{\theta}(\mathcal{G}_{z}^{(t)},\mathbb{C}_{T},\mathbb{C}_{D},t)$ by simultaneously feeding the type and distance control signals in Eq. 6 and 7 into the score network, which corresponds to enforcing a compositional constraint $(\mathbb{C}_T,\mathbb{C}_D)$. This extension is critical since it enables us to enforce richer combinations of the constraints at inference time, even generalizing to those unseen during training. In this way, we are able to design cyclic peptides with training data that only consist of linear peptides, owing to the generalization capability of our approach.
|
| 142 |
+
|
| 143 |
+
# 3.5. Training and Inference
|
| 144 |
+
|
| 145 |
+
With the geometric conditioning technique to derive the conditional score, we are now ready to introduce the training and inference framework.
|
| 146 |
+
|
| 147 |
+
Design space for constraints. For a linear peptide $\mathcal{G}$ sampled from training set with features $\{(h_i,\vec{\pmb{X}}_i)\}_{i = 1}^N$ , we consider the following design space for type constraint:
|
| 148 |
+
|
| 149 |
+
$$
|
| 150 |
+
\mathcal{C}_{T}(\mathcal{G}) = \left\{ \mathbb{C}_{T} \mid \mathbb{C}_{T} = \{(i, \arg\max(\boldsymbol{h}_{i}))\}_{i \in \mathcal{V}_{T}},\ |\mathcal{V}_{T}| \leq 4 \right\}, \tag{8}
|
| 151 |
+
$$
|
| 152 |
+
|
| 153 |
+
which includes all type constraints that fix each constrained node $i$ to its amino acid type in $\mathcal{G}$, with at most 4 constrained nodes. For distance
|
| 154 |
+
|
| 155 |
+
# Algorithm 1 Training Procedure of CP-Composer
|
| 156 |
+
|
| 157 |
+
Input: Data distribution $\mathcal{D}$ , mask probabilities for type and distance constraints $p_T, p_D$ , encoder $\mathcal{E}_{\phi}$ , score network $\epsilon_{\theta}$ , diffusion scheduler Scheduler( $\cdot$ )
|
| 158 |
+
|
| 159 |
+
1: while not converged do
|
| 160 |
+
2: Sample $\mathcal{G}\sim \mathcal{D},\mathbb{C}_T\sim \mathrm{Unif}(\mathcal{C}_T(\mathcal{G}))$ and $\mathbb{C}_D\sim \mathrm{Unif}(\mathcal{C}_D(\mathcal{G}))$ {c.f. Eq. 8-9}
|
| 161 |
+
3: $\mathbb{C}_T\gets \emptyset$ with probability $p_T$
|
| 162 |
+
4: $\mathbb{C}_D\gets \emptyset$ with probability $p_D$
|
| 163 |
+
5: $(\pmb{\epsilon},\mathcal{G}_z^{(t)},t)\gets \mathrm{Scheduler}(\mathcal{E}_\phi (\mathcal{G}))$
|
| 164 |
+
6: Take gradient step on
|
| 165 |
+
|
| 166 |
+
$$
|
| 167 |
+
\mathcal {L} (\theta) = \| \boldsymbol {\epsilon} - \boldsymbol {\epsilon} _ {\theta} \left(\mathcal {G} _ {\boldsymbol {z}} ^ {(t)}, \mathbb {C} _ {T}, \mathbb {C} _ {D}, t\right) \| _ {2} ^ {2}
|
| 168 |
+
$$
|
| 169 |
+
|
| 170 |
+
7: end while
|
| 171 |
+
|
| 172 |
+
constraint, we select the following design space:
|
| 173 |
+
|
| 174 |
+
$$
|
| 175 |
+
\mathcal{C}_{D}(\mathcal{G}) = \left\{ \mathbb{C}_{D} \mid \mathbb{C}_{D} = \{(i, j, \|\vec{\boldsymbol{X}}_{i} - \vec{\boldsymbol{X}}_{j}\|_{2})\}_{(i,j) \in \mathcal{E}_{D}},\ d_{\mathcal{G}}(i,j) \in \{3, 4, 6\},\ |\mathcal{E}_{D}| \leq 6 \right\}, \tag{9}
|
| 176 |
+
$$
|
| 177 |
+
|
| 178 |
+
which spans all possible distance constraints that fix the distance between nodes $i$ and $j$ to their Euclidean distance in $\mathcal{G}$, while the shortest path distance between $i$ and $j$, i.e., $d_{\mathcal{G}}(i,j)$, equals 3, 4, or 6. We design $\mathcal{C}_T(\mathcal{G})$ and $\mathcal{C}_D(\mathcal{G})$ such that $\mathcal{C}_T(\mathcal{G}) \times \mathcal{C}_D(\mathcal{G})$ covers the constraint space of cyclic peptides, where $\times$ is the Cartesian product. This permits our approach to generalize at inference time to novel compositions within the space $\mathcal{C}_T(\mathcal{G}) \times \mathcal{C}_D(\mathcal{G})$ without necessarily seeing such particular combinations in the training data, e.g., the four compositional constraints of cyclic peptides.
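A sketch of how such constraints could be drawn from a linear training peptide (ours; it interprets $d_{\mathcal{G}}(i,j)$ as the separation along the peptide chain and uses C-alpha coordinates, both of which are assumptions):

```python
import numpy as np

def sample_training_constraints(h, x_ca, rng, max_types=4, max_dists=6,
                                gaps=(3, 4, 6)):
    """Draw one type and one distance constraint from the design spaces of
    Eq. 8 and Eq. 9 for a linear peptide.

    h    : (N, 20) one-hot amino-acid types.
    x_ca : (N, 3) C-alpha coordinates (assumed choice of reference atom).
    """
    n = len(h)
    # Type constraint: up to 4 nodes keep their observed amino-acid type.
    nodes = rng.choice(n, size=rng.integers(1, max_types + 1), replace=False)
    type_c = {int(i): int(np.argmax(h[i])) for i in nodes}
    # Distance constraint: pairs 3, 4, or 6 residues apart along the chain,
    # fixed to their observed Euclidean distance.
    pairs = [(i, i + g) for g in gaps for i in range(n - g)]
    rng.shuffle(pairs)
    dist_c = {(i, j): float(np.linalg.norm(x_ca[i] - x_ca[j]))
              for i, j in pairs[:rng.integers(1, max_dists + 1)]}
    return type_c, dist_c
```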
|
| 179 |
+
|
| 180 |
+
Training. We employ a single network $\epsilon_{\theta}$ to jointly optimize the conditional and unconditional score during training, following the paradigm in Ho & Salimans (2022). At each training step, we first sample $\mathcal{G}$ from training data distribution $\mathcal{D}$ and derive the candidate constraints $\mathcal{C}_T(\mathcal{G})$ and $\mathcal{C}_D(\mathcal{G})$ . We then sample a type constraint $\mathbb{C}_T$ and a distance constraint $\mathbb{C}_D$ uniformly from the candidates $\mathcal{C}_T(\mathcal{G})$ and $\mathcal{C}_D(\mathcal{G})$ , respectively. To jointly optimize the conditional and unconditional score networks, we replace $\mathbb{C}_T$ and $\mathbb{C}_D$ by empty set $\varnothing$ with probability $p_T$ and $p_D$ respectively, where the empty set will enforce no meaningful type and/or distance control signal which degenerates to the unconditional score. Finally, we encode $\mathcal{G}$ into latent space by $\mathcal{E}_{\phi}$ , sample the noise $\epsilon$ and diffusion step $t$ , and compute the noised latent $\mathcal{G}_z^{(t)}$ . The noise prediction loss (Ho et al., 2020) is adopted to train the score network. We present the detailed training procedure in Alg. 1.
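A schematic training step corresponding to Alg. 1 (ours; `encode`, `add_noise`, and `eps_theta` are placeholder callables standing in for the VAE encoder, the diffusion scheduler, and the score network, and the drop probabilities are assumptions):

```python
import numpy as np

def training_step(peptide, constraints, encode, add_noise, eps_theta, rng,
                  p_type=0.1, p_dist=0.1):
    """One CP-Composer-style training step (cf. Alg. 1); returns the loss only."""
    type_c, dist_c = constraints                 # sampled from the design spaces
    # Randomly drop each constraint so the same network also learns the
    # unconditional score (classifier-free training).
    if rng.random() < p_type:
        type_c = None
    if rng.random() < p_dist:
        dist_c = None
    z0 = encode(peptide)                         # latent graph G_z
    eps, z_t, t = add_noise(z0)                  # forward diffusion
    pred = eps_theta(z_t, type_c, dist_c, t)     # conditional noise prediction
    return float(np.mean((eps - pred) ** 2))     # noise-prediction loss
```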
|
| 181 |
+
|
| 182 |
+
Inference. At inference time, we select one of the four cyclic constraints at a time. Each constraint is represented by $(\mathbb{C}_T^*,\mathbb{C}_D^*)$ where $\mathbb{C}_T^*$ and $\mathbb{C}_D^*$ are the target type and distance constraints, respectively. We start from the initial latent $\mathcal{G}_{\boldsymbol{z}}^{(T)}$ sampled from the prior and perform standard
|
| 183 |
+
|
| 184 |
+
Table 1. Success rates and KL divergence for generated samples from different cyclization strategies.
|
| 185 |
+
|
| 186 |
+
<table><tr><td rowspan="2"></td><td colspan="4">Stapled peptide</td><td colspan="4">Head-to-tail peptide</td></tr><tr><td>Succ.</td><td>AA-KL</td><td>B-KL</td><td>S-KL</td><td>Succ.</td><td>AA-KL</td><td>B-KL</td><td>S-KL</td></tr><tr><td>PepGLAD (Kong et al., 2024)</td><td>22.80%</td><td>0.1035</td><td>1.1401</td><td>0.0126</td><td>30.23%</td><td>0.1052</td><td>1.1347</td><td>0.0125</td></tr><tr><td>w/ EG (Bao et al., 2022)</td><td>25.41%</td><td>0.0744</td><td>1.1821</td><td>0.0127</td><td>61.63%</td><td>0.0798</td><td>1.0891</td><td>0.0128</td></tr><tr><td>CP-Composer w = 0.0</td><td>25.71%</td><td>0.0932</td><td>1.1179</td><td>0.0126</td><td>37.21%</td><td>0.1021</td><td>1.0787</td><td>0.0118</td></tr><tr><td>CP-Composer w = 1.0</td><td>30.00%</td><td>0.1017</td><td>1.1235</td><td>0.0161</td><td>55.81%</td><td>0.1008</td><td>1.0604</td><td>0.0124</td></tr><tr><td>CP-Composer w = 2.0</td><td>21.42%</td><td>0.1067</td><td>1.0996</td><td>0.0147</td><td>65.11%</td><td>0.1055</td><td>1.1005</td><td>0.0126</td></tr><tr><td>+CADS (Sadat et al., 2024)</td><td>27.14%</td><td>0.0807</td><td>1.0975</td><td>0.0119</td><td>45.54%</td><td>0.0798</td><td>1.0589</td><td>0.0132</td></tr><tr><td>CP-Composer w = 5.0</td><td>38.57%</td><td>0.1812</td><td>1.1515</td><td>0.0180</td><td>74.42%</td><td>0.1320</td><td>1.0523</td><td>0.0122</td></tr><tr><td>CP-Composer w = 10.0</td><td>32.86%</td><td>0.3532</td><td>1.1726</td><td>0.0232</td><td>68.60%</td><td>0.1784</td><td>1.0301</td><td>0.0175</td></tr><tr><td rowspan="2"></td><td colspan="4">Disulfide peptide</td><td colspan="4">Bicycle peptide</td></tr><tr><td>Succ.</td><td>AA-KL</td><td>B-KL</td><td>S-KL</td><td>Succ.</td><td>AA-KL</td><td>B-KL</td><td>S-KL</td></tr><tr><td>PepGLAD (Kong et al., 2024)</td><td>0</td><td>0.0808</td><td>1.1324</td><td>0.0124</td><td>0</td><td>0.0838</td><td>1.1823</td><td>0.0238</td></tr><tr><td>w/ EG (Bao et al., 2022)</td><td>0</td><td>0.0711</td><td>1.0891</td><td>0.0103</td><td>0</td><td>0.0729</td><td>1.0968</td><td>0.0228</td></tr><tr><td>CP-Composer w = 0.0</td><td>7.50%</td><td>0.1016</td><td>1.1062</td><td>0.0151</td><td>0</td><td>0.1225</td><td>1.1980</td><td>0.0252</td></tr><tr><td>CP-Composer w = 1.0</td><td>21.25%</td><td>0.1477</td><td>1.0939</td><td>0.0151</td><td>11.53%</td><td>0.1638</td><td>1.1490</td><td>0.0395</td></tr><tr><td>CP-Composer w = 2.0</td><td>41.25%</td><td>0.2873</td><td>1.0994</td><td>0.0379</td><td>30.76%</td><td>0.2147</td><td>1.1195</td><td>0.0735</td></tr><tr><td>+CADS (Sadat et al., 2024)</td><td>3.75%</td><td>0.0939</td><td>1.0788</td><td>0.0162</td><td>3.85%</td><td>0.0901</td><td>1.0624</td><td>0.0684</td></tr><tr><td>CP-Composer w = 5.0</td><td>82.50%</td><td>0.5139</td><td>1.0397</td><td>0.1913</td><td>84.62%</td><td>0.3385</td><td>1.0759</td><td>0.3351</td></tr><tr><td>CP-Composer w = 10.0</td><td>62.50%</td><td>1.6965</td><td>4.0312</td><td>1.1046</td><td>38.46%</td><td>1.2677</td><td>8.1935</td><td>0.3374</td></tr></table>
|
| 187 |
+
|
| 188 |
+
# Algorithm 2 Inference Procedure of CP-Composer
|
| 189 |
+
|
| 190 |
+
Input: Target type and distance constraint $(\mathbb{C}_T^*,\mathbb{C}_D^*)$ , diffusion sampler $\mathrm{Sampler}(\cdot)$ , guidance weight $w$ , step $T$ , score network $\epsilon_{\theta}$ , decoder $\mathcal{D}_{\xi}$
|
| 191 |
+
|
| 192 |
+
1: Initialize latents $\mathcal{G}_z^{(T)}$ from prior
|
| 193 |
+
2: for $t = T, T - 1, \dots, 1$ do
|
| 194 |
+
3: Compute score $\tilde{\epsilon} \gets (w + 1)\epsilon_{\theta}(\mathcal{G}_{\boldsymbol{z}}^{(t)}, \mathbb{C}_T^*, \mathbb{C}_D^*, t) - w\epsilon_{\theta}(\mathcal{G}_{\boldsymbol{z}}^{(t)}, \varnothing, \varnothing, t)$ {Eq. 10}
|
| 195 |
+
4: $\mathcal{G}_z^{(t - 1)}\gets \mathrm{Sampler}(\mathcal{G}_z^{(t)},\tilde{\epsilon},t)$ {Denoising step}
|
| 196 |
+
5: end for
|
| 197 |
+
|
| 198 |
+
Return: $\mathcal{D}_{\xi}(\mathcal{G}_z^{(0)})$
|
| 199 |
+
|
| 200 |
+
diffusion sampling with the guided score:
|
| 201 |
+
|
| 202 |
+
$$
|
| 203 |
+
\begin{array}{l} \tilde {\epsilon} \left(\mathcal {G} _ {\boldsymbol {z}} ^ {(t)}, \mathbb {C} _ {T} ^ {*}, \mathbb {C} _ {D} ^ {*}, t\right) = (w + 1) \epsilon_ {\theta} \left(\mathcal {G} _ {\boldsymbol {z}} ^ {(t)}, \mathbb {C} _ {T} ^ {*}, \mathbb {C} _ {D} ^ {*}, t\right) \\ - w \epsilon_ {\theta} \left(\mathcal {G} _ {z} ^ {(t)}, \varnothing , \varnothing , t\right), \tag {10} \\ \end{array}
|
| 204 |
+
$$
|
| 205 |
+
|
| 206 |
+
where a modified classifier-free guidance is employed to further amplify the guidance signal. The sample is acquired by decoding $\mathcal{G}_z^{(0)}$ back to the data space using the decoder $\mathcal{D}_{\xi}$ . The inference procedure is depicted in Alg. 2.
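The guided sampling loop of Alg. 2 then reads as follows (a sketch; `ddpm_step` and `decode` are placeholders for the DDPM update and the latent decoder $\mathcal{D}_{\xi}$):

```python
def sample_cyclic_peptide(eps_theta, ddpm_step, decode, z_T,
                          type_c, dist_c, T=1000, w=5.0):
    """Guided reverse diffusion with the composed cyclic constraint (cf. Alg. 2)."""
    z_t = z_T                                         # latent drawn from the prior
    for t in range(T, 0, -1):
        eps_cond = eps_theta(z_t, type_c, dist_c, t)
        eps_uncond = eps_theta(z_t, None, None, t)
        eps = (w + 1.0) * eps_cond - w * eps_uncond   # Eq. 10
        z_t = ddpm_step(z_t, eps, t)                  # one DDPM denoising step
    return decode(z_t)                                # decode G_z^(0) to the peptide
```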
|
| 207 |
+
|
| 208 |
+
# 4. Experiments
|
| 209 |
+
|
| 210 |
+
Task. We evaluate CP-Composer on target-specific cyclic peptide design, aiming to co-design the sequence and the binding structure of cyclic peptides given the binding site on the target protein.
|
| 211 |
+
|
| 212 |
+
Dataset. We utilize PepBench and ProtFrag datasets (Kong et al., 2024) for training and validation, with the LNR
|
| 213 |
+
|
| 214 |
+
dataset (Kong et al., 2024; Tsaban et al., 2022) for testing. PepBench contains 4,157 protein-peptide complexes for training and 114 complexes for validation, where each target protein is longer than 30 residues and each peptide binder is between 4 and 25 residues. ProtFrag encompasses 70,498 synthetic samples resembling protein-peptide complexes, which are extracted from local contexts in protein monomers. LNR consists of 93 protein-peptide complexes curated by domain experts, with peptide lengths ranging from 4 to 25 residues.
|
| 215 |
+
|
| 216 |
+
We evaluate zero-shot cyclic peptide generation in Sec. 4.1, demonstrate the flexibility of composable geometric constraints with high-order multi-cycle constraints in Sec. 4.2, and assess the stability and binding affinity of the generated cyclic peptides through molecular dynamics in Sec. 4.3.
|
| 217 |
+
|
| 218 |
+
# 4.1. Zero-Shot Cyclic Peptide Generation
|
| 219 |
+
|
| 220 |
+
Metrics. We evaluate the generated peptides based on two key aspects: cyclic constraint satisfaction and generation quality. For each target protein in the test set, we generate five candidate peptides and compute the following metrics. Success Rate (Succ.) measures the proportion of target proteins for which at least one of the five generated peptides satisfies the geometric constraints of the specified cyclization strategy. Amino Acid Divergence (AA-KL) calculates the Kullback-Leibler (KL) divergence between the amino acid composition of reference peptides and that of all generated samples. For cyclization patterns that impose amino acid constraints at specific positions, we exclude these constrained amino acid types when computing the distributions, as successful designs inherently deviate from the reference distribution on these amino acid types.

Figure 3. Four types of generated cyclic peptides (stapled, head-to-tail, disulfide, and bicycle), with the red boxes highlighting the positions for cyclization.
Backbone Dihedral Angle Divergence (B-KL) and Side-Chain Dihedral Angle Divergence (S-KL) measure the KL divergence between the distributions of dihedral angles in the reference peptides and the generated samples, assessing the rationality of the generated backbones and side chains, respectively.
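For clarity, the two simplest metrics can be sketched as follows (our illustration; the exact binning and smoothing used for the KL estimates are not specified here and are assumptions):

```python
import numpy as np

def success_rate(per_target_flags):
    """Succ.: fraction of targets where at least one of the five samples
    satisfies the cyclization constraints (flags is a list of booleans)."""
    return float(np.mean([any(flags) for flags in per_target_flags]))

def aa_kl(ref_counts, gen_counts, eps=1e-8):
    """AA-KL: KL divergence between the reference and generated amino-acid
    compositions, given per-type counts over the two peptide sets."""
    p = ref_counts / ref_counts.sum()
    q = gen_counts / gen_counts.sum()
    return float(np.sum(p * np.log((p + eps) / (q + eps))))
```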
|
| 236 |
+
|
| 237 |
+
Baselines. First, we compare our CP-Composer with the backbone model PepGLAD (Kong et al., 2024) without additional guidance to validate the effectiveness of our framework with composable geometric constraints. We further implement a baseline with the prevailing Energy-based Guidance (EG) (Dhariwal & Nichol, 2021; Bao et al., 2022) applied to node embeddings and pairwise distances to assess the advantages of our approach, with implementation details in Appendix C. To compare CP-Composer with other cyclic peptide generation methods, we implement DiffPepBuilder (Wang et al., 2024a), a model specifically designed for disulfide peptides. Furthermore, we also combine our method with an advanced sampler, the Condition Annealed Diffusion Sampler (CADS) (Sadat et al., 2024), to analyze how our method performs with alternative samplers.
|
| 238 |
+
|
| 239 |
+
Results. As shown in Table 1, CP-Composer significantly improves constraint satisfaction rates across all cyclization strategies compared to unguided baselines, while maintaining fidelity to reference distributions in amino acid composition and structural dihedral angles. The energy-guided baseline proves effective in simple cases requiring control over a single pairwise distance (i.e., head-to-tail cyclization), but struggles with more complex scenarios involving combinations of distance constraints and type constraints. This limitation is evident from its lower success rates on stapled peptides and complete failure in handling more intricate cyclization patterns including disulfide and bicycle peptides. In contrast, CP-Composer consistently achieves high success rates across these challenging cases, demonstrating the strength of our framework design with composable geometric constraints.
|
| 240 |
+
|
| 241 |
+
In Table 3, we further compare CP-Composer with DiffPepBuilder (Wang et al., 2024a). Although DiffPepBuilder is a method specifically designed for disulfide peptide generation, CP-Composer achieves a higher success rate than DiffPepBuilder, demonstrating the effectiveness of CP-Composer. We visualize examples of generated peptides for each cyclization strategy in Fig. 3, with more cases in Appendix E. Furthermore, the weight parameter $w$ effectively balances success rates and generation quality: increasing the control strength yields higher constraint satisfaction at the cost of slightly higher KL divergence, indicating a trade-off between constraint satisfaction and distributional fidelity. This flexibility allows users to customize the method based on specific application needs, prioritizing either higher success rates or closer resemblance to natural peptide distributions.
|
| 242 |
+
|
| 243 |
+
# 4.2. Flexibility in High-Order Combinations
|
| 244 |
+
|
| 245 |
+

|
| 246 |
+
Stapled + Stapled
|
| 247 |
+
|
| 248 |
+

|
| 249 |
+
-S-S-+Head-to-Tail
|
| 250 |
+
|
| 251 |
+

|
| 252 |
+
-S-S- + -S-S-
|
| 253 |
+
Figure 4. Generated peptides conforming to high-order combinations of cyclizations, with the red boxes highlighting the positions for cyclization.
|
| 254 |
+
|
| 255 |
+

|
| 256 |
+
-S-S-+ -S-S-+ -S-S-
|
| 257 |
+
|
| 258 |
+
Setup. To demonstrate the flexibility of our framework in handling composable geometric constraints, we investigate more complex and customized scenarios that involve multiple cyclizations within a single peptide. Specifically, we explore the following high-order combinations: 2*Stapled has two stapled pairs in one peptide; -S-S- + H-T includes one disulfide bond and one head-to-tail cyclization in one peptide; 2*-S-S- contains two disulfide bonds in one peptide; and 3*-S-S- involves three disulfide bonds in one peptide. The flexibility of CP-Composer enables seamless implementation of these complex constraints: simply combining the individual unit constraints for each cyclization strategy allows the model to accommodate them simultaneously.
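In terms of the unit constraints, such combinations are plain unions; the sketch below (residue positions and distances are placeholders) builds the -S-S- + H-T pattern for a hypothetical 12-residue peptide:

```python
def merge(*constraint_sets):
    """Compose cyclization strategies by taking the union of their unit
    type and distance constraints."""
    types, dists = {}, {}
    for t, d in constraint_sets:
        types.update(t)
        dists.update(d)
    return types, dists

# One disulfide bond plus head-to-tail cyclization (illustrative positions):
ss = ({2: "C", 8: "C"}, {(2, 8): 2.05})   # type + distance units of -S-S-
h2t = ({}, {(0, 11): 1.33})               # distance unit of head-to-tail
two_cycles = merge(ss, h2t)
```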
|
| 259 |
+
|
| 260 |
+
Results. As shown in Table 2, despite the increasing complexity of the constraints, CP-Composer achieves reasonable
|
| 261 |
+
|
| 262 |
+

|
| 263 |
+
|
| 264 |
+

|
| 265 |
+
|
| 266 |
+

|
| 267 |
+
Figure 5. RMSD trajectories from 100 ns molecular dynamics simulations for two target proteins, each bound to either a native linear peptide binder or a cyclic peptide generated by our model. The target proteins and their corresponding linear peptide binders are derived from PDB 3RC4 (top) and PDB 4J86 (bottom), respectively.
|
| 268 |
+
|
| 269 |
+

|
| 270 |
+
|
| 271 |
+
success rates across all high-order cyclization scenarios. The control strength parameter $w$ remains effective, with higher values leading to enhanced success rates. The only exception is 2*Stapled, likely due to the inherent difficulty of the stapled strategy, which already exhibits the lowest success rate in Table 1. This indicates that our framework effectively learns to generate peptides that conform to the joint distribution of multiple constraints. Fig. 4 visualizes peptides with these high-order cyclization patterns, highlighting the flexibility of CP-Composer in designing structurally feasible peptides tailored to customized requirements.
|
| 272 |
+
|
| 273 |
+
Table 2. Success rates for high-order combinations of multiple cyclizations within the same peptide.
|
| 274 |
+
|
| 275 |
+
<table><tr><td></td><td>2*Stapled</td><td>-S-S-+H-T</td><td>2*-S-S-</td><td>3*-S-S-</td></tr><tr><td>w = 1.0</td><td>2.5%</td><td>0</td><td>0</td><td>0</td></tr><tr><td>w = 2.0</td><td>7.5%</td><td>10.0%</td><td>26.0%</td><td>17.2%</td></tr><tr><td>w = 2.5</td><td>7.5%</td><td>20.0%</td><td>34.0%</td><td>34.5%</td></tr><tr><td>w = 3.0</td><td>7.5%</td><td>26.0%</td><td>62.0%</td><td>65.5%</td></tr></table>
|
| 276 |
+
|
| 277 |
+
Table 3. Success rate comparison between DiffPepBuilder and our method.
|
| 278 |
+
|
| 279 |
+
<table><tr><td>Succ.</td><td>Disulfide Peptide</td><td>2*-S-S-</td></tr><tr><td>CP-Composer</td><td>41.25%</td><td>62.00%</td></tr><tr><td>DiffPepBuilder (Wang et al., 2024a)</td><td>23.07%</td><td>32.78%</td></tr></table>
|
| 280 |
+
|
| 281 |
+
In Table 3, we compare CP-Composer with DiffPepBuilder. The results show that our method outperforms this dedicated cyclic peptide generation model under the high-order cyclization scenario of two disulfide bonds in one peptide, which indicates the flexibility of our framework.
|
| 282 |
+
|
| 283 |
+
# 4.3. Evaluations by Molecular Dynamics
|
| 284 |
+
|
| 285 |
+
Setup. We perform molecular dynamics (MD) simulations using the Amber22 package (Salomon-Ferrer et al., 2013) to compare the stability and binding affinity of linear peptides from the test set with cyclic peptides generated by our model. We use the ff14SB force field for proteins and peptides (Maier et al., 2015) with all systems solvated in water, and $150\,\mathrm{mM}$ $Na^{+}/Cl^{-}$ counterions are added to neutralize charges and simulate the normal saline environment (Jorgensen et al., 1983; Li et al., 2024c). The SHAKE algorithm is applied to constrain covalent bonds involving hydrogen atoms (Ryckaert et al., 1977), while non-bonded interactions are truncated at $10.0\,\text{\AA}$, with long-range electrostatics treated using the PME method. To estimate peptide binding energies, we further employ MM/PBSA calculations (Genheden & Ryde, 2015). Notably, while MD simulations provide high accuracy in evaluating conformational stability and binding affinity, they are very computationally expensive. Therefore, we randomly select two target proteins from the test set and generate one cyclic peptide using the head-to-tail and disulfide bond cyclization strategies for evaluation. More details on the setup of MD are in Appendix C.3.
|
| 286 |
+
|
| 287 |
+
Results. As shown in Fig. 5, the root mean square deviation (RMSD) trajectories of the two linear peptides from the test set exhibit significant fluctuations, indicating unstable binding conformations. In contrast, the RMSD trajectories of the cyclic peptides generated by our model remain comparatively flat, with consistently lower RMSD than the linear peptides, suggesting that the introduced geometric constraints effectively enhance conformational stability. Table 4 presents the average RMSD values with standard deviations, along with the binding affinity $(\Delta G)$ estimated via
|
| 288 |
+
|
| 289 |
+
Table 4. Average RMSD (with standard deviations) over the molecular dynamics trajectories after 50 ns, along with binding affinities $(\Delta G)$ estimated with MM/PBSA.
|
| 290 |
+
|
| 291 |
+
<table><tr><td>Peptide</td><td>RMSD (Å)</td><td>ΔG-MM/PBSA (kcal/mol)</td></tr><tr><td colspan="3">PDB: 3RC4</td></tr><tr><td>Linear (test set)</td><td>2.57±0.51</td><td>-9.73</td></tr><tr><td>Cyclic (ours)</td><td>1.44±0.23</td><td>-10.66</td></tr><tr><td colspan="3">PDB: 4J86</td></tr><tr><td>Linear (test set)</td><td>3.37±0.73</td><td>-15.17</td></tr><tr><td>Cyclic (ours)</td><td>1.56±0.40</td><td>-20.41</td></tr></table>
|
| 292 |
+
|
| 293 |
+
MM/PBSA simulations. The results indicate that cyclic peptides achieve significantly stronger binding affinities than their linear counterparts, thanks to their enhanced stability in the binding conformations.
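For readers who wish to reproduce this kind of analysis, the per-frame backbone RMSD along a production trajectory can be computed with standard tooling; the following is a minimal sketch using MDAnalysis, where the file names and the peptide selection string are placeholders rather than the actual files used in this work.

```python
# Minimal sketch (not the authors' pipeline): per-frame backbone RMSD of the
# peptide from an Amber production trajectory. File names and the selection
# string ("segid PEP") are placeholders.
import MDAnalysis as mda
from MDAnalysis.analysis import rms

u = mda.Universe("complex.prmtop", "production.ncdf")  # topology + trajectory
ref = mda.Universe("complex_start.pdb")                # reference structure

rmsd = rms.RMSD(u, ref, select="segid PEP and backbone")
rmsd.run()

traj_rmsd = rmsd.results.rmsd[:, 2]   # column 2: RMSD (in Angstrom) per frame
print(traj_rmsd.mean(), traj_rmsd.std())
```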
|
| 294 |
+
|
| 295 |
+
# 4.4. Generalization beyond Available Data
|
| 296 |
+
|
| 297 |
+
In Fig. 6, we visualize the structural embeddings of peptides generated under different cyclization strategies, along with linear peptides from the test set, using ESM2-650M (Lin et al., 2023) and t-SNE (Van der Maaten & Hinton, 2008). The results reveal distinct clusters corresponding to different cyclization strategies, all of which are clearly separated from the linear peptides. This indicates that CP-Composer generalizes well beyond the available data, effectively exploring unseen regions of the cyclic peptide space.
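As a rough illustration of this analysis, the sketch below embeds peptide sequences with ESM2-650M (via the fair-esm package) and projects the mean-pooled representations with t-SNE; the sequences, the pooling choice, and the t-SNE settings are illustrative assumptions, not the exact configuration used here.

```python
# Sketch: embed peptide sequences with ESM2-650M and visualize them with t-SNE.
# The sequences below are placeholders; in practice they would be the generated
# cyclic peptides and the linear test-set peptides.
import torch
import esm
from sklearn.manifold import TSNE

model, alphabet = esm.pretrained.esm2_t33_650M_UR50D()
model.eval()
batch_converter = alphabet.get_batch_converter()

seqs = ["GCRALQCW", "KDEAFRLG", "ACDEFGHC", "WLKTEAGD", "CPRGWCYA", "GKLSDEAF"]
data = [(f"pep{i}", s) for i, s in enumerate(seqs)]
_, _, tokens = batch_converter(data)

with torch.no_grad():
    out = model(tokens, repr_layers=[33])
reps = out["representations"][33]          # (batch, seq_len, 1280)
emb = reps[:, 1:-1].mean(dim=1).numpy()    # mean-pool over residues (skip BOS/EOS)

coords = TSNE(n_components=2, perplexity=3, init="pca").fit_transform(emb)
print(coords)
```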
|
| 298 |
+
|
| 299 |
+

|
| 300 |
+
Figure 6. t-SNE visualization of ESM embeddings for peptides in the test set and those generated with different cyclization strategies.
|
| 301 |
+
|
| 302 |
+
# 5. Conclusion
|
| 303 |
+
|
| 304 |
+
We introduce CP-Composer, a generative framework that enables zero-shot cyclic peptide design via composable geometric constraints. By decomposing complex cyclization patterns into unit constraints, it circumvents the limitation of data, achieves high success rates while preserving fidelity to natural distributions of type and structural statistics, and allows for high-order combinations of cyclization patterns, enabling the design of multi-cycle peptides with customizable strategies.
|
| 305 |
+
|
| 306 |
+
Our framework offers a principled approach to cyclic peptide design, with potential extensions to broader biomolecular applications involving geometric constraints.
|
| 307 |
+
|
| 308 |
+
# Acknowledgements
|
| 309 |
+
|
| 310 |
+
This work is jointly supported by the National Key R&D Program of China (No.2022ZD0160502), the National Natural Science Foundation of China (No. 61925601, No. 62376276, No. 62276152), Beijing Nova Program (20230484278), China's Village Science and Technology City Key Technology funding, Beijing Natural Science Foundation (No. QY24249) and Wuxi Research Institute of Applied Technologies.
|
| 311 |
+
|
| 312 |
+
# Impact Statement
|
| 313 |
+
|
| 314 |
+
This paper presents work whose goal is to advance the field of Machine Learning. There are many potential societal consequences of our work, none of which we feel must be specifically highlighted here.
|
| 315 |
+
|
| 316 |
+
# References
|
| 317 |
+
|
| 318 |
+
Anand, N. and Achim, T. Protein structure and sequence generation with equivariant denoising diffusion probabilistic models. arXiv preprint arXiv:2205.15019, 2022. 2
|
| 319 |
+
Bao, F., Zhao, M., Hao, Z., Li, P., Li, C., and Zhu, J. Equivariant energy-guided sde for inverse molecular design. arXiv preprint arXiv:2209.15408, 2022. 2, 4, 6, 7
|
| 320 |
+
Bertsekas, D. P. Constrained optimization and Lagrange multiplier methods. Academic press, 2014. 14
|
| 321 |
+
Dhariwal, P. and Nichol, A. Diffusion models beat gans on image synthesis. Advances in neural information processing systems, 34:8780-8794, 2021. 2, 4, 7
|
| 322 |
+
Fosgerau, K. and Hoffmann, T. Peptide therapeutics: current status and future directions. *Drug discovery today*, 20(1): 122–128, 2015. 1
|
| 323 |
+
Genheden, S. and Ryde, U. The mm/pbsa and mm/gbsa methods to estimate ligand-binding affinities. Expert opinion on drug discovery, 10(5):449-461, 2015. 8, 15
|
| 324 |
+
Goldenthal, R., Harmon, D., Fattal, R., Bercovier, M., and Grinspun, E. Efficient simulation of inextensible cloth. In ACM SIGGRAPH 2007 papers, pp. 49-es. 2007. 14
|
| 325 |
+
Han, J., Cen, J., Wu, L., Li, Z., Kong, X., Jiao, R., Yu, Z., Xu, T., Wu, F., Wang, Z., et al. A survey of geometric graph neural networks: Data structures, models and applications. arXiv preprint arXiv:2403.00485, 2024a. 2
|
| 326 |
+
|
| 327 |
+
Han, J., Xu, M., Lou, A., Ye, H., and Ermon, S. Geometric trajectory diffusion models. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024b. URL https://openreview.net/forum?id=OYmms5Mv9H.2
|
| 328 |
+
Ho, J. and Salimans, T. Classifier-free diffusion guidance. arXiv preprint arXiv:2207.12598, 2022. 2, 4, 5
|
| 329 |
+
Ho, J., Jain, A., and Abbeel, P. Denoising diffusion probabilistic models. Advances in neural information processing systems, 33:6840-6851, 2020. 3, 5
|
| 330 |
+
Ho, J., Salimans, T., Gritsenko, A., Chan, W., Norouzi, M., and Fleet, D. J. Video diffusion models. Advances in Neural Information Processing Systems, 35:8633-8646, 2022. 2
|
| 331 |
+
Hosseinzadeh, P., Watson, P. R., Craven, T. W., Li, X., Rettie, S., Pardo-Avila, F., Bera, A. K., Mulligan, V. K., Lu, P., Ford, A. S., et al. Anchor extension: a structure-guided approach to design cyclic peptides targeting enzyme active sites. Nature Communications, 12(1):3384, 2021. 2
|
| 332 |
+
Huang, L., Chen, D., Liu, Y., Shen, Y., Zhao, D., and Zhou, J. Composer: Creative and controllable image synthesis with composable conditions. arXiv preprint arXiv:2302.09778, 2023. 2
|
| 333 |
+
Ji, X., Nielsen, A. L., and Heinis, C. Cyclic peptides for drug development. Angewandte Chemie International Edition, 63(3):e202308251, 2024. 1
|
| 334 |
+
Jiang, E., Peng, J., Ma, Z., and Yan, X.-B. Ode-dps: Ode-based diffusion posterior sampling for inverse problems in partial differential equation. arXiv preprint arXiv:2404.13496, 2024. 2
|
| 335 |
+
Jorgensen, W. L., Chandrasekhar, J., Madura, J. D., Impey, R. W., and Klein, M. L. Comparison of simple potential functions for simulating liquid water. The Journal of chemical physics, 79(2):926-935, 1983. 8, 15
|
| 336 |
+
Kawar, B., Elad, M., Ermon, S., and Song, J. Denoising diffusion restoration models. Advances in Neural Information Processing Systems, 35:23593-23606, 2022. 2, 4
|
| 337 |
+
Kong, X., Huang, W., and Liu, Y. End-to-end full-atom antibody design. arXiv preprint arXiv:2302.00203, 2023. 3, 5, 13, 15
|
| 338 |
+
Kong, X., Jia, Y., Huang, W., and Liu, Y. Full-atom peptide design with geometric latent diffusion, 2024. URL https://arxiv.org/abs/2402.13555.1, 2, 3, 6, 7, 14, 15
|
| 339 |
+
|
| 340 |
+
Lee, A. C.-L., Harris, J. L., Khanna, K. K., and Hong, J.-H. A comprehensive review on current advances in peptide drug development and design. International journal of molecular sciences, 20(10):2383, 2019. 1
|
| 341 |
+
Li, J., Chen, T., Luo, S., Cheng, C., Guan, J., Guo, R., Wang, S., Liu, G., Peng, J., and Ma, J. Hotspot-driven peptide design via multi-fragment autoregressive extension. arXiv preprint arXiv:2411.18463, 2024a. 1
|
| 342 |
+
Li, J., Cheng, C., Wu, Z., Guo, R., Luo, S., Ren, Z., Peng, J., and Ma, J. Full-atom peptide design based on multimodal flow matching. In *Forty-first International Conference on Machine Learning*, 2024b. 1, 2
|
| 343 |
+
Li, M., Lan, X., Shi, X., Zhu, C., Lu, X., Pu, J., Lu, S., and Zhang, J. Delineating the stepwise millisecond allosteric activation mechanism of the class C GPCR dimer mGlu5. Nature Communications, 15(1):7519, 2024c. 8, 15
|
| 344 |
+
Lin, H., Zhang, O., Zhao, H., Jiang, D., Wu, L., Liu, Z., Huang, Y., and Li, S. Z. Ppflow: Target-aware peptide design with torsional flow matching. In *Forty-first International Conference on Machine Learning*, 2024. 1, 2
|
| 345 |
+
Lin, Z., Akin, H., Rao, R., Hie, B., Zhu, Z., Lu, W., Smetanin, N., Verkuil, R., Kabeli, O., Shmueli, Y., et al. Evolutionary-scale prediction of atomic-level protein structure with a language model. Science, 379(6637): 1123-1130, 2023. 9
|
| 346 |
+
Liu, N., Li, S., Du, Y., Torralba, A., and Tenenbaum, J. B. Compositional visual generation with composable diffusion models. In European Conference on Computer Vision, pp. 423-439. Springer, 2022. 2
|
| 347 |
+
Luo, S., Su, Y., Peng, X., Wang, S., Peng, J., and Ma, J. Antigen-specific antibody design and optimization with diffusion-based generative models for protein structures. In Oh, A. H., Agarwal, A., Belgrave, D., and Cho, K. (eds.), Advances in Neural Information Processing Systems, 2022. URL https://openreview.net/forum?id=jSorGn2Tjg.2
|
| 348 |
+
Maier, J. A., Martinez, C., Kasavajhala, K., Wickstrom, L., Hauser, K. E., and Simmerling, C. ff14sb: improving the accuracy of protein side chain and backbone parameters from ff99sb. Journal of chemical theory and computation, 11(8):3696-3713, 2015. 8, 15
|
| 349 |
+
Park, J. and Shen, Y. Equivariant blurring diffusion for hierarchical molecular conformer generation. arXiv preprint arXiv:2410.20255, 2024. 2
|
| 350 |
+
Rettie, S., Juergens, D., Adebomi, V., Bueso, Y. F., Zhao, Q., Leveille, A., Liu, A., Bera, A., Wilms, J., Uffing, A., et al. Accurate de novo design of high-affinity protein
|
| 351 |
+
|
| 352 |
+
binding macrocycles using deep learning. bioRxiv, pp. 2024-11, 2024. 1, 2
|
| 353 |
+
Rombach, R., Blattmann, A., Lorenz, D., Esser, P., and Ommer, B. High-resolution image synthesis with latent diffusion models. In 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022. 2
|
| 354 |
+
Ryckaert, J.-P., Ciccotti, G., and Berendsen, H. J. Numerical integration of the cartesian equations of motion of a system with constraints: molecular dynamics of n-alkanes. Journal of computational physics, 23(3):327-341, 1977. 8, 15
|
| 355 |
+
Sadat, S., Buhmann, J., Bradley, D., Hilliges, O., and Weber, R. M. Cads: Unleashing the diversity of diffusion models through condition-annealed sampling, 2024. URL https://arxiv.org/abs/2310.17347.6, 7
|
| 356 |
+
Salomon-Ferrer, R., Gotz, A. W., Poole, D., Le Grand, S., and Walker, R. C. Routine microsecond molecular dynamics simulations with amber on gpus. 2. explicit solvent particle mesh ewald. Journal of chemical theory and computation, 9(9):3878-3888, 2013. 8, 15
|
| 357 |
+
Satorras, V. G., Hoogeboom, E., and Welling, M. E(n) equivariant graph neural networks. In International Conference on Machine Learning, pp. 9323-9332. PMLR, 2021. 2
|
| 358 |
+
Schütt, K. T., Sauceda, H. E., Kindermans, P.-J., Tkatchenko, A., and Müller, K.-R. SchNet – a deep learning architecture for molecules and materials. The Journal of Chemical Physics, 148(24), 2018. 5, 13
|
| 359 |
+
Song, B., Kwon, S. M., Zhang, Z., Hu, X., Qu, Q., and Shen, L. Solving inverse problems with latent diffusion models via hard data consistency. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=j8hdRqOUhN.2,4
|
| 360 |
+
Song, J., Meng, C., and Ermon, S. Denoising diffusion implicit models. In International Conference on Learning Representations, 2021a. URL https://openreview.net/forum?id=St1giarCHLP.2
|
| 361 |
+
Song, J., Zhang, Q., Yin, H., Mardani, M., Liu, M.-Y., Kautz, J., Chen, Y., and Vahdat, A. Loss-guided diffusion models for plug-and-play controllable generation. In International Conference on Machine Learning, pp. 32483-32498. PMLR, 2023. 4
|
| 362 |
+
Song, Y., Sohl-Dickstein, J., Kingma, D. P., Kumar, A., Ermon, S., and Poole, B. Score-based generative modeling through stochastic differential equations. arXiv preprint arXiv:2011.13456, 2020. 2
|
| 363 |
+
|
| 364 |
+
Song, Y., Shen, L., Xing, L., and Ermon, S. Solving inverse problems in medical imaging with score-based generative models. arXiv preprint arXiv:2111.08005, 2021b. 2
|
| 365 |
+
Swanson, S., Sivaraman, V., Grigoryan, G., and Keating, A. E. Tertiary motifs as building blocks for the design of protein-binding peptides. *Protein Science*, 31(6):e4322, 2022. 2
|
| 366 |
+
Tsaban, T., Varga, J. K., Avraham, O., Ben-Aharon, Z., Khramushin, A., and Schueler-Furman, O. Harnessing protein folding neural networks for peptide-protein docking. Nature communications, 13(1):176, 2022. 6
|
| 367 |
+
Van der Maaten, L. and Hinton, G. Visualizing data using t-sne. Journal of machine learning research, 9(11), 2008. 9
|
| 368 |
+
Wang, F., Wang, Y., Feng, L., Zhang, C., and Lai, L. Target-specific de novo peptide binder design with diffpepbuilder, 2024a. URL https://arxiv.org/abs/2405.00128.7,8
|
| 369 |
+
Wang, F., Wang, Y., Feng, L., Zhang, C., and Lai, L. Target-specific de novo peptide binder design with diffpepbuilder. Journal of Chemical Information and Modeling, 2024b. 1, 2
|
| 370 |
+
Watson, J. L., Juergens, D., Bennett, N. R., Trippe, B. L., Yim, J., Eisenach, H. E., Ahern, W., Borst, A. J., Ragotte, R. J., Milles, L. F., et al. De novo design of protein structure and function with rfdiffusion. Nature, 620(7976): 1089-1100, 2023. 2
|
| 371 |
+
Xu, M., Yu, L., Song, Y., Shi, C., Ermon, S., and Tang, J. Geodiff: A geometric diffusion model for molecular conformation generation. In International Conference on Learning Representations, 2022. URL https://openreview.net/forum?id=PzcvxEMzvQC.2
|
| 372 |
+
Xu, M., Powers, A., Dror, R., Ermon, S., and Leskovec, J. Geometric latent diffusion models for 3d molecule generation. In International Conference on Machine Learning. PMLR, 2023. 2
|
| 373 |
+
Yang, S., He, X., and Zhu, B. Learning physical constraints with neural projections. Advances in Neural Information Processing Systems, 33:5178-5189, 2020. 14
|
| 374 |
+
Ye, H., Lin, H., Han, J., Xu, M., Liu, S., Liang, Y., Ma, J., Zou, J., and Ermon, S. TFG: Unified training-free guidance for diffusion models. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. URL https://openreview.net/forum?id=N8YbGX98vc.14
|
| 375 |
+
Yim, J., Trippe, B. L., De Bortoli, V., Mathieu, E., Doucet, A., Barzilay, R., and Jaakkola, T. SE(3) diffusion model
|
| 376 |
+
|
| 377 |
+
with application to protein backbone generation. arXiv preprint arXiv:2302.02277, 2023. 2
|
| 378 |
+
Zhang, H. and Chen, S. Cyclic peptide drugs approved in the last two decades (2001-2021). RSC Chemical Biology, 3 (1):18-31, 2022. 1
|
| 379 |
+
Zorzi, A., Deyle, K., and Heinis, C. Cyclic peptide therapeutics: past, present and future. Current opinion in chemical biology, 38:24-29, 2017. 1
|
| 380 |
+
|
| 381 |
+
# A. Proofs
|
| 382 |
+
|
| 383 |
+
# A.1. Proof of Theorem 3.3
|
| 384 |
+
|
| 385 |
+
For clarity, we restate Theorem 3.3 below.
|
| 386 |
+
|
| 387 |
+
Theorem 3.3 (Injectivity). Both $f_{T}$ and $f_{D}$ are injective. That is, $f(\mathbb{C}^{1}) = f(\mathbb{C}^{2})$ if and only if $\mathbb{C}^{1} = \mathbb{C}^{2}$ , where $(f, \mathbb{C}^{1}, \mathbb{C}^{2})$ can be $(f_{T}, \mathbb{C}_{T}^{1}, \mathbb{C}_{T}^{2})$ or $(f_{D}, \mathbb{C}_{D}^{1}, \mathbb{C}_{D}^{2})$ . Furthermore, their product function $\tilde{f}(\mathbb{C}_{T}, \mathbb{C}_{D}) \coloneqq (f_{T}(\mathbb{C}_{T}), f_{D}(\mathbb{C}_{D}))$ is also injective.
|
| 388 |
+
|
| 389 |
+
To prove Theorem 3.3, we first prove the following lemma.
|
| 390 |
+
|
| 391 |
+
Lemma A.2. If $g: \mathbb{R}^J \mapsto \mathbb{R}^K$ is injective, then $f(\mathbb{X}) = \{(i, g(\pmb{k}_i))\}_{i \in \mathcal{V}_{\mathbb{X}}}$ is also injective, where $\mathbb{X} = \{(i, \pmb{k}_i)\}_{i \in \mathcal{V}_{\mathbb{X}}}$ .
|
| 392 |
+
|
| 393 |
+
Proof. $f(\mathbb{X}^1) = f(\mathbb{X}^2) \iff \{(i, g(\pmb{k}_i^1))\}_{i \in \mathcal{V}_{\mathbb{X}^1}} = \{(i, g(\pmb{k}_i^2))\}_{i \in \mathcal{V}_{\mathbb{X}^2}} \iff \mathcal{V}_{\mathbb{X}^1} = \mathcal{V}_{\mathbb{X}^2} := \mathcal{V}_{\mathbb{X}},\ g(\pmb{k}_i^1) = g(\pmb{k}_i^2), \forall i \in \mathcal{V}_{\mathbb{X}} \iff \mathcal{V}_{\mathbb{X}^1} = \mathcal{V}_{\mathbb{X}^2} := \mathcal{V}_{\mathbb{X}},\ \pmb{k}_i^1 = \pmb{k}_i^2, \forall i \in \mathcal{V}_{\mathbb{X}} \iff \{(i, \pmb{k}_i^1)\}_{i \in \mathcal{V}_{\mathbb{X}^1}} = \{(i, \pmb{k}_i^2)\}_{i \in \mathcal{V}_{\mathbb{X}^2}} \iff \mathbb{X}^1 = \mathbb{X}^2$ , where the third deduction step leverages the injectivity of the function $g$ .
|
| 394 |
+
|
| 395 |
+
Now we are ready to prove Theorem 3.3.
|
| 396 |
+
|
| 397 |
+
Proof. We first prove the injectivity of $f_{T}$ . We choose $g$ to be the one-hot encoding function $\mathrm{One - hot}(\cdot):\mathbb{R}\mapsto \mathbb{R}^{K}$ . It is straightforward that this function is injective. By leveraging Lemma A.2, the proof is completed.
|
| 398 |
+
|
| 399 |
+
For the injectivity of $f_{D}$ , similarly we instantiate $g$ as the RBF feature map $\phi(\cdot): \mathbb{R} \mapsto \mathbb{R}^{\infty}$ . Such a map is injective, since $\| \phi(d_1) - \phi(d_2) \|^2 = \langle \phi(d_1), \phi(d_1) \rangle + \langle \phi(d_2), \phi(d_2) \rangle - 2\langle \phi(d_1), \phi(d_2) \rangle = 1 + 1 - 2 \exp(-\gamma \| d_1 - d_2 \|^2)$ , which vanishes if and only if $d_1 = d_2$ , i.e., $\phi(d_1) = \phi(d_2) \iff d_1 = d_2$ , hence injectivity. By leveraging Lemma A.2, the proof is completed.
|
| 400 |
+
|
| 401 |
+
Since both $f_{T}$ and $f_{D}$ are injective, $(f_{T}(\mathbb{C}_{T}^{1}), f_{D}(\mathbb{C}_{D}^{1})) = (f_{T}(\mathbb{C}_{T}^{2}), f_{D}(\mathbb{C}_{D}^{2})) \iff f_{T}(\mathbb{C}_{T}^{1}) = f_{T}(\mathbb{C}_{T}^{2}), f_{D}(\mathbb{C}_{D}^{1}) = f_{D}(\mathbb{C}_{D}^{2}) \iff \mathbb{C}_{T}^{1} = \mathbb{C}_{T}^{2}, \mathbb{C}_{D}^{1} = \mathbb{C}_{D}^{2} \iff (\mathbb{C}_{T}^{1}, \mathbb{C}_{D}^{1}) = (\mathbb{C}_{T}^{2}, \mathbb{C}_{D}^{2})$ . Therefore the product function $\tilde{f}(\mathbb{C}_T, \mathbb{C}_D) \coloneqq (f_T(\mathbb{C}_T), f_D(\mathbb{C}_D))$ is also injective, which concludes the proof.
|
| 402 |
+
|
| 403 |
+
# A.2. Equivariance
|
| 404 |
+
|
| 405 |
+
Proposition A.3 (Equivariance). The conditional score $\epsilon_{\theta}(\mathcal{G}_{\mathbf{z}}^{(t)},\mathbb{C},t)$ is $E(3)$ -equivariant, where $\mathbb{C}$ is $\mathbb{C}_T$ or $\mathbb{C}_D$ .
|
| 406 |
+
|
| 407 |
+
The proof is straightforward since our encodings of $\mathbb{C}_T$ and $\mathbb{C}_D$ are both E(3)-invariant, therefore the E(3)-equivariance of the score is preserved, following the proof in Kong et al. (2023).
|
| 408 |
+
|
| 409 |
+
# B. Decompositions of Cyclic Strategies
|
| 410 |
+
|
| 411 |
+
As illustrated in Fig. 1, cyclic peptides are looped by four strategies, each of which can be decomposed into unit geometric constraints defined in Sec. 3.2 as follows. Specifically, the pair $(i,l_i)$ indicates a type constraint that node $i$ is required to be type $l_i$ , and the triplet $(i,j,d_{ij})$ means a distance constraint that the pairwise distance between node $i,j$ should be $d_{ij}$ .
|
| 412 |
+
|
| 413 |
+
Stapled peptide. Given a lysine (K) located at index $i$ , a stapled peptide can be formed via a covalent linkage between the lysine and either an aspartic acid (D) at $i + 3$ , with constraints as
|
| 414 |
+
|
| 415 |
+
$$
|
| 416 |
+
\mathbb{C}_{\text{Stapled-D},i} = \left(\left\{(i, \mathrm{K}),\, (i + 3, \mathrm{D})\right\},\ \left\{(i, i + 3, d_{KD})\right\}\right), \tag{11}
|
| 417 |
+
$$
|
| 418 |
+
|
| 419 |
+
or a glutamic acid (E) at $i + 4$ , with constraints as
|
| 420 |
+
|
| 421 |
+
$$
|
| 422 |
+
\mathbb{C}_{\text{Stapled-E},i} = \left(\left\{(i, \mathrm{K}),\, (i + 4, \mathrm{E})\right\},\ \left\{(i, i + 4, d_{KE})\right\}\right), \tag{12}
|
| 423 |
+
$$
|
| 424 |
+
|
| 425 |
+
where $d_{KD}, d_{KE}$ are the lengths of covalent linkages between the K-D and K-E pairs, respectively.
|
| 426 |
+
|
| 427 |
+
Head-to-tail peptide. Given a peptide composed of $N$ amino acids indexed by $0,1,\dots ,N - 1$ , an additional amide bond is linked between the head and tail amino acid as
|
| 428 |
+
|
| 429 |
+
$$
|
| 430 |
+
\mathbb{C}_{\text{Head-to-tail}} = \left(\{\},\ \left\{(0, N - 1, d_{P})\right\}\right), \tag{13}
|
| 431 |
+
$$
|
| 432 |
+
|
| 433 |
+
where $d_P$ is the length of the amide bond.
|
| 434 |
+
|
| 435 |
+
Disulfide peptide. Connecting two non-adjacent cysteines (C) at $i, j$ with a disulfide bond, a disulfide peptide is constrained by
|
| 436 |
+
|
| 437 |
+
$$
|
| 438 |
+
\mathbb{C}_{\text{Disulfide},i,j} = \left(\left\{(i, \mathrm{C}),\, (j, \mathrm{C})\right\},\ \left\{(i, j, d_{S})\right\}\right), \tag{14}
|
| 439 |
+
$$
|
| 440 |
+
|
| 441 |
+
where $d_S$ is the length of the disulfide bond.
|
| 442 |
+
|
| 443 |
+
Bicycle peptide. To link three cysteines (C) at $i,j,k$ , a bicycle peptide is constrained by
|
| 444 |
+
|
| 445 |
+
$$
|
| 446 |
+
\mathbb{C}_{\text{Bicycle},i,j,k} = \left(\left\{(i, \mathrm{C}),\, (j, \mathrm{C}),\, (k, \mathrm{C})\right\},\ \left\{(i, j, d_{T}),\, (i, k, d_{T}),\, (j, k, d_{T})\right\}\right), \tag{15}
|
| 447 |
+
$$
|
| 448 |
+
|
| 449 |
+
where $d_{T}$ is the side length of the equilateral triangle formed around the central 1,3,5-trimethylbenzene linker.
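To make the decomposition above concrete, the snippet below shows one possible way to represent these unit constraints in code and compose them per strategy; the `Constraints` container and the bond-length values are illustrative assumptions, not the paper's implementation.

```python
# Sketch: representing the unit constraints of Appendix B as plain data and
# composing them per cyclization strategy. Bond lengths below are placeholders.
from dataclasses import dataclass, field

@dataclass
class Constraints:
    types: set = field(default_factory=set)   # {(i, residue_type)}
    dists: set = field(default_factory=set)   # {(i, j, target_distance)}

    def __or__(self, other):                  # compose two constraint sets
        return Constraints(self.types | other.types, self.dists | other.dists)

D_KD, D_KE, D_P, D_S, D_T = 1.33, 1.33, 1.33, 2.05, 5.0   # illustrative lengths (Angstrom)

def stapled_d(i):      return Constraints({(i, "K"), (i + 3, "D")}, {(i, i + 3, D_KD)})
def stapled_e(i):      return Constraints({(i, "K"), (i + 4, "E")}, {(i, i + 4, D_KE)})
def head_to_tail(n):   return Constraints(set(), {(0, n - 1, D_P)})
def disulfide(i, j):   return Constraints({(i, "C"), (j, "C")}, {(i, j, D_S)})
def bicycle(i, j, k):  return Constraints({(i, "C"), (j, "C"), (k, "C")},
                                          {(i, j, D_T), (i, k, D_T), (j, k, D_T)})

# Example: combining head-to-tail closure with one disulfide bond in a 12-mer.
combo = head_to_tail(12) | disulfide(2, 8)
print(combo)
```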
|
| 450 |
+
|
| 451 |
+
# C. Implementation Details
|
| 452 |
+
|
| 453 |
+
# C.1. Energy-based classifier guidance
|
| 454 |
+
|
| 455 |
+
With the definition of the geometric constraints, we now introduce their corresponding energy function, a scalar function that evaluates the satisfaction of the constraint given the input geometric graph.
|
| 456 |
+
|
| 457 |
+
Definition C.1 (Energy function of a constraint). An energy function of constraint $\mathbb{C}$ is a differentiable function $g_{\mathbb{C}}(\cdot):$ $\mathcal{X}\mapsto \mathbb{R}_{\geq 0}$ , such that $g_{\mathbb{C}}(\mathcal{G}) = 0$ if $\mathcal{G}\in \mathcal{X}$ satisfies the constraint $\mathbb{C}$ and $g_{\mathbb{C}}(\mathcal{G})\neq 0$ otherwise.
|
| 458 |
+
|
| 459 |
+
Intuitively, the energy function serves as an indicator of constraint satisfaction, following the conventional way of handling equality constraints (Bertsekas, 2014).
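As an illustration of Definition C.1, the toy energy terms below vanish exactly when the type and distance constraints are satisfied and are differentiable elsewhere; this is a sketch for intuition, not the specific guidance energy adopted from Kong et al. (2024).

```python
# Sketch: simple differentiable energy terms in the sense of Definition C.1.
# They are zero iff the constraints are met; illustrative only.
import torch

def distance_energy(coords, dist_constraints):
    """coords: (N, 3) residue coordinates; dist_constraints: list of (i, j, d_ij)."""
    e = coords.new_zeros(())
    for i, j, d in dist_constraints:
        e = e + (torch.norm(coords[i] - coords[j]) - d) ** 2
    return e

def type_energy(type_probs, type_constraints):
    """type_probs: (N, K) predicted residue-type probabilities; constraints: (i, l_i)."""
    e = type_probs.new_zeros(())
    for i, l in type_constraints:
        e = e + (1.0 - type_probs[i, l]) ** 2   # zero iff residue i is surely type l
    return e

coords = torch.randn(10, 3, requires_grad=True)
energy = distance_energy(coords, [(0, 9, 1.33)])
energy.backward()   # gradients of this energy are what the guidance uses
```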
|
| 460 |
+
|
| 461 |
+
One naive way to tackle the inverse problem is to directly optimize the energy function (Yang et al., 2020; Goldenthal et al., 2007) of the constraint with respect to the initial latents $\mathcal{G}_z^{(T)}$ , since its minima correspond to the data points $\mathcal{G}$ that satisfy the constraint. However, the large number of sampling steps $T$ required by diffusion models makes this optimization computationally prohibitive, as the gradient needs to be backpropagated through the denoiser $T$ times. Moreover, the energy function is not guaranteed to be convex, which further complicates the optimization.
|
| 462 |
+
|
| 463 |
+
Energy-based classifier guidance has been introduced to inject constraints as guidance for diffusion sampling in a soft and iterative manner. In our setting, we can pair up $p_t(\mathbb{C}|\mathcal{G}_z)$ and the energy function through the Boltzmann distribution, i.e., $p_t(\mathbb{C}|\mathcal{G}_z) = \exp(-g_{\mathbb{C}}(\mathcal{D}_{\xi}(\mathcal{G}_z))) / Z$ , where $Z$ is the normalizing constant. In this way, we have,
|
| 464 |
+
|
| 465 |
+
$$
|
| 466 |
+
\nabla_ {\mathcal {G} _ {z}} \log p _ {t} (\mathcal {G} _ {z} | \mathbb {C}) = \nabla_ {\mathcal {G} _ {z}} \log p _ {t} (\mathcal {G} _ {z}) - w \nabla_ {\mathcal {G} _ {z}} g _ {\mathbb {C}} \left(\mathcal {D} _ {\xi} (\mathcal {G} _ {z})\right), \tag {16}
|
| 467 |
+
$$
|
| 468 |
+
|
| 469 |
+
where $w \in \mathbb{R}$ is added to control the guidance strength. Performing such sampling procedure is equivalent to sampling from the posterior (Ye et al., 2024):
|
| 470 |
+
|
| 471 |
+
$$
|
| 472 |
+
p \left(\mathcal {G} _ {z} \mid \mathbb {C}\right) := p \left(\mathcal {G} _ {z}\right) \exp \left(- w g _ {\mathbb {C}} \left(\mathcal {D} _ {\xi} \left(\mathcal {G} _ {z}\right)\right)\right) / Z, \tag {17}
|
| 473 |
+
$$
|
| 474 |
+
|
| 475 |
+
which concentrates the density more on the regions with lower energy function value, biasing the sampling towards data points better satisfying the constraint $\mathbb{C} = (\mathbb{C}_T,\mathbb{C}_D)$ .
|
| 476 |
+
|
| 477 |
+
In our implementation, we adopt the guidance function in Kong et al. (2024) as the energy function $g_{\mathbb{C}}$ . In particular, the choice of $w$ significantly influences the generation quality. A larger $w$ typically enhances control strength but degrades generation quality when becoming excessively large. To strike a balance between controllability and quality, we conduct a sweep across various $w$ values and ultimately employ $w \in \{10, 30, 50\}$ for energy-based classifier guidance. The best performance across different $w$ values is reported for all conditions.
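The sketch below spells out how the gradient term in Eq. (16) can be folded into a standard sampling loop: the unconditional noise estimate is shifted by $w\,\sigma_t\,\nabla g_{\mathbb{C}}$ before the usual update. The score network, latent decoder, energy function, and noise scale are placeholders for the trained components.

```python
# Sketch of the constraint-guided noise estimate implied by Eq. (16). `eps_theta`
# (noise/score network), `decode` (latent decoder D_xi), `g_C` (constraint energy)
# and `sigma_t` (noise scale) are placeholders.
import torch

def guided_noise(z_t, t, eps_theta, decode, g_C, w, sigma_t):
    z_t = z_t.detach().requires_grad_(True)
    eps = eps_theta(z_t, t)                       # unconditional noise estimate
    energy = g_C(decode(z_t))                     # scalar constraint energy
    grad_e = torch.autograd.grad(energy, z_t)[0]
    # The noise estimate relates to the score by score = -eps / sigma_t, so
    # subtracting w * grad(g_C) from the score adds w * sigma_t * grad(g_C) to eps.
    return (eps + w * sigma_t * grad_e).detach()
```

The returned estimate then replaces the unconditional one inside an otherwise unchanged denoising update, which is how the guidance strength $w$ swept over $\{10, 30, 50\}$ enters the sampler.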
|
| 478 |
+
|
| 479 |
+
# C.2. Distance Constraints as Edge-Level Control
|
| 480 |
+
|
| 481 |
+
To inject the edge-level control into the model, we apply the adapter mechanism by adding an additional dyMEAN block (Kong et al., 2023) to each layer, and changing the message passing process into
|
| 482 |
+
|
| 483 |
+
$$
|
| 484 |
+
\left\{\left(\boldsymbol{h}_{i}^{(l+0.5)}, \vec{\boldsymbol{X}}_{i}^{(l+0.5)}\right)\right\}_{i \in \mathcal{V}} = \operatorname{AME}\left(\left\{\left(\boldsymbol{h}_{i}^{(l)}, \vec{\boldsymbol{X}}_{i}^{(l)}\right)\right\}_{i \in \mathcal{V}},\ \left\{\boldsymbol{d}_{ij}\right\}_{(i,j) \in \mathcal{E}_{D}},\ \mathcal{E}_{D}\right), \tag{18}
|
| 485 |
+
$$
|
| 486 |
+
|
| 487 |
+
$$
|
| 488 |
+
\left\{\left(\boldsymbol{h}_{i}^{(l+1)}, \vec{\boldsymbol{X}}_{i}^{(l+1)}\right)\right\}_{i \in \mathcal{V}} = \operatorname{AME}\left(\left\{\left(\boldsymbol{h}_{i}^{(l+0.5)}, \vec{\boldsymbol{X}}_{i}^{(l+0.5)}\right)\right\}_{i \in \mathcal{V}},\ \emptyset,\ \mathcal{E}\right), \tag{19}
|
| 489 |
+
$$
|
| 490 |
+
|
| 491 |
+
where $\mathcal{E}_D\subseteq \mathcal{E}$ is the set of constrained edges, and AME is the Adaptive Multi-Channel Equivariant layer proposed in Kong et al. (2023). Readers are referred to the original paper for further details.
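The two-pass structure of Eqs. (18)-(19) can be sketched as follows; `ToyEquivariantLayer` is only a stand-in for the AME layer of Kong et al. (2023), and the tensor shapes are illustrative.

```python
# Sketch of the adapter control flow in Eqs. (18)-(19). Only the two-pass structure
# (constrained edges first, then all edges) is shown; the layer below is a toy
# stand-in, not the AME layer of Kong et al. (2023).
import torch
import torch.nn as nn

class ToyEquivariantLayer(nn.Module):
    """Stand-in layer: aggregates messages along the given edges into the node
    features; coordinates are passed through unchanged."""
    def __init__(self, hidden_dim, edge_feat_dim):
        super().__init__()
        self.msg = nn.Linear(2 * hidden_dim + edge_feat_dim, hidden_dim)

    def forward(self, h, x, edges, edge_feats):
        src, dst = edges
        parts = [h[src], h[dst]]
        if edge_feats is not None:
            parts.append(edge_feats)
        m = torch.relu(self.msg(torch.cat(parts, dim=-1)))
        return h.index_add(0, dst, m), x

class ConstrainedMessagePassingBlock(nn.Module):
    """Eq. (18): messages only along constrained edges E_D, conditioned on the
    RBF-encoded target distances; Eq. (19): ordinary passing over all edges E."""
    def __init__(self, hidden_dim, edge_feat_dim):
        super().__init__()
        self.adapter = ToyEquivariantLayer(hidden_dim, edge_feat_dim)
        self.backbone = ToyEquivariantLayer(hidden_dim, 0)

    def forward(self, h, x, edges_all, edges_constrained, d_feats):
        h, x = self.adapter(h, x, edges_constrained, d_feats)   # Eq. (18)
        h, x = self.backbone(h, x, edges_all, None)             # Eq. (19)
        return h, x

# Illustrative shapes: 6 nodes, 16-dim features, 4 atom channels, one constrained edge.
h, x = torch.randn(6, 16), torch.randn(6, 4, 3)
edges_all = torch.tensor([[0, 1, 2, 3, 4], [1, 2, 3, 4, 5]])
edges_constrained = torch.tensor([[0], [5]])
d_feats = torch.randn(1, 8)
block = ConstrainedMessagePassingBlock(hidden_dim=16, edge_feat_dim=8)
h, x = block(h, x, edges_all, edges_constrained, d_feats)
```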
|
| 492 |
+
|
| 493 |
+
# C.3. Molecular Dynamics
|
| 494 |
+
|
| 495 |
+
We perform molecular dynamics (MD) simulations to assess the stability and binding affinity of linear peptides from the test set and cyclic peptides generated by our model. Simulations are conducted using the Amber22 package with the CUDA implementation of particle-mesh Ewald (PME) MD and executed on GeForce RTX 4090 GPUs (Salomon-Ferrer et al., 2013). For system preparation, the ff14SB force field is applied to proteins and peptides (Maier et al., 2015). All systems are solvated in a $10\,\AA$ truncated octahedron transferable intermolecular potential three-point (TIP3P) water box, and $150\,\mathrm{mM}$ $\mathrm{Na}^{+}/\mathrm{Cl}^{-}$ counterions are added to neutralize charges and simulate the normal saline environment (Jorgensen et al., 1983; Li et al., 2024c). Prior to equilibration, two rounds of energy minimization are performed: the first relaxes solvent molecules and $\mathrm{Na}^{+}/\mathrm{Cl}^{-}$ counterions while keeping all other atoms fixed, and the second relaxes all atoms without constraints. The systems are then gradually heated from $0\,\mathrm{K}$ to $310\,\mathrm{K}$ over $500~\mathrm{ps}$ under harmonic restraints of $10\,\mathrm{kcal}\cdot \mathrm{mol}^{-1}\cdot \mathring{\mathrm{A}}^{-2}$ on proteins and peptides. Subsequently, equilibration is carried out at $300\,\mathrm{K}$ and 1 bar under NPT conditions, with harmonic restraints on protein and ligand atoms progressively reduced from 5.0 to 3.0, 1.0, 0.5, and finally $0.1\,\mathrm{kcal}\cdot \mathrm{mol}^{-1}\cdot \mathring{\mathrm{A}}^{-2}$ , spanning a total of 2.5 ns. Production simulations are performed at constant temperature $(300\,\mathrm{K})$ and pressure (1 bar) maintained by the Langevin thermostat and Berendsen barostat, respectively. The SHAKE algorithm is applied to constrain covalent bonds involving hydrogen atoms (Ryckaert et al., 1977), while non-bonded interactions are truncated at $10.0\,\AA$ , with long-range electrostatics treated using the PME method. To estimate peptide binding energies, we further employ MM/PBSA calculations (Genheden & Ryde, 2015). While MD simulations provide high accuracy in evaluating conformational stability and binding affinity, they are computationally expensive. Therefore, we randomly select two target proteins from the test set and generate one cyclic peptide using head-to-tail and disulfide bond cyclization strategies for evaluation.
|
| 496 |
+
|
| 497 |
+
# C.4. Hyperparameter details
|
| 498 |
+
|
| 499 |
+
We train CP-Composer on a 24GB RTX 3090 GPU with the AdamW optimizer. For the autoencoder, we train for up to 100 epochs and save the top 10 models based on validation performance. We ensure that the total number of edges (scaling with the square of the number of nodes) does not exceed 60,000. The initial learning rate is set to $10^{-4}$ and is reduced by a factor of 0.8 if the validation loss does not improve for 5 consecutive epochs. Regarding the diffusion model, we train for no more than 1000 epochs. The learning rate is $10^{-4}$ , decayed by a factor of 0.6, and we stop training early if the validation loss does not decrease for 10 epochs. During training, we set the guidance strength to 1 for sampling at the validation stage. The structural details of the autoencoder and the diffusion model are the same as in Kong et al. (2024). For the RBF kernel, we use 32 feature channels.
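A minimal sketch of this optimization setup and of a 32-channel RBF encoding of a target distance is given below; the placeholder model, the RBF center range, and $\gamma$ are assumptions for illustration.

```python
# Sketch of the training configuration described above (AdamW, learning rate 1e-4,
# ReduceLROnPlateau with factor 0.8 / patience 5) plus a 32-channel Gaussian RBF
# encoding of a constrained distance. Center range and gamma are assumptions.
import torch
import torch.nn as nn

model = nn.Linear(32, 32)   # placeholder module standing in for the autoencoder
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
    optimizer, mode="min", factor=0.8, patience=5)

def rbf_encode(d, num_channels=32, d_min=0.0, d_max=20.0, gamma=4.0):
    """Encode a scalar distance as Gaussian RBF responses over evenly spaced centers."""
    centers = torch.linspace(d_min, d_max, num_channels)
    return torch.exp(-gamma * (d - centers) ** 2)

feat = rbf_encode(torch.tensor(2.05))   # e.g. an assumed disulfide-bond length in Angstrom
# After each validation epoch: scheduler.step(val_loss)
```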
|
| 500 |
+
|
| 501 |
+
# D. Further Analysis
|
| 502 |
+
|
| 503 |
+
# D.1. Necessity of RBFs
|
| 504 |
+
|
| 505 |
+
We evaluate the influence of the RBFs on the quality of peptide generation under the most difficult setting: bicycle peptides (26 samples in the test set). Based on the results in Table 5 and the parameter sensitivity study, we conclude that the RBF design is necessary to support distance control. Furthermore, performance saturates beyond 16 channels, indicating that a finite number of RBF channels is sufficient for empirical performance.
|
| 506 |
+
|
| 507 |
+
# D.2. Generation efficiency
|
| 508 |
+
|
| 509 |
+
In Table 6, we show the runtime comparison between our method and DiffPepBuilder, with both running on a 24GB RTX 3090 GPU.
|
| 510 |
+
|
| 511 |
+
Table 5. Success rates with different numbers of RBF channels
|
| 512 |
+
|
| 513 |
+
<table><tr><td>Succ.(w=2)</td><td>Bicycle peptide</td></tr><tr><td>RBFs=0</td><td>26.92%</td></tr><tr><td>RBFs=16</td><td>30.76%</td></tr><tr><td>RBFs=32</td><td>30.76%</td></tr></table>
|
| 514 |
+
|
| 515 |
+
Table 6. Runtime of our method and DiffPepBuilder
|
| 516 |
+
|
| 517 |
+

|
| 518 |
+
Figure 7. Four types of generated cyclic peptides, with the red boxes highlighting the positions of cyclization.
|
| 519 |
+
|
| 520 |
+
# E. Additional Visualizations
|
| 521 |
+
|
| 522 |
+
In Fig. 7, we show more examples of stapled, head-to-tail, disulfide, and bicycle peptides.
|
| 523 |
+
|
| 524 |
+
# F. Code Availability
|
| 525 |
+
|
| 526 |
+
The code for our CP-Composer is provided at https://github.com/jdp22/CP-Composer_final.
|
ICML/2025/Zero-Shot Cyclic Peptide Design via Composable Geometric Constraints/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:61afffc20132467b53e43e134f92c768e5f957a55fb881fdc99e06f98196cdfd
|
| 3 |
+
size 893865
|
ICML/2025/Zero-Shot Cyclic Peptide Design via Composable Geometric Constraints/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:8ead846565a3f928b6cf8400896f13695497fb8e7f8740ccb3e2ba116579542a
|
| 3 |
+
size 698215
|
ICML/2025/Zero-Shot Generalization of GNNs over Distinct Attribute Domains/da9fbf2a-a1c3-4f09-b587-cf7ab97be4b0_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:26225f0b4f7b1b1d1ed5b634ea4e06b68a1e36bec030222602643b726b3d857d
|
| 3 |
+
size 219893
|
ICML/2025/Zero-Shot Generalization of GNNs over Distinct Attribute Domains/da9fbf2a-a1c3-4f09-b587-cf7ab97be4b0_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:4d78db9f1722784ab2bc2b7d220778d8b502fb13aa22d2fadfedc4f7d2e14f34
|
| 3 |
+
size 257480
|
ICML/2025/Zero-Shot Generalization of GNNs over Distinct Attribute Domains/da9fbf2a-a1c3-4f09-b587-cf7ab97be4b0_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ec5dfe53ad9be595dd90676610e7ff612ad9f1babe2fb67d399b232007891657
|
| 3 |
+
size 1545841
|
ICML/2025/Zero-Shot Generalization of GNNs over Distinct Attribute Domains/full.md
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
ICML/2025/Zero-Shot Generalization of GNNs over Distinct Attribute Domains/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:3e2df5c55c63ec89c0592fadf293cc4785ceff3858ae627766f22ecaa12429e7
|
| 3 |
+
size 994195
|
ICML/2025/Zero-Shot Generalization of GNNs over Distinct Attribute Domains/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:cd6ca0ecb999b3c4833d0c284643b3e3227bfcf4f4933226c826e7b2faffc0a3
|
| 3 |
+
size 1485762
|
ICML/2025/Zero-Shot Offline Imitation Learning via Optimal Transport/8aedc65a-0768-45dc-94d5-6be5947ddcd7_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:eb41757dc0a1f90a1e799778846915e35186d76d1f01c00230ba4101a7cbaab6
|
| 3 |
+
size 198853
|
ICML/2025/Zero-Shot Offline Imitation Learning via Optimal Transport/8aedc65a-0768-45dc-94d5-6be5947ddcd7_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:3bc297bc9cdb4f2376023a958c13506b21aac855db033265366eb8559ae7733e
|
| 3 |
+
size 243295
|
ICML/2025/Zero-Shot Offline Imitation Learning via Optimal Transport/8aedc65a-0768-45dc-94d5-6be5947ddcd7_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e363cc7ae6b7a9fbe1457d79422fe62874b5f8b2cc4675fc64a89b9d60802115
|
| 3 |
+
size 17572026
|
ICML/2025/Zero-Shot Offline Imitation Learning via Optimal Transport/full.md
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
ICML/2025/Zero-Shot Offline Imitation Learning via Optimal Transport/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:97e5521f171279ce0e317e949fb233bf151accdd9092d69698980e95f9a8bd13
|
| 3 |
+
size 3950533
|
ICML/2025/Zero-Shot Offline Imitation Learning via Optimal Transport/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a9f297b595680fa265a2179f78fe2da941d3dbc8fae73775c112a420592f911d
|
| 3 |
+
size 1153045
|
ICML/2025/Zero-shot Meta-learning for Tabular Prediction Tasks with Adversarially Pre-trained Transformer/cad58b3c-c8ca-4697-bc6d-85f4b57f8922_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1f60b7043d988115ea5c276c951ccede201782464a6cfec342c80b4c7b817ab5
|
| 3 |
+
size 115970
|
ICML/2025/Zero-shot Meta-learning for Tabular Prediction Tasks with Adversarially Pre-trained Transformer/cad58b3c-c8ca-4697-bc6d-85f4b57f8922_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:019f4799f34b934921983c56505907af5a9ebc148d85aa5472e47ada96f5f2df
|
| 3 |
+
size 144863
|
ICML/2025/Zero-shot Meta-learning for Tabular Prediction Tasks with Adversarially Pre-trained Transformer/cad58b3c-c8ca-4697-bc6d-85f4b57f8922_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9fbf47b9fc3dd8c9779b5da31153bb1d93bc7b2acecaae14ad9421935fb8b47c
|
| 3 |
+
size 1157626
|
ICML/2025/Zero-shot Meta-learning for Tabular Prediction Tasks with Adversarially Pre-trained Transformer/full.md
ADDED
|
@@ -0,0 +1,382 @@
|
|
| 1 |
+
# Zero-shot Meta-learning for Tabular Prediction Tasks with Adversarially Pre-trained Transformer
|
| 2 |
+
|
| 3 |
+
Yulun Wu $^{1}$ Doron L. Bergman $^{2}$
|
| 4 |
+
|
| 5 |
+
# Abstract
|
| 6 |
+
|
| 7 |
+
We present an Adversarially Pre-trained Transformer (APT) that is able to perform zero-shot meta-learning on tabular prediction tasks without pre-training on any real-world dataset, extending on the recent development of Prior-Data Fitted Networks (PFNs) and TabPFN. Specifically, APT is pre-trained with adversarial synthetic data agents, who continue to shift their underlying data generating distribution and deliberately challenge the model with different synthetic datasets. In addition, we propose a mixture block architecture that is able to handle classification tasks with arbitrary number of classes, addressing the class size limitation – a crucial weakness of prior deep tabular zero-shot learners. In experiments, we show that our framework matches state-of-the-art performance on small classification tasks without filtering on dataset characteristics such as number of classes and number of missing values, while maintaining an average runtime under one second. On common benchmark dataset suites in both classification and regression, we show that adversarial pre-training was able to enhance TabPFN's performance. In our analysis, we demonstrate that the adversarial synthetic data agents were able to generate a more diverse collection of data compared to the ordinary random generator in TabPFN. In addition, we demonstrate that our mixture block neural design has improved generalizability and greatly accelerated pre-training.
|
| 8 |
+
|
| 9 |
+
# 1. Introduction
|
| 10 |
+
|
| 11 |
+
In standard deep learning workflows, models are either trained per dataset, or employed on data in a form com
|
| 12 |
+
|
| 13 |
+
<sup>1</sup>University of California, Berkeley <sup>2</sup>Capital One. Correspondence to: Yulun Wu <yulun_wu@berkeley.edu>.
|
| 14 |
+
|
| 15 |
+
Proceedings of the $42^{nd}$ International Conference on Machine Learning, Vancouver, Canada. PMLR 267, 2025. Copyright 2025 by the author(s).
|
| 16 |
+
|
| 17 |
+
patible with, and drawn from, the same distribution as the datasets it was previously trained on. Even in transfer learning, where the target of the model is changed, the input is at most expanded, but at least overlaps heavily with the data distribution that the model has previously seen in training. This is in contrast with meta learning (Finn et al., 2017; Nichol & Schulman, 2018; Lemke et al., 2015; Vanschooren, 2018; Feurer et al., 2022; Hospedales et al., 2021; Zintgraf et al., 2021), where a model is trained to be adaptive to new datasets such that few gradient updates or fine-tuning are needed, instead of training a new model specialized to every distinct dataset from scratch. In meta learning, rather than modeling a specific dataset, the model is trained to learn how to learn. This has multiple advantages. First, meta learning is highly adaptable (Huisman et al., 2021; Finn et al., 2017; Frans & Witkowski, 2021) – it learns more generalized representations that can be adapted to new tasks and different domains. Second, meta learning makes efficient use of data (Finn et al., 2017; Gevaert, 2021) – it supports learning from just a few samples. Third, as a consequence of its efficient use of (small) data, the model can reach a point where it is able to make meaningful predictions very quickly (Vanschooren, 2018).
|
| 18 |
+
|
| 19 |
+
In prior work, Verma et al. (2020) discussed the notion of zero-shot meta-learning. They train a generative adversarial network conditioned on class attributes, that can generate novel (previously unseen) class samples. This relies on the inputs present in the training data (class attributes) to be indicative of the new unseen classes. While they do not use gradient updates on the unseen data for prediction, they rely on the input data coming at the very least from a very similar distribution to that of the training data. The scope of problems this work aims to address is pristine zero-shot meta learning: given an unseen dataset from an unseen task after the model is pre-trained and deployed, can we do prediction on this dataset without training the model on it? Specifically, with zero gradient update on the model, and with no reliance on the context similarity between this dataset and the datasets that the model was pre-trained on. Note that this concept of zero-shot is slightly different from that in large vision and language models (Mann et al., 2020; Perez et al., 2021; Tsimpoukelli et al., 2021; Cahyawijaya et al., 2024; Ahmed & Devanbu, 2022) – the unseen datasets can
|
| 20 |
+
|
| 21 |
+
entail heterogeneous fields or class labels that were never observed during pre-training, and zero-shot in this context refers to the amount of model optimization conducted being zero given the unseen dataset rather than the amount of empirical examples seen being zero. The advantage of successfully establishing such a model is the exceptional generalizability and runtime.
|
| 22 |
+
|
| 23 |
+
A few recent breakthroughs (Müller et al., 2021; Hollmann et al., 2022) have demonstrated that achieving this aspiration is possible: Müller et al. (2021) introduced Prior-Data Fitted Networks (PFNs). They pursue zero-shot meta-learning by using transformers pre-trained on synthetic data generated from a collection of prior distributions, to perform approximate Bayesian inference using in-context learning (Luo et al., 2018; Mann et al., 2020). PFNs do not fit a model on downstream training data, instead feeding training data into the context in forward pass and making predictions conditioned on the context. Hollmann et al. (2022) introduced a PFN specifically aimed at tabular datasets – TabPFN. A more detailed background review on PFNs and specifically TabPFN can be found in Appendix A. Tabular data – data organized in rows and columns, and characterized by an unlimited heterogeneity of data fields, remains an area of machine learning where deep neural networks (DNNs) still struggle (Borisov et al., 2022; Shwartz-Ziv & Armon, 2022; McElfresh et al., 2024; Ye et al., 2024b) to push the boundaries of the state-of-the-art gradient boosted decision trees (GBDTs) (Prokhorenkova et al., 2018; Chen & Guestrin, 2016; Ke et al., 2017), despite numerous approaches (Borisov et al., 2022; Somepalli et al., 2021; Grinsztajn et al., 2022; Gorishniy et al., 2021; Rubachev et al., 2022; Levin et al., 2022; Kadra et al., 2021a; Arik & Pfister, 2021; Popov et al., 2019). Yet, tabular data is one of the most common data types in real-world machine learning (ML) applications (Chui et al., 2018; Borisov et al., 2022; Shwartz-Ziv & Armon, 2022). Although TabPFN has demonstrated exceptional zero-shot meta-learning capability on certain small tabular prediction tasks, we show that the distribution of synthetic data used in its pre-training is actually quite limited. Besides, the class size constraints of TabPFN pose a significant limitation on its generalizability – this might not be an important concern for the traditional one-model-for-one-domain pipeline, but is a crucial weakness for a zero-shot meta-learner (ZSML) since an unprecedented number of class labels could be present in inference time. Note that zero-shot meta-learning is largely similar to foundation modeling but slightly different in its scale and objective – it does not necessarily involve billions of parameters to learn the distribution of data and acquire token representations in a broad domain such as language or health records, but to model the general prediction logic and learn how to acquire data representations in unseen domains during inference time.
|
| 24 |
+
|
| 25 |
+
Similar to Hollmann et al. (2022), we investigate the
|
| 26 |
+
|
| 27 |
+
capability of zero-shot meta-learning under the scope of tabular data prediction problems. Our contributions are listed as follows:
|
| 28 |
+
|
| 29 |
+
- We propose an adversarial synthetic data pre-training approach on PFNs to establish a zero-shot meta-learner that is able to handle tabular prediction tasks with improved performance.
|
| 30 |
+
- We eliminate the class size limitation of TabPFN on classification tasks by proposing the mixture block neural design, which yields a zero-shot meta-learner with better generalizability.
|
| 31 |
+
- In experiments, we show that our framework achieves state-of-the-art performance on small tabular classification tasks without filtering on class size, feature size, number of categorical features or number of missing values, and improved upon TabPFN in both classification and regression. We show that the adversarial data agents are able to enrich the synthetic data generating distribution, and the mixture block is able to generalize to unseen class size and accelerate pre-training.
|
| 32 |
+
|
| 33 |
+
# 2. Proposed Method
|
| 34 |
+
|
| 35 |
+
Our Adversarially Pre-trained Transformer (APT) model is pre-trained once offline using a mix of random synthetic data generators and adversarial synthetic data agents. In this phase, the goal of the model is not to learn the specific pattern or probability distribution of any given dataset, but to learn the general prediction logic and means to represent various data, i.e., learning to learn. Once pre-trained and deployed, the model makes predictions on the testing set of any real-world dataset of interest in one forward pass, without performing any back-propagation or gradient updates of its weights. A demonstration of the workflow is shown in Figure 1. In Section 2.1, we describe the adversarial data agents in detail, whose goal is to continuously produce diverse and more challenging datasets for the meta-learning model during pre-training; in Section 2.2, we elaborate on the architecture of our transformer model, which has no restrictions on the class size of any real-world datasets practitioners provide.
|
| 36 |
+
|
| 37 |
+
# 2.1. Adversarial Data Agents
|
| 38 |
+
|
| 39 |
+
In the pre-training phase, we compose a batch of $m$ datasets $\{X^{(k)},\pmb{y}^{(k)}\}_{1\leq k\leq m}$ in each iteration using $m$ different data generators $\{g_1,\dots ,g_m\}$ that each independently generate $n$ number of data points, where $X^{(k)} = [x_i^{(k)}]_{i\leq n}^{\top} = [x_{i,j}^{(k)}]_{i\leq n,j\leq d_k}$ and $\pmb{y}^{(k)} = [y_i^{(k)}]_{i\leq n}^{\top}$ are the predictor matrix and response vector (denoted as $X$ and $\pmb{y}$ when no index is specified) with feature size $d_{k}$ . We adopted the multi-layer perceptron (MLP) construction introduced in
|
| 40 |
+
|
| 41 |
+

|
| 42 |
+
Figure 1. The model workflow of Adversarially Pre-trained Transformer (APT). Pre-training is done once, offline, with datasets generated by a mix of random synthetic data generators and adversarial synthetic data agents. The train-test split is randomly sampled for each batch of datasets. After the model is pre-trained and deployed, predictions are done per real-world dataset, online, with one forward pass and no parameter update. The transformer is test-masked, meaning that each token only attends to training data tokens. For cleanliness of the figure, only the attentions to and from the first training data token and the first testing data token are plotted.
|
| 43 |
+
|
| 44 |
+
Hollmann et al. (2022) for each generator instance, where predictors $\boldsymbol{x}_i^{(k)}$ and response $y_i^{(k)}$ are values of randomly selected neurons in sparsified noisy MLPs with some additional pre-processing. More details regarding this approach can be found in Appendix A.1.
|
| 45 |
+
|
| 46 |
+
Different from Hollmann et al. (2022), instead of generating datasets solely from randomly initialized sparse MLPs, a subset of the $m$ generators in our framework are adversarial agents that learn from the model's performance on the generated data, and perform gradient ascent on the model's prediction loss. In other words, these adversarial agents challenge the model by constantly shifting the synthetic data generating distributions to deliberately produce datasets that are more difficult for the model to handle. The loss for an adversarial agent $g_{\eta}$ with respect to prediction model $q_{\theta}$ can be written as
|
| 47 |
+
|
| 48 |
+
$$
|
| 49 |
+
\mathcal {L} \left(g _ {\eta}\right) = \mathbb {E} _ {X, \boldsymbol {y} \sim g _ {\eta}} \log q _ {\theta} \left(\boldsymbol {y} _ {(l + 1): n} \mid X _ {(l + 1): n}, \left\{X _ {1: l}, \boldsymbol {y} _ {1: l} \right\}\right) \tag {1}
|
| 50 |
+
$$
|
| 51 |
+
|
| 52 |
+
where $\{X_{1:l},\pmb{y}_{1:l}\}$ and $\{X_{(l + 1):n},\pmb{y}_{(l + 1):n}\}$ are the training and testing sets split from the generated dataset $\{X,\pmb{y}\}$ at position $l$ . In the following sections, we refer to the former (generators based on randomly initialized MLPs) as ordinary data generators, and the latter (generators based on adversarially updated MLPs) as adversarial data agents.
|
| 53 |
+
|
| 54 |
+
Relation to Classic Adversarial Training In relation to GANs (Goodfellow et al., 2014), the data agents here are the generators, and the meta-learner is the discriminator. Contrary to classic adversarial training, there are no real versus fake samples for the discriminator to distinguish in this context. The generator (data agent) and the discriminator (meta-learner) have one coherent competing objective: the meta-learner seeks to minimize the prediction loss on data generated by the data agents, while the data agents seek to generate data that maximize the prediction loss of the meta-learner. As a result, the desired gradients for updating the discriminator are but a flip of sign of the gradients calculated through back-propagation on the generator's objective. Hence, both the meta-learner and the data agents can be updated in a single iteration after loss calculation in this scenario. This results in more efficient adversarial training, and we further reduce its potential for mode collapse with the data agent reset described in the last paragraph of this section. Note that, contrary to classic GANs, the discriminator is the final product in this context rather than the generator.
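The single-iteration update described above can be sketched as follows; the data agent, meta-learner, and NLL loss objects are placeholders, and the periodic agent reset (last paragraph of this section) is omitted.

```python
# Sketch of one adversarial pre-training iteration: the meta-learner descends the
# prediction NLL while the data agent ascends it, both from a single backward pass.
# `agent`, `meta_learner`, and `nll` are placeholder modules/callables.
def adversarial_iteration(agent, meta_learner, nll, opt_model, opt_agent, n, l):
    X, y = agent.sample(n)                               # differentiable synthetic dataset
    pred = meta_learner(X[l:], context=(X[:l], y[:l]))   # in-context prediction on the test split
    loss = nll(pred, y[l:])                              # negative of Eq. (1)

    opt_model.zero_grad()
    opt_agent.zero_grad()
    loss.backward()                                      # gradients reach both parties

    # Flip the sign of the agent's gradients so its optimizer step performs gradient ascent.
    for p in agent.parameters():
        if p.grad is not None:
            p.grad.neg_()

    opt_model.step()   # meta-learner: minimize the prediction loss
    opt_agent.step()   # data agent: maximize it
    return float(loss)
```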
|
| 55 |
+
|
| 56 |
+
Discretization of Variables A key challenge in establishing adversarial data agents is the gradient flow under discretization: how do we generate synthetic data with categorical features while being able to perform end-to-end loss back-propagation? Inspired by the Gumbel-Softmax
|
| 57 |
+
|
| 58 |
+

|
| 59 |
+
(a) Model architecture for classification tasks
|
| 60 |
+
|
| 61 |
+

|
| 62 |
+
(b) Mixture block
|
| 63 |
+
Figure 2. Model architecture and the mixture block. a) $X = (X^{\mathrm{train}}, X^{\mathrm{test}})$ and $y^{\mathrm{train}}$ are embedded on $\mathbb{R}^{d_{\mathrm{model}}}$ using a feature embedding block and linear projection respectively. Then, embeddings for $X^{\mathrm{train}}$ and $y^{\mathrm{train}}$ are added as $h^{\mathrm{train}}$ , embeddings for $X^{\mathrm{test}}$ are denoted as $h^{\mathrm{test}}$ . Embeddings $(h^{\mathrm{train}}, h^{\mathrm{test}})$ are then passed to the transformer blocks with attention towards test embedding $h^{\mathrm{test}}$ masked, same as Hollmann et al. (2022). Finally, the outputs from transformer blocks are transformed to class probabilities through the mixture block for classification tasks, or directly transformed to point predictions through standard dense final layer for regression tasks. b) For each data point in the testing set, we use its output $q$ after transformer blocks to query training data's outputs $K$ . With two different dense feedforwards, two sets of logits are predicted: one set of logits are used to calculate the scaled softmax probabilities – these probabilities indicate how likely that the testing point is in the same class as the corresponding training points; the other set of logits are used to sample soft-discrete binary gates via Concrete distribution to sparsify these probabilities. Finally, the gated probabilities from the same class are added together to yield the final predictions.
|
| 64 |
+
|
| 65 |
+
trick (Jang et al., 2016) and the Concrete distribution (Maddison et al., 2016), we propose a continuous relaxation of discretization that naturally extends on the ranking discretization approach introduced in Hollmann et al. (2022), controlled by a user-specified temperature hyperparameter $\tau$ . For the $j$ -th feature column $\boldsymbol{x}_{..,j}$ of a predictor matrix $X$ and the corresponding $N_{j} - 1$ randomly sampled Gaussian quantiles $Q_{j}^{(1)} < Q_{j}^{(2)} < \dots < Q_{j}^{(N_{j} - 1)}$ at the initialization of the corresponding data agent, the soft-discretization that converts the $i$ -th value of the $j$ -th feature $x_{i,j}$ to a soft-categorical value with cardinality $N_{j}$ is given by
|
| 66 |
+
|
| 67 |
+
$$
|
| 68 |
+
x_{i,j}^{cat} = \pi\left(\left|\left\{x_{i,j} \geq \tilde{Q}_{j}^{(l)}\right\}_{l}\right|\right) + \tau \cdot \log\left(1 + \frac{x_{i,j} - \tilde{Q}_{j}^{\left(\left|\left\{x_{i,j} \geq \tilde{Q}_{j}^{(l)}\right\}_{l}\right|\right)}}{\tilde{Q}_{j}^{\left(1 + \left|\left\{x_{i,j} \geq \tilde{Q}_{j}^{(l)}\right\}_{l}\right|\right)} - \tilde{Q}_{j}^{\left(\left|\left\{x_{i,j} \geq \tilde{Q}_{j}^{(l)}\right\}_{l}\right|\right)}}\right) \tag{2}
|
| 69 |
+
$$
|
| 70 |
+
|
| 71 |
+
where $\pi$ is a permutation function on the integer domain $\{1,2,\ldots ,N_{j} - 1\}$ ; $\tilde{Q}_j^{(l)} = \mu (\pmb{x}_{\cdot ,j}) + \sigma (\pmb{x}_{\cdot ,j})\cdot Q_j^{(l)}$ for $1\leq l < N_{j}$ are the unnormalized quantiles with boundaries $\tilde{Q}_j^{(0)} = \min (\pmb{x}_{\cdot ,j})$ and $\tilde{Q}_j^{(N_j)} = \max (\pmb{x}_{\cdot ,j})$ ; and $|\{v\geq \tilde{Q}_j^{(l)}\} _l| = \sum_lI(v\geq \tilde{Q}_j^{(l)})$ is the position of a value $v$ in the ordered sequence $\{\tilde{Q}_j^{(l)}\}_{1\leq l\leq N_j}$ .
|
| 72 |
+
|
| 73 |
+
A visual demonstration of this conversion can be found on the right side of Figure 6 in the Appendix. Same as Hollmann et al. (2022), the extended ranking discretization approach decides the value of a categorical variable using only the continuous scalar $x_{i,j}$ , i.e., the value of one neuron in the sparsified noisy MLP, as opposed to the Gumbel-Softmax or Concrete distribution approach, which would require selecting $N_j$ neurons as logits of the $N_j$ classes. In our early experiments, we found that sampling multiple neurons to decide the value of one categorical feature achieved significantly worse performance than ranking discretization. Furthermore, since we do not desire to learn the explicit form of these distributions, explicitly generating class logits is not a necessity, and hence we prefer a more efficient differentiable discretization technique that does not involve reparameterization tricks, softmax operations or excessive samplings.
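A small sketch of this soft ranking discretization (Eq. 2) is shown below; the quantile count, the permutation handling, and the small epsilon guard are illustrative choices rather than the exact implementation.

```python
# Sketch of the soft ranking discretization of Eq. (2): a continuous feature column
# is mapped to soft-categorical values via per-column Gaussian quantiles, a random
# permutation, and temperature tau. Details (epsilon guard, shapes) are illustrative.
import torch

def soft_discretize(x_col, quantiles, perm, tau):
    """x_col: (n,) continuous values; quantiles: (N-1,) sorted standard-normal
    quantiles; perm: permutation of the N category indices; tau: temperature."""
    q = x_col.mean() + x_col.std() * quantiles                # unnormalized interior quantiles
    q = torch.cat([x_col.min()[None], q, x_col.max()[None]])  # prepend/append boundaries
    rank = (x_col[:, None] >= q[None, 1:-1]).sum(dim=1)       # |{x_ij >= Q_j^(l)}_l|
    lo, hi = q[rank], q[rank + 1]
    frac = torch.log1p((x_col - lo) / (hi - lo + 1e-8))       # differentiable remainder
    return perm[rank].float() + tau * frac                    # soft-categorical value

x = torch.randn(100)
cut_points = torch.sort(torch.randn(3)).values   # 3 interior quantiles -> 4 categories
perm = torch.randperm(4)
soft = soft_discretize(x, cut_points, perm, tau=0.1)
```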
Data Agent Reset In terms of the diversity of generated data, there is a trade-off between adversarially updating the neurons in the MLPs and re-initializing the MLPs altogether. Although, in the short run, re-initializing the MLPs and the corresponding random factors (number of features, number of classes, etc.) instantaneously yields new datasets that are likely to possess markedly different fields and distributions from the previous ones, such generation is constrained in the long run by the domain of distributions defined by the preset
range of hyperparameters (we show some evidence of this in Section 3.2). On the other hand, although adversarial data agents are performance-driven and can explore out-of-distribution regions better than random initialization, they also have the potential to converge to a Nash equilibrium and reach a stalemate with the meta-learner – for example, converging to a state where the generated predictors $x$ and response $y$ have no correlation. Hence, we combine the two approaches and reset the adversarial data agents every $N_{e}$ epochs to avoid such convergence. From the GAN perspective, we let the discriminator, i.e. the meta-learner, periodically gain an advantage and slightly beat the generator. Unlike in classic GANs, the discriminator is the desired model here while the generator is the supporting entity, hence exploration is more important than optimization for the generator in this context.
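
The reset schedule can be summarized by a short training-loop sketch. The toy agent and the stand-in meta-learner loss below are placeholders chosen only so the snippet runs end-to-end; only the update direction (the agent ascends on the meta-learner's loss) and the periodic re-initialization reflect the scheme above, with the agent learning rate, weight decay, and reset interval taken from the hyperparameters reported in Section 3.

```python
import torch
from torch import nn

class ToyDataAgent(nn.Module):
    """Stand-in for an adversarial data agent: a small MLP whose outputs are
    read off as synthetic predictors x and a response y."""
    def __init__(self, d_in=4, d_hidden=16):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(d_in, d_hidden), nn.Tanh(),
                                 nn.Linear(d_hidden, 3))
        self.d_in = d_in

    def forward(self, n):
        out = self.net(torch.randn(n, self.d_in))
        return out[:, :2], out[:, 2]            # predictors x, response y

def pretrain_agent_with_resets(meta_learner_loss, n_steps=10_000, reset_every=2_000):
    """Adversarially update the agent; re-initialize it every `reset_every` steps."""
    agent, opt = None, None
    for step in range(n_steps):
        if step % reset_every == 0:             # periodic data agent reset
            agent = ToyDataAgent()
            opt = torch.optim.SGD(agent.parameters(), lr=1e-1, weight_decay=1e-5)
        x, y = agent(n=128)
        loss = meta_learner_loss(x, y)          # meta-learner's loss on this synthetic dataset
        opt.zero_grad()
        (-loss).backward()                      # agent ascends on the meta-learner's loss
        opt.step()

# Toy stand-in for the meta-learner's loss: a fixed linear predictor's squared error.
w = torch.randn(3)
pretrain_agent_with_resets(lambda x, y: ((x @ w[:2] + w[2] - y) ** 2).mean(),
                           n_steps=200, reset_every=50)
```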
# 2.2. Mixture Block Architecture
Contrary to modern DNNs, traditional ML algorithms such as K-nearest neighbors and tree-based methods are more flexible in their ability to handle varying cardinality of classification labels, in the sense that they do not entail fixed-size dense layer parameters that cannot generalize to a different classification task with a different label cardinality. This is not much of an issue for the traditional one-model-for-one-dataset ML pipeline, but it is of significant importance for zero-shot meta-learners, yet unaddressed in prior works. Inspired by how tree-based methods solve classification tasks in a manner that is compliant with the empirical values and cardinality of the training labels, we propose a scatter-sum mixture block as the output prediction head for classification tasks, which departs significantly from the ordinary dense-final-layer approach. A visual demonstration can be found on the right of Figure 2. For each data point in the testing set, we use its embedding after the transformer blocks to query the embeddings of the training data, and yield two sets of logits via two separate feedforwards: one set of logits is used to calculate softmax probability weights of the keys, and the other set is used to sample soft-discrete gates via the Concrete distribution (Maddison et al., 2016) to sparsify these weights. In essence, these gates govern the splits of the training data in relation to the testing query, such that the final prediction only pays attention to a subset of relevant training data representations. In our preliminary experiments, we discovered that sparsifying attention through these gates is crucial to performance, and the mixture block works poorly without this component. The output class probabilities are then acquired by a scatter summation of the non-gated values using their original labels as the index. Relating to tree-based methods, the gates here determine the subset of training data that fall in the same leaf-node split as a given testing data point, and the weights determine the relative importance of each label in that split.
Contrary to tree-based methods, the splits are point-specific, i.e. a different split is decided for each testing data point, and the decision within a split is weighted rather than made by majority voting. Note that this approach does not change the order of computational complexity in terms of data size and data dimensions – it simply removes the final dense layer and adds two more multi-head attentions and feedforwards to the transformer architecture in a non-sequential manner.
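
A minimal PyTorch sketch of the scatter-sum mixture block described above is given below. The layer names, the scaled dot-product form of the query, and the gate temperature are our assumptions for illustration; the released model may differ in these details.

```python
import torch
from torch import nn

class MixtureBlock(nn.Module):
    """Scatter-sum classification head: each test embedding queries the train
    embeddings; softmax weights are sparsified by Concrete (relaxed Bernoulli)
    gates and then scatter-summed over the training labels."""
    def __init__(self, d_model, gate_temperature=0.1):
        super().__init__()
        self.weight_ff = nn.Linear(d_model, d_model)   # logits for the softmax weights
        self.gate_ff = nn.Linear(d_model, d_model)     # logits for the Concrete gates
        self.gate_temperature = gate_temperature

    def forward(self, h_test, h_train, y_train, n_classes):
        # h_test: (n_test, d), h_train: (n_train, d), y_train: (n_train,) integer labels
        scale = h_train.shape[-1] ** 0.5
        w_logits = self.weight_ff(h_test) @ h_train.T / scale      # (n_test, n_train)
        g_logits = self.gate_ff(h_test) @ h_train.T / scale

        weights = torch.softmax(w_logits, dim=-1)
        gates = torch.distributions.RelaxedBernoulli(
            torch.tensor(self.gate_temperature), logits=g_logits).rsample()
        gated = weights * gates                                     # sparsified weights

        # Scatter-sum the gated weights into class buckets indexed by the train labels.
        index = y_train.unsqueeze(0).expand(h_test.shape[0], -1)
        probs = torch.zeros(h_test.shape[0], n_classes, device=h_test.device)
        probs.scatter_add_(1, index, gated)
        return probs / probs.sum(dim=-1, keepdim=True).clamp_min(1e-12)

# Usage with random embeddings, 20 training points, and 3 classes:
head = MixtureBlock(d_model=32)
p = head(torch.randn(5, 32), torch.randn(20, 32), torch.randint(0, 3, (20,)), n_classes=3)
```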
Large Data Size and Feature Size Compared to the class size limitation, the feature size limitation of PFNs is relatively less tricky in theory, and there are already a few straightforward solutions concurrent with this work (Hollmann et al., 2025; Qu et al., 2025) that extend TabPFN's capabilities in handling datasets with a larger number of features, as well as a larger number of samples. Besides, the data capacity of PFNs could be adequately expanded by incorporating some of the recent advancements in general transformer and state-space model research (Wu et al., 2022; Bulatov et al., 2023). Therefore, we do not put emphasis on addressing these problems in this work, and make two simple adaptations to APT based on patch embedding and batch aggregation in the event that prediction on large datasets is required. See Appendix B for details. Note that concurrent solutions such as Hollmann et al. (2025) and Qu et al. (2025) do not conflict with our proposed architecture (the mixture block only modifies the last layer of the model), and thus can be naturally incorporated into our framework as practitioners desire.
# 3. Experiment
We evaluated our model and competing algorithms on common ML benchmarking dataset suites for tabular classification and tabular regression problems. In Section 3.1, we show that APT achieves state-of-the-art performance on small tabular classification tasks with a runtime comparable to that of TabPFN. In Section 3.2, we present a qualitative analysis of the impact and characteristics of the adversarial data agents. In Section 3.3, we demonstrate the generalizability of the mixture block and its effect on pre-training. In Section 3.4, we provide an ablation study and show that adversarial pre-training enhances the performance of TabPFN on both classification and regression tasks.
Datasets For classification, we used the curated open-source OpenML-CC18 dataset suite (Bischl et al., 2021) containing 68 popular tabular benchmark datasets (4 vision datasets, mnist_784, CIFAR_10, Devnagari-Script, and Fashion-MNIST, are not treated as tabular and are removed from the total of 72 datasets), and our main results are presented on all small datasets (number of samples no larger than 2,000) in OpenML-CC18, similar to Hollmann et al. (2022),
Table 1. Performance of algorithms on the 35 small datasets (no more than 2,000 data points) in the OpenML-CC18 suite, given a one-hour time budget. Note that there are two styles of standard deviation (std.) calculation for AUC: 1) first take the mean of AUC across datasets, then calculate the std. across splits (std. of mean), as used by TabPFN (Hollmann et al., 2022); 2) first calculate the std. across splits on each dataset, then take the mean across datasets (mean of std.), as used by TabZilla (McElfresh et al., 2024). Our result table largely adopts the style of TabZilla, but we present both std.'s here for clarity. The std. of mean shows variation at the suite level, which is more likely to result in statistical significance than the mean of std., which shows average variation at the dataset level. The mean of AUC taken across splits is used as the scoring metric to calculate the "Rank" and "Wins" of each algorithm across datasets. If several algorithms are tied for first, a win is assigned to each first-place algorithm. As in TabZilla (McElfresh et al., 2024), the table is ordered by the mean rank. The full results on each dataset for the top algorithms are shown in Table 5 of Appendix D.
<table><tr><td rowspan="2"></td><td colspan="4">Rank ↓</td><td colspan="3">ROC-AUC ↑</td><td>Wins ↑</td><td colspan="2">Time (sec.) ↓ (Tune + Train + Predict)</td></tr><tr><td>mean</td><td>med.</td><td>min</td><td>max</td><td>mean</td><td>std. of mean</td><td>mean of std.</td><td>num.</td><td>mean</td><td>med.</td></tr><tr><td>APT</td><td>3.86</td><td>3</td><td>1</td><td>11</td><td>0.921</td><td>0.003</td><td>0.019</td><td>13</td><td>0.90</td><td>0.40</td></tr><tr><td>CatBoost</td><td>4.03</td><td>4</td><td>1</td><td>9</td><td>0.918</td><td>0.002</td><td>0.020</td><td>6</td><td>3542.42</td><td>3555.74</td></tr><tr><td>TabPFN</td><td>4.57</td><td>4</td><td>1</td><td>11</td><td>0.913</td><td>0.003</td><td>0.020</td><td>4</td><td>0.86</td><td>0.37</td></tr><tr><td>SVM</td><td>4.89</td><td>4</td><td>1</td><td>12</td><td>0.904</td><td>0.003</td><td>0.023</td><td>10</td><td>1175.58</td><td>481.50</td></tr><tr><td>XGBoost</td><td>5.37</td><td>5</td><td>1</td><td>10</td><td>0.914</td><td>0.006</td><td>0.020</td><td>4</td><td>3607.78</td><td>3598.91</td></tr><tr><td>LightGBM</td><td>5.60</td><td>6</td><td>1</td><td>11</td><td>0.917</td><td>0.003</td><td>0.019</td><td>3</td><td>3542.94</td><td>3582.07</td></tr><tr><td>LASSO-Logistic</td><td>6.69</td><td>8</td><td>1</td><td>12</td><td>0.908</td><td>0.001</td><td>0.023</td><td>3</td><td>1519.41</td><td>1227.52</td></tr><tr><td>Ridge-Logistic</td><td>6.91</td><td>8</td><td>1</td><td>11</td><td>0.907</td><td>0.001</td><td>0.022</td><td>1</td><td>1479.93</td><td>845.59</td></tr><tr><td>RandomForest</td><td>7.17</td><td>7</td><td>1</td><td>12</td><td>0.908</td><td>0.003</td><td>0.021</td><td>3</td><td>1736.71</td><td>1476.37</td></tr><tr><td>ResNet</td><td>7.69</td><td>9</td><td>1</td><td>12</td><td>0.825</td><td>0.004</td><td>0.040</td><td>3</td><td>3582.15</td><td>3597.41</td></tr><tr><td>KNN</td><td>9.57</td><td>11</td><td>1</td><td>12</td><td>0.884</td><td>0.006</td><td>0.024</td><td>1</td><td>127.82</td><td>77.31</td></tr><tr><td>SAINT</td><td>9.97</td><td>12</td><td>1</td><td>12</td><td>0.759</td><td>0.017</td><td>0.077</td><td>1</td><td>3597.41</td><td>3594.41</td></tr></table>
except that 1) there is no additional filtering, i.e. all datasets, regardless of the number of classes, number of features, number of categorical features, and number of missing values, are kept in our evaluation pool, composing a more general collection of datasets. This brings the number of datasets in the evaluation pool from 18 to 35; 2) the train-test split is set to 80-20 instead of the unconventional 50-50. For regression benchmarking, we used the curated open-source OpenML-CTR23 dataset suite (Fischer et al., 2023).
Algorithms We compared APT to the top 3 GBDT algorithms (CatBoost (Prokhorenkova et al., 2018), XGBoost (Chen & Guestrin, 2016), LightGBM (Ke et al., 2017)) and the top 3 DNN methods (TabPFN (Hollmann et al., 2022), Tabular ResNet (Gorishniy et al., 2021), SAINT (Somepalli et al., 2021)) in the main experiments of TabZilla (McElfresh et al., 2024), as well as 5 standard machine learning algorithms (KNN (Cover & Hart, 1967), Ridge (Tikhonov, 1963), LASSO (Tibshirani, 1996), SVM (Cortes, 1995), Random Forest (Ho, 1995)).
Hyperparameters The hyperparameter search space of benchmark models is directly inherited from Hollmann et al. (2022), and directly inherited from McElfresh et al. (2024) if the benchmark model is not in Hollmann et al. (2022). TabPFN is pre-trained with hyperparameters directly inherited from their released checkpoint, only changing the maximum number of classes from 10 to 26, which is the
maximum class size of the datasets in the OpenML-CC18 suite. For APT, all common hyperparameters shared with TabPFN are directly inherited from TabPFN. See Appendix C for more details. A total of $12.5\%$ of the data generators are adversarial data agents during the pre-training of APT, with learning rate $10^{-1}$, weight decay $10^{-5}$, soft-discretization temperature $10^{-2}$, and 2,000 gradient steps between resets.
# 3.1. APT Achieves State-of-the-art Performance on Small Tabular Classification Tasks
We evaluated APT and benchmark models on the small datasets in OpenML-CC18 using the area under the receiver operating characteristic curve (ROC-AUC) with the one-vs-one (OVO) multi-class evaluation configuration, similar to Hollmann et al. (2022). Previously, Hollmann et al. (2022) showed that TabPFN matches the performance of state-of-the-art GBDT algorithms and outperforms them on small datasets that have less than 100 features, less than 10 classes, no categorical features, and no missing values in their main results. In this work, we do not impose any of these restrictions, to further examine APT's and TabPFN's zero-shot meta-learning capability. The results are presented in Table 1. For datasets with more than 100 features, we subsample 100 features, similar to McElfresh et al. (2024). In these experiments, APT achieved state-of-the-art performance with a runtime similar to that of TabPFN. The average runtime of APT increased by $4.6\%$ compared to

(a) Distribution of data in datasets generated by a set of ordinary data generators. (b) Distribution of data generated by another independent set of ordinary generators. (c) Distribution of data generated by a set of adversarial data agents.

Figure 3. Contour plot of two-dimensional data generated by ordinary data generators and adversarial data agents. Each subplot contains a total of 100,000 data points from 2,000 datasets. Note that subplots (a) and (b) come from two independent sets of ordinary generators with no mutual overlap, as each dataset is generated by an independently initialized random sparse neural network. Each dataset in subplot (c) is generated by an adversarial data agent after each consecutive loss back-propagation.
TabPFN and remained within a second on a GPU (NVIDIA H100), showing that the neural modifications from the mixture block have not made APT significantly heavier. Note that no cherry-picking was performed on model checkpoints for APT – the APT model that we released and used for evaluations is the last model after the final iteration of pre-training. Realistically, PFN-based models are pre-trained on synthetic data, and picking checkpoints for evaluation ad hoc is not ideal unless a whole different collection of real-world datasets is used for validation; but even in that case, it would still raise the concern of data leakage.
In these experiments, the deep learning algorithms under the standard supervised learning pipeline, ResNet and SAINT, yielded subpar performance. Note that the computing budget in Hollmann et al. (2022) and ours is set to 1 hour per dataset per split, contrary to the 10 hours in McElfresh et al. (2024). The deep learning algorithms under the zero-shot meta-learning pipeline, APT and TabPFN, yielded strong performance, but it has been previously shown that TabPFN sees a significant drop in performance on datasets with categorical features or missing values (Hollmann et al., 2022). In Figure 4, we further break down the results on datasets with and without these characteristics.

Figure 4. A breakdown of performance by dataset characteristics. Mean ranks are plotted in orange for datasets with the respective characteristic, and in blue for datasets without it.
From Figure 4, it can be observed that APT has largely addressed TabPFN's weakness in handling datasets with missing values, and has closed the gap between performance on datasets with and without categorical features compared to TabPFN, although GBDTs such as CatBoost still show the greatest capability in handling datasets with categorical features. We further break down the performance contribution of each proposed component in Section 3.4.
# 3.2. Qualitative Analysis of the Adversarial Data Agents
Even though arbitrary MLPs have the potential to serve as universal function approximators given certain regularity constraints (Hornik et al., 1989), the pre-set hyperparameters (e.g. the sampling distribution of neurons, the sampling distribution of the number of layers, the choices of activations, etc.) as well as the lack of gradient updates restrict the family of data distributions that randomly initialized sparse neural networks can put forward in practice. As shown in Figure 3, the distributions of two-dimensional data generated by two entirely different sets of random sparse neural networks align fairly precisely with merely 2,000 independent initializations. On the contrary, even without resetting the neural architecture and neural parameters, the adversarial data agents still managed to generate a more diverse collection of data and to diffuse the concentrated peaks present in the density of data generated by ordinary data generators. To be exact, for a collection of 2,000 datasets generated by ordinary data generators, we evaluated a KL-divergence of $0.134 \pm 0.141$ between it and a collection of 2,000 datasets generated by another set of ordinary data generators, and a KL-divergence of $0.813 \pm 0.072$ between it and a collection of 2,000 datasets generated by adversarial data agents.
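
The KL-divergence figures above compare empirical two-dimensional data collections; a histogram-based estimator such as the sketch below is one way such a number can be computed (the shared binning and the smoothing constant are our assumptions, not necessarily the estimator used for the reported values).

```python
import numpy as np

def kl_2d(samples_p, samples_q, bins=30, eps=1e-9):
    """KL(P || Q) between two 2-D sample collections via shared histograms."""
    lo = np.minimum(samples_p.min(axis=0), samples_q.min(axis=0))
    hi = np.maximum(samples_p.max(axis=0), samples_q.max(axis=0))
    edges = [np.linspace(lo[d], hi[d], bins + 1) for d in range(2)]
    p, _, _ = np.histogram2d(samples_p[:, 0], samples_p[:, 1], bins=edges)
    q, _, _ = np.histogram2d(samples_q[:, 0], samples_q[:, 1], bins=edges)
    p = p / p.sum() + eps
    q = q / q.sum() + eps
    return float(np.sum(p * np.log(p / q)))

# Example: two collections of 100,000 two-dimensional points each.
rng = np.random.default_rng(0)
a = rng.normal(size=(100_000, 2))
b = rng.normal(loc=0.3, size=(100_000, 2))
print(kl_2d(a, b))
```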
As a motivation of imposing data agent reset, we were wary that the data agents after many adversarial updates could yield synthetic datasets whose features have little to no
signal on the response variable. With our hyperparameter settings, we have not observed such behavior; to our surprise, the synthetic datasets generated by adversarial agents exhibit slightly stronger signal, with a Pearson correlation of $0.311 \pm 0.026$ between predictors and responses on datasets with two-dimensional features, as opposed to $0.268 \pm 0.013$ for ordinary data generators. We postulate that this is partly a consequence of the high reset frequency and high generator learning rate.
# 3.3. Generalizability of the Mixture Block
After a ZSML is deployed, one should not be required to re-do its pre-training because of certain characteristics of the datasets in the evaluation pool that the model cannot handle, and this is why the mixture block architecture is important. For TabPFN, we have to look at the evaluation dataset pool first and calculate the largest class size before using it as a hyperparameter for pre-training. This is not a procedure that fits well into the zero-shot learning concept. Our proposed mixture block architecture does not have such a class size limitation, and we show the performance of APT on the datasets with more than 10 classes in OpenML-CC18 in Table 2.
Table 2. The ROC-AUC on datasets with more than 10 classes. APT pre-trained on datasets with a maximum of 10 classes is able to match APT without the mixture block pre-trained on datasets with a maximum of 26 classes on 3 of the 4 datasets.
<table><tr><td></td><td>letter</td><td>isolet</td><td>vowel</td><td>texture</td></tr><tr><td>APT w/o Mixture</td><td>.975 ± .002</td><td>.970 ± .003</td><td>1 ± 0</td><td>1 ± 0</td></tr><tr><td>APT</td><td>.975 ± .002</td><td>.939 ± .011</td><td>1 ± 0</td><td>1 ± 0</td></tr></table>
Interestingly, the mixture block's generalizability significantly accelerated pre-training in our experiments. The ROC-AUC evaluated after each iteration of pre-training, with and without the mixture block, is presented in Figure 5. Note that ensembling over permutations (Hollmann et al., 2022) is not performed in this experiment, as it would dramatically increase runtime given that evaluation is performed after every gradient step.

Figure 5. ROC-AUC on the 35 small datasets in OpenML-CC18 evaluated after each of the first 30,000 gradient steps.
From Figure 5, we can see that models with the mixture block
learn remarkably faster than models without it. For APT, the model reaches an AUC of 0.70 in merely 40 gradient steps, an AUC of 0.80 in 591 gradient steps, and an AUC of 0.90 in 11,780 gradient steps.
# 3.4. Ablation Study
Classification Although we discovered in the previous section that the mixture block gives the model a nice acceleration, the original purpose of designing such an architecture was not performance-driven, and we still expect that the final performance improvement was largely contributed by the adversarial pre-training. We present an ablation study in Table 3 to verify this expectation.
Table 3. Ablation study on tabular classification. Note that APT is TabPFN with adversarial pre-training and mixture block.
<table><tr><td rowspan="2"></td><td colspan="2">Small</td><td colspan="2">All</td></tr><tr><td>mean AUC ↑</td><td>rank ↓</td><td>mean AUC ↑</td><td>rank ↓</td></tr><tr><td>APT</td><td>0.921 ± 0.003</td><td>2.11 ± 0.16</td><td>0.918 ± 0.006</td><td>2.1 ± 0.2</td></tr><tr><td>APT w/o Mixture</td><td>0.917 ± 0.005</td><td>2.09 ± 0.06</td><td>0.917 ± 0.005</td><td>2.1 ± 0.1</td></tr><tr><td>TabPFN w/ Mixture</td><td>0.914 ± 0.004</td><td>2.55 ± 0.22</td><td>0.914 ± 0.005</td><td>2.6 ± 0.2</td></tr><tr><td>TabPFN</td><td>0.913 ± 0.003</td><td>2.49 ± 0.16</td><td>0.914 ± 0.005</td><td>2.4 ± 0.2</td></tr></table>
Unsurprisingly, models with and without the mixture block did not dominate each other on mean AUC and rank collectively. Note that the mixture block was proposed for generalizing to datasets with an unseen number of classes, and we expect it to have little to no performance impact on datasets whose number of classes was seen during pre-training.
Regression Although ZSMLs are gradually catching up with GBDTs on classification problems and have likely reached a performance mark close to saturation on small classification problems, tabular regression remains an area where ZSMLs have not yet shown exceptional performance. We additionally report a study on the 35 datasets in the OpenML-CTR23 regression suite in Table 4, and show the progress APT has made on regression tasks over TabPFN.
Table 4. Ablation study on tabular regression. Small datasets are the 12 datasets in OpenML-CTR23 with data size no larger than 2,000. Note that APT is TabPFN with adversarial pre-training in this setting, since the mixture block was only used for classification tasks.
<table><tr><td rowspan="2"></td><td colspan="2">Small</td><td colspan="2">All</td></tr><tr><td>mean MSE ↓</td><td>wins ↑</td><td>mean MSE ↓</td><td>wins ↑</td></tr><tr><td>TabPFN</td><td>0.412 ± 0.077</td><td>3.8 ± 1.2</td><td>0.340 ± 0.025</td><td>6.4 ± 1.4</td></tr><tr><td>APT</td><td>0.344 ± 0.068</td><td>8.2 ± 1.2</td><td>0.306 ± 0.023</td><td>28.6 ± 1.4</td></tr></table>
From Table 4, it can be observed that incorporating adversarial pre-training has boosted the performance of TabPFN, yielding a larger number of wins by a significant margin. Note that we used the exact same synthetic data sampling distributions and hyperparameters that were used in TabPFN
for the purpose of ablation, in order to clearly demonstrate the contribution of adversarial training. TabPFN was trained only on classification problems, and therefore it is possible that these hyperparameters are over-optimized for classification tasks, and under-optimized for regression tasks.
# 4. Related Work
# 4.1. Tabular Learning
GBDTs such as XGBoost and others (Chen & Guestrin, 2016; Prokhorenkova et al., 2018; Ke et al., 2017) are commonly used for tabular data problems, in the traditional one-model-for-one-dataset approach. To date, numerous deep learning approaches have been developed for tabular data, mostly taking the one-model-for-one-dataset approach (Borisov et al., 2022; Somepalli et al., 2021; Gorishniy et al., 2021; Rubachev et al., 2022; Kadra et al., 2021a; Arik & Pfister, 2021; Popov et al., 2019; Kotelnikov et al., 2023; Gorishniy et al., 2024; 2022; Chen et al., 2024; Kadra et al., 2021b; Huang et al., 2020), but some also venture into transfer learning, many but not all leveraging large language models to find relevant information for the tabular data problem at hand (Levin et al., 2022; Yan et al., 2024; Borisov et al., 2023; Ye et al., 2024a; Spinaci et al., 2024; Hegselmann et al., 2023; Kim et al., 2024; Zhu et al., 2023).
Tabular Meta-Learning Auto-Sklearn, introduced in Feurer et al. (2015) and improved upon in Feurer et al. (2022), uses Bayesian optimization to determine the best algorithm and feature pre-processing steps for modeling a given dataset, with meta-learning used to initialize the Bayesian optimization. In contrast to Auto-Sklearn and transfer learning methods for tabular data, TabPFN (Müller et al., 2021) is trained solely on synthetic data to learn the general prediction logic of tabular classification and to acquire meaningful data representations at inference time. Helli et al. (2024) introduced a variant of TabPFN that is trained on a drifting synthetic data distribution, but the drift is independent of the performance of the model being optimized.
# 4.2. Zero-shot Learning
Recent work such as Xian et al. (2018; 2017); Chang et al. (2008); Larochelle et al. (2008); Palatucci et al. (2009) has shown impressive zero-shot learning capability in the space of language and vision problems. Recent approaches to zero-shot or few-shot learning for tabular data problems mostly encode tabular data as language, and then leverage large language models (LLMs) for their zero- or few-shot capabilities (see Hegselmann et al. (2023); Nam et al. (2023); Gardner et al. (2024)). These approaches rely on relevant information about the tabular data existing in LLMs – this is most obviously the case when column names are
meaningful, but it is not guaranteed for broad tabular data problems.
# 4.3. Adversarial Training
Building on generative adversarial networks (GANs) and adversarial training (Goodfellow et al., 2015; Madry et al., 2018; Kurakin et al., 2017), recent work such as Shafahi et al. (2019) improved efficiency by combining the back-propagation steps of the generator and discriminator. However, this method has been shown to suffer from catastrophic overfitting (Andriushchenko & Flammarion, 2020; Kim et al., 2021) without further modifications. Other works focusing on improving the efficiency of adversarial training include Wong et al. (2020) and Zhang et al. (2019), which restrict most of the forward and back propagation within the first layer of the network during adversarial updates. Zhang et al. (2021) in particular noted that weight updates frequently go back and forth in opposite directions within one training epoch, suggesting those updates might be redundant. Many other variations have been introduced to mitigate vanishing gradients and additional challenges of GAN training (Jabbar et al., 2021), such as failure to find a Nash equilibrium (Ratliff et al., 2016) and internal covariate shift (Ioffe, 2015).
# 5. Conclusion
In this work, we made a first effort at exploring the adversarial pre-training of deep zero-shot meta-learners, specifically PFNs. We proposed APT, a zero-shot meta-learner that improves the performance of TabPFN on tabular prediction tasks and matches state-of-the-art GBDTs on small tabular classification tasks. In addition, we proposed a mixture block neural design to eliminate the class size restriction of PFNs, addressing a crucial issue in their generalizability to broad classification problems. As for limitations, APT does not outperform GBDTs on large tabular datasets, and it shares the quadratic computational complexity of TabPFN. Hence, extensions of this work could explore means of acquiring data representations in a less expensive manner. For example, considerable research in recent years has significantly accelerated the transformer and increased its context length (Wu et al., 2022; Bulatov et al., 2023), and it would be worthwhile for future research to apply these advancements to APT as well as other PFNs. Besides, future research could extend the mixture block to standard (non-zero-shot) classification settings in light of its ability to generalize and to greatly accelerate convergence, which could improve the performance of traditional DNNs on small classification datasets. The mixture block or other alternatives to the dense final layer could also be explored in both standard and zero-shot regression settings, which could have an impact on the inductive bias of DNNs and their underperformance relative to GBDTs (Grinsztajn et al., 2022) on certain kinds of tabular data.
# Acknowledgements
We thank Tyler Farnan, Gang Mei, and C. Bayan Bruss for the insightful discussions.
# Impact Statement
This paper presents work whose goal is to advance the field of Machine Learning. There are many potential societal consequences of our work, none of which we feel must be specifically highlighted here.
# References
Ahmed, T. and Devanbu, P. Few-shot training llms for project-specific code-summarization. In Proceedings of the 37th IEEE/ACM International Conference on Automated Software Engineering, pp. 1-5, 2022.
Andriushchenko, M. and Flammarion, N. Understanding and improving fast adversarial training. Advances in Neural Information Processing Systems, 33:16048-16059, 2020.
Arik, S. Ö. and Pfister, T. Tabnet: Attentive interpretable tabular learning. In Proceedings of the AAAI conference on artificial intelligence, volume 35, pp. 6679-6687, 2021.
Bischl, B., Casalicchio, G., Feurer, M., Gijsbers, P., Hutter, F., Lang, M., Mantovani, R. G., van Rijn, J. N., and Vanschoren, J. Openml benchmarking suites. Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks, 2021.
Borisov, V., Leemann, T., Seßler, K., Haug, J., Pawelczyk, M., and Kasneci, G. Deep neural networks and tabular data: A survey. IEEE transactions on neural networks and learning systems, 2022.
Borisov, V., Sessler, K., Leemann, T., Pawelczyk, M., and Kasneci, G. Language models are realistic tabular data generators. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=cEygmmQNOeI.
Bulatov, A., Kuratov, Y., and Burtsev, M. S. Scaling transformer to 1m tokens and beyond with RMT. CoRR, abs/2304.11062, 2023. doi: 10.48550/ARXIV.2304.11062. URL https://doi.org/10.48550/arXiv.2304.11062.
Cahyawijaya, S., Lovenia, H., and Fung, P. Llms are few-shot in-context low-resource language learners. arXiv preprint arXiv:2403.16512, 2024.
Chang, M.-W., Ratinov, L.-A., Roth, D., and Srikumar, V. Importance of semantic representation: Dataless classification. In AAAI, volume 2, pp. 830-835, 2008.
Chen, J., Yan, J., Chen, Q., Chen, D. Z., Wu, J., and Sun, J. Can a deep learning model be a sure bet for tabular prediction? In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining, KDD '24, pp. 288-296, New York, NY, USA, 2024. Association for Computing Machinery. ISBN 9798400704901. doi: 10.1145/3637528.3671893. URL https://doi.org/10.1145/3637528.3671893.
Chen, T. and Guestrin, C. Xgboost: A scalable tree boosting system. In Proceedings of the 22nd acm sigkdd international conference on knowledge discovery and data mining, pp. 785-794, 2016.
Chui, M., Manyika, J., Miremadi, M., Henke, N., Chung, R., Nel, P., and Malhotra, S. Notes from the ai frontier: Insights from hundreds of use cases. McKinsey Global Institute, 2:267, 2018.
Cortes, C. Support-vector networks. Machine Learning, 1995.
Cover, T. and Hart, P. Nearest neighbor pattern classification. IEEE transactions on information theory, 13(1):21-27, 1967.
Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., Dehghani, M., Minderer, M., Heigold, G., Gelly, S., Uszkoreit, J., and Houlsby, N. An image is worth 16x16 words: Transformers for image recognition at scale. CoRR, abs/2010.11929, 2020. URL https://arxiv.org/abs/2010.11929.
Feurer, M., Klein, A., Eggensperger, K., Springenberg, J., Blum, M., and Hutter, F. Efficient and robust automated machine learning. In Advances in Neural Information Processing Systems 28 (2015), pp. 2962-2970, 2015.
Feurer, M., Eggensperger, K., Falkner, S., Lindauer, M., and Hutter, F. Auto-sklearn 2.0: Hands-free automl via meta-learning. Journal of Machine Learning Research, 23:1-61, 2022.
Finn, C., Abbeel, P., and Levine, S. Model-agnostic meta-learning for fast adaptation of deep networks. In International conference on machine learning, pp. 1126-1135. PMLR, 2017.
Fischer, S. F., Feurer, M., and Bischl, B. Openml-ctr23-a curated tabular regression benchmarking suite. In AutoML Conference 2023 (Workshop), 2023.
Frans, K. and Witkowski, O. Population-based evolution optimizes a meta-learning objective. arXiv preprint arXiv:2103.06435, 2021.
Gardner, J., Perdomo, J. C., and Schmidt, L. Large scale transfer learning for tabular data via language modeling, 2024. URL https://arxiv.org/abs/2406.12031.
Gevaert, O. Meta-learning reduces the amount of data needed to build ai models in oncology. British Journal of Cancer, 125(3):309-310, 2021.
Goodfellow, I., Pouget-Abadie, J., Mirza, M., Xu, B., Warde-Farley, D., Ozair, S., Courville, A., and Bengio, Y. Generative adversarial nets. Advances in neural information processing systems, 27, 2014.
Goodfellow, I., Shlens, J., and Szegedy, C. Explaining and harnessing adversarial examples. In International Conference on Learning Representations, 2015. URL http://arxiv.org/abs/1412.6572.
Gorishniy, Y., Rubachev, I., Khrulkov, V., and Babenko, A. Revisiting deep learning models for tabular data. Advances in Neural Information Processing Systems, 34: 18932-18943, 2021.
Gorishniy, Y., Rubachev, I., and Babenko, A. On embeddings for numerical features in tabular deep learning. In NeurIPS, 2022.
Gorishniy, Y., Rubachev, I., Kartashev, N., Shlenskii, D., Kotelnikov, A., and Babenko, A. Tabr: Tabular deep learning meets nearest neighbors. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=rhgIgTSSxW.
Grinsztajn, L., Oyallon, E., and Varoquaux, G. Why do tree-based models still outperform deep learning on typical tabular data? Advances in neural information processing systems, 35:507-520, 2022.
Hegselmann, S., Buendia, A., Lang, H., Agrawal, M., Jiang, X., and Sontag, D. Tabllm: Few-shot classification of tabular data with large language models. In Ruiz, F., Dy, J., and van de Meent, J.-W. (eds.), Proceedings of The 26th International Conference on Artificial Intelligence and Statistics, volume 206 of Proceedings of Machine Learning Research, pp. 5549-5581. PMLR, 25-27 Apr 2023. URL https://proceedings.mlr.press/v206/hegselmann23a.html.
Helli, K., Schnurr, D., Hollmann, N., Müller, S., and Hutter, F. Drift-resilient tabPFN: In-context learning distribution shifts on tabular data. In AutoML Conference 2024 (Workshop Track), 2024. URL https://openreview.net/forum?id=VbmqcoHpGT.
Ho, T. K. Random decision forests. In Proceedings of 3rd international conference on document analysis and recognition, volume 1, pp. 278-282. IEEE, 1995.
Hollmann, N., Müller, S., Eggensperger, K., and Hutter, F. Tabpfn: A transformer that solves small tabular classification problems in a second. arXiv preprint arXiv:2207.01848, 2022.
Hollmann, N., Müller, S., Purucker, L., Krishnakumar, A., Körfer, M., Hoo, S. B., Schirrmeister, R. T., and Hutter, F. Accurate predictions on small data with a tabular foundation model. Nature, 637(8045):319-326, 2025.
Hornik, K., Stinchcombe, M., and White, H. Multilayer feedforward networks are universal approximators. Neural networks, 2(5):359-366, 1989.
Hospedales, T., Antoniou, A., Micaelli, P., and Storkey, A. Meta-learning in neural networks: A survey. IEEE transactions on pattern analysis and machine intelligence, 44(9):5149-5169, 2021.
Huang, X., Khetan, A., Cvitkovic, M., and Karnin, Z. Tabtransformer: Tabular data modeling using contextual embeddings, 2020. URL https://arxiv.org/abs/2012.06678.
Huisman, M., Van Rijn, J. N., and Plaat, A. A survey of deep meta-learning. Artificial Intelligence Review, 54(6): 4483-4541, 2021.
Ioffe, S. Batch normalization: Accelerating deep network training by reducing internal covariate shift. arXiv preprint arXiv:1502.03167, 2015.
Jabbar, A., Li, X., and Omar, B. A survey on generative adversarial networks: Variants, applications, and training. ACM Computing Surveys (CSUR), 54(8):1-49, 2021.
Jang, E., Gu, S., and Poole, B. Categorical reparameterization with gumbel-softmax. arXiv preprint arXiv:1611.01144, 2016.
Kadra, A., Lindauer, M., Hutter, F., and Grabocka, J. Well-tuned simple nets excel on tabular datasets. Advances in neural information processing systems, 34:23928-23941, 2021a.
Kadra, A., Lindauer, M., Hutter, F., and Grabocka, J. Well-tuned simple nets excel on tabular datasets. In Beygelzimer, A., Dauphin, Y., Liang, P., and Vaughan, J. W. (eds.), Advances in Neural Information Processing Systems, 2021b. URL https://openreview.net/forum?id=d3k38LTDCyO.
Ke, G., Meng, Q., Finley, T., Wang, T., Chen, W., Ma, W., Ye, Q., and Liu, T.-Y. Lightgbm: A highly efficient gradient boosting decision tree. Advances in neural information processing systems, 30, 2017.
Kim, H., Lee, W., and Lee, J. Understanding catastrophic overfitting in single-step adversarial training. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pp. 8119-8127, 2021.
Kim, M. J., Grinsztajn, L., and Varoquaux, G. CARTE: Pretraining and transfer for tabular learning. In Forty-first International Conference on Machine Learning, 2024. URL https://openreview.net/forum?id=9kArQnKLDp.
Kotelnikov, A., Baranchuk, D., Rubachev, I., and Babenko, A. TabDDPM: Modelling tabular data with diffusion models, 2023. URL https://openreview.net/forum?id=EJka_dVXEcr.
Kurakin, A., Goodfellow, I. J., and Bengio, S. Adversarial machine learning at scale. In International Conference on Learning Representations, 2017. URL https://openreview.net/forum?id=BJm4T4Kgx.
Larochelle, H., Erhan, D., and Bengio, Y. Zero-data learning of new tasks. In AAAI, volume 1, pp. 3, 2008.
Lemke, C., Budka, M., and Gabrys, B. Metalearning: a survey of trends and technologies. Artificial intelligence review, 44:117-130, 2015.
Levin, R., Cherepanova, V., Schwarzschild, A., Bansal, A., Bruss, C. B., Goldstein, T., Wilson, A. G., and Goldblum, M. Transfer learning with deep tabular models. arXiv preprint arXiv:2206.15306, 2022.
Luo, R., Tian, F., Qin, T., Chen, E., and Liu, T.-Y. Neural architecture optimization. Advances in neural information processing systems, 31, 2018.
Maddison, C. J., Mnih, A., and Teh, Y. W. The concrete distribution: A continuous relaxation of discrete random variables. arXiv preprint arXiv:1611.00712, 2016.
Madry, A., Makelov, A., Schmidt, L., Tsipras, D., and Vladu, A. Towards deep learning models resistant to adversarial attacks. In International Conference on Learning Representations, 2018. URL https://openreview.net/forum?id=rJzIBfZAb.
Mann, B., Ryder, N., Subbiah, M., Kaplan, J., Dhariwal, P., Neelakantan, A., Shyam, P., Sastry, G., Askell, A., Agarwal, S., et al. Language models are few-shot learners. arXiv preprint arXiv:2005.14165, 1, 2020.
McElfresh, D., Khandagale, S., Valverde, J., Prasad C, V., Ramakrishnan, G., Goldblum, M., and White, C. When do neural nets outperform boosted trees on tabular data? Advances in Neural Information Processing Systems, 36, 2024.
Müller, S., Hollmann, N., Arango, S. P., Grabocka, J., and Hutter, F. Transformers can do bayesian inference. arXiv preprint arXiv:2112.10510, 2021.
Nagler, T. Statistical foundations of prior-data fitted networks. In International Conference on Machine Learning, pp. 25660-25676. PMLR, 2023.
Nam, J., Tack, J., Lee, K., Lee, H., and Shin, J. STUNT: Few-shot tabular learning with self-generated tasks from unlabeled tables. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=__xlsjehDvlY.
Nichol, A. and Schulman, J. Reptile: a scalable metalearning algorithm. arXiv preprint arXiv:1803.02999, 2(3):4, 2018.
Palatucci, M., Pomerleau, D., Hinton, G. E., and Mitchell, T. M. Zero-shot learning with semantic output codes. Advances in neural information processing systems, 22, 2009.
Perez, E., Kiela, D., and Cho, K. True few-shot learning with language models. Advances in neural information processing systems, 34:11054-11070, 2021.
Popov, S., Morozov, S., and Babenko, A. Neural oblivious decision ensembles for deep learning on tabular data. arXiv preprint arXiv:1909.06312, 2019.
Press, O., Smith, N. A., and Lewis, M. Train short, test long: Attention with linear biases enables input length extrapolation. arXiv preprint arXiv:2108.12409, 2021.
Prokhorenkova, L., Gusev, G., Vorobev, A., Dorogush, A. V., and Gulin, A. Catboost: unbiased boosting with categorical features. Advances in neural information processing systems, 31, 2018.
Qu, J., Holzmüller, D., Varoquaux, G., and Morvan, M. L. Tabicl: A tabular foundation model for in-context learning on large data. arXiv preprint arXiv:2502.05564, 2025.
Ratliff, L. J., Burden, S. A., and Sastry, S. S. On the characterization of local nash equilibria in continuous games. IEEE transactions on automatic control, 61(8): 2301-2307, 2016.
Rubachev, I., Alekberov, A., Gorishniy, Y., and Babenko, A. Revisiting pretraining objectives for tabular deep learning. arXiv preprint arXiv:2207.03208, 2022.
Shafahi, A., Najibi, M., Ghiasi, M. A., Xu, Z., Dickerson, J., Studer, C., Davis, L. S., Taylor, G., and Goldstein, T. Adversarial training for free! Advances in neural information processing systems, 32, 2019.
Shwartz-Ziv, R. and Armon, A. Tabular data: Deep learning is not all you need. Information Fusion, 81:84-90, 2022.
Somepalli, G., Goldblum, M., Schwarzschild, A., Bruss, C. B., and Goldstein, T. Saint: Improved neural networks for tabular data via row attention and contrastive pretraining. arXiv preprint arXiv:2106.01342, 2021.
Spinaci, M., Polewczyk, M., Hoffart, J., Kohler, M. C., Thelin, S., and Klein, T. PORTAL: Scalable tabular foundation models via content-specific tokenization. In NeurIPS 2024 Third Table Representation Learning Workshop, 2024. URL https://openreview.net/forum?id=TSZQvknbLO.
Su, J., Ahmed, M., Lu, Y., Pan, S., Bo, W., and Liu, Y. Enhanced transformer with rotary position embedding. arXiv preprint arXiv:2104.09864, 2021.
Tibshirani, R. Regression shrinkage and selection via the lasso. Journal of the Royal Statistical Society Series B: Statistical Methodology, 58(1):267-288, 1996.
Tikhonov, A. N. Solution of incorrectly formulated problems and the regularization method. Sov Dok, 4:1035-1038, 1963.
Tsimpoukelli, M., Menick, J. L., Cabi, S., Eslami, S., Vinyals, O., and Hill, F. Multimodal few-shot learning with frozen language models. Advances in Neural Information Processing Systems, 34:200-212, 2021.
Vanschoren, J. Meta-learning: A survey. arXiv preprint arXiv:1810.03548, 2018.
Verma, V. K., Brahma, D., and Rai, P. Meta-learning for generalized zero-shot learning. In Proceedings of the AAAI conference on artificial intelligence, volume 34, pp. 6062-6069, 2020.
Wong, E., Rice, L., and Kolter, J. Z. Fast is better than free: Revisiting adversarial training. In International Conference on Learning Representations, 2020. URL https://openreview.net/forum?id=BJx040EFvH.
Wu, Y., Rabe, M. N., Hutchins, D., and Szegedy, C. Memorizing transformers. In International Conference on Learning Representations, 2022. URL https://openreview.net/forum?id=TrjbxzRcnf-.
Xian, Y., Schiele, B., and Akata, Z. Zero-shot learning - the good, the bad and the ugly. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), July 2017.
Xian, Y., Lampert, C. H., Schiele, B., and Akata, Z. Zero-shot learning—a comprehensive evaluation of the good, the bad and the ugly. IEEE transactions on pattern analysis and machine intelligence, 41(9):2251-2265, 2018.
Yan, J., Zheng, B., Xu, H., Zhu, Y., Chen, D., Sun, J., Wu, J., and Chen, J. Making pre-trained language models great on tabular prediction. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=anzIzGZuLi.
Ye, C., Lu, G., Wang, H., Li, L., Wu, S., Chen, G., and Zhao, J. Towards cross-table masked pretraining for web data mining. In The Web Conference 2024, 2024a. URL https://openreview.net/forum?id=9jj7cMOXQo.
Ye, H.-J., Liu, S.-Y., Cai, H.-R., Zhou, Q.-L., and Zhan, D.-C. A closer look at deep learning on tabular data. CoRR, abs/2407.00956, 2024b. URL https://doi.org/10.48550/arXiv.2407.00956.
Zhang, D., Zhang, T., Lu, Y., Zhu, Z., and Dong, B. You only propagate once: Accelerating adversarial training via maximal principle. In Wallach, H., Larochelle, H., Beygelzimer, A., d'Alché-Buc, F., Fox, E., and Garnett, R. (eds.), Advances in Neural Information Processing Systems, volume 32. Curran Associates, Inc., 2019. URL https://proceedings.neurips.cc/paper_files/paper/2019/file/812b4ba287f5ee0bc9d43bbf5bbe87fb-Paper.pdf.
Zhang, H., Shi, Y., Dong, B., Han, Y., Li, Y., and Kuang, X. Free adversarial training with layerwise heuristic learning. In International Conference on Image and Graphics, pp. 120-131. Springer, 2021.
Zhu, B., Shi, X., Erickson, N., Li, M., Karypis, G., and Shoaran, M. Xtab: Cross-table pretraining for tabular transformers. arXiv preprint arXiv:2305.06090, 2023.
Zintgraf, L., Schulze, S., Lu, C., Feng, L., Igl, M., Shiarlis, K., Gal, Y., Hofmann, K., and Whiteson, S. Varibad: Variational bayes-adaptive deep rl via meta-learning. Journal of Machine Learning Research, 22(289):1-39, 2021. URL http://jmlr.org/papers/v22/21-0657.html.
# A. Background
In this section, we give a brief introduction to PFNs, and specifically the synthetic data generating mechanism of TabPFN. For a more complete description, see Müller et al. (2021); Hollmann et al. (2022); Nagler (2023). Given training dataset $D^{\mathrm{train}} = (X^{\mathrm{train}}, \mathbf{y}^{\mathrm{train}})$, the goal is to approximate the conditional outcome distribution $y^{\mathrm{test}} \sim p(\cdot | \mathbf{x}^{\mathrm{test}}, D^{\mathrm{train}})$ given a test point $\mathbf{x}^{\mathrm{test}}$. In the Bayesian framework for supervised learning, the prior of the dataset is a hypothesis of the data generating mechanism $\phi$ drawn from hypothesis space $\Phi$, under which $p(\cdot | \mathbf{x}^{\mathrm{test}}, D^{\mathrm{train}})$ is a posterior predictive distribution (PPD) and can be factorized as follows by Bayes' rule:
$$
\begin{aligned} p\left(\cdot \mid \boldsymbol{x}^{\mathrm{test}}, D^{\mathrm{train}}\right) &= \int_{\phi \in \Phi} p\left(\cdot \mid \boldsymbol{x}^{\mathrm{test}}, \phi\right) p\left(\phi \mid D^{\mathrm{train}}\right) d\phi \quad (3) \\ &= \int_{\phi \in \Phi} p\left(\cdot \mid \boldsymbol{x}^{\mathrm{test}}, \phi\right) \frac{p(\phi)\, p\left(D^{\mathrm{train}} \mid \phi\right)}{p\left(D^{\mathrm{train}}\right)}\, d\phi \quad (4) \\ &\propto \int_{\phi \in \Phi} p\left(\cdot \mid \boldsymbol{x}^{\mathrm{test}}, \phi\right) p\left(D^{\mathrm{train}} \mid \phi\right) p(\phi)\, d\phi. \quad (5) \end{aligned}
$$
PFNs conduct synthetic prior fitting by defining a family of data generating mechanisms $\Phi$ from which independent samples $\pmb{x}_i\sim p(\pmb {x}_i) = \mathbb{E}_{p(\phi)}[p(\pmb {x}_i|\phi)]$ and $y_{i}\sim p(y_{i}) = \mathbb{E}_{p(\phi)}[p(y_{i}|\pmb{x}_{i},\phi)]$ are drawn to compose feature matrix $(X^{\mathrm{train}},X^{\mathrm{test}})$ and response vector $(\pmb{y}^{\mathrm{train}},\pmb{y}^{\mathrm{test}})$ of a synthetic dataset $D = D^{\mathrm{train}}\cup D^{\mathrm{test}}$ , and use a transformer model $q_{\theta}(\cdot |X^{\mathrm{test}},D^{\mathrm{train}})$ to approximate $p(\cdot |X^{\mathrm{test}},D^{\mathrm{train}})$ by minimizing their expected divergence over the synthetic data distribution
$$
\mathbb{E}_{p\left(D^{\mathrm{train}}, X^{\mathrm{test}}\right)}\left[\mathrm{KL}\left(p\left(\boldsymbol{y}^{\mathrm{test}} \mid X^{\mathrm{test}}, D^{\mathrm{train}}\right),\, q_{\theta}\left(\boldsymbol{y}^{\mathrm{test}} \mid X^{\mathrm{test}}, D^{\mathrm{train}}\right)\right)\right]. \tag{6}
$$
Since
$$
\begin{aligned} &\nabla_{\theta}\, \mathbb{E}_{p\left(D^{\mathrm{train}}, X^{\mathrm{test}}\right)}\left[\mathrm{KL}\left(p\left(\boldsymbol{y}^{\mathrm{test}} \mid X^{\mathrm{test}}, D^{\mathrm{train}}\right),\, q_{\theta}\left(\boldsymbol{y}^{\mathrm{test}} \mid X^{\mathrm{test}}, D^{\mathrm{train}}\right)\right)\right] \quad (7) \\ &= \nabla_{\theta}\, \mathbb{E}_{p\left(D^{\mathrm{train}}, X^{\mathrm{test}}\right)}\left[H\left(p\left(\boldsymbol{y}^{\mathrm{test}} \mid X^{\mathrm{test}}, D^{\mathrm{train}}\right),\, q_{\theta}\left(\boldsymbol{y}^{\mathrm{test}} \mid X^{\mathrm{test}}, D^{\mathrm{train}}\right)\right)\right] \quad (8) \\ &= \nabla_{\theta}\, \mathbb{E}_{p\left(D^{\mathrm{train}}, D^{\mathrm{test}}\right)}\left[-\log q_{\theta}\left(\boldsymbol{y}^{\mathrm{test}} \mid X^{\mathrm{test}}, D^{\mathrm{train}}\right)\right], \quad (9) \end{aligned}
$$
it is equivalent to minimizing the expected negative log-likelihood loss
$$
\mathcal{L}(q_{\theta}) = \mathbb{E}_{p(D)}\left[-\log q_{\theta}\left(\boldsymbol{y}^{\mathrm{test}} \mid X^{\mathrm{test}}, D^{\mathrm{train}}\right)\right]. \tag{10}
$$
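
In code, minimizing Eq. (10) amounts to an ordinary cross-entropy over the synthetic test labels, averaged over freshly sampled datasets. The toy model and toy prior below are stand-ins chosen only so the loop runs end-to-end; they are far simpler than TabPFN's transformer and its MLP-based prior.

```python
import torch
from torch import nn

class ToyPFN(nn.Module):
    """Stand-in for a PFN: embeds train points with their labels, mean-pools them
    into a context vector, and classifies test points given that context."""
    def __init__(self, d_in=3, n_classes=2, d_model=32):
        super().__init__()
        self.embed_train = nn.Linear(d_in + 1, d_model)
        self.embed_test = nn.Linear(d_in, d_model)
        self.head = nn.Linear(2 * d_model, n_classes)

    def forward(self, x_tr, y_tr, x_te):
        ctx = self.embed_train(torch.cat([x_tr, y_tr[:, None].float()], dim=-1)).mean(0)
        h = self.embed_test(x_te)
        return self.head(torch.cat([h, ctx.expand_as(h)], dim=-1))

def sample_toy_dataset(n_train=64, n_test=16, d_in=3):
    """Stand-in prior: a random linear rule defines the labels of each dataset."""
    w = torch.randn(d_in)
    x = torch.randn(n_train + n_test, d_in)
    y = (x @ w > 0).long()
    return x[:n_train], y[:n_train], x[n_train:], y[n_train:]

model = ToyPFN()
opt = torch.optim.Adam(model.parameters(), lr=1e-3)
for _ in range(100):   # expected NLL over sampled datasets, as in Eq. (10)
    x_tr, y_tr, x_te, y_te = sample_toy_dataset()
    loss = nn.functional.cross_entropy(model(x_tr, y_tr, x_te), y_te)
    opt.zero_grad(); loss.backward(); opt.step()
```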
TabPFN, in particular, conducts synthetic prior fitting by defining a family of sparsified-random-MLP-based data generating mechanisms $\Phi$, which we call ordinary data generators in the context of this paper. The following section gives a detailed description of the workflow of these generators.
# A.1. Ordinary Data Generator
To sample a data generating mechanism $\phi \sim \Phi$, TabPFN first initializes a random MLP by sampling a collection of hyperparameters from a pre-defined hyperparameter space, including the number of layers, hidden size, activation function, dropout probability, noise scales, etc. Specifically, the dropout probability is used to sparsify connections between neurons, and the noise scales dictate the amount of random noise injected into the neurons at each layer. After the sparsified noisy random MLP is initialized, TabPFN randomly selects a subset of its neurons to be the predictors $x_{i}$, and randomly selects one neuron to be the response $y_{i}$. With $n$ different random inputs to the MLP, a dataset with $n$ instances of $(x,y)$ is thus generated.
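
The workflow above can be sketched as follows; the fixed layer count, widths, activation, and noise scale here stand in for the hyperparameters that TabPFN samples per dataset.

```python
import torch

@torch.no_grad()
def sample_ordinary_dataset(n=256, n_features=3, d_in=4, d_hidden=16,
                            n_layers=3, dropout_p=0.5, noise_scale=0.1):
    """Sketch of an ordinary data generator: a sparsified noisy random MLP whose
    hidden neurons are read off as predictors x and a response y."""
    h = torch.randn(n, d_in)                              # n random inputs to the MLP
    activations = []
    for _ in range(n_layers):
        w = torch.randn(h.shape[1], d_hidden)
        w = w * (torch.rand_like(w) > dropout_p).float()  # sparsify neural connections
        h = torch.tanh(h @ w) + noise_scale * torch.randn(n, d_hidden)
        activations.append(h)
    neurons = torch.cat(activations, dim=1)               # all candidate neurons
    idx = torch.randperm(neurons.shape[1])[: n_features + 1]
    x, y = neurons[:, idx[:-1]], neurons[:, idx[-1]]
    return x, y        # continuous values; y is discretized afterwards for classification

x, y = sample_ordinary_dataset()
```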
Discretization Since the generated data are selected neurons from MLPs, their values are naturally continuous. To mimic real-world datasets that possess categorical features, and to generate discrete class labels for classification tasks, TabPFN uses a ranking discretization approach that converts a subset of the continuous values to discrete ones by assigning certain quantile ranges of the continuous value $v$ to certain categories. A visual demonstration of this conversion can be found on the left side of Figure 6.
Normalization The generated synthetic data (as well as real-world datasets during inference time) are normalized across samples within each dataset, with the range of the values clipped to four standard deviations. Although the meta-learner might see datasets with unseen fields and out-of-distribution predictor-response relations during inference time, this ensures that at least the range of values will not be out-of-distribution as well.
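
As a sketch, this per-dataset normalization is a z-score across samples followed by clipping, assuming the clipping is symmetric at four standard deviations:

```python
import torch

def normalize_features(X, clip=4.0):
    """Per-dataset z-normalization across samples, clipped to +/- clip std devs."""
    mu = X.mean(dim=0, keepdim=True)
    sigma = X.std(dim=0, keepdim=True).clamp_min(1e-12)
    return ((X - mu) / sigma).clamp(-clip, clip)
```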

(a) Ranking Discretization. (b) Ranking Soft-discretization.

Figure 6. Discretization of continuous variables. The $x$-axis is the value generated by the data generator, and the $y$-axis is its value after discretization. The soft-discretization approach produces near-categorical features that are differentiable and thus do not disrupt gradient flow. Intuitively, the adversarial data agents will try to produce a new value that escapes the range of the current category if the meta-learner becomes very good at identifying signal from the current category. However, the new category it escapes to is arbitrary and cannot be targeted by gradient updates, giving additional exploration potential to the adversarial agents.
# A.2. Limitations
Although there is no theoretical limitation on the number of data points PFNs can handle, the transformer architecture does entail significant computational complexity and memory usage for large datasets. Besides, given the nature of the dense input embedding layer and the dense final prediction layer, there is a theoretical limitation on the number of features and the number of classes that PFNs can handle. The former is less of an issue, since feature selection or simply random sampling of features can be performed and PFNs will still yield good performance, as shown in McElfresh et al. (2024). The latter is a rather big problem for classification tasks, because there is hardly any direct and effective work-around.
# B. Handling of Large Datasets
Since expanding TabPFN's capabilities in handling large datasets is not the focus of this work, we only provide two simple adaptations such that APT can practically handle datasets of this nature. We recommend that practitioners try out the concurrent and future developments in more involved model innovations for large datasets, but in case they do not wish to do so, the following approaches can serve as a baseline.
# B.1. Uncertainty-based Batch Aggregation
For datasets with a large number of samples, to avoid computing attention spanning all training data points, which results in a quadratic number of operations and quadratic memory usage with respect to data size, we estimate the PPD with batches drawn from the training set:
$$
p\left(\boldsymbol{y}^{\mathrm{test}} \mid X^{\mathrm{test}}, D^{\mathrm{train}}\right) \approx \int_{b} q_{\theta}\left(\boldsymbol{y}^{\mathrm{test}} \mid X^{\mathrm{test}}, b\right) \cdot p\left(b \mid D^{\mathrm{train}}\right), \tag{11}
$$
which is equivalent to drawing uniform samples from the training set $D^{\mathrm{train}}$ and scaling the resulting predictions with weights $p(b\mid D^{\mathrm{train}})$. We cap the batch size at 3,000 in alignment with McElfresh et al. (2024). For classification datasets with more than 3,000 samples, we split the training set into batches and weigh the resulting predictions in proportion to the batch size (the prediction from the last batch may have less weight than the others). For regression tasks, we parameterize the model $q_{\theta}(y_i^{\mathrm{test}}|\boldsymbol{x}_i^{\mathrm{test}},b)$ as a Gaussian distribution $(\mu_{\theta}(\boldsymbol{x}_i^{\mathrm{test}},b),\sigma_{\theta}(\boldsymbol{x}_i^{\mathrm{test}},b))$ similar to Hollmann et al. (2022), and directly produce the point estimate using the inverse-variance estimator at inference time:
|
| 361 |
+
|
| 362 |
+
$$
|
| 363 |
+
\begin{aligned} \mathbb{E}\left[y_{i}^{\text{test}} \mid \boldsymbol{x}_{i}^{\text{test}}, D^{\text{train}}\right] &\approx \int_{b} \mathbb{E}_{\theta}\left[y_{i}^{\text{test}} \mid \boldsymbol{x}_{i}^{\text{test}}, b\right] \cdot p(b \mid D^{\text{train}}) \qquad (12) \\ &= \left(\sum_{k=1}^{N} \frac{1}{\sigma_{\theta}^{2}\left(\boldsymbol{x}_{i}^{\text{test}}, b_{k}\right)}\right)^{-1} \sum_{k=1}^{N} \frac{1}{\sigma_{\theta}^{2}\left(\boldsymbol{x}_{i}^{\text{test}}, b_{k}\right)} \mu_{\theta}\left(\boldsymbol{x}_{i}^{\text{test}}, b_{k}\right). \qquad (13) \end{aligned}
|
| 364 |
+
$$
|
| 365 |
+
|
| 366 |
+
The intuition is that the prediction from each batch is weighted by its uncertainty: more weight is placed on predictions the model is more certain of, and vice versa.
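As a minimal sketch of this aggregation, assuming hypothetical arrays of per-batch predictions (names and shapes are ours, not the released implementation):

```python
import numpy as np

def aggregate_classification(probs_per_batch, batch_sizes):
    """Weight per-batch class probabilities by batch size, mirroring Eq. (11)."""
    weights = np.asarray(batch_sizes, dtype=float)
    weights /= weights.sum()
    # probs_per_batch: list of (n_test, n_classes) arrays, one per training batch
    return sum(w * p for w, p in zip(weights, probs_per_batch))

def aggregate_regression(mus, sigmas):
    """Inverse-variance point estimate over batches, mirroring Eqs. (12)-(13)."""
    mus = np.stack(mus)                     # (n_batches, n_test)
    precisions = 1.0 / np.stack(sigmas) ** 2
    return (precisions * mus).sum(axis=0) / precisions.sum(axis=0)
```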
|
| 367 |
+
|
| 368 |
+
# B.2. Patch-based Feature Embedding
|
| 369 |
+
|
| 370 |
+
We drew inspiration from Dosovitskiy et al. (2020) and developed a patch-based embedding approach that adapts to datasets with an arbitrary number of features. In Hollmann et al. (2022), embeddings of $x$ are acquired by padding or clipping the number of features $d_{k}$ to a certain maximum feature size $d^{*}$, such that $x$ can be fed to a dense feedforward $e_{\theta}: \mathbb{R}^{d^{*}} \to \mathbb{R}^{d_{\mathrm{model}}}$. Instead, we split features into patches, setting $d^{*}$ as the patch size, and only pad the last patch to $d^{*}$ dimensions if $d \not\equiv 0 \pmod{d^{*}}$. After feeding each patch to the dense feedforward $e_{\theta}$, we pass the patch embeddings to an attention block with optional relative positional encoding (Su et al., 2021; Press et al., 2021), and average-pool across the resulting embeddings. Essentially, this is a half-way approach between using a dense feedforward to embed all features and using an attention block to tokenize each individual feature. In this way, the embedding block can handle features more flexibly while controlling computational complexity and memory usage.
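A compact PyTorch sketch of the described patch-based embedding is given below; the module and argument names are ours, and the attention block and pooling details are simplified assumptions (the optional relative positional encoding is omitted).

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class PatchFeatureEmbedding(nn.Module):
    def __init__(self, patch_size: int, d_model: int, n_heads: int = 4):
        super().__init__()
        self.patch_size = patch_size
        self.proj = nn.Linear(patch_size, d_model)            # dense feedforward per patch
        self.attn = nn.MultiheadAttention(d_model, n_heads, batch_first=True)

    def forward(self, x):                                     # x: (n, d)
        n, d = x.shape
        pad = (-d) % self.patch_size                           # pad only the last patch
        x = F.pad(x, (0, pad))
        patches = x.view(n, -1, self.patch_size)               # (n, n_patches, d*)
        tok = self.proj(patches)                               # (n, n_patches, d_model)
        tok, _ = self.attn(tok, tok, tok)                      # attention over patches
        return tok.mean(dim=1)                                 # average-pool patch embeddings
```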
|
| 371 |
+
|
| 372 |
+
# C. Hyperparameter Settings
|
| 373 |
+
|
| 374 |
+
All common hyperparameters of APT are directly inherited from TabPFN and not tuned, including the learning rate $10^{-4}$, number of blocks 12, hidden dimension 512, hidden feedforward dimension 1024, number of heads 4, effective batch size (batch size per step $\times$ number of gradient accumulation steps) 64, and total number of training datasets (number of epochs $\times$ steps per epoch $\times$ number of datasets per step) 6,400,000, as well as all data generator hyperparameters. For more details on the data generator hyperparameters, see the code repository in our supplementary material.
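For quick reference, these inherited settings can be collected in a plain dictionary; this is merely a restatement of the values above, and the key names are our own.

```python
apt_common_hparams = {
    "learning_rate": 1e-4,
    "num_blocks": 12,
    "hidden_dim": 512,
    "ffn_hidden_dim": 1024,
    "num_heads": 4,
    "effective_batch_size": 64,            # per-step batch size x gradient accumulation steps
    "total_training_datasets": 6_400_000,  # epochs x steps per epoch x datasets per step
}
```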
|
| 375 |
+
|
| 376 |
+
# D. More Results
|
| 377 |
+
|
| 378 |
+
We list the performance of top algorithms on small classification datasets in Table 5. Standard deviations are calculated across 5 different splits.
|
| 379 |
+
|
| 380 |
+
Table 5. The ROC-AUC of top algorithms on the 35 small datasets in OpenML-CC18.
|
| 381 |
+
|
| 382 |
+
<table><tr><td></td><td>LightGBM</td><td>XGBoost</td><td>SVM</td><td>TabPFN</td><td>CatBoost</td><td>APT</td></tr><tr><td>mfeat-fourier</td><td>.981 ± .004</td><td>.982 ± .004</td><td>.982 ± .004</td><td>.985 ± .002</td><td>.984 ± .002</td><td>.983 ± .003</td></tr><tr><td>breast-w</td><td>.993 ± .006</td><td>.993 ± .006</td><td>.995 ± .007</td><td>.997 ± .003</td><td>.996 ± .005</td><td>.997 ± .003</td></tr><tr><td>mfeat-karhunen</td><td>.999 ± .001</td><td>.999 ± .001</td><td>1 ± 0</td><td>.999 ± 0</td><td>.999 ± 0</td><td>1 ± 0</td></tr><tr><td>mfeat-morphological</td><td>.959 ± .004</td><td>.961 ± .002</td><td>.965 ± .006</td><td>.967 ± .003</td><td>.964 ± .003</td><td>.966 ± .006</td></tr><tr><td>mfeat-zernike</td><td>.970 ± .004</td><td>.973 ± .004</td><td>.992 ± .003</td><td>.982 ± .001</td><td>.974 ± .003</td><td>.977 ± .003</td></tr><tr><td>cmc</td><td>.751 ± .036</td><td>.758 ± .036</td><td>.690 ± .020</td><td>.736 ± .031</td><td>.758 ± .037</td><td>.739 ± .026</td></tr><tr><td>credit-approval</td><td>.931 ± .030</td><td>.920 ± .022</td><td>.912 ± .024</td><td>.928 ± .029</td><td>.931 ± .030</td><td>.930 ± .022</td></tr><tr><td>credit-g</td><td>.809 ± .018</td><td>.824 ± .028</td><td>.816 ± .020</td><td>.835 ± .018</td><td>.816 ± .025</td><td>.846 ± .024</td></tr><tr><td>diabetes</td><td>.821 ± .027</td><td>.812 ± .037</td><td>.811 ± .050</td><td>.817 ± .026</td><td>.827 ± .025</td><td>.824 ± .016</td></tr><tr><td>tic-tac-toe</td><td>1 ± 0</td><td>1 ± 0</td><td>1 ± 0</td><td>.993 ± .003</td><td>1 ± 0</td><td>.997 ± .002</td></tr><tr><td>vehicle</td><td>.936 ± .009</td><td>.945 ± .008</td><td>.965 ± .011</td><td>.965 ± .005</td><td>.941 ± .008</td><td>.961 ± .008</td></tr><tr><td>eucalyptus</td><td>.900 ± .022</td><td>.894 ± .024</td><td>.874 ± .009</td><td>.908 ± .013</td><td>.905 ± .019</td><td>.912 ± .017</td></tr><tr><td>analcatdata_authorship</td><td>1 ± 0</td><td>1 ± 0</td><td>1 ± 0</td><td>1 ± 0</td><td>1 ± 0</td><td>1 ± 0</td></tr><tr><td>pc4</td><td>.953 ± .008</td><td>.954 ± .012</td><td>.907 ± .058</td><td>.957 ± .013</td><td>.961 ± .011</td><td>.964 ± .016</td></tr><tr><td>pc3</td><td>.814 ± .031</td><td>.831 ± .048</td><td>.706 ± .055</td><td>.848 ± .044</td><td>.829 ± .042</td><td>.865 ± .032</td></tr><tr><td>kc2</td><td>.887 ± .060</td><td>.862 ± .102</td><td>.881 ± .052</td><td>.875 ± .079</td><td>.885 ± .084</td><td>.896 ± .087</td></tr><tr><td>blood-transfusion-service-center</td><td>.740 ± .085</td><td>.722 ± .068</td><td>.705 ± .075</td><td>.750 ± .082</td><td>.732 ± .077</td><td>.751 ± .086</td></tr><tr><td>cnae-9</td><td>.981 ± .005</td><td>.994 ± .005</td><td>.998 ± .001</td><td>.812 ± .032</td><td>.991 ± .005</td><td>.901 ± .014</td></tr><tr><td>ilpd</td><td>.767 ± .067</td><td>.751 ± .038</td><td>.628 ± .085</td><td>.792 ± .046</td><td>.787 ± .059</td><td>.808 ± .035</td></tr><tr><td>wdbc</td><td>.993 ± .006</td><td>.989 ± .007</td><td>.998 ± .003</td><td>.997 ± .003</td><td>.993 ± .003</td><td>.997 ± .004</td></tr><tr><td>dresses-sales</td><td>.685 ± .028</td><td>.618 ± .045</td><td>.669 ± .027</td><td>.552 ± .056</td><td>.637 ± .051</td><td>.617 ± .049</td></tr><tr><td>MiceProtein</td><td>1 ± 0</td><td>1 ± 0</td><td>1 ± 0</td><td>1 ± 0</td><td>1 ± 0</td><td>1 ± 0</td></tr><tr><td>steel-plates-fault</td><td>.975 ± .003</td><td>.979 ± .003</td><td>.964 ± .006</td><td>.970 ± .005</td><td>.978 ± .003</td><td>.969 ± .006</td></tr><tr><td>climate-model-simulation-crashes</td><td>.944 ± .043</td><td>.936 ± .052</td><td>.951 ± .070</td><td>.960 ± 
.053</td><td>.949 ± .044</td><td>.960 ± .058</td></tr><tr><td>balance-scale</td><td>.970 ± .027</td><td>.998 ± .003</td><td>.994 ± .006</td><td>.997 ± .004</td><td>.949 ± .014</td><td>.998 ± .003</td></tr><tr><td>mfeat-factors</td><td>.999 ± .001</td><td>.999 ± .001</td><td>.999 ± .001</td><td>.999 ± .001</td><td>.999 ± 0</td><td>.999 ± .001</td></tr><tr><td>vowel</td><td>.999 ± .001</td><td>.999 ± .001</td><td>.999 ± .001</td><td>1 ± 0</td><td>1 ± 0</td><td>1 ± 0</td></tr><tr><td>analcatdata_dmft</td><td>.595 ± .032</td><td>.597 ± .029</td><td>.601 ± .033</td><td>.577 ± .044</td><td>.582 ± .027</td><td>.593 ± .040</td></tr><tr><td>pc1</td><td>.901 ± .065</td><td>.917 ± .063</td><td>.802 ± .127</td><td>.917 ± .059</td><td>.916 ± .058</td><td>.942 ± .041</td></tr><tr><td>banknote-authentication</td><td>1 ± 0</td><td>1 ± 0</td><td>1 ± 0</td><td>1 ± 0</td><td>1 ± 0</td><td>1 ± 0</td></tr><tr><td>qsar-biodeg</td><td>.934 ± .015</td><td>.925 ± .012</td><td>.932 ± .017</td><td>.944 ± .016</td><td>.935 ± .017</td><td>.944 ± .013</td></tr><tr><td>semeion</td><td>.998 ± .001</td><td>.999 ± .001</td><td>.999 ± 0</td><td>.984 ± .004</td><td>.999 ± .001</td><td>.980 ± .004</td></tr><tr><td>cylinder-bands</td><td>.898 ± .041</td><td>.873 ± .036</td><td>.913 ± .035</td><td>.911 ± .021</td><td>.904 ± .044</td><td>.913 ± .031</td></tr><tr><td>car</td><td>1 ± 0</td><td>1 ± 0</td><td>1 ± 0</td><td>.999 ± .001</td><td>1 ± 0</td><td>.997 ± .005</td></tr><tr><td>mfeat-pixel</td><td>.999 ± 0</td><td>1 ± 0</td><td>1 ± 0</td><td>.999 ± 0</td><td>1 ± 0</td><td>.999 ± 0</td></tr></table>
|
ICML/2025/Zero-shot Meta-learning for Tabular Prediction Tasks with Adversarially Pre-trained Transformer/images.zip
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d873cde2e5f485cd0dce09da5717c5351c4a6653f09e5e835fcac6d627a57269
|
| 3 |
+
size 705988
|
ICML/2025/Zero-shot Meta-learning for Tabular Prediction Tasks with Adversarially Pre-trained Transformer/layout.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ee1fa0c19d5a0d5bfc0fcf26bdd67aa0ed14c81038687c0f7bbfd24972225070
|
| 3 |
+
size 525287
|
ICML/2025/ZeroFlow_ Overcoming Catastrophic Forgetting is Easier than You Think/607f6af7-947e-47ac-8e4d-68bda5925360_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:deaf3d5130c54c56c0b482f41e9d59ae251539c37722567da6c374764c737892
|
| 3 |
+
size 128152
|
ICML/2025/ZeroFlow_ Overcoming Catastrophic Forgetting is Easier than You Think/607f6af7-947e-47ac-8e4d-68bda5925360_model.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:6232ec0ecaacd2e5ba0a0f7a0a9b4f0dca6fe2c2a8e1c922f2819eda64ec3088
|
| 3 |
+
size 162171
|
ICML/2025/ZeroFlow_ Overcoming Catastrophic Forgetting is Easier than You Think/607f6af7-947e-47ac-8e4d-68bda5925360_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:eedc6a5121f1b70a55b96ab918cd444f5cfa0b77b004285fe1e49e79a3d56e7c
|
| 3 |
+
size 5663905
|
ICML/2025/ZeroFlow_ Overcoming Catastrophic Forgetting is Easier than You Think/full.md
ADDED
|
@@ -0,0 +1,574 @@
| 1 |
+
# ZeroFlow: Overcoming Catastrophic Forgetting is Easier than You Think
|
| 2 |
+
|
| 3 |
+
Tao Feng<sup>1</sup> Wei Li<sup>1*</sup> Didi Zhu<sup>2</sup> Hangjie Yuan<sup>2</sup> Wendi Zheng<sup>1</sup> Dan Zhang<sup>1</sup> Jie Tang<sup>1</sup> https://zeroflow-bench.github.io/
|
| 4 |
+
|
| 5 |
+
# Abstract
|
| 6 |
+
|
| 7 |
+
Backpropagation provides a generalized configuration for overcoming catastrophic forgetting. Optimizers such as SGD and Adam are commonly used for weight updates in continual learning and continual pre-training. However, access to gradient information is not always feasible in practice due to black-box APIs, hardware constraints, or non-differentiable systems, a challenge we refer to as the gradient bans. To bridge this gap, we introduce ZeroFlow, the first benchmark designed to evaluate gradient-free optimization algorithms for overcoming forgetting. ZeroFlow examines a suite of forward pass-based methods across various algorithms, forgetting scenarios, and datasets. Our results show that forward passes alone can be sufficient to mitigate forgetting. We uncover novel optimization principles that highlight the potential of forward pass-based methods in mitigating forgetting, managing task conflicts, and reducing memory demands. Additionally, we propose new enhancements that further improve forgetting resistance using only forward passes. This work provides essential tools and insights to advance the development of forward-pass-based methods for continual learning.
|
| 8 |
+
|
| 9 |
+
# 1. Introduction
|
| 10 |
+
|
| 11 |
+
Catastrophic forgetting remains one of the major challenges on the path to artificial general intelligence (AGI) (Hadsell et al., 2020; Zhou et al., 2023b), i.e., models tend to forget previously learned tasks when trained on new ones on time-evolving data flow (Feng et al., 2022b). This phenomenon is commonly seen across various tasks, including continual learning (CL) (Wang et al., 2023), fine-tuning of foundation models (FMs) (Sun et al., 2025; Yuan et al., 2024), and
|
| 12 |
+
|
| 13 |
+
*Core contribution 1Tsinghua University 2Zhejiang University. Correspondence to: Jie Tang <jietang@tsinghua.edu.cn>.
|
| 14 |
+
|
| 15 |
+
Proceedings of the $42^{nd}$ International Conference on Machine Learning, Vancouver, Canada. PMLR 267, 2025. Copyright 2025 by the author(s).
|
| 16 |
+
|
| 17 |
+

|
| 18 |
+
Figure 1: Illustrations of ZeroFlow. New tasks (or downstream tasks) arrive sequentially; the gradient bans block the model from learning and memorizing via backpropagation. ZeroFlow overcomes this issue via forward passes.
|
| 19 |
+
|
| 20 |
+
continual pre-training (CPT) (Shi et al., 2024; Zhu et al., 2024b), etc. Among them, optimization algorithms play a crucial role, e.g., SGD has become the default choice during CL (van de Ven et al., 2022), while Adam is frequently seen in fine-tuning FMs (Luo et al., 2023; Zhu et al., 2024a). These optimization algorithms, in tandem with various methods (ranging from regularization and rehearsal strategies to architectural changes), rely on gradient information to avoid forgetting (Zhou et al., 2023c; Bian et al., 2024). Nonetheless, in real-world scenarios, gradient information is not always available or computable (i.e., the gradient bans), for example: Scenario i: large language models as a service (LLMaaS) and black-box APIs. Scenario ii: hardware systems that do not support principled backpropagation. Scenario iii: AI for science with non-differentiable underlying systems.
|
| 21 |
+
|
| 22 |
+
In other words, Scenario $i$ implies that pretrained models are monetized (Miura et al., 2024) (model owners do not publicly release their pretrained models but instead offer a service), i.e., only the inputs and outputs are accessible (Gan et al., 2023; Sun et al., 2022). Scenarios ii/iii imply that practical limitations prevent or restrict the execution of backpropagation (Lillicrap et al., 2020), e.g., extremely high memory demands (Mangrulkar et al., 2022), unsupported systems and hardware (Jabri & Flower, 1992), or non-differentiable functions (Tavanaei et al., 2019; Gu et al., 2021). This means that typical methods for overcoming forgetting are unavailable because backpropagation is banned, as shown in Figure 1. This yields the primary question to be explored,
|
| 23 |
+
|
| 24 |
+

|
| 25 |
+
(a) EASE on average accuracy
|
| 26 |
+
|
| 27 |
+

|
| 28 |
+
(b) EASE on forgetting
|
| 29 |
+
|
| 30 |
+

|
| 31 |
+
(c) APER on average accuracy
|
| 32 |
+
|
| 33 |
+

|
| 34 |
+
(d) APER on forgetting
|
| 35 |
+
Figure 2: ZeroFlow Evaluation Results of Catastrophic Forgetting. We visualize the evaluation results of 2 models (EASE (Zhou et al., 2024b) and APER (Zhou et al., 2023a)) in several ZeroFlow dimensions (average accuracy over all tasks and a forgetting metric). For comprehensive numerical results, please refer to Table 1.
|
| 36 |
+
|
| 37 |
+
(Q) Could we establish a benchmark under gradient bans for overcoming catastrophic forgetting, and explore the overlooked optimization principles?
|
| 38 |
+
|
| 39 |
+
To tackle $(Q)$, a natural idea is to use forward pass-based methods (Hinton, 2022; Baydin et al., 2022; Ren et al., 2022) instead of backpropagation to overcome forgetting. Zeroth-order (ZO) optimization methods (Flaxman et al., 2004; Nesterov & Spokoiny, 2017; Malladi et al., 2023; Ghadimi & Lan, 2013), as representative methods, are well-suited to this issue due to their relaxed information requirements, as they rely only on function values rather than gradients. Under gradient bans, DECL and DFCL (Yang et al., 2024) first attempt to overcome forgetting from a stream of APIs, but they focus on the synthetic data level rather than on optimization. Therefore, it remains unclear whether benchmark studies using gradient-free methods can mitigate forgetting.
|
| 40 |
+
|
| 41 |
+
In this work, we explore several Zeroth-order optimization methods on dynamic data Flow (as shown in Figure 1), examining their performance across various forgetting scenarios, model types, and evaluation metrics. Through a detailed analysis, we reveal the overlooked potential of forward passes and various ZO methods in overcoming catastrophic forgetting. This benchmark study offers an easier way to overcome forgetting and helps reveal the pros and cons of these methods in alleviating forgetting. Building on the gained insights, we introduce three new enhancement variants that further improve ZO optimization for overcoming catastrophic forgetting. Simply put, we can mitigate forgetting more effectively and efficiently using only forward passes.
|
| 42 |
+
|
| 43 |
+
Our rationale for choosing the ZO optimization algorithms to overcome forgetting rests on the following two key considerations: (i) implementation cost minimization, that is, we expect minimal modifications to existing optimizers; (ii) theoretical diversity, that is, we expect to cover diverse optimization methods. These considerations ensure that our benchmark is comprehensive and simple. An appealing
|
| 44 |
+
|
| 45 |
+
property is that forward passes alone are enough to overcome forgetting. Maybe, once is all it takes!
|
| 46 |
+
|
| 47 |
+
To sum up, our contributions are listed below,
|
| 48 |
+
|
| 49 |
+
(i) We propose the first benchmark ZeroFlow for overcoming forgetting under gradient bans. This benchmark includes our investigations into 7 forward pass optimization algorithms, several forgetting scenarios and datasets with varying complexity, and task sequences (as Figure 2).
|
| 50 |
+
(ii) Through this benchmark, we uncover overlooked optimization principles and insights into how forward passes can mitigate forgetting. These include the role of forward passes in managing task conflicts and the trade-offs between forgetting and memory efficiency. We show that catastrophic forgetting can be overcome in an easier way!
|
| 51 |
+
(iii) Apart from a comprehensive evaluation of catastrophic forgetting, we introduce three enhancement techniques, which further improve the performance and efficiency of just forward passes to overcome forgetting.
|
| 52 |
+
|
| 53 |
+
# 2. Literatures
|
| 54 |
+
|
| 55 |
+
Catastrophic forgetting. Catastrophic forgetting occurs across various tasks, including CL, fine-tuning of FMs, and CPT (Zhou et al., 2023b; Wang et al., 2023; Zhuang et al., 2022a; Luo et al., 2023). To mitigate this issue, various methods have been proposed (Aojun et al., 2025; Jeeveswaran et al., 2023; Sun et al., 2023b; Li et al., 2024). In CL, methods range from regularization and rehearsal strategies to architectural changes (Zhuang et al., 2023; Bian et al., 2024; Lu et al., 2024). Lately, pre-trained models (PTM) further advanced these methods due to their strong generalization (Yuan et al., 2022; Feng et al., 2022a), as seen in PTM-based CL (Zhou et al., 2024a). All these methods share a common goal: achieving an optimal balance between learning plasticity and memory stability (Wang et al., 2023). In FMs, catastrophic forgetting often arises from overfitting to small fine-tuning datasets during CPT or
|
| 56 |
+
|
| 57 |
+
fine-tuning (Luo et al., 2023; Zhu et al., 2024a). Common techniques to address this include learning rate adjustment, parameter-efficient fine-tuning, mixed data strategies, and instruction tuning (Luo et al., 2023; Zhang et al., 2025). Additionally, as foundational models increasingly gain multimodal capabilities, the complexity of catastrophic forgetting also intensifies (Zhao et al., 2024a; Zhu et al., 2024a).
|
| 58 |
+
|
| 59 |
+
Optimization for catastrophic forgetting. Two broad categories of optimization methods exist for overcoming forgetting, (i) Standard Optimization. SGD and the Adam family are frequently employed to investigate catastrophic forgetting (Hadsell et al., 2020; Masana et al., 2022). For instance, in CL, various CL methods predominantly utilize the SGD optimizer for standard evaluations (van de Ven et al., 2022; Sun et al., 2023a; Zhou et al., 2024c). In fine-tuning the LLM, the Adam series is commonly used to observe forgetting phenomena (Luo et al., 2023; Zhu et al., 2024a). Some works explored orthogonal spaces with these standard optimizers to alleviate forgetting (Lopez-Paz & Ranzato, 2017; Feng et al., 2022c; Saha et al., 2020), such as OGD (Farajtabar et al., 2020), and GPM (Saha et al., 2020). Moreover, other works (Farajtabar et al., 2020; Chaudhry et al., 2018; Lopez-Paz & Ranzato, 2017) modified the gradients in the standard optimization process to align the learning spaces of new and old tasks, such as Uni-Grad (Li et al., 2024). The core of these efforts (Deng et al., 2021; Shi et al., 2021) is to find an equilibrium between learning and forgetting in optimization. (ii) Sharpness-aware Optimization. This series of methods (He et al., 2019; Foret et al., 2020; Zhong et al., 2022; Zhuang et al., 2022b) has gained attention due to the effectiveness of the flat minimum in mitigating forgetting (Li et al., 2024; Kong et al., 2023; Cha et al., 2021; Mehta et al., 2023). Methods such as FS-DPGM (Deng et al., 2021), F2M (Shi et al., 2021), DFGP (Yang et al., 2023), SAM-CL (Tung et al., 2023) overcome forgetting in the flatness areas of different configurations. C-Flat (Bian et al., 2024) proposed a CL-friendly general optimization framework, that holds promise as a baseline optimizer for overcoming forgetting.
|
| 60 |
+
|
| 61 |
+
Our work. The works mentioned above are all rooted in a gradient feedback mechanism. Such mechanisms are powerless against catastrophic forgetting without explicit gradient information. Our work overcomes forgetting via forward passes only, instead of gradient feedback.
|
| 62 |
+
|
| 63 |
+
# 3. Exploring Zeroth-Order Optimization to Overcome Forgetting
|
| 64 |
+
|
| 65 |
+
# 3.1. Zeroth-Order Optimization
|
| 66 |
+
|
| 67 |
+
Zeroth-order (ZO) optimization has been extensively studied over the years within the realms of numerical computation and approximation algorithms. It functions as an alterna-
|
| 68 |
+
|
| 69 |
+
# Algorithm 1 Generic formulation of ZO optimization
|
| 70 |
+
|
| 71 |
+
Require: Initialized model parameters $\theta_0\in \Theta \subseteq \mathbb{R}^d$, training dataset $\mathcal{D} = \{(x_i,y_i)\}_{i = 1}^m\in \mathcal{X}\times \mathcal{Y}$, empirical loss function $\mathcal{L}$, learning rate $\eta_t$, gradient perturbation vector $\xi$, and descent direction computation $\phi (\cdot)$
|
| 72 |
+
|
| 73 |
+
1: while $\theta_t$ not converged do
|
| 74 |
+
|
| 75 |
+
2: Sample mini-batch $\mathcal{B}$ from $\mathcal{D}$
|
| 76 |
+
|
| 77 |
+
3: Step 1. ZO gradient estimation:
|
| 78 |
+
|
| 79 |
+
4: $\hat{\mathbf{g}}_t = \hat{\nabla}\mathcal{L}(\theta ,\xi ;\mathcal{B})$
|
| 80 |
+
|
| 81 |
+
5: Step 2. Descent direction computation:
|
| 82 |
+
|
| 83 |
+
6: $\mathbf{h}_t = \phi (\{\hat{\mathbf{g}}_i\}_{i = 1}^t)$
|
| 84 |
+
|
| 85 |
+
7: Step 3. Parameter updating:
|
| 86 |
+
|
| 87 |
+
8: $\theta_{t + 1} = \theta_t - \eta_t\cdot \mathbf{h}_t$
|
| 88 |
+
|
| 89 |
+
9: $t = t + 1$
|
| 90 |
+
|
| 91 |
+
10: end while
|
| 92 |
+
|
| 93 |
+
Ensure: Updated model $\theta_t$
|
| 94 |
+
|
| 95 |
+
tive solution for estimating descent directions in scenarios where first-order (FO) gradients are either inaccessible or infeasible to compute. Consider a deep learning model parameterized by $\theta \in \Theta \subseteq \mathbb{R}^d$, and a mini-batch $\mathcal{B}$ extracted from the training dataset $\mathcal{D} = \{(x_i, y_i)\}_{i=1}^m$. Let $\mathcal{L}(\theta; \mathcal{B})$ denote the empirical loss; then the generic formulation of ZO optimization follows Algorithm 1.
|
| 96 |
+
|
| 97 |
+
1) ZO gradient estimation. Randomized Gradient Estimation (RGE (Nesterov & Spokoiny, 2017)) and Coordinate-wise Gradient Estimation (CGE (Berahas et al., 2022)) perturb the model using $\xi$, which is generated either from a random distribution (in RGE) or by modifying individual coordinates (in CGE), and then observe the changes in the loss function $\mathcal{L}$ after each perturbation, step by step, to provide a reliable gradient estimate. However, due to their reliance on slow single-direction perturbations, these methods are not well-suited for deep learning tasks, as performing a full perturbation sweep in high-dimensional parameter spaces is time-consuming. For instance, typical vision models like ResNet trained on ImageNet have over 25 million parameters; performing per-dimension perturbations over such a large parameter space renders ZO-based querying highly inefficient. Standard Simultaneous Perturbation Stochastic Approximation (SPSA (Spall, 1992)) improves efficiency by generating pairs of symmetric vectors and perturbing in multiple directions simultaneously, as follows,
|
| 98 |
+
|
| 99 |
+
$$
|
| 100 |
+
\hat{\nabla}\mathcal{L}(\theta, \xi; \mathcal{B}) = \frac{\mathcal{L}(\theta + \epsilon \xi; \mathcal{B}) - \mathcal{L}(\theta - \epsilon \xi; \mathcal{B})}{2\epsilon}\, \xi^{-1}. \tag{1}
|
| 101 |
+
$$
|
| 102 |
+
|
| 103 |
+
where $\epsilon$ is a positive scalar and $\xi$ is recommended to follow a symmetric distribution with finite inverse moments (e.g., the Rademacher distribution). The symmetric distribution ensures unbiased exploration of perturbations in both the positive and negative directions of the parameters at each step. The finite inverse moments property guarantees that the steps
|
| 104 |
+
|
| 105 |
+
are well-controlled, avoiding excessively large steps due to $\xi^{-1}$ drawn from the distribution (e.g., $\mathbb{E}[1 / |\xi|^p]$ for some large p), which would otherwise lead to an unstable optimization process. In practical implementations for models with a large number of parameters (e.g., MeZO (Malladi et al., 2023) in LLMs (Zhao et al., 2024b)), Gaussian noise with zero mean induces substantial perturbations, thereby enhancing exploration across the parameter space and facilitating the escape from local minima. This methodology achieves gradient estimation with only two objective function evaluations, rendering its computational cost independent of input dimensionality. Such computational efficiency has established SPSA as a preferred method for addressing the complexities of high-dimensional deep learning tasks. While increasing $q$ in $q$ -SPSA can improve stability in the update direction, setting $q = 1$ is sufficient for pretrained LLMs (Malladi et al., 2023).
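As a minimal sketch of the two-point estimate in Equation (1), assuming a flattened parameter vector and a Rademacher perturbation regenerated from a stored seed (so the direction never needs to be kept in memory); the function name and the loss closure are illustrative, not the authors' implementation.

```python
import torch

def spsa_gradient_estimate(params, loss_fn, eps=1e-3, seed=0):
    """Two-point ZO gradient estimate: two forward passes, no backpropagation.

    params:  flattened 1-D parameter tensor
    loss_fn: callable evaluating the mini-batch loss at a given parameter vector
    """
    gen = torch.Generator().manual_seed(seed)
    # Rademacher +/-1 direction; for +/-1 entries, the element-wise inverse in Eq. (1)
    # coincides with xi itself.
    xi = torch.randint(0, 2, (params.numel(),), generator=gen).float() * 2 - 1

    with torch.no_grad():
        loss_plus = loss_fn(params + eps * xi)
        loss_minus = loss_fn(params - eps * xi)

    return (loss_plus - loss_minus) / (2 * eps) * xi
```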
|
| 106 |
+
|
| 107 |
+
2) Descent direction computation. In unconstrained optimization for deep learning, the descent direction $\mathbf{h}_t$ generally coincides with the estimated ZO gradient $\hat{\mathbf{g}}_t$ (e.g., ZO-SGD (Ghadimi & Lan, 2013), ZO-SCD (Lian et al., 2016)). To reduce approximation errors, ZO-SGD-Sign (Liu et al., 2019) applies an element-wise sign(·) operation. Additionally, ZO-SVRG (Liu et al., 2018), inspired by variance reduction methods in first-order optimization, adjusts the update step by using estimated gradients from previous training examples. CARS (Kim et al., 2021) adaptively selects the smallest function value in each iteration, which helps maintain monotonicity during optimization.
|
| 108 |
+
|
| 109 |
+
3) Parameter updating. Normally, for most ZO methods, parameters are updated in a similar way with FO optimizers, and the learning rate $\eta_t$ is set to constant. Except for the special design for achieving some constraint prerequisites, several methods make an effort to strike a balance between converge speed and accuracy. ZO-AdaMM (Chen et al., 2019) uses an adaptive learning rate and refines gradient estimation by incorporating momentum from past information. This approach is particularly effective in handling complex and evolving optimization landscapes, where the function's behavior may vary over time or be hard to capture with straightforward gradient approximations.
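Putting the three steps of Algorithm 1 together, a minimal ZO-SGD-style update might look like the sketch below; `spsa_gradient_estimate` is the hypothetical helper from the previous sketch, and the sign variant simply follows the element-wise sign(·) idea described for ZO-SGD-Sign.

```python
import torch  # spsa_gradient_estimate is the helper sketched above

def zo_sgd_step(params, loss_fn, lr=1e-4, eps=1e-3, seed=0, use_sign=False):
    """One parameter update driven purely by forward passes."""
    g_hat = spsa_gradient_estimate(params, loss_fn, eps=eps, seed=seed)   # Step 1: ZO estimate
    h = torch.sign(g_hat) if use_sign else g_hat                          # Step 2: descent direction
    return params - lr * h                                                # Step 3: update

# Usage sketch: a fresh seed per step re-randomizes the perturbation direction.
# params = zo_sgd_step(params, lambda p: loss_on_minibatch(p), seed=step)
```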
|
| 110 |
+
|
| 111 |
+
# 3.2. Zeroth-Order Optimization for Catastrophic Forgetting
|
| 112 |
+
|
| 113 |
+
Rationality. ZO optimization leverages the function values of the forward passes to approximate FO gradients, making it feasible to avoid gradient bans. This feature enables seamless integration into common forgetting scenarios, such as CL. We explore it in the following three categories.
|
| 114 |
+
|
| 115 |
+
i) Memory-based methods maintain a repository of exemplars from previous tasks and dynamically adjust the overall loss function by combining these stored samples with new
|
| 116 |
+
|
| 117 |
+

|
| 118 |
+
(a) FO-Adam
|
| 119 |
+
|
| 120 |
+

|
| 121 |
+
(b) ZO-Adam
|
| 122 |
+
Figure 3: Trajectory of FO and ZO Optimization during Overcoming Forgetting. The trajectory is taken when using the total loss from both tasks (cyan) and the gradients from each individual task at fixed points during optimization (red and orange). The trends of ZO optimization hold the potential to manage forgetting and learning.
|
| 123 |
+
|
| 124 |
+
data based on learning progress.
|
| 125 |
+
|
| 126 |
+
$$
|
| 127 |
+
\mathcal{L}_{\text{total}} = \frac{1}{N_{\text{context}}} \mathcal{L}_{\text{cur}} + \left(1 - \frac{1}{N_{\text{context}}}\right) \mathcal{L}_{\text{replay}}, \tag{2}
|
| 128 |
+
$$
|
| 129 |
+
|
| 130 |
+
where $N_{\text{context}}$ represents the number of contexts encountered so far. In Experience Replay (Rolnick et al., 2019), both components use a classification loss based on their respective data distributions, so the ZO gradients can be expressed as $\hat{\nabla}\mathcal{L}_{\text{cur}}$ and $\hat{\nabla}\mathcal{L}_{\text{replay}}$, respectively. However, in the emerging generative replay workflows (Shin et al., 2017), Equation (2) may introduce an additional loss for training the generator. In this case, the generator can be trained using standard backpropagation or jointly with ZO training without FO gradients.
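A minimal sketch of how Equation (2) can be evaluated purely through forward passes (the loss helper and batch objects are hypothetical placeholders), so that the resulting scalar can be fed directly to a two-point ZO estimator:

```python
def replay_total_loss(params, n_context, cur_batch, replay_batch, loss_on):
    """Combine current-task and replay losses as in Eq. (2).

    loss_on(params, batch) is a forward-pass-only loss evaluation, so this
    scalar objective needs no backpropagation to drive ZO updates.
    """
    w = 1.0 / n_context
    return w * loss_on(params, cur_batch) + (1.0 - w) * loss_on(params, replay_batch)
```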
|
| 131 |
+
|
| 132 |
+
ii) Extension-based methods can be divided into fixed and dynamic architectures. Fixed architectures separate model parameters for specialized context learning, while dynamic architectures expand the model size during adaptation. Both approaches mitigate forgetting from the model's perspective and enable model-agnostic ZO solutions.
|
| 133 |
+
iii) Regularization-based methods penalize significant changes to parameters important for old tasks or maintain the output distribution with respect to previous inputs. The template loss function is given by
|
| 134 |
+
|
| 135 |
+
$$
|
| 136 |
+
\mathcal{L}_{\text{total}} = \mathcal{L}_{\text{cur}} + \alpha \mathcal{L}_{\text{reg}}, \tag{3}
|
| 137 |
+
$$
|
| 138 |
+
|
| 139 |
+
where $\alpha$ is a coefficient hyperparameter. The FO gradients from the dual objectives ($\mathcal{L}_{\text{cur}}$ for adaptation and $\mathcal{L}_{\text{reg}}$ for preservation) drive optimization toward their respective optima, achieving inter-task equilibrium. Notably, ZO gradient estimates, though obtained in a noisy environment, exhibit comparable optimization behavior.
|
| 140 |
+
|
| 141 |
+
As shown in Figure 3, we visualize and compare the optimization trajectories of ZO and FO methods under the learning-memory trade-off dynamics in continual learning. The objective is defined over two-dimensional parameters, with axes specified in Appendix A.2. The striking similarity
|
| 142 |
+
|
| 143 |
+
Table 1: ZeroFlow Evaluation on CIFAR-100, ImageNet-A, CUB and OmniBenchmark. This table compares average accuracy, final accuracy, and forgetting measures of 2 models, and 4 forgetting scenarios. More intuitive trend please see Figure 2. All ZO optimizations use a query budget of $q = 1$ . Bold indicates the best accuracy achieved among ZeroFlow.
|
| 144 |
+
|
| 145 |
+
<table><tr><td rowspan="2">Method</td><td rowspan="2">Optimizer</td><td rowspan="2">Strategy</td><td colspan="3">CIFAR-100</td><td colspan="3">CUB</td><td colspan="3">ImageNet-A</td><td colspan="3">OmniBenchmark</td></tr><tr><td>Avg</td><td>Last</td><td>Fgt</td><td>Avg</td><td>Last</td><td>Fgt</td><td>Avg</td><td>Last</td><td>Fgt</td><td>Avg</td><td>Last</td><td>Fgt</td></tr><tr><td rowspan="9">EASE</td><td rowspan="4">SGD</td><td>FO</td><td>91.23</td><td>85.96</td><td>7.32</td><td>89.31</td><td>83.76</td><td>9.61</td><td>61.24</td><td>51.02</td><td>10.84</td><td>74.73</td><td>67.40</td><td>15.11</td></tr><tr><td>ZO</td><td>78.62</td><td>68.40</td><td>15.64</td><td>88.94</td><td>82.91</td><td>8.08</td><td>57.87</td><td>48.32</td><td>11.08</td><td>73.50</td><td>66.60</td><td>17.78</td></tr><tr><td>Sign</td><td>83.21</td><td>75.88</td><td>10.58</td><td>89.81</td><td>84.61</td><td>8.10</td><td>59.15</td><td>49.31</td><td>11.77</td><td>73.81</td><td>66.75</td><td>17.21</td></tr><tr><td>Conserve</td><td>82.22</td><td>75.88</td><td>8.93</td><td>89.21</td><td>83.42</td><td>10.31</td><td>58.61</td><td>48.58</td><td>12.41</td><td>77.07</td><td>70.73</td><td>14.87</td></tr><tr><td rowspan="4">Adam</td><td>FO</td><td>90.56</td><td>84.82</td><td>7.69</td><td>84.44</td><td>77.10</td><td>10.51</td><td>59.60</td><td>47.20</td><td>19.08</td><td>74.27</td><td>66.28</td><td>15.63</td></tr><tr><td>ZO</td><td>83.36</td><td>76.09</td><td>10.16</td><td>89.49</td><td>84.14</td><td>8.67</td><td>58.90</td><td>48.72</td><td>12.35</td><td>76.15</td><td>69.69</td><td>15.87</td></tr><tr><td>Sign</td><td>83.14</td><td>76.01</td><td>10.44</td><td>89.82</td><td>84.65</td><td>8.21</td><td>58.97</td><td>48.85</td><td>12.20</td><td>77.12</td><td>71.08</td><td>14.68</td></tr><tr><td>Conserve</td><td>82.15</td><td>75.65</td><td>9.24</td><td>89.82</td><td>84.61</td><td>8.40</td><td>59.23</td><td>48.85</td><td>12.81</td><td>77.19</td><td>70.99</td><td>14.68</td></tr><tr><td>-</td><td>Forward</td><td>82.26</td><td>76.05</td><td>8.74</td><td>89.26</td><td>83.67</td><td>9.35</td><td>57.76</td><td>48.19</td><td>11.03</td><td>77.00</td><td>70.74</td><td>14.99</td></tr><tr><td rowspan="9">APER</td><td rowspan="4">SGD</td><td>FO</td><td>82.31</td><td>76.21</td><td>7.33</td><td>90.56</td><td>85.16</td><td>5.19</td><td>59.50</td><td>49.37</td><td>9.91</td><td>78.61</td><td>72.21</td><td>7.87</td></tr><tr><td>ZO</td><td>82.33</td><td>76.21</td><td>7.36</td><td>90.53</td><td>85.20</td><td>5.12</td><td>59.58</td><td>49.51</td><td>10.02</td><td>78.60</td><td>72.21</td><td>7.85</td></tr><tr><td>Sign</td><td>82.32</td><td>76.23</td><td>7.32</td><td>90.42</td><td>85.28</td><td>4.96</td><td>59.65</td><td>49.77</td><td>9.89</td><td>78.60</td><td>72.26</td><td>7.78</td></tr><tr><td>Conserve</td><td>82.31</td><td>76.21</td><td>7.33</td><td>90.62</td><td>85.28</td><td>5.05</td><td>59.68</td><td>49.70</td><td>10.18</td><td>78.61</td><td>72.21</td><td>7.87</td></tr><tr><td 
rowspan="4">Adam</td><td>FO</td><td>82.31</td><td>76.21</td><td>7.33</td><td>90.56</td><td>85.16</td><td>5.19</td><td>59.60</td><td>49.77</td><td>10.06</td><td>76.60</td><td>72.21</td><td>7.85</td></tr><tr><td>ZO</td><td>82.12</td><td>75.45</td><td>7.47</td><td>90.33</td><td>84.31</td><td>6.01</td><td>58.89</td><td>49.24</td><td>9.32</td><td>78.44</td><td>72.10</td><td>7.87</td></tr><tr><td>Sign</td><td>82.01</td><td>75.60</td><td>7.38</td><td>89.86</td><td>84.18</td><td>5.99</td><td>57.82</td><td>48.12</td><td>9.72</td><td>78.26</td><td>72.05</td><td>7.75</td></tr><tr><td>Conserve</td><td>82.21</td><td>75.98</td><td>7.34</td><td>89.96</td><td>84.48</td><td>5.90</td><td>57.86</td><td>47.53</td><td>10.00</td><td>78.61</td><td>72.21</td><td>7.87</td></tr><tr><td>-</td><td>Forward</td><td>82.32</td><td>76.22</td><td>7.32</td><td>89.47</td><td>83.38</td><td>6.24</td><td>58.25</td><td>47.99</td><td>9.62</td><td>77.61</td><td>71.45</td><td>7.87</td></tr></table>
|
| 146 |
+
|
| 147 |
+
between the two trajectories highlights the potential of ZO optimization in effectively balancing learning and forgetting, thereby motivating our further investigation.
|
| 148 |
+
|
| 149 |
+
Potential. The intrinsic optimization mechanism of ZO exhibits particular promise in continual learning scenarios. Intuitively, ZO perturbs parameters using random or coordinate-wise directional vectors and observes changes in the evaluation function, effectively optimizing within a noisy environment. This approach enables small parameter modifications to yield significant impacts on the target objectives, resulting in distinctive gradient estimations compared to FO optimization. Notably, while ZO methods do not explicitly incorporate sharpness regularization terms, they naturally facilitate the exploration of flat regions in parameter space. The influence of optimizing toward flat regions with ZO approaches in continual learning can be summarized in two main aspects: (i) for previous tasks, the noise-induced parameter robustness enhances resilience against perturbations from new task adaptation; (ii) for new tasks, empirical evidence suggests that convergence to flat minima generally leads to lower generalization error.
|
| 150 |
+
|
| 151 |
+
Risk. Although ZO demonstrates superior generalization abilities, its practical performance is limited by the optimization strategy and the complexity of the optimization setting. Despite significant efforts to reduce convergence error, optimizing models from scratch in high-dimensional space remains challenging due to slow convergence (proportional to the parameter dimension $d$). For instance, original CGE-based ZO training for a model with 12k parameters
|
| 152 |
+
|
| 153 |
+
takes 70.32 hours in DeepZero (Chen et al., 2023). Such computational demands render from scratch training impractical for high-dimensional CL models, particularly those employing expansion-based architectures. Consequently, we focus our discussion on leveraging ZO optimization to overcome forgetting within a pre-training context.
|
| 154 |
+
|
| 155 |
+
# 4. ZeroFlow Benchmark
|
| 156 |
+
|
| 157 |
+
This section delves into the empirical performance of ZO optimization in overcoming catastrophic forgetting. Our ZeroFlow benchmark evaluates average performance across incremental stages, final-stage accuracy, forgetting, and efficiency, while accounting for dataset complexity and model diversity.
|
| 158 |
+
|
| 159 |
+
# 4.1. Benchmark Setups
|
| 160 |
+
|
| 161 |
+
Forgetting scenarios, schemes, and models. We conduct evaluations under a standard catastrophic forgetting setting, namely class incremental learning. For this purpose, we investigate two state-of-the-art schemes: EASE and APER. Both models are initialized with ViT-B/16 pretrained on ImageNet-1K (IN1K), and are subsequently fine-tuned on four downstream tasks of varying complexity—ranging from standard benchmarks such as CIFAR-100 and CUB, to more challenging datasets like ImageNet-A and OmniBenchmark, which exhibit a large domain gap from the pretraining distribution (Zhou et al., 2024a;c). Following (Zhou et al., 2023a), each dataset is evenly split into 10
|
| 162 |
+
|
| 163 |
+

|
| 164 |
+
|
| 165 |
+

|
| 166 |
+
|
| 167 |
+

|
| 168 |
+
|
| 169 |
+

|
| 170 |
+
|
| 171 |
+

|
| 172 |
+
|
| 173 |
+

|
| 174 |
+
(a) FO-Adam
|
| 175 |
+
(f) FO-SGD
|
| 176 |
+
Figure 4: The Trajectory of Different Optimization during Overcoming Forgetting. $\downarrow$ , $\downarrow$ , and $\star$ denote the minima for the new, old, and both tasks, respectively. The trajectory is taken when using the total loss from both tasks (cyan).
|
| 177 |
+
|
| 178 |
+

|
| 179 |
+
(b) ZO-Adam
|
| 180 |
+
(g) ZO-SGD
|
| 181 |
+
|
| 182 |
+

|
| 183 |
+
(c) ZO-Adam $(q = 4)$
|
| 184 |
+
(h) ZO-SGD $(q = 4)$
|
| 185 |
+
|
| 186 |
+

|
| 187 |
+
(d) ZO-Adam-Sign
|
| 188 |
+
(i) ZO-SGD-Sign
|
| 189 |
+
|
| 190 |
+

|
| 191 |
+
(e) ZO-Adam-Conserve
|
| 192 |
+
(j) ZO-SGD-Conserve
|
| 193 |
+
|
| 194 |
+
incremental tasks by class. For instance, OmniBenchmark contains 300 classes, with 30 classes introduced at each stage. No memory is permitted for storing past examples.
|
| 195 |
+
|
| 196 |
+
Benchmark setup and details. To evaluate the application of ZeroFlow in forgetting scenarios, we include the methods described in Section 3.1, specifically ZO (Ghadimi & Lan, 2013), Sign (Liu et al., 2019), and Conserve (Kim et al., 2021; Zhang et al., 2024), in comparison with their FO counterparts using the SGD and Adam optimizers (Chen et al., 2019). Additionally, as highlighted in (Zhang et al., 2024), Forward-Grad (Baydin et al., 2022), which relies on forward-mode automatic differentiation, is a previously missing but competitive forward-pass baseline. In a nutshell, ZeroFlow covers 7 forward pass-based methods: ZO-SGD, ZO-SGD-Sign, ZO-SGD-Conserve, ZO-Adam, ZO-Adam-Sign, ZO-Adam-Conserve, and Forward-Grad. Unless otherwise specified, the query budget is fixed to 1 for efficiency. Notably, we consider generating one set of perturbation vectors for the entire model as one query; in other words, we typically require 2 forward propagations for a two-point finite-difference gradient estimate.
|
| 197 |
+
|
| 198 |
+
Evaluation metrics. Overall, we adopt two categories of evaluation metrics in ZeroFlow: accuracy and efficiency. The accuracy metrics include average accuracy across all tasks, final-task accuracy, and a forgetting score (BWT in Appendix B.5). The efficiency metrics encompass memory usage (GPU), query budget, and runtime. Together, these metrics provide insights into the resource demands of ZO optimization for mitigating forgetting.
|
| 199 |
+
|
| 200 |
+
# 4.2. Evaluation Results of ZeroFlow
|
| 201 |
+
|
| 202 |
+
ZeroFlow evaluation on continual learning. In Table 1, we evaluate the performance of different BP-free and BP-
|
| 203 |
+
|
| 204 |
+
based (FO-SGD and FO-Adam) methods in a typical forgetting scenario (continual learning). We use two SOTA models as examples (EASE (Zhou et al., 2024b) and APER (Zhou et al., 2023a)) and investigate SGD and Adam optimizers, 7 forward pass-based methods, and four commonly used datasets. Several observations are listed below,
|
| 205 |
+
|
| 206 |
+
First, the performance of the ZO methods is comparable to, or even surpasses, that of the FO methods across almost all forgetting metrics and datasets. However, as will be shown later, the FO methods require significantly more memory overhead. This suggests that forward passes alone can effectively mitigate forgetting, and that ZO methods offer a simpler, more efficient alternative. In some cases, such as with ZO-Adam and ZO-SGD on OmniBenchmark, ZO methods even outperform FO methods.
|
| 207 |
+
|
| 208 |
+
Second, Forward-Grad demonstrates competitive performance when compared to other ZO and FO methods. Unlike typical ZO methods, Forward-Grad utilizes a unique forward pass mechanism, making it a promising baseline for future studies. For a more intuitive view of the trend in overcoming forgetting, refer to Figure 6. These observations motivate further exploration into the effectiveness of ZO methods.
|
| 209 |
+
|
| 210 |
+
ZeroFlow helps manage memory and runtime. In Table 2, we compare the efficiency of various ZO and FO optimizers in mitigating catastrophic forgetting, focusing on two key aspects: memory cost (in GB) and runtime cost (in seconds). First, naive ZO optimization reduces memory usage by approximately fivefold compared to FO optimization. Moreover, ZO methods reduce runtime per iteration by around $50\%$ relative to FO, significantly improving their practicality for overcoming forgetting. Notably, we regenerate the perturbation vectors for model parameters iteratively by storing random seeds. This degrades the vec
|
| 211 |
+
|
| 212 |
+
Table 2: Memory Cost (GB) and Runtime Cost (s) of Each Optimizer on 3 Forgetting Scenarios. The per-epoch runtime in seconds (s). ZO-SGD w/ query budget $q = 1,4$ and all other optimizers w/ query budget $q = 1$ .
|
| 213 |
+
|
| 214 |
+
<table><tr><td>Optimizer</td><td>Memory ↓</td><td>CIFAR-100</td><td>CUB</td><td>ImageNet-A</td></tr><tr><td>FO-SGD</td><td>12.08 GB</td><td>59.3s</td><td>16.1s</td><td>12.2s</td></tr><tr><td>ZO-SGD (q=1)</td><td>2.41 GB</td><td>32.4s</td><td>8.3s</td><td>6.8s</td></tr><tr><td>ZO-SGD (q=4)</td><td>2.41 GB</td><td>111.7s</td><td>28.7s</td><td>18.0s</td></tr><tr><td>ZO-SGD-Sign</td><td>2.41 GB</td><td>32.4s</td><td>8.3s</td><td>6.8s</td></tr><tr><td>ZO-SGD-Conserve</td><td>2.41 GB</td><td>70.1s</td><td>15.7s</td><td>12.4s</td></tr><tr><td>Forward-Grad</td><td>3.94 GB</td><td>45.9s</td><td>11.1s</td><td>9.0s</td></tr></table>
|
| 215 |
+
|
| 216 |
+

|
| 217 |
+
Figure 5: Performance Comparison under Different Query Numbers. Both optimizers show improved performance as query numbers increase.
|
| 218 |
+
|
| 219 |
+
tor granularity from full-model to per-layer level, thereby further reducing the memory required for forward evaluations in ZeroFlow, at the cost of additional runtime for regenerating the vectors. Second, the ZO and Sign variants demonstrate comparable efficiency in both memory and runtime. Although increasing the number of queries can impact runtime efficiency, it does not compromise memory advantages. Third, Conserve also demonstrates efficient memory management, although its runtime is approximately twice as long as that of naive ZO. This may partly explain its stronger performance in some scenarios, as shown in Table 1. Finally, the Forward Gradient method requires more memory than other ZO-based approaches because it involves computing gradients via the Jacobian-vector product (JVP), which necessitates storing all intermediate activations during the forward pass. For models like ViT, this includes large attention maps and other intermediate representations. In contrast, naive ZO methods only require two forward passes on perturbed inputs and avoid storing these intermediate values, resulting in much lower memory usage.
|
| 220 |
+
|
| 221 |
+
Trade-off between performance and query number. As shown in Figure 5, we investigate the impact of query numbers on optimization performance, comparing SGD and Adam optimizers in the zeroth-order setting. Both optimizers demonstrate improved performance as query numbers
|
| 222 |
+
|
| 223 |
+

|
| 224 |
+
(a) EASE on last accuracy
|
| 225 |
+
|
| 226 |
+

|
| 227 |
+
(b) APER on last accuracy
|
| 228 |
+
Figure 6: ZeroFlow Evaluation Results for Forgetting. We visualize the evaluation of 2 models in last-task accuracy.
|
| 229 |
+
|
| 230 |
+
increase across $\{1,2,4,8,16,32\}$ , suggesting that additional function evaluations enable more accurate gradient estimation. The results suggest that in scenarios where function evaluation costs are manageable, higher query numbers can yield substantially better performance, with Adam being particularly effective at leveraging the additional gradient information for enhanced optimization outcomes.
|
| 231 |
+
|
| 232 |
+
# 5. Insights and Discussions
|
| 233 |
+
|
| 234 |
+
As shown in Figure 4, we visualized the optimization trajectories of both forward passes and backpropagation methods. Our analysis reveals several key insights:
|
| 235 |
+
|
| 236 |
+
Convergence behavior across optimizer families. In Figure 4, both FO and ZO methods demonstrate successful convergence to the minima of new and old knowledge spaces, regardless of whether they use Adam or SGD as their base optimizer. This convergence consistency validates our theoretical foundation.
|
| 237 |
+
|
| 238 |
+
Distinct trajectory characteristics of FO and ZO. FO approaches (Figure 4a, 4f) show smoother optimization paths due to their access to exact gradient information. In contrast, ZO methods demonstrate varying degrees of exploration behavior through trajectory jitter. This exploration pattern is particularly pronounced in ZO-Adam variants compared to ZO-SGD variants, indicating that the base optimizer choice significantly influences the exploration-exploitation trade-off during optimization.
|
| 239 |
+
|
| 240 |
+
Path characteristics in ZO optimization. Comparing base ZO methods with their $q = 4$ counterparts (Figure 4b vs 4c, Figure 4g vs 4h), we observe that increasing query numbers leads to smoother trajectories, suggesting that more queries help provide more stable gradient estimates. The Sign variants (Figure 4d, 4i) demonstrate more pronounced oscillations in their trajectories, particularly visible in the ZO-Adam-Sign case. In contrast, the conservative variants (Figure 4e, 4j) maintain relatively stable paths that better balance between the old and new task minima.
|
| 241 |
+
|
| 242 |
+
Distinct characteristics between optimizer families. Adam-based approaches (Figure 4a-4e) demonstrate more
|
| 243 |
+
|
| 244 |
+

|
| 245 |
+
Figure 7: Effectiveness of Hybrid ZO in Overcoming Forgetting. In Hybrid ZO, backpropagation benefits from forward passes.
|
| 246 |
+
|
| 247 |
+
Table 3: Effectiveness of Historical Estimation in Mitigating Forgetting. A proportion of $0\%$ denotes the plain ZO-SGD optimizer. Bold indicates the best performance.
|
| 248 |
+
|
| 249 |
+
<table><tr><td rowspan="2">Metrics</td><td colspan="5">Proportion</td></tr><tr><td>0%</td><td>20%</td><td>40%</td><td>60%</td><td>80%</td></tr><tr><td>Avg</td><td>57.87</td><td>58.90</td><td>58.76</td><td>58.34</td><td>57.83</td></tr><tr><td>Last</td><td>48.32</td><td>49.04</td><td>48.84</td><td>48.42</td><td>48.10</td></tr><tr><td>Fgt</td><td>11.08</td><td>11.79</td><td>11.78</td><td>11.60</td><td>11.57</td></tr></table>
|
| 250 |
+
|
| 251 |
+
oscillatory trajectories with frequent direction adjustments, indicating a more dynamic exploration of the loss landscape. In contrast, SGD-based methods (Figure 4f-4j) exhibit smoother and more stable trajectories, suggesting a more gradual progression toward the optimization objective. These distinct optimization patterns could influence how each method balances between preserving old task knowledge and adapting to new tasks.
|
| 252 |
+
|
| 253 |
+
# 6. New Enhancement to Mitigate Forgetting
|
| 254 |
+
|
| 255 |
+
In ZO optimization, the estimation of the gradients relies on a finite difference of the objective function. We set the query budget to $q = 1$ in the benchmark for efficiency. However, limited queries cannot capture accurate ZO directions. When the model learns tasks sequentially, the high variance inherent in ZO gradient estimation poses a critical challenge. Though increasing the query number can stabilize the gradient estimates, it leads to prohibitive overhead. Thus, exploring variance-reduced optimization algorithms is crucial for ZO-based CL. Specifically, we propose 3 enhancements to stabilize the ZO optimization process:
|
| 256 |
+
|
| 257 |
+
Enhancement 1: Hybrid ZO to overcome forgetting. While ZO methods do not explicitly minimize sharpness, they stabilize optimization by approximating gradients and assessing the rate of change of the loss function through perturbations. This indirect approach helps reduce the curvature of the loss landscape, steering the optimization away from sharp and unstable regions. This insight motivates us to investigate the Hybrid ZO method. Figure 7 illustrates the results of
|
| 258 |
+
|
| 259 |
+

|
| 260 |
+
(a) FO-SGD
|
| 261 |
+
|
| 262 |
+

|
| 263 |
+
(b) ZO-SGD
|
| 264 |
+
Figure 8: Variation in Function Values of Forward Passes. Function values for new tasks are highlighted in red; those for old tasks are highlighted in green.
|
| 265 |
+
|
| 266 |
+
hybrid ZO. We first use FO to coarsely optimize to a local minimum (first 140 or 160 epochs) and then refine the solution by searching for flatter regions around it using ZO (last 30 or 60 epochs). As shown in the first two subfigures of Figure 7, ZO provides only limited gains over FO. This is because FO inherits strong generalization from the pretrained backbone but loses its generalization ability quickly after two incremental stages. In later stages, ZO helps to remedy the vulnerabilities of the backbone trained by FO, leading to significant enhancements compared to the FO baseline.
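A schematic of this schedule is sketched below, assuming hypothetical `fo_step` and `zo_step` update functions; the epoch split mirrors the 140/160-epoch description above.

```python
def hybrid_training(params, loss_fn, fo_step, zo_step, total_epochs=200, fo_epochs=160):
    """First-order training first, then forward-pass-only refinement."""
    for epoch in range(total_epochs):
        if epoch < fo_epochs:
            params = fo_step(params, loss_fn)    # coarse descent with backpropagation
        else:
            params = zo_step(params, loss_fn)    # ZO search around the FO solution
    return params
```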
|
| 267 |
+
|
| 268 |
+
Enhancement 2: Leverage historical information to overcome forgetting. When learning new tasks, models leverage previously learned parameters while prioritizing the preservation of parameters crucial for old tasks. To mitigate interference from new tasks, we propose reweighting old-task gradients with historical gradients, which stabilizes the perturbations caused by the low query budget in ZO optimization. Figure 8 illustrates the function value trajectories for both old and new tasks. While FO optimization shows smooth convergence toward the global optimum, ZO optimization exhibits a more volatile path. Notably, objectives related to old tasks demonstrate smaller changes in both magnitude and variance. This observation motivates us to stabilize the optimization by reducing changes to old gradients through a linear combination with historical gradients: $g_{\text{old}} = (1 - \alpha)g_{\text{old}} + \alpha g_{\text{historical}}$, where a larger $\alpha$ indicates greater reliance on historical information for stability, at the cost of reduced contrast with new-task gradients.
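A minimal sketch of the reweighting rule; `g_old` and `g_hist` denote the current old-task ZO estimate and a record of past estimates, and both names are ours.

```python
def reweight_old_gradient(g_old, g_hist, alpha=0.2):
    """Blend the current old-task ZO gradient with historical information.

    A larger alpha leans more on history (more stable, but less contrast with new tasks).
    """
    return (1.0 - alpha) * g_old + alpha * g_hist
```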
|
| 269 |
+
|
| 270 |
+
Table 4: Effectiveness of Sparsity-induced Estimation in Overcoming Forgetting. Proportion of $0\%$ denotes the plain ZO-SGD. Bold indicates the best performance.
|
| 271 |
+
|
| 272 |
+
<table><tr><td>Ratio</td><td>0%</td><td>10%</td><td>20%</td><td>30%</td><td>40%</td><td>50%</td><td>60%</td><td>70%</td><td>80%</td><td>90%</td></tr><tr><td>Avg</td><td>57.87</td><td>59.17</td><td>59.46</td><td>59.29</td><td>59.39</td><td>59.45</td><td>59.26</td><td>59.39</td><td>59.38</td><td>59.47</td></tr><tr><td>Last</td><td>48.32</td><td>48.58</td><td>49.05</td><td>48.72</td><td>48.91</td><td>49.24</td><td>49.11</td><td>49.05</td><td>49.11</td><td>49.24</td></tr><tr><td>Fgt</td><td>11.08</td><td>12.65</td><td>12.17</td><td>12.76</td><td>12.53</td><td>12.37</td><td>12.36</td><td>12.54</td><td>12.46</td><td>12.33</td></tr></table>
|
| 273 |
+
|
| 274 |
+
Table 5: Ablation Studies on the Effectiveness of Combining Enhancements.
|
| 275 |
+
|
| 276 |
+
<table><tr><td>Optimizer</td><td>Hybrid</td><td>Historical</td><td>Sparsity</td><td>Avg</td><td>Last</td></tr><tr><td>FO-SGD</td><td>-</td><td>-</td><td>-</td><td>61.24</td><td>51.02</td></tr><tr><td rowspan="5">ZO-SGD</td><td>-</td><td>-</td><td>-</td><td>57.87</td><td>48.32</td></tr><tr><td>✓</td><td></td><td></td><td>61.40(+3.53)</td><td>51.34(+3.02)</td></tr><tr><td></td><td>✓</td><td></td><td>58.90(+1.03)</td><td>49.04(+0.72)</td></tr><tr><td></td><td></td><td>✓</td><td>59.47(+1.60)</td><td>49.24(+0.92)</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>62.07(+4.20)</td><td>51.94(+3.62)</td></tr></table>
|
| 277 |
+
|
| 278 |
+
In Table 3, we validate the effectiveness of historical estimation in mitigating catastrophic forgetting. Modest proportions of historical information (e.g., $20\%$ , $40\%$ , $60\%$ ) outperform ZO-SGD $(0\%)$ , effectively controlling perturbations while maintaining a low query budget $(q = 1)$ .
|
| 279 |
+
|
| 280 |
+
Enhancement 3: Sparsity-induced estimation helps to overcome forgetting. In ZO optimization, the gradients for new tasks are often highly uncertain due to the approximation nature of the gradient estimation. To reduce this variance, we implement random sparsification by creating a seed-based mask and setting gradients outside the mask to zero. By reducing the number of non-zero gradient components, we aim to stabilize the optimization process and mitigate the noise in gradient updates.
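A minimal sketch of the seed-based masking described above is shown below; the exact masking code used in the benchmark may differ, and `keep_ratio` and `seed` are illustrative parameters.

```python
import torch

def sparsify_gradient(grad, keep_ratio=0.5, seed=0):
    """Zero out a random but seed-reproducible subset of gradient entries."""
    gen = torch.Generator().manual_seed(seed)                       # same seed -> same mask every step
    mask = (torch.rand(grad.shape, generator=gen) < keep_ratio).to(grad.dtype)
    return grad * mask

g = torch.randn(1000)
g_sparse = sparsify_gradient(g, keep_ratio=0.5, seed=42)            # roughly 50% of entries kept
```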
|
| 281 |
+
|
| 282 |
+
In Table 4, we report the performance of sparsity-induced ZO in overcoming forgetting. The sparsity level is varied in this experiment, ranging from $10\%$ to $90\%$ . We observe that the sparse technique improves the average and last accuracy across all ratios, which implies that forgetting is effectively controlled. The reduction in volatility can be attributed to the sparse strategy yielding smoother gradient estimates than plain ZO-SGD, effectively bounding the variance at a low level and thus mitigating forgetting. Moreover, the robust performance across different sparsity ratios provides strong evidence for the efficacy of variance control in addressing forgetting.
|
| 283 |
+
|
| 284 |
+
Complementary Enhancements: The results in Table 5 demonstrate that the proposed enhancements are not mutually exclusive and can be effectively integrated. Specifically, FO training can substantially benefit from subsequent finetuning with hybrid ZO optimization, as illustrated in Figure 7. Notably, the inherent instability of ZO with large step
|
| 285 |
+
|
| 286 |
+
fluctuations can sometimes facilitate escaping local minima and encourage broader exploration, which in turn benefits FO convergence. Furthermore, incorporating historical gradients and sparsity perturbations contributes to mitigating forgetting and stabilizing the optimization process.
|
| 287 |
+
|
| 288 |
+
# 7. Conclusion
|
| 289 |
+
|
| 290 |
+
This paper introduces ZeroFlow, a benchmark study that probes a series of forward pass-based methods for overcoming catastrophic forgetting. This work resorts to an easier way (no need for backpropagation or activation storage) to overcome forgetting. Concretely, our benchmark covers various forward pass-based methods, forgetting scenarios, and evaluation metrics. We also reveal overlooked optimization principles for overcoming forgetting via forward passes. Based on these insights, we propose easier and better enhancements to overcome forgetting and to broaden the applicability of related methods.
|
| 291 |
+
|
| 292 |
+
# Impact Statement
|
| 293 |
+
|
| 294 |
+
This paper presents work whose goal is to advance the field of Machine Learning. There are many potential societal consequences of our work, none which we feel must be specifically highlighted here.
|
| 295 |
+
|
| 296 |
+
# Acknowledgments
|
| 297 |
+
|
| 298 |
+
This work was supported in part by the National Natural Science Foundation of China (NSFC) under Grant 62495063. This work was supported in part by the China Postdoctoral Science Foundation under Grant 2024M761677.
|
| 299 |
+
|
| 300 |
+
# References
|
| 301 |
+
|
| 302 |
+
Aojun, L., Hangjie, Y., Tao, F., and Yanan, S. Rethinking the stability-plasticity trade-off in continual learning from an architectural perspective. ICML, 2025.
|
| 303 |
+
Baydin, A. G., Pearlmutter, B. A., Syme, D., Wood, F., and Torr, P. Gradients without backpropagation. arXiv preprint arXiv:2202.08587, 2022.
|
| 304 |
+
Berahas, A. S., Cao, L., Choromanski, K., and Scheinberg, K. A theoretical and empirical comparison of gradient approximations in derivative-free optimization. Foundations of Computational Mathematics, 22(2):507-560, 2022.
|
| 305 |
+
Bergou, E. H., Gorbunov, E., and Richtarik, P. Stochastic three points method for unconstrained smooth minimization. SIAM Journal on Optimization, 30(4):2726-2749, 2020.
|
| 306 |
+
Bian, A., Li, W., Yuan, H., Yu, C., Wang, M., Zhao, Z., Lu, A., Ji, P., and Feng, T. Make continual learning stronger via c-flat. NeurIPS, 2024.
|
| 307 |
+
Cha, S., Hsu, H., Hwang, T., Calmon, F. P., and Moon, T. Cpr: classifier-projection regularization for continual learning. *ICLR*, 2021.
|
| 308 |
+
Chaudhry, A., Ranzato, M., Rohrbach, M., and Elhoseiny, M. Efficient lifelong learning with a-gem. arXiv preprint arXiv:1812.00420, 2018.
|
| 309 |
+
Chen, A., Zhang, Y., Jia, J., Diffenderfer, J., Liu, J., Parasyris, K., Zhang, Y., Zhang, Z., Kailkhura, B., and Liu, S. Deepzero: Scaling up zeroth-order optimization for deep model training. arXiv preprint arXiv:2310.02025, 2023.
|
| 310 |
+
Chen, X., Liu, S., Xu, K., Li, X., Lin, X., Hong, M., and Cox, D. Zo-adamm: Zeroth-order adaptive momentum method for black-box optimization. NeurIPS, 32, 2019.
|
| 311 |
+
Deng, D., Chen, G., Hao, J., Wang, Q., and Heng, P.-A. Flattening sharpness for dynamic gradient projection memory benefits continual learning. NeurIPS, 34, 2021.
|
| 312 |
+
Farajtabar, M., Azizan, N., Mott, A., and Li, A. Orthogonal gradient descent for continual learning. In International Conference on Artificial Intelligence and Statistics, pp. 3762-3773. PMLR, 2020.
|
| 313 |
+
Feng, T., Ji, K., Bian, A., Liu, C., and Zhang, J. Identifying players in broadcast videos using graph convolutional network. Pattern Recognition, 124:108503, 2022a.
|
| 314 |
+
Feng, T., Wang, M., and Yuan, H. Overcoming catastrophic forgetting in incremental object detection via elastic response distillation. In CVPR, 2022b.
|
| 315 |
+
|
| 316 |
+
Feng, T., Yuan, H., Wang, M., Huang, Z., Bian, A., and Zhang, J. Progressive learning without forgetting. arXiv preprint arXiv:2211.15215, 2022c.
|
| 317 |
+
Flaxman, A. D., Kalai, A. T., and McMahan, H. B. Online convex optimization in the bandit setting: gradient descent without a gradient. arXiv preprint cs/0408007, 2004.
|
| 318 |
+
Foret, P., Kleiner, A., Mobahi, H., and Neyshabur, B. Sharpness-aware minimization for efficiently improving generalization. arXiv preprint arXiv:2010.01412, 2020.
|
| 319 |
+
Gan, W., Wan, S., and Philip, S. Y. Model-as-a-service (maas): A survey. In 2023 IEEE International Conference on Big Data (BigData), 2023.
|
| 320 |
+
Ghadimi, S. and Lan, G. Stochastic first-and zeroth-order methods for nonconvex stochastic programming. SIAM journal on optimization, 2013.
|
| 321 |
+
Gu, J., Zhu, H., Feng, C., Jiang, Z., Chen, R., and Pan, D. L2ight: Enabling on-chip learning for optical neural networks via efficient in-situ subspace optimization. Advances in Neural Information Processing Systems, 2021.
|
| 322 |
+
Hadsell, R., Rao, D., Rusu, A. A., and Pascanu, R. Embracing change: Continual learning in deep neural networks. Trends in cognitive sciences, 24(12):1028-1040, 2020.
|
| 323 |
+
He, H., Huang, G., and Yuan, Y. Asymmetric valleys: Beyond sharp and flat local minima. NeurIPS, 32, 2019.
|
| 324 |
+
Hinton, G. The forward-forward algorithm: Some preliminary investigations. arXiv preprint arXiv:2212.13345, 2022.
|
| 325 |
+
Jabri, M. and Flower, B. Weight perturbation: An optimal architecture and learning technique for analog VLSI feedforward and recurrent multilayer networks. IEEE Transactions on Neural Networks, 1992.
|
| 326 |
+
Jeeveswaran, K., Bhat, P., Zonooz, B., and Arani, E. Birt: Bio-inspired replay in vision transformers for continual learning. ICML, 2023.
|
| 327 |
+
Kim, B., Cai, H., McKenzie, D., and Yin, W. Curvature-aware derivative-free optimization. arXiv preprint arXiv:2109.13391, 2021.
|
| 328 |
+
Kong, Y., Liu, L., Chen, H., Kacprzyk, J., and Tao, D. Overcoming catastrophic forgetting in continual learning by exploring eigenvalues of hessian matrix. IEEE Transactions on Neural Networks and Learning Systems, 2023.
|
| 329 |
+
Li, W., Feng, T., Yuan, H., Bian, A., Du, G., Liang, S., Gan, J., and Liu, Z. Unigrad-fs: Unified gradient projection with flatter sharpness for continual learning. IEEE Transactions on Industrial Informatics, 2024.
|
| 330 |
+
|
| 331 |
+
Lian, X., Zhang, H., Hsieh, C.-J., Huang, Y., and Liu, J. A comprehensive linear speedup analysis for asynchronous stochastic parallel optimization from zeroth-order to first-order. Advances in Neural Information Processing Systems, 29, 2016.
|
| 332 |
+
Lillicrap, T. P., Santoro, A., Marris, L., Akerman, C. J., and Hinton, G. Backpropagation and the brain. Nature Reviews Neuroscience, 2020.
|
| 333 |
+
Liu, B., Liu, X., Jin, X., Stone, P., and Liu, Q. Conflict-averse gradient descent for multi-task learning. *NeurIPS*, 2021.
|
| 334 |
+
Liu, S., Kailkhura, B., Chen, P.-Y., Ting, P., Chang, S., and Amini, L. Zeroth-order stochastic variance reduction for nonconvex optimization. Advances in Neural Information Processing Systems, 31, 2018.
|
| 335 |
+
Liu, S., Chen, P.-Y., Chen, X., and Hong, M. signSGD via zeroth-order oracle. In International Conference on Learning Representations, 2019.
|
| 336 |
+
Lopez-Paz, D. and Ranzato, M. Gradient episodic memory for continual learning. NeurIPS, 2017.
|
| 337 |
+
Lu, A., Feng, T., Yuan, H., Song, X., and Sun, Y. Revisiting neural networks for continual learning: An architectural perspective. *IJCAI*, 2024.
|
| 338 |
+
Luo, Y., Yang, Z., Meng, F., Li, Y., Zhou, J., and Zhang, Y. An empirical study of catastrophic forgetting in large language models during continual fine-tuning. arXiv preprint arXiv:2308.08747, 2023.
|
| 339 |
+
Malladi, S., Gao, T., Nichani, E., Damian, A., Lee, J. D., Chen, D., and Arora, S. Fine-tuning large language models with just forward passes. NeurIPS, 2023.
|
| 340 |
+
Mangrulkar, S., Gugger, S., Debut, L., Belkada, Y., Paul, S., and Bossan, B. Peft: State-of-the-art parameter-efficient fine-tuning methods. https://github.com/huggingface/peft, 2022.
|
| 341 |
+
Masana, M., Liu, X., Twardowski, B., Menta, M., Bagdanov, A. D., and Van De Weijer, J. Class incremental learning: survey and performance evaluation on image classification. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2022.
|
| 342 |
+
Mehta, S. V., Patil, D., Chandar, S., and Strubell, E. An empirical investigation of the role of pre-training in lifelong learning. J. Mach. Learn. Res., 24:214:1-214:50, 2023. URL https://jmlr.org/papers/v24/22-0496.html.
|
| 343 |
+
Miura, T., Shibahara, T., and Yanai, N. Megex: Data-free model extraction attack against gradient-based explainable ai. In Proceedings of the 2nd ACM Workshop on Secure and Trustworthy Deep Learning Systems, 2024.
|
| 344 |
+
|
| 345 |
+
Nesterov, Y. and Spokoiny, V. Random gradient-free minimization of convex functions. Foundations of Computational Mathematics, 2017.
|
| 346 |
+
Reddi, S. J., Kale, S., and Kumar, S. On the convergence of adam and beyond. arXiv preprint arXiv:1904.09237, 2019.
|
| 347 |
+
Ren, M., Kornblith, S., Liao, R., and Hinton, G. Scaling forward gradient with local losses. arXiv preprint arXiv:2210.03310, 2022.
|
| 348 |
+
Rolnick, D., Ahuja, A., Schwarz, J., Lillicrap, T., and Wayne, G. Experience replay for continual learning. Advances in neural information processing systems, 32, 2019.
|
| 349 |
+
Saha, G., Garg, I., and Roy, K. Gradient projection memory for continual learning. In International Conference on Learning Representations, 2020.
|
| 350 |
+
Shi, G., Chen, J., Zhang, W., Zhan, L.-M., and Wu, X.-M. Overcoming catastrophic forgetting in incremental few-shot learning by finding flat minima. NeurIPS, 2021.
|
| 351 |
+
Shi, H., Xu, Z., Wang, H., Qin, W., Wang, W., Wang, Y., Wang, Z., Ebrahimi, S., and Wang, H. Continual learning of large language models: A comprehensive survey. arXiv preprint arXiv:2404.16789, 2024.
|
| 352 |
+
Shin, H., Lee, J. K., Kim, J., and Kim, J. Continual learning with deep generative replay. Advances in neural information processing systems, 30, 2017.
|
| 353 |
+
Spall, J. C. Multivariate stochastic approximation using a simultaneous perturbation gradient approximation. IEEE transactions on automatic control, 37(3):332-341, 1992.
|
| 354 |
+
Sun, H.-L., Zhou, D.-W., Ye, H.-J., and Zhan, D.-C. Pilot: A pre-trained model-based continual learning toolbox. arXiv preprint arXiv:2309.07117, 2023a.
|
| 355 |
+
Sun, M., Wang, Y., Feng, T., Zhang, D., Zhu, Y., and Tang, J. A stronger mixture of low-rank experts for fine-tuning foundation models, 2025.
|
| 356 |
+
Sun, T., Shao, Y., Qian, H., Huang, X., and Qiu, X. Black-box tuning for language-model-as-a-service. In International Conference on Machine Learning, 2022.
|
| 357 |
+
Sun, Z., Mu, Y., and Hua, G. Regularizing second-order influences for continual learning. In CVPR, 2023b.
|
| 358 |
+
Tavanaei, A., Ghodrati, M., Kheradpisheh, S. R., Masquelier, T., and Maida, A. Deep learning in spiking neural networks. Neural networks, 2019.
|
| 359 |
+
|
| 360 |
+
Tung, L. T., Van, V. N., Hoang, P. N., and Than, K. Sharpness and gradient aware minimization for memory-based continual learning. In Proceedings of the 12th International Symposium on Information and Communication Technology, SOICT. ACM, 2023.
|
| 361 |
+
van de Ven, G. M., Tuytelaars, T., and Tolias, A. S. Three types of incremental learning. Nature Machine Intelligence, pp. 1185-1197, 2022.
|
| 362 |
+
Wang, L., Zhang, X., Su, H., and Zhu, J. A comprehensive survey of continual learning: Theory, method and application. arXiv preprint arXiv:2302.00487, 2023.
|
| 363 |
+
Wang, L., Zhang, X., Su, H., and Zhu, J. A comprehensive survey of continual learning: Theory, method and application. TPAMI, 2024.
|
| 364 |
+
Yang, E., Shen, L., Wang, Z., Liu, S., Guo, G., and Wang, X. Data augmented flatness-aware gradient projection for continual learning. In IEEE/CVF International Conference on Computer Vision, 2023.
|
| 365 |
+
Yang, E., Wang, Z., Shen, L., Yin, N., Liu, T., Guo, G., Wang, X., and Tao, D. Continual learning from a stream of apis. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024.
|
| 366 |
+
Yuan, H., Jiang, J., Albanie, S., Feng, T., Huang, Z., Ni, D., and Tang, M. Rlip: Relational language-image pre-training for human-object interaction detection. In NeurIPS, 2022.
|
| 367 |
+
Yuan, H., Zhang, S., Wang, X., Wei, Y., Feng, T., Pan, Y., Zhang, Y., Liu, Z., Albanie, S., and Ni, D. Instructvideo: instructing video diffusion models with human feedback. In CVPR, 2024.
|
| 368 |
+
Zhang, D., Feng, T., Xue, L., Wang, Y., Dong, Y., and Tang, J. Parameter-efficient fine-tuning for foundation models. arXiv, 2025.
|
| 369 |
+
Zhang, Y., Li, P., Hong, J., Li, J., Zhang, Y., Zheng, W., Chen, P.-Y., Lee, J. D., Yin, W., Hong, M., Wang, Z., Liu, S., and Chen, T. Revisiting zeroth-order optimization for memory-efficient LLM fine-tuning: A benchmark. In *Forty-first International Conference on Machine Learning*, 2024. URL https://openreview.net/forum?id=THPjMr2r0S.
|
| 370 |
+
Zhao, Z., Bai, H., Zhang, J., Zhang, Y., Zhang, K., Xu, S., Chen, D., Timofte, R., and Van Gool, L. Equivariant multi-modality image fusion. In CVPR, 2024a.
|
| 371 |
+
Zhao, Z., Deng, L., Bai, H., Cui, Y., Zhang, Z., Zhang, Y., Qin, H., Chen, D., Zhang, J., Wang, P., and Gool, L. V. Image fusion via vision-language model. In ICML, 2024b.
|
| 372 |
+
|
| 373 |
+
Zhong, Q., Ding, L., Shen, L., Mi, P., Liu, J., Du, B., and Tao, D. Improving sharpness-aware minimization with fisher mask for better generalization on language models. arXiv preprint arXiv:2210.05497, 2022.
|
| 374 |
+
Zhou, D.-W., Cai, Z.-W., Ye, H.-J., Zhan, D.-C., and Liu, Z. Revisiting class-incremental learning with pre-trained models: Generalizability and adaptivity are all you need. arXiv preprint arXiv:2303.07338, 2023a.
|
| 375 |
+
Zhou, D.-W., Wang, Q.-W., Qi, Z.-H., Ye, H.-J., Zhan, D.-C., and Liu, Z. Deep class-incremental learning: A survey. arXiv preprint arXiv:2302.03648, 2023b.
|
| 376 |
+
Zhou, D.-W., Wang, Q.-W., Ye, H.-J., and Zhan, D.-C. A model or 603 exemplars: Towards memory-efficient class-incremental learning. *ICLR*, 2023c.
|
| 377 |
+
Zhou, D.-W., Sun, H.-L., Ning, J., Ye, H.-J., and Zhan, D.-C. Continual learning with pre-trained models: A survey. In *IJCAI*, pp. 8363-8371, 2024a.
|
| 378 |
+
Zhou, D.-W., Sun, H.-L., Ye, H.-J., and Zhan, D.-C. Expandable subspace ensemble for pre-trained model-based class-incremental learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024b.
|
| 379 |
+
Zhou, D.-W., Wang, Q.-W., Qi, Z.-H., Ye, H.-J., Zhan, D.-C., and Liu, Z. Class-incremental learning: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024c.
|
| 380 |
+
Zhu, D., Sun, Z., Li, Z., Shen, T., Yan, K., Ding, S., Kuang, K., and Wu, C. Model tailor: Mitigating catastrophic forgetting in multi-modal large language models. ICML, 2024a.
|
| 381 |
+
Zhu, T., Qu, X., Dong, D., Ruan, J., Tong, J., He, C., and Cheng, Y. Llama-moe: Building mixture-of-experts from llama with continual pre-training. arXiv preprint arXiv:2406.16554, 2024b. URL https://arxiv.org/abs/2406.16554.
|
| 382 |
+
Zhuang, H., Weng, Z., Wei, H., Xie, R., Toh, K.-A., and Lin, Z. ACIL: Analytic class-incremental learning with absolute memorization and privacy protection. In NeurIPS, 2022a.
|
| 383 |
+
Zhuang, H., Weng, Z., He, R., Lin, Z., and Zeng, Z. GKEAL: Gaussian kernel embedded analytic learning for few-shot class incremental task. In CVPR, 2023.
|
| 384 |
+
Zhuang, J., Gong, B., Yuan, L., Cui, Y., Adam, H., Dvornek, N., Tatikonda, S., Duncan, J., and Liu, T. Surrogate gap minimization improves sharpness-aware training. arXiv preprint arXiv:2203.08065, 2022b.
|
| 385 |
+
|
| 386 |
+
# ZeroFlow: Overcoming Catastrophic Forgetting is Easier than You Think Supplementary Material
|
| 387 |
+
|
| 388 |
+
# A. Experimental Details
|
| 389 |
+
|
| 390 |
+
In this section, we provide an overview of zeroth-order optimization algorithms and the function settings used for the trajectory analysis.
|
| 391 |
+
|
| 392 |
+
# A.1. Concise Overview of Zeroth-Order Estimation
|
| 393 |
+
|
| 394 |
+
Zeroth-order optimization aims to minimize/maximize an objective function $f: \mathbb{R}^n \to \mathbb{R}$ without derivative information. The core problem is formulated as $\min_{\theta \in \mathbb{R}^n} L(\theta)$ , where $\theta$ denotes the optimization variable. To enable gradient-based updates, Simultaneous Perturbation Stochastic Approximation (SPSA(Spall, 1992)) is a commonly used technique to approximate gradients by perturbing the input variables. Specifically, the gradient $\hat{\nabla} L(\theta)$ at point $\theta$ is estimated as:
|
| 395 |
+
|
| 396 |
+
$$
|
| 397 |
+
\hat{\nabla} L(\theta, \xi; B) = \frac{L(\theta + \epsilon \xi; B) - L(\theta - \epsilon \xi; B)}{2\epsilon} \cdot \xi^{-1}, \tag{4}
|
| 398 |
+
$$
|
| 399 |
+
|
| 400 |
+
where $\xi \sim \mathcal{N}(\mathbf{0},\mathbf{I})$ is a random perturbation vector, and $\epsilon > 0$ is a small perturbation step size (typically adjusted during optimization).
|
| 401 |
+
|
| 402 |
+
ZO-SGD(Ghadimi & Lan, 2013): Using the gradient estimator $\hat{\nabla} L(\theta, \xi; B)$ , zeroth-order algorithms, such as ZO-SGD, follow the iterative update rule:
|
| 403 |
+
|
| 404 |
+
$$
|
| 405 |
+
\theta_{t+1} = \theta_{t} - \eta_{t} \cdot \hat{\nabla} L\left(\theta_{t}, \xi_{t}; B\right), \tag{5}
|
| 406 |
+
$$
|
| 407 |
+
|
| 408 |
+
where $\eta_{t}$ is the learning rate at step $t$ . ZO-SGD bypasses explicit gradient computation through local function evaluations, making it suitable for high-dimensional, non-convex optimization problems.
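A minimal sketch of two-point estimation and the ZO-SGD update is given below for a single flattened parameter vector; it follows the common practice (e.g., MeZO-style estimators) of scaling by $\xi$ rather than its elementwise inverse, and the toy loss and hyperparameters are illustrative only.

```python
import torch

def zo_sgd_step(theta, loss_fn, lr=1e-3, eps=1e-3, q=1, generator=None):
    """One ZO-SGD step: average q two-point SPSA estimates, then take a gradient-descent step."""
    grad_est = torch.zeros_like(theta)
    for _ in range(q):
        xi = torch.randn(theta.shape, generator=generator)           # random Gaussian direction
        delta = (loss_fn(theta + eps * xi) - loss_fn(theta - eps * xi)) / (2 * eps)
        grad_est += delta * xi
    grad_est /= q
    return theta - lr * grad_est

# Toy usage: minimize a quadratic without ever calling backward().
loss = lambda t: (t ** 2).sum()
theta = torch.tensor([3.0, -2.0])
for _ in range(100):
    theta = zo_sgd_step(theta, loss, lr=0.1, eps=1e-3)
```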
|
| 409 |
+
|
| 410 |
+
ZO-SGD-Sign(Liu et al., 2019): A variant of ZO-SGD, known as ZO-SGD-Sign, improves upon the original approach by approximating the gradient direction using the sign of the gradient estimate. The update rule becomes:
|
| 411 |
+
|
| 412 |
+
$$
|
| 413 |
+
\theta_{t+1} = \theta_{t} - \eta_{t} \cdot \operatorname{sign}\left(\hat{\nabla} L(\theta_{t}, \xi_{t}; B)\right), \tag{6}
|
| 414 |
+
$$
|
| 415 |
+
|
| 416 |
+
where $\mathrm{sign}(\cdot)$ denotes the element-wise sign function. This approach often leads to faster convergence in some problems where the magnitude of the gradient is not as important as its direction.
|
| 417 |
+
|
| 418 |
+
ZO-SGD-Conserve(Bergou et al., 2020): ZO-SGD-Conserve is another variant that conservatively selects the update direction by locally comparing three candidate points, rather than directly committing to a single gradient step. The update rule for this method is:
|
| 419 |
+
|
| 420 |
+
$$
|
| 421 |
+
\theta_{t+1} = \arg\min_{y \in \mathcal{C}_{t}} f(y), \quad \mathcal{C}_{t} = \left\{ \theta_{t},\; \theta_{t} - \eta_{t} \cdot \hat{\nabla} L\left(\theta_{t}, \xi_{t}; B\right),\; \theta_{t} + \eta_{t} \cdot \hat{\nabla} L\left(\theta_{t}, \xi_{t}; B\right) \right\}, \tag{7}
|
| 422 |
+
$$
|
| 423 |
+
|
| 424 |
+
This method mitigates overly aggressive updates by evaluating possible directions and choosing the one that locally minimizes the objective function.
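For illustration, a minimal sketch of the three-point selection in Eq. (7) is shown below; `grad_est` is assumed to be a ZO gradient estimate such as the one sketched earlier.

```python
import torch

def zo_sgd_conserve_step(theta, loss_fn, grad_est, lr=1e-3):
    """Keep the best of {stay, step along -grad_est, step along +grad_est} by function value."""
    candidates = [theta, theta - lr * grad_est, theta + lr * grad_est]
    values = torch.stack([loss_fn(c) for c in candidates])
    return candidates[int(torch.argmin(values))]
```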
|
| 425 |
+
|
| 426 |
+
ZO-Adam(Zhang et al., 2024): ZO-AdaMM (Chen et al., 2019) is the first attempt to apply the Adam family (specifically AMSGrad(Reddi et al., 2019)) to zeroth-order (ZO) optimization algorithms, providing convergence guarantees for both convex and nonconvex settings. The update rule is given by:
|
| 427 |
+
|
| 428 |
+
$$
|
| 429 |
+
\theta_{t+1} = \theta_{t} - \eta_{t} \cdot \frac{m_{t}}{\sqrt{V_{t}} + \epsilon}, \quad V_{t} = \operatorname{Diag}\left(\max\left(v_{t}, v_{t-1}\right)\right), \tag{8}
|
| 430 |
+
$$
|
| 431 |
+
|
| 432 |
+
$$
|
| 433 |
+
m_{t} = \beta_{1} m_{t-1} + (1 - \beta_{1}) \hat{\nabla} L(\theta_{t}, \xi_{t}; B), \quad v_{t} = \beta_{2} v_{t-1} + (1 - \beta_{2}) \left(\hat{\nabla} L(\theta_{t}, \xi_{t}; B)\right)^{2},
|
| 434 |
+
$$
|
| 435 |
+
|
| 436 |
+
In our implementation, we simply replace SGD with Adam for convenience, referring to this variant as ZO-Adam. Nevertheless, we also provide a reference implementation of the original oracle ZO-AdaMM algorithm.
|
| 437 |
+
|
| 438 |
+
Forward Gradient Descent (FGD)(Baydin et al., 2022): FGD replaces backpropagation with forward-mode automatic differentiation to estimate gradient directions using Jacobian-vector products (JVPs). Instead of computing full gradients via reverse-mode automatic differentiation (AD), FGD samples probe vectors to construct unbiased estimators of the gradient direction. A typical FGD update step is:
|
| 439 |
+
|
| 440 |
+
$$
|
| 441 |
+
\theta_{t+1} = \theta_{t} - \eta_{t} \cdot \mathrm{JVP}_{\theta_{t}}(v_{t}) = \theta_{t} - \eta_{t} \cdot \left. \frac{d f(\theta)}{d\theta} \cdot v_{t} \right|_{\theta = \theta_{t}}, \tag{9}
|
| 442 |
+
$$
|
| 443 |
+
|
| 444 |
+
where $v_{t}$ is a random probe vector (e.g., Rademacher or Gaussian), and $\mathrm{JVP}_{\theta_t}(v_t)$ represents the forward-mode gradient approximation in direction $v_{t}$ . FGD enables training when reverse-mode AD is impractical or unavailable, and offers flexibility for hardware or software systems that only support forward execution. We denote Forward as FGD throughout this paper.
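Below is a minimal sketch of a forward-gradient step using PyTorch's forward-mode JVP; note that, following Baydin et al. (2022), the directional derivative is scaled by the probe vector $v$ to form the update. The toy loss, the learning rate, and the availability of `torch.func.jvp` (recent PyTorch releases) are assumptions of this sketch.

```python
import torch
from torch.func import jvp   # forward-mode autodiff; available in recent PyTorch releases

def fgd_step(theta, loss_fn, lr=1e-3):
    """Forward-gradient step: one JVP gives the directional derivative along a random probe v,
    and the update moves along v scaled by that directional derivative."""
    v = torch.randn_like(theta)                    # Gaussian probe vector
    _, dir_deriv = jvp(loss_fn, (theta,), (v,))    # scalar (df/dtheta) . v, no backward pass needed
    return theta - lr * dir_deriv * v

loss = lambda t: (t ** 2).sum()
theta = torch.tensor([3.0, -2.0])
theta = fgd_step(theta, loss, lr=0.1)
```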
|
| 445 |
+
|
| 446 |
+
# A.2. Function Settings
|
| 447 |
+
|
| 448 |
+
Following the setup in CAGrad (Liu et al., 2021), we visualize the optimization behavior of first-order (FO) and zeroth-order (ZO) methods in mitigating forgetting. Specifically, we consider a two-dimensional parameter space $\theta = (\theta_{1},\theta_{2})\in \mathbb{R}^{2}$ , with the following task-specific loss functions: $L_{1}(\theta) = c_{1}(\theta)f_{1}(\theta) + c_{2}(\theta)g_{1}(\theta)$ for the old task (orange), and $L_{2}(\theta) = c_{1}(\theta)f_{2}(\theta) + c_{2}(\theta)g_{2}(\theta)$ for the new task (red). The parameter point is initialized at $[-8.5, - 5]$ to be closer to old tasks, facilitating better adaptation to them. The contour plot in Figure 3 illustrates the overall objective function defined as $L(\theta) = L_{1}(\theta) + L_{2}(\theta)$ , where the $x-$ and $y$ axes correspond to $\theta_{1}$ and $\theta_{2}$ , respectively.
|
| 449 |
+
|
| 450 |
+
$$
|
| 451 |
+
f_{1}(\theta) = \log\left(\max\left(\left| 0.5(-\theta_{1} - 7) - \tanh(-\theta_{2}) \right|, 5 \times 10^{-6}\right)\right) + 6,
|
| 452 |
+
$$
|
| 453 |
+
|
| 454 |
+
$$
|
| 455 |
+
\begin{array}{l} f_{2}(\theta) = \log\left(\max\left(\left| 0.5(-\theta_{1} + 3) - \tanh(-\theta_{2} + 2) \right|, 5 \times 10^{-6}\right)\right) + 6, \\ g_{1}(\theta) = \frac{(-\theta_{1} + 7)^{2} + 0.1(\theta_{2} - 8)^{2}}{10} - 20, \\ g_{2}(\theta) = \frac{(-\theta_{1} - 7)^{2} + 0.1(\theta_{2} - 8)^{2}}{10} - 20, \end{array}
|
| 456 |
+
$$
|
| 457 |
+
|
| 458 |
+
$$
|
| 459 |
+
c_{1}(\theta) = \max\left(\tanh(0.5 \cdot \theta_{2}), 0\right), \quad c_{2}(\theta) = \max\left(\tanh(-0.5 \cdot \theta_{2}), 0\right).
|
| 460 |
+
$$
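For reference, the toy objective can be transcribed directly into NumPy as below to reproduce the contour and trajectory plots; the initialization follows the setup described above, and plotting code is omitted.

```python
import numpy as np

# Task-specific toy losses from Appendix A.2, with theta = (t1, t2).
def f1(t1, t2): return np.log(np.maximum(np.abs(0.5 * (-t1 - 7) - np.tanh(-t2)), 5e-6)) + 6
def f2(t1, t2): return np.log(np.maximum(np.abs(0.5 * (-t1 + 3) - np.tanh(-t2 + 2)), 5e-6)) + 6
def g1(t1, t2): return ((-t1 + 7) ** 2 + 0.1 * (t2 - 8) ** 2) / 10 - 20
def g2(t1, t2): return ((-t1 - 7) ** 2 + 0.1 * (t2 - 8) ** 2) / 10 - 20
def c1(t2): return np.maximum(np.tanh(0.5 * t2), 0)
def c2(t2): return np.maximum(np.tanh(-0.5 * t2), 0)

def L1(t1, t2): return c1(t2) * f1(t1, t2) + c2(t2) * g1(t1, t2)   # old task
def L2(t1, t2): return c1(t2) * f2(t1, t2) + c2(t2) * g2(t1, t2)   # new task
def L(t1, t2):  return L1(t1, t2) + L2(t1, t2)                     # total objective for the contour plot

theta = np.array([-8.5, -5.0])   # initialization used in the trajectory experiments
print(L(theta[0], theta[1]))
```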
|
| 461 |
+
|
| 462 |
+
# B. Additional Results
|
| 463 |
+
|
| 464 |
+
# B.1. Comprehensive Analysis of Memory Usage on ZeroFlow
|
| 465 |
+
|
| 466 |
+

|
| 467 |
+
Figure 9: Comparison of Memory Usage between FO-SGD and ZO-SGD with Different Batch Sizes. $\Delta$ denotes the increase in memory usage of the final task compared to the initial one.
|
| 468 |
+
|
| 469 |
+
In this subsection, we provide a detailed comparison of memory usage during incremental learning to demonstrate the storage efficiency of ZeroFlow (ZO-SGD) compared to its counterpart, FO-SGD. Figure 9 illustrates the peak memory usage of MEMO when trained on the CIFAR-100 dataset. The backbone is fixed as a pretrained ViT-B/16-IN1K model, which is subsequently fine-tuned with batch sizes ranging from 64 to 512. The experimental results highlight the following observations:
|
| 470 |
+
|
| 471 |
+
First, doubling the training batch size significantly increases the memory consumption of FO-SGD, requiring more GPU resources. For instance, completing the entire incremental training process with FO requires 1, 2, 3, and 6 GPUs for batch sizes of 64, 128, 256, and 512, respectively, with each GPU equipped with 24 GB of memory. In contrast, ZO-SGD training consistently requires only a single GPU.
|
| 472 |
+
|
| 473 |
+
Second, as training progresses, the memory demands for larger batch sizes increase rapidly. For FO, the memory consumption for a batch size of 512 at stage 5 grows by 30.08 GB compared to the initial stage. In contrast, ZO-SGD shows a modest increase of only 3.92 GB, maintaining a low growth rate. As training advances, the memory efficiency of ZO-SGD becomes more pronounced, especially for model-expansion-based CL models.
|
| 474 |
+
|
| 475 |
+
# B.2. More Observations on Optimization Trajectories during Overcoming Forgetting
|
| 476 |
+
|
| 477 |
+

|
| 478 |
+
(a) FO-Adam
|
| 479 |
+
|
| 480 |
+

|
| 481 |
+
(b) ZO-Adam
|
| 482 |
+
|
| 483 |
+

|
| 484 |
+
(c) ZO-Adam $(q = 4)$
|
| 485 |
+
|
| 486 |
+

|
| 487 |
+
(d) ZO-Adam-Sign
|
| 488 |
+
|
| 489 |
+

|
| 490 |
+
(e) ZO-Adam-Conserve
|
| 491 |
+
|
| 492 |
+

|
| 493 |
+
(f) FO-SGD
|
| 494 |
+
|
| 495 |
+

|
| 496 |
+
(g) ZO-SGD
|
| 497 |
+
|
| 498 |
+

|
| 499 |
+
(h) ZO-SGD $(q = 4)$
|
| 500 |
+
|
| 501 |
+

|
| 502 |
+
(i) ZO-SGD-Sign
|
| 503 |
+
|
| 504 |
+

|
| 505 |
+
(j) ZO-SGD-Conserve
|
| 506 |
+
|
| 507 |
+

|
| 508 |
+
(k) FO-Adam
|
| 509 |
+
|
| 510 |
+

|
| 511 |
+
(l) ZO-Adam
|
| 512 |
+
|
| 513 |
+

|
| 514 |
+
(m) ZO-Adam $(q = 4)$
|
| 515 |
+
|
| 516 |
+

|
| 517 |
+
(n) ZO-Adam-Sign
|
| 518 |
+
|
| 519 |
+

|
| 520 |
+
(o) ZO-Adam-Conserve
|
| 521 |
+
|
| 522 |
+

|
| 523 |
+
(p) FO-SGD
|
| 524 |
+
|
| 525 |
+

|
| 526 |
+
(q) ZO-SGD
|
| 527 |
+
|
| 528 |
+

|
| 529 |
+
(r) ZO-SGD $(q = 4)$
|
| 530 |
+
|
| 531 |
+

|
| 532 |
+
(s) ZO-SGD-Sign
|
| 533 |
+
Figure 10: The Trajectories of Different Optimizers during Overcoming Forgetting. The first two and last two rows are trained for 100k steps with learning rates of 0.001 and 0.01, respectively. Red denotes the minimum of the new task, and orange denotes the minimum of the old task. The cyan curve is the trajectory taken when using the total loss from both tasks.
|
| 534 |
+
|
| 535 |
+

|
| 536 |
+
(t) ZO-SGD-Conserve
|
| 537 |
+
|
| 538 |
+
In this subsection, we present a different scenario where the model is initialized at a local minimum $\theta_{1},\theta_{2} = \{-4.0,5.0\}$ surrounded by intricate valleys, but trained with different learning rates, as shown in Figure 10. For a learning rate of
|
| 539 |
+
|
| 540 |
+
0.001, the first-row subfigures demonstrate that Adam stagnates in the valley under both FO and ZeroFlow. Even with bias correction, the Adam optimizer still fails to escape the local region without sufficient momentum. However, ZO-Adam-Sign appears to optimize successfully towards the region around the global minimum. Unlike ZO-Adam, ZO-Adam-Sign applies the gradient through a sign function, which outputs either +1 or -1 depending on the gradient direction. This discrete update scheme, which lacks continuous gradient information, causes ZO-Adam-Sign to take larger, step-like jumps. Particularly in the early stages, where gradient information is sparse or noisy, this leads to more fluctuation and introduces greater randomness into the optimization process, helping it cross over the valleys. The second-row subfigures use SGD as the base optimizer. We observe that, except for ZO-SGD-Sign, both ZeroFlow and FO-SGD converge effectively. This can be attributed to SGD's simple update rule based on function values. Notably, FO-SGD escapes the valley by leaping to a higher and flatter region, while ZeroFlow demonstrates the ability to traverse beneath valleys. With a higher learning rate of 0.01, FO-Adam, ZO-Adam with four queries, and ZO-Adam-Sign escape the local region more easily. However, ZO-Adam still stagnates along the valley, demonstrating the stabilizing effect of multiple query loops. Similarly, ZO-Adam-Conserve suffers from the risk of an overly conservative strategy. ZO-SGD also fails to converge to the optimum due to gradient explosion caused by the large learning rate. In contrast, ZeroFlow shows minimal degradation despite its inherent randomness.
|
| 541 |
+
|
| 542 |
+
As a result, the behavior of ZeroFlow—sometimes escaping the valley but failing to converge to the optimum, and sometimes getting trapped with low query counts but not with higher ones—highlights the trade-off between randomness and stability during updates. With larger search loops, lower learning rates, and more stable update steps, the model becomes increasingly prone to getting stuck in local minima, especially in continual learning scenarios where balancing old and new tasks introduces additional complexity.
|
| 543 |
+
|
| 544 |
+
# B.3. Extra Evaluation on Memory Replay Methods
|
| 545 |
+
|
| 546 |
+
We further evaluate the performance of ZeroFlow when applied to a representative replay-based method (MEMO (Zhou et al., 2023c), replay buffer $= 2000$ ), to demonstrate its broader applicability. As shown below, ZeroFlow consistently remains stable in mitigating forgetting. Notably, although the average accuracies exhibit slight gaps compared to FO methods, the accuracies at the final stage progressively approach or even surpass those of the FO baselines on the CIFAR-100 dataset.
|
| 547 |
+
|
| 548 |
+
Table 6: Accuracy Results on MEMO.
|
| 549 |
+
|
| 550 |
+
<table><tr><td rowspan="2">Method</td><td rowspan="2">Optimizer</td><td rowspan="2">Strategy</td><td colspan="2">CIFAR-100</td><td colspan="2">ImageNet-A</td></tr><tr><td>Avg</td><td>Last</td><td>Avg</td><td>Last</td></tr><tr><td rowspan="8">MEMO</td><td rowspan="4">SGD</td><td>FO</td><td>87.43</td><td>79.66</td><td>53.15</td><td>38.97</td></tr><tr><td>ZO</td><td>85.92</td><td>79.00</td><td>46.87</td><td>25.81</td></tr><tr><td>Sign</td><td>85.72</td><td>79.10</td><td>53.31</td><td>38.18</td></tr><tr><td>Conserve</td><td>85.86</td><td>79.20</td><td>47.20</td><td>28.51</td></tr><tr><td rowspan="4">Adam</td><td>FO</td><td>86.45</td><td>76.17</td><td>54.06</td><td>41.54</td></tr><tr><td>ZO</td><td>85.86</td><td>78.59</td><td>52.70</td><td>39.01</td></tr><tr><td>Sign</td><td>86.16</td><td>76.38</td><td>53.10</td><td>39.82</td></tr><tr><td>Conserve</td><td>85.89</td><td>77.71</td><td>53.20</td><td>39.57</td></tr><tr><td></td><td>-</td><td>Forward</td><td>84.63</td><td>76.32</td><td>53.59</td><td>40.64</td></tr></table>
|
| 551 |
+
|
| 552 |
+
# B.4. Memory and Time Efficiency on Larger Transformers
|
| 553 |
+
|
| 554 |
+
To assess the scalability of ZeroFlow, we evaluated its efficiency on two larger vision transformers, ViT-L/16 and ViT-H/14. As shown below, ZeroFlow consistently offers substantial memory savings across all model sizes. Notably, even when using ZO-SGD-Sign, the runtime remains faster than that of standard FO optimization.
|
| 555 |
+
|
| 556 |
+
# B.5. Longer Task Sequence
|
| 557 |
+
|
| 558 |
+
To further assess robustness, we evaluate performance on an extended task sequence consisting of 20 tasks. As shown below, ZeroFlow continues to deliver comparable performance. In addition, following (Wang et al., 2024), we
|
| 559 |
+
|
| 560 |
+
Table 7: Evaluation on larger transformers.
|
| 561 |
+
|
| 562 |
+
<table><tr><td rowspan="2">Optimizer</td><td colspan="2">ViT-B/16</td><td colspan="2">ViT-L/16</td><td colspan="2">ViT-H/14</td></tr><tr><td>Memory↓</td><td>Runtime↓</td><td>Memory↓</td><td>Runtime↓</td><td>Memory↓</td><td>Runtime↓</td></tr><tr><td>FO-SGD</td><td>12.08GB</td><td>59.3s</td><td>33.27GB</td><td>65.0s</td><td>78.09GB</td><td>190.1s</td></tr><tr><td>ZO-SGD (q=1)</td><td>2.41GB</td><td>32.4s</td><td>3.77GB</td><td>47.0s</td><td>6.45GB</td><td>118.7s</td></tr><tr><td>ZO-SGD (q=4)</td><td>2.41GB</td><td>111.7s</td><td>3.77GB</td><td>178.3s</td><td>6.45GB</td><td>442.6s</td></tr><tr><td>ZO-SGD-Sign</td><td>2.41GB</td><td>32.4s</td><td>3.77GB</td><td>48.7s</td><td>6.45GB</td><td>119.3s</td></tr><tr><td>ZO-SGD-Conserve</td><td>2.41GB</td><td>70.1s</td><td>3.77GB</td><td>108.9s</td><td>6.45GB</td><td>222.3s</td></tr><tr><td>Forward</td><td>3.94GB</td><td>45.9s</td><td>5.82GB</td><td>142.0s</td><td>9.85GB</td><td>372.5s</td></tr></table>
|
| 563 |
+
|
| 564 |
+
Table 8: Additional Experimental Results of EASE on 20 Sequential Tasks.
|
| 565 |
+
|
| 566 |
+
<table><tr><td>Method</td><td>Optimizer</td><td>Strategy</td><td>Avg</td><td>Last</td><td>FWT</td><td>BWT</td></tr><tr><td rowspan="9">EASE</td><td rowspan="4">SGD</td><td>FO</td><td>87.32</td><td>80.20</td><td>-6.89</td><td>-6.79</td></tr><tr><td>ZO</td><td>82.65</td><td>75.98</td><td>-8.33</td><td>-7.71</td></tr><tr><td>Sign</td><td>83.47</td><td>76.13</td><td>-8.01</td><td>-7.22</td></tr><tr><td>Conserve</td><td>82.20</td><td>75.94</td><td>-8.64</td><td>-7.93</td></tr><tr><td rowspan="4">Adam</td><td>FO</td><td>86.67</td><td>78.19</td><td>-7.17</td><td>-6.80</td></tr><tr><td>ZO</td><td>84.07</td><td>76.89</td><td>-7.92</td><td>-7.19</td></tr><tr><td>Sign</td><td>84.16</td><td>76.90</td><td>-7.95</td><td>-7.20</td></tr><tr><td>Conserve</td><td>83.82</td><td>76.76</td><td>-8.04</td><td>-7.07</td></tr><tr><td>-</td><td>Forward</td><td>82.84</td><td>76.32</td><td>-8.25</td><td>-10.84</td></tr></table>
|
| 567 |
+
|
| 568 |
+
adopt the FWT and BWT metrics to assess the overall performance of ZeroFlow. FWT (Forward Transfer) quantifies the average influence of prior knowledge on the learning of new tasks, while BWT (Backward Transfer) measures the average influence of learning new tasks on the performance of previously learned $K - 1$ tasks.
|
| 569 |
+
|
| 570 |
+
$$
|
| 571 |
+
\mathrm{FWT} = \frac{1}{K-1} \sum_{j=2}^{K} \left(a_{j,j} - \tilde{a}_{j}\right), \quad \mathrm{BWT} = \frac{1}{K-1} \sum_{j=1}^{K-1} \left(a_{K,j} - a_{j,j}\right) \tag{10}
|
| 572 |
+
$$
|
| 573 |
+
|
| 574 |
+
Here, $a_{k,j}$ denotes the accuracy on task $j$ after training on the $k$-th dataset, and $\tilde{a}_j$ represents the accuracy of a randomly initialized model trained only on dataset $\mathbb{D}_j$.
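As an illustration of Eq. (10), the following sketch computes FWT and BWT from a $K \times K$ accuracy matrix; the 0-indexed layout, argument names, and toy numbers are assumptions for this example.

```python
import numpy as np

def fwt_bwt(acc, acc_random):
    """acc[k, j]: accuracy on task j after training on task k (0-indexed);
    acc_random[j]: accuracy of a randomly initialized model trained only on task j."""
    K = acc.shape[0]
    fwt = np.mean([acc[j, j] - acc_random[j] for j in range(1, K)])     # forward transfer
    bwt = np.mean([acc[K - 1, j] - acc[j, j] for j in range(K - 1)])    # backward transfer
    return fwt, bwt

acc = np.array([[80.0, 0.0], [78.0, 75.0]])     # toy 2-task example
print(fwt_bwt(acc, acc_random=np.array([10.0, 12.0])))
```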
|
ICML/2025/ZeroFlow_ Overcoming Catastrophic Forgetting is Easier than You Think/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a09ef8ec5e61327dfca90a9e11b3d572e3e48e6e0852b2788783022e4b76b50a
|
| 3 |
+
size 1162003
|
ICML/2025/ZeroFlow_ Overcoming Catastrophic Forgetting is Easier than You Think/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d67540535a32989274b18e7a0c32cc1343cd58412e84e8e40a4c9a8572ab511b
|
| 3 |
+
size 653993
|
ICML/2025/ZipAR_ Parallel Autoregressive Image Generation through Spatial Locality/dc43e24f-9f75-429e-86a4-434685053d80_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:983c14ad17bf956a619be261d3f3867b5a69358cce6f69d7d21322ae31436ec0
|
| 3 |
+
size 68273
|
ICML/2025/ZipAR_ Parallel Autoregressive Image Generation through Spatial Locality/dc43e24f-9f75-429e-86a4-434685053d80_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:4844415273f6eb96672c66bce4a7d81149b8cd21dd62f9ad7ed72ca6bc1f67db
|
| 3 |
+
size 84573
|
ICML/2025/ZipAR_ Parallel Autoregressive Image Generation through Spatial Locality/dc43e24f-9f75-429e-86a4-434685053d80_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e2aeb808cc0a2e9394d7aaa6cc842574a20c2763ed8001f7cc9130b4f342b0dc
|
| 3 |
+
size 4846766
|
ICML/2025/ZipAR_ Parallel Autoregressive Image Generation through Spatial Locality/full.md
ADDED
|
@@ -0,0 +1,282 @@
| 1 |
+
# ZipAR: Parallel Autoregressive Image Generation through Spatial Locality
|
| 2 |
+
|
| 3 |
+
Yefei He $^{1,2}$ Feng Chen $^{3}$ Yuanyu He $^{1}$ Shaoxuan He $^{1}$ Hong Zhou $^{1}$ Kaipeng Zhang $^{2}$ Bohan Zhuang
|
| 4 |
+
|
| 5 |
+
Prompt: A bustling downtown street in Tokyo at night, with neon signs, sidewalks, and tall skyscrapers
|
| 6 |
+
|
| 7 |
+

|
| 8 |
+
|
| 9 |
+
Prompt: A mountain lake at sunrise, with mist rising off, and snow-capped peaks in the background
|
| 10 |
+
|
| 11 |
+

|
| 12 |
+
Figure 1: Up to $91\%$ forward step reduction with ZipAR. Samples are generated by the Emu3-Gen model with the next-token prediction paradigm (the first column) and with ZipAR (the right three columns).
|
| 13 |
+
|
| 14 |
+
Steps: 8190
|
| 15 |
+
|
| 16 |
+
Steps: 2849 (-65%)
|
| 17 |
+
|
| 18 |
+
Steps: 1425 (-82%)
|
| 19 |
+
|
| 20 |
+
Steps: 713 $(-91\%)$
|
| 21 |
+
|
| 22 |
+
# Abstract
|
| 23 |
+
|
| 24 |
+
In this paper, we propose ZipAR, a training-free, plug-and-play parallel decoding framework for accelerating autoregressive (AR) visual generation. The motivation stems from the observation that images exhibit local structures, and spatially distant regions tend to have minimal interdependence. Given a partially decoded set of visual tokens, in addition to the original next-token prediction scheme in the row dimension, the tokens
|
| 25 |
+
|
| 26 |
+
<sup>1</sup>Zhejiang University, China <sup>2</sup>Shanghai AI Laboratory, China <sup>3</sup>The University of Adelaide, Australia. Correspondence to: Hong Zhou <zhuohong.zju@zju.edu.cn>, Kaipeng Zhang <kp_zhang@foxmail.com>.
|
| 27 |
+
|
| 28 |
+
Proceedings of the $42^{nd}$ International Conference on Machine Learning, Vancouver, Canada. PMLR 267, 2025. Copyright 2025 by the author(s).
|
| 29 |
+
|
| 30 |
+
corresponding to spatially adjacent regions in the column dimension can be decoded in parallel. To ensure alignment with the contextual requirements of each token, we employ an adaptive local window assignment scheme with rejection sampling analogous to speculative decoding. By decoding multiple tokens in a single forward pass, the number of forward passes required to generate an image is significantly reduced, resulting in a substantial improvement in generation efficiency. Experiments demonstrate that ZipAR can reduce the number of model forward passes by up to $91\%$ on the Emu3-Gen model without requiring any additional retraining.
|
| 31 |
+
|
| 32 |
+

|
| 33 |
+
(a) Raster order AR Modeling
|
| 34 |
+
|
| 35 |
+

|
| 36 |
+
(b) Medusa/Jacobi
|
| 37 |
+
|
| 38 |
+

|
| 39 |
+
(c) MAR
|
| 40 |
+
Figure 2: (a) An overview of the training and decoding pipeline for autoregressive (AR) visual generation models. For models trained with a next-token prediction objective, each forward pass generates a single visual token. (b) Medusa (Cai et al., 2024) and Jacobi (Santilli et al., 2023) decoding predict multiple adjacent tokens in sequence order. (c) MAR (Li et al., 2024) predicts multiple tokens in a random order. (d) The proposed ZipAR predicts multiple spatially adjacent tokens.
|
| 41 |
+
|
| 42 |
+

|
| 43 |
+
(d)ZipAR
|
| 44 |
+
|
| 45 |
+
# 1. Introduction
|
| 46 |
+
|
| 47 |
+
Recent advancements in large language models (LLMs) with the "next-token prediction" paradigm (Achiam et al., 2023; Vavekanand & Sam, 2024; Team et al., 2023) have demonstrated remarkable capabilities in addressing text-related tasks. Building on these successes, many studies (Liu et al., 2024a; Wang et al., 2024b; Team, 2024; Ge et al., 2024; Wu et al., 2024a) have extended this paradigm to the generation of visual content, leading to the development of autoregressive (AR) visual generation models. These models not only produce high-fidelity images and videos that rival or even exceed the performance of state-of-the-art diffusion models but also facilitate unified multimodal understanding and generation (Wang et al., 2024a; Chen et al., 2025; Wu et al., 2024a;b). However, their slow generation speed remains a significant barrier to widespread adoption. To generate high-resolution images or videos, these models must sequentially produce thousands of visual tokens, requiring numerous forward passes and resulting in high latency.
|
| 48 |
+
|
| 49 |
+
To reduce the number of forward passes required for generating lengthy responses, several studies (Cai et al., 2024; Santilli et al., 2023; Chen et al., 2023) have proposed the "next-set prediction" paradigm for LLMs, as depicted in Figure 2(b). These approaches involve introducing multiple decoding heads (Cai et al., 2024) or small draft models (Chen et al., 2023), which generate several candidate tokens that are later evaluated by the original model. However, these methods incur additional costs, as they require extra draft models or the training of new decoding heads. Another line of approaches uses Jacobi decoding methods (Santilli et al., 2023; Fu et al., 2024; Teng et al., 2024), which iteratively update sequences of tokens until convergence. However, in practice, the acceleration achieved by these methods is marginal, as LLMs often fail to generate correct tokens when errors exist in preceding ones. Furthermore, none of these approaches exploit the unique characteristics of visual content, and a parallel decoding framework specifically tailored for AR visual generation has yet to be developed.
|
| 50 |
+
|
| 51 |
+
In this paper, we introduce ZipAR, a parallel decoding framework designed to accelerate AR visual generation.
|
| 52 |
+
|
| 53 |
+

|
| 54 |
+
(a) Layer 10, Lumina-mGPT
|
| 55 |
+
|
| 56 |
+

|
| 57 |
+
(b) Layer 10, LlamaGen
|
| 58 |
+
Figure 3: The attention scores of visual tokens in the Lumina-mGPT-7B (Liu et al., 2024a) and LlamaGen-XL (Sun et al., 2024) models. Slash lines indicate that significant attention scores are allocated to tokens at fixed intervals, corresponding to tokens in the same column of previous rows. The full attention scores are obtained by storing the attention scores of each visual token during decoding and concatenating them.
|
| 59 |
+
|
| 60 |
+

|
| 61 |
+
(c) Layer 20, LlamaGen
|
| 62 |
+
|
| 63 |
+
As depicted in Figure 2(a), common AR visual generation models produce visual tokens in a raster order, where the first token in a row cannot be generated until the last token in the preceding row is decoded despite their spatial separation. However, visual content inherently exhibits strong locality, which is a widely utilized inductive bias for visual tasks (Liu et al., 2021; Zhang et al., 2022; LeCun et al., 1989; Krizhevsky et al., 2012; Zeiler & Fergus, 2014). Specifically, there are significant spatial correlations between spatially adjacent tokens (e.g., token 5 and token 1 in Figure 2(a)) compared to tokens that are adjacent only in the generation order (e.g., token 5 and token 4), which makes the raster-order sequential dependency suboptimal. Empirical evidence, as shown in Figure 3, further supports this observation, with significant attention allocated to tokens in the same column of the previous row. This motivates us to propose decoding tokens from the next row without waiting for the full decoding of the current row, enabling the parallel decoding of multiple tokens in a single forward pass. Specifically, a predefined window size determines whether two tokens are spatially adjacent. Tokens outside this window in adjacent rows are considered irrelevant. Consequently, once the number of generated tokens in a row exceeds the window size, decoding of the next row begins in parallel with the current row. With an appropriately chosen window size, multiple rows can be decoded simultaneously. Unlike Medusa (Cai et al., 2024), which employs auxiliary heads, all tokens generated in parallel by ZipAR are produced using the original model head. Moreover, to address the limitation that manually tuned window size may not optimally adapt to varying attention distributions across tokens, we introduce an adaptive window size assignment scheme. This scheme dynamically adjusts the local window size during generation, ensuring that each token is generated with a window size tailored
|
| 64 |
+
|
| 65 |
+
to its contextual requirements. As a result, ZipAR can be seamlessly implemented in a training-free, plug-and-play manner for autoregressive visual generation models, without introducing additional overhead. Experiments across multiple autoregressive visual generation models demonstrate the effectiveness and robustness of ZipAR, achieving forward steps reductions of $91\%$ , $75\%$ , and $81\%$ on Emu3-Gen, Lumina-mGPT-7B, and LlamaGen-XL models, respectively, with minimal degradation in image quality.
|
| 66 |
+
|
| 67 |
+
In summary, our contributions are as follows:
|
| 68 |
+
|
| 69 |
+
- We propose a spatially-aware parallel decoding strategy that enables inter-row token generation by leveraging the inherent spatial locality of visual content. Once the number of generated tokens in a row exceeds a window size, decoding of the next row begins in parallel.
|
| 70 |
+
- We propose an adaptive window size assignment scheme that dynamically adjusts the local window size for each token during generation, optimizing decoding efficiency while ensuring the contextual information essential for producing high-quality tokens.
|
| 71 |
+
- By integrating these techniques, we present ZipAR, a training-free, plug-and-play framework that achieves significant acceleration in autoregressive visual generation. Extensive experiments demonstrate its effectiveness and robustness across multiple AR visual generation models.
|
| 72 |
+
|
| 73 |
+
# 2. Related Work
|
| 74 |
+
|
| 75 |
+
# 2.1. Autoregressive Visual Generation
|
| 76 |
+
|
| 77 |
+
The success of Transformer models in text-based tasks has inspired studies (Van Den Oord et al., 2017; Esser et al.,
|
| 78 |
+
|
| 79 |
+
2021; Yu et al., 2023) to apply autoregressive modeling to visual content generation. These methods can be classified into two main categories: GPT-style approaches that utilize the next-token prediction paradigm (Esser et al., 2021; Wang et al., 2024b; Liu et al., 2024a; Sun et al., 2024) and BERT-style approaches that employ masked prediction models (Chang et al., 2022; 2023; Li et al., 2024; Yu et al., 2023). More recently, VAR (Tian et al., 2024) modified the traditional next-token prediction paradigm to next-scale prediction, resulting in faster sampling speeds. Models trained using next-token prediction can leverage the infrastructure and training techniques of large language models (LLMs) and pave the way towards unified multi-modal understanding and generation. However, they are generally less efficient during sampling compared to models that predict multiple tokens in a single forward pass. In this paper, we focus on accelerating visual generation models trained with the next-token prediction objective, hereafter referred to as autoregressive visual generation models unless otherwise specified.
|
| 80 |
+
|
| 81 |
+
# 2.2. Efficient Decoding of LLMs.
|
| 82 |
+
|
| 83 |
+
Efforts to reduce the number of forward passes required for LLMs to generate lengthy responses can be broadly categorized into two main approaches. The first approach involves sampling multiple candidate tokens before verifying them with the base LLM. Speculative decoding (Chen et al., 2023; Liu et al., 2024b; Spector & Re, 2023; Gui et al., 2024) utilizes a small draft LLM to generate candidate tokens, which are then verified in parallel by the base LLM. While this approach can potentially generate multiple tokens in a single evaluation, deploying multiple models introduces significant memory overhead and engineering challenges. Medusa (Cai et al., 2024) addresses this by employing multiple decoding heads for the base LLM, enabling self-speculation. However, due to the large vocabulary size of LLMs, the parameters in each decoding head can be substantial. The second approach, Jacobi decoding (Santilli et al., 2023; Teng et al., 2024), involves randomly guessing the next n tokens in a sequence, which are iteratively updated by the LLMs. Over time, the n-token sequence converges to the same output as that generated by the next-token prediction paradigm. However, in practice, vanilla Jacobi decoding offers only marginal speedup over autoregressive decoding. This limited improvement is largely due to the causal attention mechanism, which rarely produces a correct token when preceding tokens are incorrect. Lookahead (Fu et al., 2024) decoding enhances efficiency by leveraging n-grams generated from previous Jacobi iterations, which are verified in parallel during the decoding process. CLLMs (Kou et al., 2024) further improves the efficiency of Jacobi decoding by fine-tuning the model with a consistency loss, requiring it to map arbitrary
|
| 84 |
+
|
| 85 |
+
points on the Jacobi trajectory to a fixed point. However, none of these approaches are designed for autoregressive visual generation or incorporate visual inductive biases. In contrast, the proposed ZipAR takes advantage of the spatial locality inherent in visual content, offering significant acceleration without the need for retraining. Moreover, ZipAR is orthogonal to the aforementioned methods, and can be combined with them to achieve even greater acceleration.
|
| 86 |
+
|
| 87 |
+
# 3. Method
|
| 88 |
+
|
| 89 |
+
# 3.1. Preliminaries
|
| 90 |
+
|
| 91 |
+
Autoregressive (AR) visual generation models with the next-token prediction paradigm have shown exceptional versatility across various vision-language tasks, including generating high-quality images and videos. As shown in Figure 2(a), pre-trained VQ-VAE models (Van Den Oord et al., 2017; Esser et al., 2021) are commonly employed to convert images or videos into visual tokens. The process begins with a visual encoder that extracts feature maps at a reduced spatial resolution. These feature maps are then subjected to vector quantization to produce discrete latent representations, known as visual tokens. These tokens are arranged in a one-dimensional sequence to serve as input for AR models. Although various methods exist to flatten these tokens, the row-major order (raster order) is empirically validated to offer the best performance (Esser et al., 2021), making it the prevalent method for visual generation. During the image generation phase, AR models generate visual tokens sequentially in this raster order. Finally, the complete sequence of visual tokens is rearranged into a two-dimensional structure and processed through a visual decoder to reconstruct the images.
|
| 92 |
+
|
| 93 |
+
# 3.2. Inference with ZipAR
|
| 94 |
+
|
| 95 |
+
As analyzed in Section 3.1, AR visual generation models with a raster order generate visual tokens row by row, completing each row sequentially from left to right before proceeding to the next. However, images inherently exhibit strong spatial locality. Intuitively, in a high-resolution image, the starting pixel of a row is more closely related to the starting pixel of the preceding row than to the ending pixel of the preceding row due to their spatial proximity. Empirical evidence, as shown in Figure 3, also indicates that significant attention scores are allocated to tokens within the same column of the previous row. Building on these observations, we propose ZipAR, a simple yet effective parallel decoding framework for autoregressive visual generation models. Unlike conventional parallel decoding methods that predict multiple consecutive tokens in a single forward pass, our approach decodes tokens from different rows in parallel. The key idea is that it is unnecessary to wait for an entire row to be generated before initiating the decoding of the
|
| 96 |
+
|
| 97 |
+

|
| 98 |
+
Figure 4: A toy example of the ZipAR framework. The window size is set to 2 in this toy example.
|
| 99 |
+
|
| 100 |
+
next row, as spatially distant tokens contribute minimally to attention scores.
|
| 101 |
+
|
| 102 |
+
To formalize this, we define a local window size $s$ . Given the tokens $x_{i,j}$ located in row $i$ and column $j$ , we assume that tokens beyond $x_{i-1,j+s}$ in the previous row have a negligible impact on the generation of $x_{i,j}$ based on the spatial locality of visual tokens. Consequently, the criterion for initiating the generation of token $x_{i,j}$ can be formulated as:
|
| 103 |
+
|
| 104 |
+
$$
|
| 105 |
+
C(i, j) = \left\{ \begin{array}{ll} 1, & \text{if } \left\{ x_{i-1,k} \mid j \leq k < j + s \right\} \subseteq \mathbb{D} \\ 0, & \text{otherwise} \end{array} \right. \tag{1}
|
| 106 |
+
$$
|
| 107 |
+
|
| 108 |
+
Here, $\mathbb{D}$ denotes the set of decoded tokens, and $C(i,j) = 1$ indicates that token $x_{i,j}$ is ready to be generated. Once the first token in a row is generated, subsequent tokens in the row can be generated sequentially, along with the unfinished portion of the preceding row, following a next-token prediction paradigm. An illustration of the ZipAR framework with a window size of 2 is shown in Figure 4.
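The readiness criterion of Eq. (1) amounts to a short scheduling check. The sketch below is a minimal illustration rather than the authors' implementation; clamping the window at the row boundary and checking the left neighbour within a row are our own assumptions that follow the description above:

```python
def ready_to_decode(i, j, s, decoded, num_cols):
    """Return True if token x_{i,j} may be generated under window size s.

    `decoded` plays the role of the set D of already generated (row, col)
    positions. Tokens within a row are still produced left to right, so the
    left neighbour must also be available (our assumption).
    """
    left_ok = (j == 0) or (i, j - 1) in decoded
    if i == 0:
        return left_ok  # the first row has no previous row to wait for
    # Eq. (1): tokens x_{i-1, j}, ..., x_{i-1, j+s-1} must already be decoded
    # (the window is clamped at the end of the row, an assumption for illustration).
    window = range(j, min(j + s, num_cols))
    return left_ok and all((i - 1, k) in decoded for k in window)
```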
|
| 109 |
+
|
| 110 |
+
However, to initiate the decoding of the first token $x_{i,0}$ in row $i$ , the last token of row $i - 1$ is required as input to the autoregressive model, even though it has not yet been generated in the ZipAR framework. To address this, we propose several solutions tailored to different types of AR visual generation models. Some methods (Liu et al., 2024a; Wang et al., 2024b) support generating images with dynamic resolutions, typically by appending an extra end-of-row token at the end of each row. With these special tokens placed at fixed positions, we can insert the end-of-row tokens in advance when initiating the generation of the next row. Since the values of these tokens are predetermined, there is no need to update them subsequently. Conversely, for models that lack end-of-row tokens (Sun et al., 2024), we temporarily assign a value to the last token in row $i - 1$ to decode token $x_{i,0}$ . This value can be derived from the most spatially adjacent token that has been decoded.
|
| 111 |
+
|
| 112 |
+

|
| 113 |
+
Figure 5: The local window size required to retain $95\%$ of attention scores across different rows and input prompts. Data is collected from the first token of each row in the Lumina-mGPT-7B model, with input prompts from the COCO (Lin et al., 2014) and Parti (Yu et al., 2022) datasets.
|
| 114 |
+
|
| 115 |
+
# 3.3. Adaptive Window Size Assignment
|
| 116 |
+
|
| 117 |
+
While ZipAR with a predefined local window size demonstrates improved efficiency, the window size remains a hyperparameter that requires manual tuning to balance image fidelity and generation efficiency. Moreover, using a fixed window size for all token positions is suboptimal, as the attention distributions vary significantly across tokens. As illustrated in Figure 5, the local window size needed to retain $95\%$ of attention scores differs across token positions and input prompts. Consequently, maintaining a fixed window size throughout the image generation process can lead to suboptimal results, potentially compromising image fidelity.
|
| 118 |
+
|
| 119 |
+
To address this, we propose an adaptive window size assignment scheme that dynamically adjusts the local window size during the generation process. Given a minimum window size $s_{min}$ , after generating token $x_{i,s_{min}-1}$ in row $i$ , we attempt to generate the first token in row $i+1$ . Unlike the
|
| 120 |
+
|
| 121 |
+
fixed window size approach, we do not immediately accept this newly generated token, as the current local window size may provide insufficient information. Instead, in the subsequent step, with the addition of a new token from the previous row, we regenerate the token using a slightly larger window size $s_{\text{min}} + 1$ and apply an acceptance criterion to evaluate its validity based on the predictions from both steps. If the criterion is satisfied, subsequent tokens in row $i + 1$ can be generated sequentially, following a next-token prediction paradigm. Otherwise, the current window size is deemed inadequate, and we iteratively expand it until the criterion is met or the previous row is fully generated.
|
| 122 |
+
|
| 123 |
+
Specifically, we adopt a rejection sampling scheme analogous to speculative decoding (Leviathan et al., 2023; Chen et al., 2023). For consecutive window sizes $k + 1$ and $k$ in row $i$ , we compute the ratio between their predictions $p(x|x_{0,0},\dots,x_{i,k})$ and $p(x|x_{0,0},\dots,x_{i,k - 1})$ , which quantifies how well the token sampled under the smaller window size agrees with the prediction under the larger one. Formally, the criterion for initiating the generation of token $x_{i + 1,0}$ with window size $k$ can be formulated as:
|
| 124 |
+
|
| 125 |
+
$$
|
| 126 |
+
\tilde{C}(i + 1, 0) = \left\{ \begin{array}{ll} 1, & \text{if } r < \min \left( 1, \frac{p\left(x \mid x_{0,0}, \dots, x_{i,k}\right)}{p\left(x \mid x_{0,0}, \dots, x_{i,k-1}\right)} \right), \\ 0, & \text{otherwise} \end{array} \right. \tag{2}
|
| 127 |
+
$$
|
| 128 |
+
|
| 129 |
+
Here, we sample $r \sim U[0,1]$ from a uniform distribution. $\tilde{C}(i + 1,0) = 1$ indicates that token $x_{i + 1,0}$ is ready to be generated. If the criterion is not met, we resample $x_{i + 1,0}$ from the following distribution:
|
| 130 |
+
|
| 131 |
+
$$
|
| 132 |
+
x_{i+1,0} \sim \frac{\max \left( 0, \, p(x \mid x_{0,0}, \dots, x_{i,k}) - p(x \mid x_{0,0}, \dots, x_{i,k-1}) \right)}{\sum_{x} \max \left( 0, \, p(x \mid x_{0,0}, \dots, x_{i,k}) - p(x \mid x_{0,0}, \dots, x_{i,k-1}) \right)} \tag{3}
|
| 133 |
+
$$
|
| 134 |
+
|
| 135 |
+
The resampled token is subsequently verified in the next step.
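A minimal NumPy sketch of this verification step, assuming `p_small` and `p_large` are the model's vocabulary distributions under window sizes $k$ and $k+1$ (hypothetical arrays, not the authors' implementation); the acceptance test follows Eq. (2) and the residual resampling follows Eq. (3):

```python
import numpy as np

def verify_first_token(token, p_small, p_large, rng=None):
    """Accept or resample the first token of a new row.

    `token` was sampled from `p_small` (window size k); `p_large` is the
    distribution obtained with window size k+1. Returns (accepted, token).
    """
    rng = rng or np.random.default_rng()
    # Eq. (2): accept with probability min(1, p_large / p_small) at the sampled token.
    if rng.uniform() < min(1.0, p_large[token] / p_small[token]):
        return True, token
    # Eq. (3): resample from the normalized positive residual of the two distributions.
    residual = np.maximum(0.0, p_large - p_small)
    residual /= residual.sum()
    return False, int(rng.choice(len(residual), p=residual))
```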
|
| 136 |
+
|
| 137 |
+
# 4. Experiments
|
| 138 |
+
|
| 139 |
+
# 4.1. Implementation Details
|
| 140 |
+
|
| 141 |
+
To assess the effectiveness of our proposed method, we integrate it with three state-of-the-art autoregressive visual generation models: LlamaGen (Sun et al., 2024), Lumina-mGPT (Liu et al., 2024a), and Emu3-Gen (Wang et al., 2024b). All experiments are conducted on NVIDIA A100 GPUs with the PyTorch framework. For class-conditional image generation with LlamaGen on ImageNet, we report the widely adopted Fréchet Inception Distance (FID) to evaluate performance. We sample 50,000 images and evaluate them with ADM's TensorFlow evaluation suite (Dhariwal & Nichol, 2021).
|
| 142 |
+
|
| 143 |
+
# 4.2. Main Results
|
| 144 |
+
|
| 145 |
+
# 4.2.1. CLASS-CONDITIONAL IMAGE GENERATION
|
| 146 |
+
|
| 147 |
+
In this subsection, we quantitatively evaluate the performance of class-conditional image generation on the ImageNet $256 \times 256$ benchmark using the LlamaGen model, as summarized in Table 1. The model processes a $24 \times 24$ feature map and requires 576 forward passes to generate an image under the next-token prediction (NTP) paradigm. For the LlamaGen-L model, integrating ZipAR with a minimal window size of 16 reduces the number of forward passes by $26.7\%$ without increasing the FID score. For the LlamaGen-XL model, ZipAR-12 achieves a lower FID (3.67 vs. 3.87) while requiring fewer steps than the previous parallel decoding algorithm, SJD (Teng et al., 2024) (331 steps vs. 335 steps). This highlights the efficiency of ZipAR in decoding spatially adjacent tokens in parallel.
|
| 148 |
+
|
| 149 |
+
# 4.2.2. TEXT-GUIDED IMAGE GENERATION
|
| 150 |
+
|
| 151 |
+
In this subsection, we expand our evaluation by assessing ZipAR's performance using multiple metrics, including VQAScore (Lin et al., 2024), Human Preference Score v2 (HPSv2) (Wu et al., 2023), ImageReward (Xu et al., 2023), and Aesthetic Score, across three models: LlamaGen-XL-512, Lumina-mGPT-768, and Lumina-mGPT-1024, as presented in Table 2. For the LlamaGen-XL model, ZipAR-15 reduces the number of generation steps by $45.1\%$ without any decline in VQAScore, ImageReward, or Aesthetic Score. Similarly, for the Lumina-mGPT-768 model, ZipAR-20 achieves a $54.8\%$ reduction in generation steps while improving VQAScore, HPSv2, and Aesthetic Score. When evaluating the CLIP Score with the LlamaGen-XL model, compared to the previous parallel decoding algorithm, SJD (Teng et al., 2024), ZipAR-7 significantly improves efficiency (324 steps vs. 635 steps) while achieving a higher CLIP Score (0.285 vs. 0.283). Moreover, we observe that the acceleration ratio for both text-to-image models is higher than that for the class-conditional LlamaGen-L model. This is primarily attributed to the larger spatial resolution of the feature maps and the generated images. These results suggest that ZipAR provides greater efficiency gains when generating higher-resolution images.
|
| 152 |
+
|
| 153 |
+
# 4.3. Ablation Study
|
| 154 |
+
|
| 155 |
+
# 4.3.1. EFFECT OF ADAPTIVE WINDOW SIZE ASSIGNMENT
|
| 156 |
+
|
| 157 |
+
In this subsection, we evaluate the effectiveness of the proposed adaptive window size assignment scheme. Specifically, we compare the performance of ZipAR with fixed and adaptive window sizes on the class-conditional LlamaGen-L model. As shown in Figure 6, under similar generation steps, ZipAR with an adaptive window size
|
| 158 |
+
|
| 159 |
+
Table 1: Quantitative evaluation on ImageNet $256 \times 256$ benchmark. The generated images are $384 \times 384$ and resized to $256 \times 256$ for evaluation. Here, "NTP" denotes the next-token prediction paradigm. "ZipAR- $n$ " denotes the ZipAR paradigm with a minimal window size of $n$ . "Step" is the number of model forward passes required to generate an image. The latency is measured with a batch size of 1.
|
| 160 |
+
|
| 161 |
+
<table><tr><td>Model</td><td>Method</td><td>Step</td><td>Latency (s)</td><td>FID↓</td></tr><tr><td rowspan="5">LlamaGen-L (cfg=2.0)</td><td>NTP</td><td>576</td><td>15.20</td><td>3.16</td></tr><tr><td>SJD (Teng et al., 2024)</td><td>367 (-36.3%)</td><td>10.83 (-28.8%)</td><td>3.85</td></tr><tr><td>ZipAR-16</td><td>422 (-26.7%)</td><td>11.31 (-25.6%)</td><td>3.14</td></tr><tr><td>ZipAR-14</td><td>378 (-34.4%)</td><td>10.16 (-33.2%)</td><td>3.44</td></tr><tr><td>ZipAR-12</td><td>338 (-41.3%)</td><td>9.31 (-38.8%)</td><td>3.96</td></tr><tr><td rowspan="5">LlamaGen-XL (cfg=2.0)</td><td>NTP</td><td>576</td><td>22.65</td><td>2.83</td></tr><tr><td>SJD (Teng et al., 2024)</td><td>335 (-41.8%)</td><td>13.17 (-41.8%)</td><td>3.87</td></tr><tr><td>ZipAR-16</td><td>423 (-26.6%)</td><td>16.46 (-27.3%)</td><td>2.87</td></tr><tr><td>ZipAR-14</td><td>378 (-34.4%)</td><td>14.89 (-34.3%)</td><td>3.16</td></tr><tr><td>ZipAR-12</td><td>331 (-41.8%)</td><td>13.17 (-41.8%)</td><td>3.67</td></tr></table>
|
| 162 |
+
|
| 163 |
+
Table 2: Quantitative results on diverse automatic evaluation approaches. Here, "NTP" denotes the next-token prediction paradigm. "ZipAR-n" denotes the ZipAR paradigm with a minimal window size of $n$ . "Step" is the number of model forward passes required to generate an image.
|
| 164 |
+
|
| 165 |
+
<table><tr><td>Model</td><td>Method</td><td>Steps</td><td>VQAScore↑</td><td>HPSv2↑</td><td>Image Reward↑</td><td>Aesthetic Score↑</td></tr><tr><td rowspan="5">LlamaGen-XL-512</td><td>NTP</td><td>1024</td><td>0.6439</td><td>0.2647</td><td>-0.0818</td><td>5.38</td></tr><tr><td>ZipAR-15</td><td>562</td><td>0.6534</td><td>0.2637</td><td>-0.0690</td><td>5.39</td></tr><tr><td>ZipAR-11</td><td>451</td><td>0.6581</td><td>0.2630</td><td>-0.0982</td><td>5.37</td></tr><tr><td>ZipAR-7</td><td>324</td><td>0.6410</td><td>0.2625</td><td>-0.1683</td><td>5.33</td></tr><tr><td>ZipAR-3</td><td>185</td><td>0.6343</td><td>0.2599</td><td>-0.3121</td><td>5.32</td></tr><tr><td rowspan="5">Lumina-mGPT-768</td><td>NTP</td><td>2352</td><td>0.6579</td><td>0.2743</td><td>0.4164</td><td>6.10</td></tr><tr><td>ZipAR-20</td><td>1063</td><td>0.6595</td><td>0.2747</td><td>0.3971</td><td>6.13</td></tr><tr><td>ZipAR-17</td><td>915</td><td>0.6433</td><td>0.2732</td><td>0.3049</td><td>6.12</td></tr><tr><td>ZipAR-14</td><td>740</td><td>0.6589</td><td>0.2739</td><td>0.3646</td><td>6.10</td></tr><tr><td>ZipAR-11</td><td>588</td><td>0.6490</td><td>0.2730</td><td>0.2861</td><td>6.10</td></tr><tr><td rowspan="5">Lumina-mGPT-1024</td><td>NTP</td><td>4160</td><td>0.6718</td><td>0.2762</td><td>0.4232</td><td>5.97</td></tr><tr><td>ZipAR-20</td><td>1331</td><td>0.6705</td><td>0.2761</td><td>0.3913</td><td>5.95</td></tr><tr><td>ZipAR-17</td><td>1150</td><td>0.6797</td><td>0.2761</td><td>0.4018</td><td>5.94</td></tr><tr><td>ZipAR-14</td><td>964</td><td>0.6732</td><td>0.2747</td><td>0.3298</td><td>5.94</td></tr><tr><td>ZipAR-11</td><td>772</td><td>0.6723</td><td>0.2746</td><td>0.3222</td><td>5.95</td></tr></table>
|
| 166 |
+
|
| 167 |
+
consistently achieves a lower FID than its fixed-window counterpart, which suggests that dynamically adjusting the window size based on token position and context enhances the fidelity of generated images.
|
| 168 |
+
|
| 169 |
+
# 4.3.2. IMPACT ON OPTIMAL SAMPLING HYPERPARAMETERS
|
| 170 |
+
|
| 171 |
+
As presented in Tables 4-5, we performed a grid search to determine the optimal token-sampling hyperparameters for ZipAR, namely the sampling temperature and the classifier-free guidance scale. These results indicate that ZipAR sampling does not alter the optimal sampling temperature or classifier-free guidance scale.
|
| 172 |
+
|
| 173 |
+
# 4.4. Qualitative Visualizations
|
| 174 |
+
|
| 175 |
+
In this subsection, we present non-cherry-picked visualizations of images generated using the next-token prediction
|
| 176 |
+
|
| 177 |
+
(NTP) paradigm and the proposed ZipAR framework over Emu3-Gen (Wang et al., 2024b) and Lumina-mGPT-7B (Liu et al., 2024a), as shown in Figures 1 and 7. Notably, ZipAR can reduce the number of model forward steps by up to $91\%$ for Emu3-Gen, while still producing high-fidelity images rich in semantic information.
|
| 178 |
+
|
| 179 |
+
# 5. Conclusion
|
| 180 |
+
|
| 181 |
+
In this paper, we have proposed ZipAR, a new parallel decoding framework designed to accelerate autoregressive visual generation. ZipAR leverages the spatial locality inherent in visual content and predicts multiple spatially adjacent visual tokens in a single model forward pass, thereby significantly enhancing generation efficiency compared to the traditional next-token-prediction paradigm. An adaptive local window assignment scheme with rejection sampling is employed, ensuring that each token is generated with
|
| 182 |
+
|
| 183 |
+
Table 3: Quantitative evaluation on MS-COCO dataset. Here, "NTP" denotes the next-token prediction paradigm. "ZipAR-n" denotes the ZipAR paradigm with a minimal window size of $n$ . "Step" is the number of model forward passes required to generate an image. The latency is measured with a batch size of 1.
|
| 184 |
+
|
| 185 |
+
<table><tr><td>Model</td><td>Method</td><td>Step</td><td>Latency (s)</td><td>CLIP Score↑</td></tr><tr><td rowspan="6">LlamaGen-XL-512</td><td>NTP</td><td>1024</td><td>33.17</td><td>0.287</td></tr><tr><td>SJD (Teng et al., 2024)</td><td>635 (-38.0%)</td><td>24.80 (-25.2%)</td><td>0.283</td></tr><tr><td>ZipAR-15</td><td>562 (-45.1%)</td><td>18.98 (-42.7%)</td><td>0.287</td></tr><tr><td>ZipAR-11</td><td>451 (-55.9%)</td><td>14.65 (-55.8%)</td><td>0.286</td></tr><tr><td>ZipAR-7</td><td>324 (-68.4%)</td><td>10.24 (-69.1%)</td><td>0.285</td></tr><tr><td>ZipAR-3</td><td>185 (-81.9%)</td><td>5.86 (-82.3%)</td><td>0.281</td></tr><tr><td rowspan="6">Lumina-mGPT-7B-768</td><td>NTP</td><td>2352</td><td>91.70</td><td>0.313</td></tr><tr><td>SJD (Teng et al., 2024)</td><td>1054 (-55.2%)</td><td>60.27 (-34.2%)</td><td>0.313</td></tr><tr><td>ZipAR-20</td><td>1063 (-54.8%)</td><td>63.28 (-31.0%)</td><td>0.314</td></tr><tr><td>ZipAR-17</td><td>915 (-61.0%)</td><td>58.54 (-36.2%)</td><td>0.314</td></tr><tr><td>ZipAR-14</td><td>740 (-68.5%)</td><td>53.41 (-41.8%)</td><td>0.313</td></tr><tr><td>ZipAR-11</td><td>588 (-75.0%)</td><td>50.32 (-45.1%)</td><td>0.312</td></tr></table>
|
| 186 |
+
|
| 187 |
+

|
| 188 |
+
Figure 6: Performance comparisons of ZipAR on the class-conditional LlamaGen-L model with fixed and adaptive window sizes. Under a similar step budget, ZipAR with an adaptive window size consistently achieves a lower FID.
|
| 189 |
+
|
| 190 |
+
sufficient contextual information. Extensive experiments demonstrate that ZipAR can reduce the number of model forward steps by up to $91\%$ on the Emu3-Gen model with minimal impact on image quality.
|
| 191 |
+
|
| 192 |
+
In the future, we anticipate that integrating ZipAR with other methods that employ the next-set-prediction paradigm, such as Medusa (Cai et al., 2024) and Jacobi decoding (Santilli et al., 2023), will further enhance acceleration ratios.
|
| 193 |
+
|
| 194 |
+
# Acknowledgements
|
| 195 |
+
|
| 196 |
+
This work was supported by the National Key Research and Development Program of China (2022YFC3602601 and 2022ZD0160102).
|
| 197 |
+
|
| 198 |
+
Table 4: The performance of LlamaGen and ZipAR under different classifier-free guidance. Here, “*” denotes the results obtained from LlamaGen's paper.
|
| 199 |
+
|
| 200 |
+
<table><tr><td>Model</td><td>Classifier-free Guidance</td><td>FID↓</td></tr><tr><td rowspan="4">LlamaGen-L*</td><td>1.5</td><td>4.74</td></tr><tr><td>1.75</td><td>3.15</td></tr><tr><td>2.0</td><td>3.07</td></tr><tr><td>2.25</td><td>3.62</td></tr><tr><td rowspan="4">ZipAR-16</td><td>1.5</td><td>6.18</td></tr><tr><td>1.75</td><td>3.72</td></tr><tr><td>2.0</td><td>3.14</td></tr><tr><td>2.25</td><td>3.44</td></tr></table>
|
| 201 |
+
|
| 202 |
+
Table 5: The performance of LlamaGen and ZipAR under different sampling temperatures. Here, “*” denotes the results obtained from LlamaGen's paper.
|
| 203 |
+
|
| 204 |
+
<table><tr><td>Model</td><td>Temperature</td><td>FID↓</td></tr><tr><td rowspan="4">LlamaGen-L</td><td>0.96</td><td>3.53</td></tr><tr><td>0.98</td><td>3.24</td></tr><tr><td>1.0*</td><td>3.07</td></tr><tr><td>1.02</td><td>3.14</td></tr><tr><td rowspan="4">ZipAR-16</td><td>0.96</td><td>3.35</td></tr><tr><td>0.98</td><td>3.25</td></tr><tr><td>1.0</td><td>3.14</td></tr><tr><td>1.02</td><td>3.34</td></tr></table>
|
| 205 |
+
|
| 206 |
+
# Impact Statement
|
| 207 |
+
|
| 208 |
+
The proposed ZipAR framework stands out for its high efficiency, which carries significant implications for reducing the carbon emissions attributed to the widespread deployment of deep generative models. However, similar to other deep generative models, ZipAR has the potential to be utilized for producing counterfeit images and videos for malicious purposes.
|
| 209 |
+
|
| 210 |
+
Prompt: image of a dog playing water, and a water fall is in the background
|
| 211 |
+
|
| 212 |
+

|
| 213 |
+
|
| 214 |
+
Prompt: Image of a magical fairy tale castle on a hilltop surrounded by a mystical forest
|
| 215 |
+
|
| 216 |
+

|
| 217 |
+
|
| 218 |
+
Prompt: A bustling downtown street in Tokyo at night, with neon signs, sidewalks, and tall skyscrapers
|
| 219 |
+
|
| 220 |
+

|
| 221 |
+
|
| 222 |
+
Prompt: A serene beach at sunset, with palm trees swaying in the breeze and a sailboat in the distance
|
| 223 |
+
|
| 224 |
+

|
| 225 |
+
|
| 226 |
+
Prompt: A medieval knight standing guard in front of a grand castle, with a dragon flying overhead
|
| 227 |
+
|
| 228 |
+

|
| 229 |
+
Figure 7: Samples generated by the Lumina-mGPT-7B-768 model with next-token prediction paradigm (the first column) and ZipAR under different configurations (the right three columns). The classifier-free guidance is set to 3.
|
| 230 |
+
|
| 231 |
+
Steps: 2355
|
| 232 |
+
|
| 233 |
+
Steps: 1508 (-36%)
|
| 234 |
+
|
| 235 |
+
Steps: 1132 (-52%)
|
| 236 |
+
|
| 237 |
+
Steps: 756 (-67%)
|
| 238 |
+
|
| 239 |
+
# References
|
| 240 |
+
|
| 241 |
+
Achiam, J., Adler, S., Agarwal, S., Ahmad, L., Akkaya, I., Aleman, F. L., Almeida, D., Altenschmidt, J., Altman, S., Anadkat, S., et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023.
|
| 242 |
+
Cai, T., Li, Y., Geng, Z., Peng, H., Lee, J. D., Chen, D., and Dao, T. Medusa: Simple llm inference acceleration framework with multiple decoding heads. arXiv preprint arXiv:2401.10774, 2024.
|
| 243 |
+
Chang, H., Zhang, H., Jiang, L., Liu, C., and Freeman, W. T. Maskgit: Masked generative image transformer. In CVPR, 2022.
|
| 244 |
+
Chang, H., Zhang, H., Barber, J., Maschinot, A., Lezama, J., Jiang, L., Yang, M.-H., Murphy, K., Freeman, W. T., Rubinstein, M., et al. Muse: Text-to-image generation via masked generative transformers. arXiv preprint arXiv:2301.00704, 2023.
|
| 245 |
+
Chen, C., Borgeaud, S., Irving, G., Lespiau, J.-B., Sifre, L., and Jumper, J. Accelerating large language model decoding with speculative sampling. arXiv preprint arXiv:2302.01318, 2023.
|
| 246 |
+
Chen, X., Wu, Z., Liu, X., Pan, Z., Liu, W., Xie, Z., Yu, X., and Ruan, C. Janus-pro: Unified multimodal understanding and generation with data and model scaling, 2025.
|
| 247 |
+
Dhariwal, P. and Nichol, A. Diffusion models beat gans on image synthesis. Advances in neural information processing systems, 2021.
|
| 248 |
+
Esser, P., Rombach, R., and Ommer, B. Taming transformers for high-resolution image synthesis. In CVPR, 2021.
|
| 249 |
+
Fu, Y., Bailis, P., Stoica, I., and Zhang, H. Break the sequential dependency of llm inference using lookahead decoding. arXiv preprint arXiv:2402.02057, 2024.
|
| 250 |
+
Ge, Y., Zhao, S., Zhu, J., Ge, Y., Yi, K., Song, L., Li, C., Ding, X., and Shan, Y. Seed-x: Multimodal models with unified multi-granularity comprehension and generation. arXiv preprint arXiv:2404.14396, 2024.
|
| 251 |
+
Gui, L., Xiao, B., Su, L., and Chen, W. Boosting lossless speculative decoding via feature sampling and partial alignment distillation. arXiv preprint arXiv:2408.15562, 2024.
|
| 252 |
+
Kou, S., Hu, L., He, Z., Deng, Z., and Zhang, H. Cllms: Consistency large language models. arXiv preprint arXiv:2403.00835, 2024.
|
| 253 |
+
Krizhevsky, A., Sutskever, I., and Hinton, G. E. Imagenet classification with deep convolutional neural networks. NeurIPS, 2012.
|
| 254 |
+
|
| 255 |
+
LeCun, Y., Boser, B., Denker, J., Henderson, D., Howard, R., Hubbard, W., and Jackel, L. Handwritten digit recognition with a back-propagation network. NeurIPS, 1989.
|
| 256 |
+
Leviathan, Y., Kalman, M., and Matias, Y. Fast inference from transformers via speculative decoding. In ICML, 2023.
|
| 257 |
+
Li, T., Tian, Y., Li, H., Deng, M., and He, K. Autoregressive image generation without vector quantization. arXiv preprint arXiv:2406.11838, 2024.
|
| 258 |
+
Lin, T.-Y., Maire, M., Belongie, S., Hays, J., Perona, P., Ramanan, D., Dollar, P., and Zitnick, C. L. Microsoft coco: Common objects in context. In ECCV, 2014.
|
| 259 |
+
Lin, Z., Pathak, D., Li, B., Li, J., Xia, X., Neubig, G., Zhang, P., and Ramanan, D. Evaluating text-to-visual generation with image-to-text generation, 2024. URL https://arxiv.org/abs/2404.01291.
|
| 260 |
+
Liu, D., Zhao, S., Zhuo, L., Lin, W., Qiao, Y., Li, H., and Gao, P. Lumina-mgpt: Illuminate flexible photorealistic text-to-image generation with multimodal generative pretraining. arXiv preprint arXiv:2408.02657, 2024a.
|
| 261 |
+
Liu, X., Hu, L., Bailis, P., Cheung, A., Deng, Z., Stoica, I., and Zhang, H. Online speculative decoding. In ICML, 2024b.
|
| 262 |
+
Liu, Z., Lin, Y., Cao, Y., Hu, H., Wei, Y., Zhang, Z., Lin, S., and Guo, B. Swin transformer: Hierarchical vision transformer using shifted windows. In ICCV, 2021.
|
| 263 |
+
Santilli, A., Severino, S., Postolache, E., Maiorca, V., Mancusi, M., Marin, R., and Rodola, E. Accelerating transformer inference for translation via parallel decoding. arXiv preprint arXiv:2305.10427, 2023.
|
| 264 |
+
Spector, B. and Re, C. Accelerating llm inference with staged speculative decoding. arXiv preprint arXiv:2308.04623, 2023.
|
| 265 |
+
Sun, P., Jiang, Y., Chen, S., Zhang, S., Peng, B., Luo, P., and Yuan, Z. Autoregressive model beats diffusion: Llama for scalable image generation. arXiv preprint arXiv:2406.06525, 2024.
|
| 266 |
+
Team, C. Chameleon: Mixed-modal early-fusion foundation models. arXiv preprint arXiv:2405.09818, 2024.
|
| 267 |
+
Team, G., Anil, R., Borgeaud, S., Wu, Y., Alayrac, J.-B., Yu, J., Soricut, R., Schalkwyk, J., Dai, A. M., Hauth, A., et al. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805, 2023.
|
| 268 |
+
Teng, Y., Shi, H., Liu, X., Ning, X., Dai, G., Wang, Y., Li, Z., and Liu, X. Accelerating auto-regressive text-to-image generation with training-free speculative jacobi decoding. arXiv preprint arXiv:2410.01699, 2024.
|
| 269 |
+
|
| 270 |
+
Tian, K., Jiang, Y., Yuan, Z., Peng, B., and Wang, L. Visual autoregressive modeling: Scalable image generation via next-scale prediction. arXiv preprint arXiv:2404.02905, 2024.
|
| 271 |
+
Van Den Oord, A., Vinyals, O., et al. Neural discrete representation learning. NeurIPS, 2017.
|
| 272 |
+
Vavekanand, R. and Sam, K. Llama 3.1: An in-depth analysis of the next-generation large language model, 2024.
|
| 273 |
+
Wang, C., Lu, G., Yang, J., Huang, R., Han, J., Hou, L., Zhang, W., and Xu, H. Illume: Illuminating your llms to see, draw, and self-enhance. arXiv preprint arXiv:2412.06673, 2024a.
|
| 274 |
+
Wang, X., Zhang, X., Luo, Z., Sun, Q., Cui, Y., Wang, J., Zhang, F., Wang, Y., Li, Z., Yu, Q., et al. Emu3: Next-token prediction is all you need. arXiv preprint arXiv:2409.18869, 2024b.
|
| 275 |
+
Wu, C., Chen, X., Wu, Z., Ma, Y., Liu, X., Pan, Z., Liu, W., Xie, Z., Yu, X., Ruan, C., et al. Janus: Decoupling visual encoding for unified multimodal understanding and generation. arXiv preprint arXiv:2410.13848, 2024a.
|
| 276 |
+
Wu, J., Jiang, Y., Ma, C., Liu, Y., Zhao, H., Yuan, Z., Bai, S., and Bai, X. Liquid: Language models are scalable multimodal generators. arXiv preprint arXiv:2412.04332, 2024b.
|
| 277 |
+
Wu, X., Hao, Y., Sun, K., Chen, Y., Zhu, F., Zhao, R., and Li, H. Human preference score v2: A solid benchmark for evaluating human preferences of text-to-image synthesis. arXiv preprint arXiv:2306.09341, 2023.
|
| 278 |
+
Xu, J., Liu, X., Wu, Y., Tong, Y., Li, Q., Ding, M., Tang, J., and Dong, Y. Imagereward: learning and evaluating human preferences for text-to-image generation. In NeurIPS, 2023.
|
| 279 |
+
Yu, J., Xu, Y., Koh, J. Y., Luong, T., Baid, G., Wang, Z., Vasudevan, V., Ku, A., Yang, Y., Ayan, B. K., et al. Scaling autoregressive models for content-rich text-to-image generation. arXiv preprint arXiv:2206.10789, 2(3):5, 2022.
|
| 280 |
+
Yu, L., Cheng, Y., Sohn, K., Lezama, J., Zhang, H., Chang, H., Hauptmann, A. G., Yang, M.-H., Hao, Y., Essa, I., et al. Magvit: Masked generative video transformer. In CVPR, 2023.
|
| 281 |
+
Zeiler, M. D. and Fergus, R. Visualizing and understanding convolutional networks. In ECCV, 2014.
|
| 282 |
+
Zhang, Z., Zhang, H., Zhao, L., Chen, T., Arik, S. Ö., and Pfister, T. Nested hierarchical transformer: Towards accurate, data-efficient and interpretable visual understanding. In AAAI, 2022.
|
ICML/2025/ZipAR_ Parallel Autoregressive Image Generation through Spatial Locality/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e425031d808a81198c60a0c7d9c69343ce89964507c1882920b241d13ce8f064
|
| 3 |
+
size 997749
|
ICML/2025/ZipAR_ Parallel Autoregressive Image Generation through Spatial Locality/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b77f6ab151b50776974bb6095f5b8c86d2897815a040e21fa58a294975a7a54a
|
| 3 |
+
size 342127
|
ICML/2025/am-ELO_ A Stable Framework for Arena-based LLM Evaluation/485ae2a8-1fc5-402d-9e71-4d6939418c63_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:eb50db766cfc3d27805fa43ed1b637ff8916d94428d15d1afe1bb32d611e5497
|
| 3 |
+
size 85281
|
ICML/2025/am-ELO_ A Stable Framework for Arena-based LLM Evaluation/485ae2a8-1fc5-402d-9e71-4d6939418c63_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:46a965a0c4168c8796f2d5a9c79c05a0b5831d511ac9ea0c5e42229262b9342f
|
| 3 |
+
size 108494
|
ICML/2025/am-ELO_ A Stable Framework for Arena-based LLM Evaluation/485ae2a8-1fc5-402d-9e71-4d6939418c63_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d3d85563c43f1a41fb544b3890634a6d2cb0adee181d5fad93b258b4e77f89f4
|
| 3 |
+
size 596592
|
ICML/2025/am-ELO_ A Stable Framework for Arena-based LLM Evaluation/full.md
ADDED
|
@@ -0,0 +1,403 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Zirui Liu<sup>1</sup> Jiatong Li<sup>1</sup> Yan Zhuang<sup>1</sup> Qi Liu *<sup>1,2</sup> Shuanghong Shen<sup>2</sup> Jie Ouyang<sup>1</sup> Mingyue Cheng<sup>1</sup> Shijin Wang<sup>1,3</sup>
|
| 2 |
+
|
| 3 |
+
# Abstract
|
| 4 |
+
|
| 5 |
+
Arena-based evaluation is a fundamental yet significant evaluation paradigm for modern AI models, especially large language models (LLMs). Existing frameworks based on the ELO rating system suffer from an inherent instability problem due to ranking inconsistency and the lack of attention to the varying abilities of annotators. In this paper, we introduce a novel stable arena framework to address these issues by enhancing the ELO rating system. Specifically, we replace the iterative update method with a Maximum Likelihood Estimation (MLE) approach, m-ELO, and provide theoretical proof of the consistency and stability of the MLE approach for model ranking. Additionally, we propose am-ELO, which modifies the ELO rating's probability function to incorporate annotator abilities, enabling the simultaneous estimation of model scores and annotator reliability. Experiments demonstrate that this method ensures stability, showing that the framework offers a more robust, accurate, and stable evaluation method for LLMs.
|
| 6 |
+
|
| 7 |
+
# 1. Introduction
|
| 8 |
+
|
| 9 |
+
The rapid advancement of large language models (LLMs) (Jin et al., 2024b; Ouyang et al., 2025; Cheng et al., 2025) has led to the proliferation of "model arenas"—platforms designed to compare and evaluate multiple models, identifying their relative strengths and weaknesses (Chiang et al., 2024). These arenas play a critical role in driving innovation and shaping the deployment of cutting-edge LLMs across diverse applications. The ELO rating system (Elo, 1967), a well-established methodology for quantitatively
|
| 10 |
+
|
| 11 |
+
$^{1}$ State Key Laboratory of Cognitive Intelligence, University of Science and Technology of China, Hefei, China $^{2}$ Institute of Artificial Intelligence, Hefei Comprehensive National Science Center, Hefei, China $^{3}$ iFLYTEK Co., Ltd, Hefei, China. Correspondence to: Qi Liu <qiliuql@ustc.edu.cn>.
|
| 12 |
+
|
| 13 |
+
Proceedings of the $42^{nd}$ International Conference on Machine Learning, Vancouver, Canada. PMLR 267, 2025. Copyright 2025 by the author(s).
|
| 14 |
+
|
| 15 |
+

|
| 16 |
+
Figure 1. An example of ELO score. The error bar represents the standard deviation and the error line represents the difference between the maximum or minimum value and the mean value. The line chart represents the ELO scores estimated from the records of the specific annotator.
|
| 17 |
+
|
| 18 |
+
assessing the relative capabilities of competitors in games, forms the theoretical foundation for the evaluation systems in most existing model arenas (Bai et al., 2022; Boubdir et al., 2023).
|
| 19 |
+
|
| 20 |
+
A significant issue with the current ELO method is its instability, which can be attributed to two main factors: 1) From an algorithmic perspective, the existing ELO method treats the data as dynamic, making the results highly sensitive to the order in which the data is presented (Aldous, 2017; Li et al., 2024; Zhang et al., 2024a). In other words, when the same records are shuffled and re-evaluated, the ELO method often yields inconsistent scores. For instance, as shown in Figure 1, the significant error (highlighted in gray) complicates the comparison of models with similar abilities. 2) The judgment of human annotators varies across different aspects such as quality, relevance, and importance of texts. For example, in the line chart in Figure 1, different annotators provide inconsistent ELO scores for each model. However, the arena-based evaluation paradigm, which involves human participation, overlooks these individual differences among humans (Welinder & Perona, 2010; Raykar & Yu, 2011).
|
| 21 |
+
|
| 22 |
+
Ignoring this variability introduces biases and instability into the evaluation process, further undermining the credibility of both the results and the decisions derived from
|
| 23 |
+
|
| 24 |
+

|
| 25 |
+
Figure 2. The traditional iterative ELO method and our proposed am-ELO method based on MLE.
|
| 26 |
+
|
| 27 |
+
them (Eickhoff, 2018). These instabilities diminish the interpretability and practical value of ELO scores, eroding confidence in the conclusions drawn from such evaluations, particularly when they are used to inform high-stakes decisions regarding model deployment or research directions.
|
| 28 |
+
|
| 29 |
+
In this work, we propose a novel stable arena framework to address these shortcomings. As illustrated in Figure 2, to mitigate the inconsistencies in ELO scores, we introduce a maximum likelihood estimation (MLE)-driven ELO rating method, referred to as m-ELO. By deriving the theoretical properties of this reformulation, we demonstrate that the proposed method produces consistent results without altering the fundamental principles of the original ELO method. Furthermore, to account for variability in annotator performance, we propose an annotator ability-aware enhancement method for ELO (am-ELO), grounded in psychometrics (Morizot et al., 2009; Furr, 2021). By modifying the ELO probability function, we estimate the annotator's ability and adjust their contribution accordingly, leading to a more accurate and equitable aggregation of evaluation results.
|
| 30 |
+
|
| 31 |
+
Through experiments on real-world datasets, we demonstrate that our framework effectively models annotators while ensuring the consistency of ELO scores. Furthermore, in simulation experiments, our method not only identifies anomalous annotators but also reduces the inconsistency of ELO scores to $30\%$ compared to the traditional ELO method. This indicates that our approach effectively mitigates the instability inherent in the traditional ELO method.
|
| 32 |
+
|
| 33 |
+
# 2. Background and Related Work
|
| 34 |
+
|
| 35 |
+
Arena-based evaluation is an important subfield within the broader domain of LLM evaluation. Unlike traditional evaluation paradigms (Zellers et al., 2019; Hendrycks et al., 2020; Cobbe et al., 2021; Liang et al., 2022; Jin et al., 2024a), which typically assess a model's performance against predefined benchmarks, arena-based evaluation involves models
|
| 36 |
+
|
| 37 |
+
competing directly with others. Current research in this area can generally be divided into three key categories: Battle Scenarios, Annotators, and Ranking Systems.
|
| 38 |
+
|
| 39 |
+
**Battle Scenario** The classic battle scenario is exemplified by the Chatbot Arena (Chiang et al., 2024), in which models respond to the same question and annotators compare their outputs. However, this approach is susceptible to the inherent biases of the annotators. To address this issue, several studies have incorporated multiple models working collaboratively to generate and evaluate responses, enabling iterative improvements (Zhao et al., 2024). Notable examples of this approach include LLMChain (Bouchiha et al., 2024) and ChatEval (Chan et al., 2024). While such strategies offer increased fairness, they come with trade-offs, including higher computational costs and potential instability.
|
| 40 |
+
|
| 41 |
+
Annotator In arena-based evaluation, the comparison of results typically involves human annotators (Cheng et al., 2024) or highly capable LLMs, such as GPT-4 (Achiam et al., 2023) and Claude (Anthropic). Additionally, some researchers have explored the use of specialized referee models for this task, such as PandaLM (Wang et al., 2023), JudgeLM (Zhu et al., 2023), and Auto-J (Li et al., 2023), which are designed to enhance the evaluation process.
|
| 42 |
+
|
| 43 |
+
Ranking Systems for LLM Evaluation Ranking systems play a crucial role in arena-based LLM evaluation (Busa-Fekete et al., 2014; Szörenyi et al., 2015; Chernoff, 1992). Among the existing approaches, many arena-based methods rely on the ELO Rating System to model LLMs' capabilities (Coulom, 2007; Pelanek, 2016). The ELO rating system, grounded in the Bradley-Terry model (Hunter, 2004; Rao & Kupper, 1967), is widely used in competitive games (Sismanis, 2010; Ebtekar & Liu, 2021) to predict the likelihood of one competitor outperforming another based on their relative abilities. However, due to its dynamic nature, which is tailored for traditional competitive games, the ELO system introduces instability in LLM evaluation. To mitigate this instability, existing approaches typically perform multiple random shuffles of the annotated dataset and calculate ELO scores for each iteration (Sismanis, 2010). The statistical summary, such as the mean or variance of the scores across these shuffles, is then used as the final evaluation metric. Although this strategy provides a practical solution, it does not fundamentally resolve the inconsistency introduced by the sequential updates in the ELO method.
|
| 44 |
+
|
| 45 |
+
# 3. Preliminary
|
| 46 |
+
|
| 47 |
+
Arena-based evaluation is a highly anticipated method in LLM evaluation, where models are compared head-to-head on benchmarks or datasets and the results are annotated by evaluators. Let $S = \{(i,j,k,W_{ij})|i,j\in [N],k\in [M]\}$
|
| 48 |
+
|
| 49 |
+
Algorithm 1 The Traditional ELO Rating System
|
| 50 |
+
Input: Dataset $S$ , Scaling Factor $K$ , Init Score $R_{init}$
|
| 51 |
+
Initialize: Set of Scores $RS_{i}\gets \emptyset$ , Score of Models
|
| 52 |
+
$R_{i}\gets R_{init}$
|
| 53 |
+
Calculate ELO Score:
|
| 54 |
+
for $(i,j,W_{ij})\in S$ do
|
| 55 |
+
$R_i^{\prime}\gets R_i + K\cdot (W_{ij} - P(R_i,R_j))$ $R_j^\prime \gets R_j + K\cdot (W_{ji} - P(R_j,R_i))$
|
| 56 |
+
end for
|
| 57 |
+
Output: ELO Score $(R_1,\dots ,R_N)$
|
| 58 |
+
|
| 59 |
+
represent the comparative dataset we have collected, where $N$ is the number of models and $M$ is the number of annotators. Each element $(i,j,k,W_{ij})\in S$ indicates that model $i$ and model $j$ engaged in a battle, and annotator $k$ provided the result $W_{ij}$ . Specifically, $W_{ij} = 1$ indicates that model $i$ won the battle, $W_{ij} = 0$ indicates that model $j$ won, and $W_{ij} = 0.5$ indicates a tie. The goal of the arena-based evaluation is to estimate the ranking scores $R = (R_1,\dots ,R_N)$ for the models based on the record $S$ .
|
| 60 |
+
|
| 61 |
+
ELO Rating System The ELO rating system is a widely used method for ranking competitors based on pairwise comparisons. In the ELO system, each competitor (or model) is assigned a rating $R$ , which represents its relative strength. When two models, $i$ and $j$ , compete, their respective ratings, $R_i$ and $R_j$ , are used to calculate the expected probability of each outcome: $P(R_i, R_j) = P(W_{ij} = 1) = \frac{1}{1 + e^{-C(R_i - R_j)}}$ , where $C$ is a constant that scales the difference in ratings. After observing the actual outcome of the match, the ratings are updated as follows:
|
| 62 |
+
|
| 63 |
+
$$
|
| 64 |
+
\begin{array}{l} R_{i}^{\prime} = R_{i} + K \cdot \left( W_{ij} - P\left(R_{i}, R_{j}\right) \right), \\ R_{j}^{\prime} = R_{j} + K \cdot \left( W_{ji} - P\left(R_{j}, R_{i}\right) \right), \end{array} \tag{1}
|
| 65 |
+
$$
|
| 66 |
+
|
| 67 |
+
where $K$ is a scaling factor that controls the magnitude of rating changes. The pseudo-code for this process is shown in Algorithm 1. However, the existing ELO method is iterative and highly sensitive to the order of the data. This is unreasonable for LLM evaluation, because evaluation can be seen as a static process (Zhan et al., 2024). Specifically, the errors introduced by the ELO method arise from the algorithm's dynamics rather than from the data itself, which undermines the statistical significance of the ELO scores for many models.
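For concreteness, a minimal Python sketch of the iterative update in Eq. (1) and Algorithm 1; the constants below are illustrative defaults rather than values prescribed by this paper. Because scores are updated record by record, shuffling `records` generally changes the output, which is the order sensitivity discussed above:

```python
import math

def iterative_elo(records, num_models, K=4.0, C=math.log(10) / 400, R_init=1000.0):
    """Traditional ELO: one sequential pass over (i, j, W_ij) records.

    K, C and R_init are illustrative defaults, not values from this paper.
    """
    R = [R_init] * num_models
    for i, j, w_ij in records:
        p_ij = 1.0 / (1.0 + math.exp(-C * (R[i] - R[j])))  # P(R_i, R_j)
        R[i] += K * (w_ij - p_ij)                   # Eq. (1), update for model i
        R[j] += K * ((1.0 - w_ij) - (1.0 - p_ij))   # Eq. (1), update for model j
    return R
```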
|
| 68 |
+
|
| 69 |
+
Moreover, current algorithms do not account for differences in annotator abilities. They treat all annotators as if they have the same ability $C$ , mixing annotation records randomly. This assumption can introduce bias and instability into the evaluation process.
|
| 70 |
+
|
| 71 |
+
# 4. Better Performance estimation with ELO
|
| 72 |
+
|
| 73 |
+
Earlier, we introduced the traditional ELO method and highlighted its key challenges, including ranking inconsistencies and the lack of consideration for annotator variability. To address these issues, this section presents a stable arena framework with improvements to the ELO method.
|
| 74 |
+
|
| 75 |
+
# 4.1. MLE for ELO (m-ELO) Estimation
|
| 76 |
+
|
| 77 |
+
The traditional ELO rating estimation method is based on an iterative algorithm, and the results are highly dependent on the order of the samples. This explains why ELO ratings often lack consistency. Inspired by the insensitivity of maximum likelihood estimation (MLE) to the sample order, we propose an MLE-driven ELO estimation algorithm, termed m-ELO. Specifically, for the record dataset $S$ , its log-likelihood function can be expressed as follows:
|
| 78 |
+
|
| 79 |
+
$$
|
| 80 |
+
\ln L = \sum_{(i, j, W_{ij}) \in S} W_{ij} \ln P\left(R_{i}, R_{j}\right) + W_{ji} \ln P\left(R_{j}, R_{i}\right), \tag{2}
|
| 81 |
+
$$
|
| 82 |
+
|
| 83 |
+
where $P(R_{i},R_{j}) = \frac{1}{1 + e^{-C(R_{i} - R_{j})}}$ . The result of the MLE method, $(R_1^*,R_2^*,\ldots ,R_N^*)$ , can be obtained by solving the extreme point of the log-likelihood function using gradient descent. Specifically, for any given model $n\in [N]$ , the gradient of the log-likelihood function with respect to its rating $R_{n}$ is:
|
| 84 |
+
|
| 85 |
+
$$
|
| 86 |
+
\frac{\partial \ln L}{\partial R_{n}} = \sum_{(n, j, W_{nj}) \in S} C \left( W_{nj} - P\left(R_{n}, R_{j}\right) \right), \tag{3}
|
| 87 |
+
$$
|
| 88 |
+
|
| 89 |
+
By comparing Equations 1 and 3, we observe that the two formulas share a consistent structure. This highlights the essence of the ELO algorithm: it performs gradient descent with a learning rate of $\frac{K}{C}$ on the MLE for each annotated sample. Gradient descent based on individual samples rarely converges, which reveals a key shortcoming of the traditional ELO method.
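A small NumPy sketch of the m-ELO estimate described above: full-batch gradient ascent on the log-likelihood of Eq. (2) using the gradient of Eq. (3). Setting $C = 1$ is an illustrative choice; the learning rate and iteration count mirror the setting reported in Section 5.2, and pinning one model's score removes the shift invariance discussed around Theorem 4.1:

```python
import numpy as np

def m_elo(records, num_models, C=1.0, lr=0.1, epochs=2000):
    """Order-independent ELO scores via MLE (gradient ascent on ln L).

    C = 1 is an illustrative choice; lr and epochs follow Section 5.2.
    """
    R = np.zeros(num_models)
    for _ in range(epochs):
        grad = np.zeros(num_models)
        for i, j, w_ij in records:
            p_ij = 1.0 / (1.0 + np.exp(-C * (R[i] - R[j])))
            grad[i] += C * (w_ij - p_ij)   # Eq. (3), term for model i
            grad[j] -= C * (w_ij - p_ij)   # symmetric term for model j
        R += lr * grad
        R -= R[0]  # pin one score to zero to remove the shift invariance
    return R
```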
|
| 90 |
+
|
| 91 |
+
Convergence Analysis Although the estimation results of the MLE method are not influenced by the sample order, another important consideration is whether the log-likelihood function has only one extreme point. If multiple extreme points exist, it could still lead to inconsistencies in the ELO rankings. Unfortunately, because ELO scores are relative, it is clear that if $(R_1^*, R_2^*, \ldots, R_N^*)$ is an extreme point, then $(R_1^* + \epsilon, R_2^* + \epsilon, \dots, R_N^* + \epsilon)$ is also an extreme point. Thus, the extreme points of the log-likelihood function are not unique. However, when we fix the score of one of the models, we obtain the following theorem (Zermelo, 1929):
|
| 92 |
+
|
| 93 |
+
Theorem 4.1. Assume $R_0 = 0$ and $|S|$ is sufficiently large, then the log-likelihood function $\ln L$ with respect to $(R_2, \dots, R_N)$ is a concave function and has at most one extreme point.
|
| 94 |
+
|
| 95 |
+
Drawing from Theorem 4.1, we can assert that the ELO score obtained through the MLE method is relatively stable between models, meaning that the difference in ability between any two models remains stable.
|
| 96 |
+
|
| 97 |
+
Replacing the iterative method with the MLE approach makes the ELO method more flexible. Additionally, it allows us to model annotator abilities during the evaluation process. In the next section, we will adopt ideas from psychometrics to propose a feasible modeling approach and analyze its interpretability.
|
| 98 |
+
|
| 99 |
+
# 4.2. Annotator Modeling m-ELO (am-ELO) Estimation
|
| 100 |
+
|
| 101 |
+
Although ability modeling is not commonly seen in LLM evaluation, many ability modeling methods have been developed in education and psychometrics (Liu et al., 2021; Wang et al., 2022; Zhang et al., 2024b; Zhuang et al., 2022; Liu et al., 2024). One prominent method is Item Response Theory (IRT) (Embretson & Reise, 2013; Zhu et al., 2022; Nguyen & Zhang, 2022; Polo et al., 2024). IRT posits that an examinee's performance on a test depends solely on its ability $\theta$ and the properties of the questions. The standard model is the two-parameter logistic (2PL) model, defined as: $P_{j}(\theta) = P(y_{j} = 1) = \frac{1}{1 + e^{-a_{j}(\theta - b_{j})}}$ , where $y_{j} = 1$ indicates a correct response to question $j$ , and $a_{j}$ and $b_{j} \in \mathbb{R}$ represent the discrimination and difficulty of question $j$ .
|
| 102 |
+
|
| 103 |
+
As noted, the parameter $a$ in IRT can be interpreted as the discrimination parameter. Similarly, in the ELO method, the fixed value $C$ can also be understood as the discrimination parameter. To account for annotator variability, we replace the fixed value $C$ in the probability density estimation with a parameter $\theta_{k}$ that is specific to annotator $k$ :
|
| 104 |
+
|
| 105 |
+
$$
|
| 106 |
+
P\left(R_{i}, R_{j} \mid \theta_{k}\right) = \frac{1}{1 + e^{-\theta_{k}\left(R_{i} - R_{j}\right)}}, \tag{4}
|
| 107 |
+
$$
|
| 108 |
+
|
| 109 |
+
This new formulation has the following properties:
|
| 110 |
+
|
| 111 |
+
- Maintains symmetry: Symmetry with respect to the models' abilities $R_{i}$ and $R_{j}$ is preserved even after replacing the constant $C$ with an annotator-related parameter $\theta_{k}$, such that $P(R_{i}, R_{j}|\theta_{k}) + P(R_{j}, R_{i}|\theta_{k}) = 1$.
|
| 112 |
+
- Discriminative ability ( $\theta_{k} > 0$ ): When the abilities of two models are identical, the change in win probability caused by a small variation in either ability is proportional to the annotator's ability, with $\theta_{k} = 4\frac{\partial P(R_{i},R_{j}\mid\theta_{k})}{\partial R_{i}}\big|_{R_{i} = R_{j} = r}$ (see the short derivation after this list). Therefore, the annotator's ability $\theta_{k}$ represents the maximum discriminative ability.
|
| 113 |
+
- Anomalous annotator $(\theta_{k} < 0)$ : When the discriminative ability $\theta_{k}$ is negative, for any model $i$ with greater ability than model $j$ , annotator $k$ perceives the probability of model $i$ winning as less than 0.5. This indicates an anomalous annotator.
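As a quick check of the discriminative-ability property in the second item above (a one-line derivation added here for completeness, not reproduced from the paper), differentiating Eq. (4) at equal abilities gives:

$$
\left.\frac{\partial P\left(R_{i}, R_{j} \mid \theta_{k}\right)}{\partial R_{i}}\right|_{R_{i} = R_{j} = r} = \left.\frac{\theta_{k}\, e^{-\theta_{k}\left(R_{i} - R_{j}\right)}}{\left(1 + e^{-\theta_{k}\left(R_{i} - R_{j}\right)}\right)^{2}}\right|_{R_{i} = R_{j} = r} = \frac{\theta_{k}}{4}.
$$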
|
| 114 |
+
|
| 115 |
+
To estimate the parameters of the probability function, we consider its logarithmic likelihood function similarly:
|
| 116 |
+
|
| 117 |
+
$$
|
| 118 |
+
\sum_{(i, j, k, W_{ij}) \in S} W_{ij} \ln P\left(R_{i}, R_{j} \mid \theta_{k}\right) + W_{ji} \ln P\left(R_{j}, R_{i} \mid \theta_{k}\right). \tag{5}
|
| 119 |
+
$$
|
| 120 |
+
|
| 121 |
+
After modifying the probability function, we need to account for both the ELO scores of the models $R = (R_{1},\dots ,R_{N})$ and the abilities of the annotators $\Theta = (\theta_{1},\ldots ,\theta_{M})$ during gradient descent. For a model $n\in [N]$ and annotator $m\in [M]$ , the gradients of $\ln L$ to them can be expressed as:
|
| 122 |
+
|
| 123 |
+
$$
|
| 124 |
+
\frac{\partial \ln L}{\partial R_{n}} = \sum_{(n, j, k, W_{nj}) \in S} \theta_{k} \left( W_{nj} - P\left(R_{n}, R_{j} \mid \theta_{k}\right) \right),
|
| 125 |
+
$$
|
| 126 |
+
|
| 127 |
+
$$
|
| 128 |
+
\frac{\partial \ln L}{\partial \theta_{m}} = \sum_{(i, j, m, W_{ij}) \in S} \left( R_{i} - R_{j} \right) \left( W_{ij} - P\left(R_{i}, R_{j} \mid \theta_{m}\right) \right). \tag{6}
|
| 129 |
+
$$
|
| 130 |
+
|
| 131 |
+
This method allows us to simultaneously estimate the annotators' abilities during the MLE process. Beyond the concept of discrimination introduced by the improved probability function, we should also explore the practical significance of this ability estimation in the context of the arena. Through analysis, we find that the estimated annotator ability $\theta_{k}$ exhibits the following two properties:
|
| 132 |
+
|
| 133 |
+
Theorem 4.2. Given that $\theta$ represents the ability of annotators estimated by am-ELO, the following conclusions can be drawn:
|
| 134 |
+
|
| 135 |
+
(1) If two annotators label the same set of samples $W_{ij}$ , $W_{ij}'$ with abilities $\theta_1$ and $\theta_2$ ( $\theta_2 > \theta_1$ ), then:
|
| 136 |
+
|
| 137 |
+
$$
|
| 138 |
+
\sum_{(i, j, W_{ij}) \in S^{\prime}} (R_{i} - R_{j}) W_{ij} < \sum_{(i, j, W_{ij}^{\prime}) \in S^{\prime}} (R_{i} - R_{j}) W_{ij}^{\prime}.
|
| 139 |
+
$$
|
| 140 |
+
|
| 141 |
+
(2) If $\theta_{k} < 0$ , then for each positive sample $(i,j,k,1)$ of annotator $k$ , the gradient of its log-likelihood satisfies $\frac{\partial\ln l}{\partial R_i} < 0$ , and for each negative sample $(i,j,k,0)$ of annotator $k$ , $\frac{\partial\ln l}{\partial R_i} > 0$ .
|
| 142 |
+
|
| 143 |
+
From Theorem 4.2, it is evident that the annotator abilities derived from MLE have practical significance. Specifically, $\sum_{(i,j,W_{ij})\in S}(R_i - R_j)W_{ij}$ can be interpreted as the correlation between the annotations $W_{ij}$ and the rankings $R_i - R_j$ . Theorem 4.2 (1) implies that a higher annotator ability corresponds to a greater value of $\sum_{(i,j,W_{ij})\in S'}(R_i - R_j)W_{ij}$ , meaning that a larger $\theta_k$ indicates that the annotations from annotator $k$ are more consistent with the overall rankings. Meanwhile, Theorem 4.2 (2) suggests that an annotator with negative ability might annotate inconsistently or arbitrarily, and am-ELO can identify these anomalous annotators.
|
| 144 |
+
|
| 145 |
+
Normalization Although this method has strong interpretability for modeling annotators, it is not difficult to observe that, for such an optimization problem, if $(R_1^*,\dots ,R_N^*,\theta_1^*,\dots ,\theta_M^*)$ is an extreme point, then
|
| 146 |
+
|
| 147 |
+
Algorithm 2 The am-ELO Rating System
|
| 148 |
+
Input: Dataset $S$ , Learning Rate $\alpha$ , Epoch Epoch
|
| 149 |
+
Initialize: Score of Models $R$ and annotators' ability $\Theta$
|
| 150 |
+
for $t = 1$ to Epoch do
Calculate MLE: $\ln L\gets \mathrm{MLE}(R,\Theta ,S)$
Optimize: $R\gets R + \alpha \frac{\partial\ln L}{\partial R}$, $\Theta \gets \Theta +\alpha \frac{\partial\ln L}{\partial\Theta}$
Normalization: $\Theta \leftarrow \frac{\Theta}{\mathbf{1}^T\cdot\Theta}$
|
| 151 |
+
end for
|
| 152 |
+
Output: ELO Score and annotators' ability $(R,\Theta)$
|
| 153 |
+
|
| 154 |
+
Algorithm 3 The Stable Arena framework
|
| 155 |
+
Input: Learning Rate $\alpha$ , Epoch Epoch, Ability Threshold $\epsilon$ .
|
| 156 |
+
Initialize: Dataset $S \gets \emptyset$ , Data quantity threshold $\delta$ .
|
| 157 |
+
while True do
|
| 158 |
+
$S \gets S \cup S_{new}$
|
| 159 |
+
for $k = 1$ to $M$ do
|
| 160 |
+
$\delta_k = |\{(i,j,x,W_{ij})|x = k\}|$
|
| 161 |
+
end for
|
| 162 |
+
$S' \gets \{(i,j,k,W_{ij})\mid\delta_k > \delta\}$
$(R,\Theta) \gets \text{am-ELO}(S',\alpha, \text{Epoch})$
$(R_1, \dots, R_N) = R$, $(\theta_1, \dots, \theta_M) = \Theta$
|
| 163 |
+
Output: ELO Score $(R_1, \dots, R_N)$
|
| 164 |
+
Filter annotators: $S \gets \{(i,j,k,W_{ij})|\theta_k > \epsilon\}$
|
| 165 |
+
end while
|
| 166 |
+
|
| 167 |
+
$(\alpha R_1^*, \dots, \alpha R_N^*, \frac{1}{\alpha} \theta_1^*, \dots, \frac{1}{\alpha} \theta_M^*)$ is also an extreme point. Thus, when $\alpha < 0$ , the model score ranking will be completely reversed, leading to potential instability. To mitigate this issue, we impose a constraint on the annotator's ability:
|
| 168 |
+
|
| 169 |
+
$$
|
| 170 |
+
\theta_{1} + \theta_{2} + \dots + \theta_{M} = 1. \tag{7}
|
| 171 |
+
$$
|
| 172 |
+
|
| 173 |
+
From Theorem 4.2 (2), we know that $\theta_{k} > 0$ corresponds to users who annotate normally. The significance of this normalization operation is essentially based on the assumption that the majority of annotators in the group are labeling responsibly (Nowak & Rüger, 2010). Based on this assumption, we determine whether the model rankings should follow the original order or be reversed.
|
| 174 |
+
|
| 175 |
+
# 4.3. Stable Arena Framework
|
| 176 |
+
|
| 177 |
+
Algorithm 2 presents the pseudo-code for the am-ELO algorithm. The am-ELO algorithm performs gradient descent (Ruder, 2016) on the negative log-likelihood function over the entire dataset to find the extreme point, ultimately returning both the model scores and annotator abilities. Specifically, when considering only the m-ELO algorithm, the concavity of its log-likelihood function enables the use of Newton's method (Galantai, 2000; Kelley, 2003) during optimization. This allows for dynamic adjustment of the learning rate, thereby improving convergence efficiency.
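A compact NumPy sketch of Algorithm 2, a simplified reading rather than the released implementation: the gradients follow Eq. (6), the rescaling after each step enforces the normalization constraint of Eq. (7), and the learning rate and epoch count are the same illustrative values used above:

```python
import numpy as np

def am_elo(records, num_models, num_annotators, lr=0.1, epochs=2000):
    """Jointly estimate model scores R and annotator abilities Theta (Algorithm 2).

    A simplified sketch; hyperparameters are illustrative.
    """
    R = np.zeros(num_models)
    theta = np.full(num_annotators, 1.0 / num_annotators)  # satisfies Eq. (7)
    for _ in range(epochs):
        grad_R = np.zeros(num_models)
        grad_theta = np.zeros(num_annotators)
        for i, j, k, w_ij in records:
            p_ij = 1.0 / (1.0 + np.exp(-theta[k] * (R[i] - R[j])))
            grad_R[i] += theta[k] * (w_ij - p_ij)            # Eq. (6), model side
            grad_R[j] -= theta[k] * (w_ij - p_ij)
            grad_theta[k] += (R[i] - R[j]) * (w_ij - p_ij)   # Eq. (6), annotator side
        R += lr * grad_R
        theta += lr * grad_theta
        theta /= theta.sum()  # Eq. (7): abilities are rescaled to sum to one
    return R, theta
```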
|
| 178 |
+
|
| 179 |
+
Building on the improvements to the ELO method discussed earlier, we introduce the Stable Arena Framework, a novel paradigm for arena-based evaluation, as detailed in Algorithm 3. To ensure more robust evaluations, we carefully screen the annotated data both before and after applying the am-ELO method. Specifically, upon incorporating new annotation samples, we first filter out annotators who have fewer than $\delta$ annotation records. This is crucial because annotators with fewer records tend to produce less reliable results. However, this does not imply permanent exclusion; once such annotators accumulate a sufficient number of annotations, their records will be reconsidered.
|
| 180 |
+
|
| 181 |
+
After evaluating both models and annotators, we further refine the process by filtering annotators based on their estimated abilities. Annotators with negative ability values, or those with ability values below a threshold $\epsilon$ , are deemed detrimental to the evaluation process. For these annotators, we either issue warnings or exclude them entirely from further evaluations. Moreover, since a higher $\theta$ indicates greater consistency between the annotations and the overall ranking, the LLM evaluation platform can reward annotators proportionally to their demonstrated abilities.
|
| 182 |
+
|
| 183 |
+
# 5. Experiments
|
| 184 |
+
|
| 185 |
+
In this section, we introduce and compare the performance of our proposed method with the traditional ELO method in predicting annotation results, highlighting the superior modeling capability of am-ELO. Additionally, we demonstrate the limitations of the traditional ELO method through a comparison of model rankings produced by various ELO methods and a case study. Next, we validate the convergence of the ELO rankings generated by our method, further reinforcing the validity of our approach for evaluating LLMs. Finally, to assess the stability of the ELO method, we apply four different strategies to perturb the annotators. Our results show that our method not only maintains stability in the tests but also effectively identifies anomalous annotators, emphasizing the superiority of our approach.
|
| 186 |
+
|
| 187 |
+
# 5.1. Dataset
|
| 188 |
+
|
| 189 |
+
We conduct experiments on a real annotation dataset, Chatbot (Zheng et al., 2023), which was collected from 13,000 distinct IP addresses in the Chatbot Arena between April and June 2023. The dataset consists of 33,000 curated conversations with pairwise human preferences. Each entry includes a question ID, the names of two models, their full conversation transcripts, the annotator's vote, and the annotator's ID. Because MLE is used in this experiment, annotators with very few samples may introduce instability. Consequently, we excluded annotators with fewer than 50 annotated records. The statistical information of the filtered dataset is shown in Table 1.
|
| 190 |
+
|
| 191 |
+

|
| 192 |
+
Figure 3. The results of each LLM under different evaluation methods. Specifically, the line chart shows the normalized ELO scores $\uparrow$ (ranging from 0 to 1) of each LLM under different evaluation methods. The bar chart shows the Loss $\downarrow$ (log-likelihood function) of each LLM's match records under different evaluation methods.
|
| 193 |
+
|
| 194 |
+
Table 1. Statistics of the dataset
|
| 195 |
+
|
| 196 |
+
<table><tr><td>Dataset</td><td>Chatbot</td></tr><tr><td>#Annotators</td><td>42</td></tr><tr><td>#Models</td><td>20</td></tr><tr><td>#Response logs</td><td>4321</td></tr><tr><td>#Response logs per annotator</td><td>102.88</td></tr><tr><td>#Response logs per model</td><td>216.05</td></tr><tr><td>#Response logs per model pair</td><td>22.74</td></tr></table>
|
| 197 |
+
|
| 198 |
+
# 5.2. Setting
|
| 199 |
+
|
| 200 |
+
In this experiment, we consider a baseline, the traditional ELO method, alongside the two methods we propose: m-ELO and am-ELO. For the iterative ELO method, we perform repeated experiments by shuffling the dataset 1000 times and averaging the results. The MLE is solved using the gradient descent (GD) approach with a learning rate of 0.1 and a fixed number of 2000 iterations. The code is available on GitHub: https://github.com/bigdata-ustc/am-ELO.
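For reference, a hedged sketch of the repeated-shuffle protocol for the iterative ELO baseline; the update constant `K` and the logistic win probability used below are illustrative assumptions rather than values taken from the paper.

```python
import numpy as np

def iterative_elo(records, n_models, K=4.0, C=np.log(10) / 400):
    """One online pass of the traditional ELO update over (i, j, k, w) records."""
    R = np.zeros(n_models)
    for i, j, _, w in records:
        p = 1.0 / (1.0 + np.exp(-C * (R[i] - R[j])))   # expected score of model i
        R[i] += K * (w - p)
        R[j] -= K * (w - p)
    return R

def averaged_elo(records, n_models, n_shuffles=1000, seed=0):
    """Average the iterative ELO scores over many random orderings of the data."""
    rng = np.random.default_rng(seed)
    runs = []
    for _ in range(n_shuffles):
        perm = rng.permutation(len(records))
        runs.append(iterative_elo([records[t] for t in perm], n_models))
    return np.mean(runs, axis=0)
```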
|
| 201 |
+
|
| 202 |
+
# 5.3. Result and Case Study
|
| 203 |
+
|
| 204 |
+
The bar chart in Figure 3 presents the mean log-likelihood loss for each method. As shown, the loss difference between m-ELO and ELO, which share the same probability function, is minimal, while the loss for am-ELO is significantly lower than the other two methods. This indicates that am-ELO demonstrates better fitting ability. Furthermore, as shown in Table 2, am-ELO significantly outperforms the other two baseline models in prediction tasks, suggesting that am-ELO exhibits superior generalization ability. This
|
| 205 |
+
|
| 206 |
+
Table 2. The Performance of ELO method for prediction.
|
| 207 |
+
|
| 208 |
+
<table><tr><td>Method</td><td>MSE↓</td><td>AUC↑</td></tr><tr><td>ELO</td><td>0.1238±0.0031</td><td>0.7492±0.0068</td></tr><tr><td>m-ELO</td><td>0.1234±0.0029</td><td>0.7503±0.0066</td></tr><tr><td>am-ELO</td><td>0.1208±0.0034</td><td>0.7581±0.0067</td></tr></table>
|
| 209 |
+
|
| 210 |
+

|
| 211 |
+
Figure 4. The heatmap shows the number of victories in battles between various models (three models with similar abilities, koala-13b, vicuna-7b, and gpt-13b, together with the groups of models that are better or worse than them). Each number in the figure represents how many times the row model defeated the column model.
|
| 212 |
+
|
| 213 |
+
also demonstrates that the improved probability function effectively models the annotators.
|
| 214 |
+
|
| 215 |
+
Meanwhile, the line chart in Figure 3 illustrates the ELO scores obtained from the three ELO methods. It is clear that the ranking trends of our proposed methods align closely with the traditional ELO method. However, there are some differences in the rankings of specific models, such as koala-13b, vicuna-7b, and gpt-13b.
|
| 216 |
+
|
| 217 |
+
To analyze these models with similar abilities, we categorize
|
| 218 |
+
|
| 219 |
+

|
| 220 |
+
Figure 5. The Loss and Consistency of the evaluation method at each epoch on the Chatbot dataset.
|
| 221 |
+
|
| 222 |
+
the remaining models into two groups based on their ELO scores: "Better" and "Worse", representing models that are better or worse than the aforementioned models. We visualize the number of matches between these models. As shown in Figure 4, each number represents the number of times the model in the row defeated the model in the column. For example, the first row and third column indicate that vicuna-7b lost to "Better" models 148 times. From this, we observe that the head-to-head records between koala-13b and vicuna-7b do not differentiate their abilities, and both models defeated "Better" models the same number of times; however, vicuna-7b lost fewer games to both "Better" and "Worse" models. Based on this result, we conclude that vicuna-7b is stronger than koala-13b, which aligns with the rankings provided by both am-ELO and m-ELO.
|
| 223 |
+
|
| 224 |
+
However, because koala-13b recorded a large number of victories over "Worse" models, the traditional ELO method disproportionately weighs these victories during the scoring process, ultimately ranking koala-13b higher than vicuna-7b. This suggests that avoiding strong opponents and repeatedly defeating weaker ones can artificially inflate a model's ranking, which is an undesirable outcome.
|
| 225 |
+
|
| 226 |
+
# 5.4. The Convergence and Efficiency of ELO Methods
|
| 227 |
+
|
| 228 |
+
In this subsection, we discuss the convergence and efficiency of the proposed am-ELO. Our comparison includes not only the three methods mentioned above but also am-ELO (w/o Norm), a variant in which normalization is not performed during training. To analyze the convergence and efficiency of each evaluation method, we record the loss (Loss) during the gradient descent process. Additionally, we perform five random initializations of the model parameters and calculate the consistency of the rankings (Consistency) (Hastie & Tibshirani, 1997) of the ELO scores output by these five models at each epoch. It should be noted that the iterative process of the traditional ELO method differs from the gradient descent approach of MLE; therefore, we directly record the final output loss and consistency for the traditional ELO method. The results are shown in Figure 5.
|
| 229 |
+
|
| 230 |
+
As observed from the loss, the three MLE-based methods
|
| 231 |
+
|
| 232 |
+
all converge to a local minimum within a limited number of iterations. The loss at convergence for m-ELO is nearly identical to that of the traditional ELO, which is expected since both methods share the same probability estimation function. This once again demonstrates that m-ELO and traditional ELO are essentially equivalent. Moreover, am-ELO (w/o Norm) converges the fastest, followed by am-ELO, with m-ELO being the slowest. This is because am-ELO has more adjustable parameters than m-ELO, and am-ELO (w/o Norm) benefits from fewer constraints during the gradient descent process. However, as seen from the consistency, am-ELO (w/o Norm) quickly converges to different local minima, and its consistency stabilizes at 0.4. This suggests that the five outputs of this method consist of two ordered sequences and three reversed sequences $\left(\frac{C_2^2 + C_3^2}{C_5^2} = 0.4\right)$ . On the other hand, am-ELO not only achieves stable rankings after sufficient gradient descent iterations but does so more efficiently than m-ELO. This demonstrates that the proposed am-ELO method strikes a balance between convergence and efficiency.
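The stabilized value of 0.4 can be reproduced with a small sketch that counts agreeing pairs of runs; treating two runs as consistent only when their full orderings coincide is a simplifying assumption made here for illustration.

```python
from itertools import combinations

def consistency(rankings):
    """Fraction of run pairs that induce the same ordering of models."""
    orders = [tuple(sorted(range(len(r)), key=lambda m: -r[m])) for r in rankings]
    pairs = list(combinations(orders, 2))
    return sum(a == b for a, b in pairs) / len(pairs)

# Two ordered runs and three reversed runs -> (C(2,2) + C(3,2)) / C(5,2) = 0.4
runs = [[2.0, 1.0]] * 2 + [[1.0, 2.0]] * 3
print(consistency(runs))  # 0.4
```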
|
| 233 |
+
|
| 234 |
+
# 5.5. The Stability of Various ELO Methods
|
| 235 |
+
|
| 236 |
+
Since directly verifying the stability of the am-ELO method during the evaluation process is challenging, we use simulation experiments that introduce perturbations to the annotators. Specifically, we perturb the annotators' results using four strategies to simulate the anomalous annotators that may occur during testing (a minimal sketch of these strategies follows the list below):
|
| 237 |
+
|
| 238 |
+
- Random: If model A wins, the result has a $50\%$ chance of being changed to "Tie" and a $50\%$ chance of being changed to "model B wins", and vice versa.
|
| 239 |
+
- Equal: All results are changed to "Tie".
|
| 240 |
+
- Flip: If model A wins, the result will be flipped to "model B wins", and vice versa. The outcome "Tie" remains unchanged.
|
| 241 |
+
- Mixed: A random selection is made from the first three perturbation strategies for each instance.
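A minimal sketch of these four strategies, assuming outcomes are encoded as 1 (model A wins), 0 (model B wins) and 0.5 (tie); how the "Random" strategy treats ties is not specified above, so they are left unchanged here.

```python
import random

def perturb(outcome, strategy, rng=random):
    if strategy == "random":
        if outcome == 0.5:
            return 0.5                               # assumption: ties unchanged
        return rng.choice([0.5, 1.0 - outcome])      # 50% tie, 50% reversed
    if strategy == "equal":
        return 0.5                                   # everything becomes a tie
    if strategy == "flip":
        return outcome if outcome == 0.5 else 1.0 - outcome
    if strategy == "mixed":
        return perturb(outcome, rng.choice(["random", "equal", "flip"]), rng)
    raise ValueError(f"unknown strategy: {strategy}")
```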
|
| 242 |
+
|
| 243 |
+
These perturbations mimic scenarios where intentional mislabeling occurs in annotations. Considering that the majority of annotators in the arena annotate normally, the number of perturbed annotators in our simulation will not exceed half of the total number of annotators. We expect a stable scoring method to have two key properties: (1) it should produce ELO rankings consistent with those obtained without perturbations, and (2) it should identify the anomalous annotators. Consistency of the ELO scores is measured by pairwise comparison between the ELO rankings with and without perturbations, and the identification of anomalous annotators is measured by the F1-score
|
| 244 |
+
|
| 245 |
+

|
| 246 |
+
Figure 6. This figure contains four line charts and four scatter plots, corresponding to the ELO score consistency under the four types of perturbation, as well as the changes in annotator abilities obtained from am-ELO as the level of perturbation increases.
|
| 247 |
+
|
| 248 |
+

|
| 249 |
+
Figure 7. The line chart of F1-score for outlier detection at different thresholds under the four types of perturbation.
|
| 250 |
+
|
| 251 |
+
(Chen & Lin, 2006) of the annotators' abilities obtained from am-ELO. A higher F1-score indicates better accuracy in detecting the perturbations.
|
| 252 |
+
|
| 253 |
+
The line charts in Figure 6 show the relationship between the ratio of perturbations and the consistency of ELO scores. We observe that am-ELO maintains higher consistency across various types of perturbations. The "Equal" perturbation is itself unlikely to affect rankings, so all ELO methods remain highly consistent under it; in the other three perturbation scenarios, however, am-ELO reduces the inconsistency rate to $30\%$ compared to m-ELO or traditional ELO. Meanwhile, the scatter plots at the bottom of Figure 6 show the changes in annotator abilities under each perturbation. Red dots represent normal annotators, while green dots represent anomalous ones. It is clear that nearly all anomalous annotators
|
| 254 |
+
|
| 255 |
+
have ability scores below 0, indicating they are identified as noise points. Additionally, Figure 7 presents the F1-scores for detecting perturbations under thresholds of 0 and 0.005. Under different perturbations, the recognition accuracy reached $90\%$ when $\epsilon = 0$ , and even up to $95\%$ when $\epsilon = 0.005$ . These results demonstrate that our method effectively detects perturbations, models the annotators, and maintains the consistency of results, thereby alleviating the problem of ELO inconsistency.
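For completeness, a small sketch of this detection metric, assuming the set of perturbed annotators is known in the simulation:

```python
from sklearn.metrics import f1_score

def detection_f1(theta, perturbed, epsilon=0.0):
    """F1-score of flagging annotators whose estimated ability is below epsilon."""
    y_true = [int(k in perturbed) for k in range(len(theta))]
    y_pred = [int(t < epsilon) for t in theta]
    return f1_score(y_true, y_pred)
```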
|
| 256 |
+
|
| 257 |
+
# 6. Conclusion
|
| 258 |
+
|
| 259 |
+
In this study, we explored the instability of the ELO method in the context of LLM evaluation, emphasizing its impact on the reliability of evaluation outcomes. To address this issue, we introduced the Stable Arena Framework, which utilizes the MLE approach for ELO rating estimation and incorporates annotator ability parameters into the probability function. Our experiments demonstrated that am-ELO not only achieves more stable convergence but also effectively identifies anomalous annotators, resulting in rankings that are more aligned with human intuition. These findings suggest that our approach can significantly reduce the instability of ELO, enhancing the credibility and robustness of LLM evaluation, while providing a more stable and easily implementable framework for arena-based evaluation.
|
| 260 |
+
|
| 261 |
+
However, our method has certain limitations. Specifically, the dimensions of annotator modeling are somewhat simplistic, as it primarily captures the annotator's discriminatory ability and consistency with other annotators. This makes it challenging to fully capture the annotator's broader capabilities. In future work, we aim to refine the design of annotator ability dimensions to better leverage crowdsourcing for arena-based evaluation.
|
| 262 |
+
|
| 263 |
+
# Acknowledgements
|
| 264 |
+
|
| 265 |
+
This research was supported by grants from the National Key Research and Development Program of China (Grant No. 2024YFC3308200), the National Natural Science Foundation of China (62337001), the Key Technologies R & D Program of Anhui Province (No. 202423k09020039), China Postdoctoral Science Foundation (Grant No. 2024M760725) and the Fundamental Research Funds for the Central Universities.
|
| 266 |
+
|
| 267 |
+
# Impact Statement
|
| 268 |
+
|
| 269 |
+
This paper presents work whose goal is to advance the field of Machine Learning. There are many potential societal consequences of our work, none of which we feel must be specifically highlighted here.
|
| 270 |
+
|
| 271 |
+
# References
|
| 272 |
+
|
| 273 |
+
Achiam, J., Adler, S., Agarwal, S., Ahmad, L., Akkaya, I., Aleman, F. L., Almeida, D., Altenschmidt, J., Altman, S., Anadkat, S., et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023.
|
| 274 |
+
Aldous, D. Elo ratings and the sports model: A neglected topic in applied probability? 2017.
|
| 275 |
+
Anthropic, S. Model card addendum: Claude 3.5 Haiku and upgraded Claude 3.5 Sonnet. URL https://api.semanticscholar.org/CorpusID:273639283.
|
| 276 |
+
Bai, Y., Jones, A., Ndousse, K., Askell, A., Chen, A., Das-Sarma, N., Drain, D., Fort, S., Ganguli, D., Henighan, T., et al. Training a helpful and harmless assistant with reinforcement learning from human feedback. arXiv preprint arXiv:2204.05862, 2022.
|
| 277 |
+
Boubdir, M., Kim, E., Ermis, B., Hooker, S., and Fadaee, M. Elo uncovered: Robustness and best practices in language model evaluation. arXiv preprint arXiv:2311.17295, 2023.
|
| 278 |
+
Bouchiha, M. A., Telnoff, Q., Bakkali, S., Champagnat, R., Rabah, M., Coustaty, M., and Ghamri-Doudane, Y. Llmchain: Blockchain-based reputation system for sharing and evaluating large language models. In 2024 IEEE 48th Annual Computers, Software, and Applications Conference (COMPSAC), pp. 439-448, 2024. doi: 10.1109/COMPSAC61105.2024.00067.
|
| 279 |
+
Boyd, S. and Vandenberghe, L. Convex optimization. Cambridge university press, 2004.
|
| 280 |
+
Busa-Fekete, R., Hüllermeier, E., and Szörenyi, B. Preference-based rank elicitation using statistical mod
|
| 281 |
+
|
| 282 |
+
els: The case of mallows. In International conference on machine learning, pp. 1071-1079. PMLR, 2014.
|
| 283 |
+
Chan, C., Chen, W., Su, Y., Yu, J., Xue, W., Zhang, S., Fu, J., and Liu, Z. Chateval: Towards better llm-based evaluators through multi-agent debate. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024. URL https://openreview.net/forum?id=FQepisCUWu.
|
| 284 |
+
Chen, Y.-W. and Lin, C.-J. Combining svms with various feature selection strategies. Feature extraction: foundations and applications, pp. 315-324, 2006.
|
| 285 |
+
Cheng, M., Zhang, H., Yang, J., Liu, Q., Li, L., Huang, X., Song, L., Li, Z., Huang, Z., and Chen, E. Towards personalized evaluation of large language models with an anonymous crowd-sourcing platform. In Companion Proceedings of the ACM Web Conference 2024, pp. 1035-1038, 2024.
|
| 286 |
+
Cheng, M., Luo, Y., Ouyang, J., Liu, Q., Liu, H., Li, L., Yu, S., Zhang, B., Cao, J., Ma, J., et al. A survey on knowledge-oriented retrieval-augmented generation. arXiv preprint arXiv:2503.10677, 2025.
|
| 287 |
+
Chernoff, H. Sequential design of experiments. Springer, 1992.
|
| 288 |
+
Chiang, W., Zheng, L., Sheng, Y., Angelopoulos, A. N., Li, T., Li, D., Zhu, B., Zhang, H., Jordan, M. I., Gonzalez, J. E., and Stoica, I. Chatbot arena: An open platform for evaluating llms by human preference. In Forty-first International Conference on Machine Learning, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net, 2024. URL https://openreview.net/forum?id=3MW8GKNyzI.
|
| 289 |
+
Cobbe, K., Kosaraju, V., Bavarian, M., Chen, M., Jun, H., Kaiser, L., Plappert, M., Tworek, J., Hilton, J., Nakano, R., et al. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021.
|
| 290 |
+
Coulom, R. Computing "elo ratings" of move patterns in the game of go. ICGA journal, 30(4):198-208, 2007.
|
| 291 |
+
Ebtekar, A. and Liu, P. An elo-like system for massive multiplayer competitions. arXiv preprint arXiv:2101.00400, 2021.
|
| 292 |
+
Eickhoff, C. Cognitive biases in crowdsourcing. In Proceedings of the eleventh ACM international conference on web search and data mining, pp. 162-170, 2018.
|
| 293 |
+
Elo, A. E. The proposed USCF rating system, its development, theory, and applications. Chess Life, 22(8):242-247, 1967.
|
| 294 |
+
|
| 295 |
+
Embretson, S. E. and Reise, S. P. Item response theory. Psychology Press, 2013.
|
| 296 |
+
Furr, R. M. Psychometrics: an introduction. SAGE publications, 2021.
|
| 297 |
+
Galantai, A. The theory of newton's method. Journal of Computational and Applied Mathematics, 124(1-2):25-44, 2000.
|
| 298 |
+
Hastie, T. and Tibshirani, R. Classification by pairwise coupling. Advances in neural information processing systems, 10, 1997.
|
| 299 |
+
Hendrycks, D., Burns, C., Basart, S., Zou, A., Mazeika, M., Song, D., and Steinhardt, J. Measuring massive multitask language understanding. arXiv preprint arXiv:2009.03300, 2020.
|
| 300 |
+
Hunter, D. R. Mm algorithms for generalized bradley-terry models. The annals of statistics, 32(1):384-406, 2004.
|
| 301 |
+
Jin, Y., Choi, M., Verma, G., Wang, J., and Kumar, S. Mmsoc: Benchmarking multimodal large language models in social media platforms. In ACL, 2024a.
|
| 302 |
+
Jin, Y., Zhao, Q., Wang, Y., Chen, H., Zhu, K., Xiao, Y., and Wang, J. Agentreview: Exploring peer review dynamics with llm agents. In EMNLP, 2024b.
|
| 303 |
+
Johnson, C. R. Positive definite matrices. The American Mathematical Monthly, 77(3):259-264, 1970.
|
| 304 |
+
Kelley, C. T. Solving nonlinear equations with Newton's method. SIAM, 2003.
|
| 305 |
+
Li, C., Shi, L., Zhou, C., Huan, Z., Tang, C., Zhang, X., Wang, X., Zhou, J., and Liu, S. A merge sort based ranking system for the evaluation of large language models. In Bifet, A., Krilavicius, T., Miliou, I., and Nowaczyk, S. (eds.), Machine Learning and Knowledge Discovery in Databases. Applied Data Science Track, pp. 240-255, Cham, 2024. Springer Nature Switzerland. ISBN 978-3-031-70378-2.
|
| 306 |
+
Li, J., Sun, S., Yuan, W., Fan, R.-Z., Zhao, H., and Liu, P. Generative judge for evaluating alignment. arXiv preprint arXiv:2310.05470, 2023.
|
| 307 |
+
Liang, P., Bommasani, R., Lee, T., Tsipras, D., Soylu, D., Yasunaga, M., Zhang, Y., Narayanan, D., Wu, Y., Kumar, A., et al. Holistic evaluation of language models. arXiv preprint arXiv:2211.09110, 2022.
|
| 308 |
+
Liu, Q., Huang, Z., Yin, Y., Chen, E., Xiong, H., Su, Y., and Hu, G. Ekt: Exercise-aware knowledge tracing for student performance prediction. IEEE Transactions on Knowledge and Data Engineering, 33(1):100-115, 2021.
|
| 309 |
+
|
| 310 |
+
Liu, Z., Yan, Z., Liu, Q., Li, J., Zhang, Y., Huang, Z., Wu, J., and Wang, S. Computerized adaptive testing via collaborative ranking. In Neural Information Processing Systems, 2024. URL https://api.semanticscholar.org/CorpusID:276259892.
|
| 311 |
+
Morizot, J., Ainsworth, A. T., and Reise, S. P. Toward modern psychometrics. Handbook of research methods in personality psychology, 407, 2009.
|
| 312 |
+
Nguyen, D. and Zhang, A. Y. A spectral approach to item response theory. In Koyejo, S., Mohamed, S., Agarwal, A., Belgrave, D., Cho, K., and Oh, A. (eds.), Advances in Neural Information Processing Systems, volume 35, pp. 38818-38830. Curran Associates, Inc., 2022.
|
| 313 |
+
Nowak, S. and Rüger, S. How reliable are annotations via crowdsourcing: a study about inter-annotator agreement for multi-label image annotation. In Proceedings of the international conference on Multimedia information retrieval, pp. 557-566, 2010.
|
| 314 |
+
Ouyang, J., Pan, T., Cheng, M., Yan, R., Luo, Y., Lin, J., and Liu, Q. Hoh: A dynamic benchmark for evaluating the impact of outdated information on retrieval-augmented generation, 2025. URL https://arxiv.org/abs/2503.04800.
|
| 315 |
+
O'Meara, O. T. Introduction to quadratic forms, volume 117. Springer, 2013.
|
| 316 |
+
Pelánek, R. Applications of the elo rating system in adaptive educational systems. Computers & Education, 98:169-179, 2016.
|
| 317 |
+
Polo, F. M., Weber, L., Choshen, L., Sun, Y., Xu, G., and Yurochkin, M. tinybenchmarks: evaluating llms with fewer examples. In *Forty-first International Conference on Machine Learning*, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net, 2024. URL https://openreview.net/forum?id=qAml3FpfhG.
|
| 318 |
+
Rao, P. and Kupper, L. L. Ties in paired-comparison experiments: A generalization of the bradley-terry model. Journal of the American Statistical Association, 62(317): 194-204, 1967.
|
| 319 |
+
Raykar, V. C. and Yu, S. Ranking annotators for crowdsourced labeling tasks. Advances in neural information processing systems, 24, 2011.
|
| 320 |
+
Ruder, S. An overview of gradient descent optimization algorithms. ArXiv, abs/1609.04747, 2016. URL https://api.semanticscholar.org/CorpusID:17485266.
|
| 321 |
+
Shi-gu, J. Application of Lagrange mean value theorem. 2014. URL https://api.semanticscholar.org/CorpusID:124971556.
|
| 322 |
+
|
| 323 |
+
Sismanis, Y. How I won the "Chess Ratings - Elo vs the Rest of the World" competition. arXiv preprint arXiv:1012.4571, 2010.
|
| 324 |
+
Szörenyi, B., Busa-Fekete, R., Paul, A., and Hüllermeier, E. Online rank elicitation for plackett-luce: A dueling bandits approach. Advances in neural information processing systems, 28, 2015.
|
| 325 |
+
Thacker, W. C. The role of the hessian matrix in fitting models to measurements. Journal of Geophysical Research: Oceans, 94(C5):6177-6196, 1989.
|
| 326 |
+
Wang, F., Liu, Q., Chen, E., Huang, Z., Yin, Y., Wang, S., and Su, Y. Neuralcd: a general framework for cognitive diagnosis. IEEE Transactions on Knowledge and Data Engineering, 2022.
|
| 327 |
+
Wang, Y., Yu, Z., Zeng, Z., Yang, L., Wang, C., Chen, H., Jiang, C., Xie, R., Wang, J., Xie, X., et al. Pandalm: An automatic evaluation benchmark for llm instruction tuning optimization. arXiv preprint arXiv:2306.05087, 2023.
|
| 328 |
+
Welinder, P. and Perona, P. Online crowdsourcing: rating annotators and obtaining cost-effective labels. In 2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition-Workshops, pp. 25-32. IEEE, 2010.
|
| 329 |
+
Zellers, R., Holtzman, A., Bisk, Y., Farhadi, A., and Choi, Y. Hellaswag: Can a machine really finish your sentence? arXiv preprint arXiv:1905.07830, 2019.
|
| 330 |
+
Zermelo, E. Die berechnung der turnier-ergebnisse als ein maximumproblem der wahrscheinlichkeitsrechnung. Mathematische Zeitschrift, 29(1):436-460, 1929.
|
| 331 |
+
Zhan, J., Wang, L., Gao, W., Li, H., Wang, C., Huang, Y., Li, Y., Yang, Z., Kang, G., Luo, C., et al. Evaluatology: The science and engineering of evaluation. BenchCouncil Transactions on Benchmarks, Standards and Evaluations, 4(1):100162, 2024.
|
| 332 |
+
Zhang, Y., Zhang, M., Yuan, H., Liu, S., Shi, Y., Gui, T., Zhang, Q., and Huang, X. Llmeval: A preliminary study on how to evaluate large language models. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 38, pp. 19615-19622, 2024a.
|
| 333 |
+
Zhang, Z., Wu, L., Liu, Q., Liu, J.-Y., Huang, Z., Yin, Y., Yan, Z., Gao, W., and Chen, E. Understanding and improving fairness in cognitive diagnosis. Sci. China Inf. Sci., 67, 2024b. URL https://api.semanticscholar.org/CorpusID:269473652.
|
| 334 |
+
|
| 335 |
+
Zhao, Q., Wang, J., Zhang, Y., Jin, Y., Zhu, K., Chen, H., and Xie, X. Competeai: Understanding the competition behaviors in large language model-based agents. In ICML, 2024.
|
| 336 |
+
Zheng, L., Chiang, W.-L., Sheng, Y., Zhuang, S., Wu, Z., Zhuang, Y., Lin, Z., Li, Z., Li, D., Xing, E. P., Zhang, H., Gonzalez, J. E., and Stoica, I. Judging llm-as-a-judge with mt-bench and chatbot arena, 2023.
|
| 337 |
+
Zhu, L., Wang, X., and Wang, X. Judgelm: Fine-tuned large language models are scalable judges. arXiv preprint arXiv:2310.17631, 2023.
|
| 338 |
+
Zhu, Z., Arthur, D., and Chang, H.-H. A new person-fit method based on machine learning in cdm in education. British Journal of Mathematical and Statistical Psychology, 75(3):616-637, 2022.
|
| 339 |
+
Zhuang, Y., Liu, Q., Huang, Z., Li, Z., Shen, S., and Ma, H. Fully adaptive framework: Neural computerized adaptive testing for online education. In Proceedings of the AAAI conference on artificial intelligence, volume 36, pp. 4734-4742, 2022.
|
| 340 |
+
|
| 341 |
+
# A. Proofs of Theorem 4.1
|
| 342 |
+
|
| 343 |
+
Proof. Assume $R_{1} = 0$ and consider the remaining variables $(R_{2},\dots ,R_{N})$ . For each sample $(i,j,W_{ij})$ , the log-likelihood function $\ln l$ for this sample is given by:
|
| 344 |
+
|
| 345 |
+
$$
|
| 346 |
+
\ln l = W _ {i j} \ln P \left(R _ {i}, R _ {j}\right) + W _ {j i} \ln P \left(R _ {j}, R _ {i}\right).
|
| 347 |
+
$$
|
| 348 |
+
|
| 349 |
+
The second-order partial derivatives of $\ln l$ are:
|
| 350 |
+
|
| 351 |
+
$$
|
| 352 |
+
\frac {\partial^ {2} \ln l}{\partial R _ {i} ^ {2}} = - C ^ {2} P (R _ {i}, R _ {j}) (1 - P (R _ {i}, R _ {j})), i \neq 1,
|
| 353 |
+
$$
|
| 354 |
+
|
| 355 |
+
$$
|
| 356 |
+
\frac {\partial^ {2} \ln l}{\partial R _ {i} \partial R _ {j}} = C ^ {2} P (R _ {i}, R _ {j}) (1 - P (R _ {i}, R _ {j})), i, j \neq 1,
|
| 357 |
+
$$
|
| 358 |
+
|
| 359 |
+
Now, let the number of matches between model $i$ and model $j$ be $\delta_{ij}$ and define $a_{ij} = \delta_{ij}C^2 P(R_i,R_j)(1 - P(R_i,R_j))$ . For the Hessian matrix (Thacker, 1989) of the log-likelihood function $\frac{\partial^2\ln L}{\partial\mathbf{R}\partial\mathbf{R}^T}$ , its quadratic form (O'Meara, 2013) can be expressed as:
|
| 360 |
+
|
| 361 |
+
$$
|
| 362 |
+
\mathbf {x} \frac {\partial^ {2} \ln L}{\partial \mathbf {R} \partial \mathbf {R} ^ {T}} \mathbf {x} ^ {T} = - \sum_ {i = 2} ^ {N} \sum_ {j = 2} ^ {N} a _ {i j} (x _ {i} - x _ {j}) ^ {2} - \sum_ {i = 2} ^ {N} a _ {i 1} x _ {i} ^ {2} - \sum_ {j = 2} ^ {N} a _ {1 j} x _ {j} ^ {2}.
|
| 363 |
+
$$
|
| 364 |
+
|
| 365 |
+
Note that $a_{ij} \geq 0$ , therefore:
|
| 366 |
+
|
| 367 |
+
$$
|
| 368 |
+
\mathbf {x} \frac {\partial^ {2} \ln L}{\partial \mathbf {R} \partial \mathbf {R} ^ {T}} \mathbf {x} ^ {T} \leq 0.
|
| 369 |
+
$$
|
| 370 |
+
|
| 371 |
+
The equality holds if and only if $x_{i} = x_{j} = 0$ , i.e. $\mathbf{x} = \mathbf{0}$ . Since the quadratic form is strictly negative for all non-zero vectors $\mathbf{x}$ , the Hessian matrix $\frac{\partial^2\ln L}{\partial\mathbf{R}\partial\mathbf{R}^T}$ is negative definite (Johnson, 1970). This implies that the log-likelihood function $\ln L$ is concave. Therefore, $\ln L$ can have at most one extreme point (Boyd & Vandenberghe, 2004), ensuring the uniqueness of the maximum likelihood solution.
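The argument can also be checked numerically. The following illustrative sketch (not part of the paper) builds the quadratic-form matrix above for random scores and match counts, with $R_1$ fixed to 0, and verifies that its largest eigenvalue is negative.

```python
import numpy as np

rng = np.random.default_rng(0)
N, C = 5, 1.0
R = rng.normal(size=N)
delta = rng.integers(1, 10, size=(N, N))                  # match counts delta_ij
P = 1.0 / (1.0 + np.exp(-C * (R[:, None] - R[None, :])))
a = delta * C**2 * P * (1.0 - P)                          # a_ij from the proof

M = np.zeros((N, N))
for i in range(1, N):
    for j in range(1, N):
        # term -a_ij (x_i - x_j)^2 of the quadratic form
        M[i, i] -= a[i, j]
        M[j, j] -= a[i, j]
        M[i, j] += a[i, j]
        M[j, i] += a[i, j]
    # boundary terms -a_i1 x_i^2 and -a_1i x_i^2 (model 1 has fixed score)
    M[i, i] -= a[i, 0] + a[0, i]

H = M[1:, 1:]                                             # free variables R_2..R_N
print(np.linalg.eigvalsh(H).max() < 0)                    # True: negative definite
```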
|
| 372 |
+
|
| 373 |
+
# B. Proofs of Theorem 4.2
|
| 374 |
+
|
| 375 |
+
Proof. (1) For annotators 1 and 2, the following formula can be obtained from Equation 6:
|
| 376 |
+
|
| 377 |
+
$$
|
| 378 |
+
\frac {\partial \ln L}{\partial \theta_ {1}} = \sum_ {(i, j, W _ {i j}) \in S ^ {\prime}} (R _ {i} - R _ {j}) \left(W _ {i j} - P \left(R _ {i}, R _ {j} \mid \theta_ {1}\right)\right)
|
| 379 |
+
$$
|
| 380 |
+
|
| 381 |
+
$$
|
| 382 |
+
\frac{\partial\ln L}{\partial\theta_{2}} = \sum_{(i,j,W^{\prime}_{ij})\in S^{\prime}}(R_{i} - R_{j})(W^{\prime}_{ij} - P(R_{i},R_{j}|\theta_{2}))
|
| 383 |
+
$$
|
| 384 |
+
|
| 385 |
+
Since $\frac{\partial\ln L}{\partial\theta_1} = \frac{\partial\ln L}{\partial\theta_2} = 0$ , the difference between the two equations can be obtained:
|
| 386 |
+
|
| 387 |
+
$$
|
| 388 |
+
\sum_ {(i, j, W _ {i j}) \in S ^ {\prime}} (R _ {i} - R _ {j}) \left(W _ {i j} - W _ {i j} ^ {\prime}\right) = \sum_ {(i, j, W _ {i j} ^ {\prime}) \in S ^ {\prime}} (R _ {i} - R _ {j}) \left(P \left(R _ {i}, R _ {j} \mid \theta_ {1}\right) - P \left(R _ {i}, R _ {j} \mid \theta_ {2}\right)\right)
|
| 389 |
+
$$
|
| 390 |
+
|
| 391 |
+
According to the Lagrange mean value theorem (Shi-gu, 2014), the right-hand side can be rewritten as:
|
| 392 |
+
|
| 393 |
+
$$
|
| 394 |
+
= \sum_ {(i, j, W _ {i j} ^ {\prime}) \in S ^ {\prime}} (R _ {i} - R _ {j}) ^ {2} P _ {i j} (\xi_ {i j}) (1 - P _ {i j} (\xi_ {i j})) (\theta_ {1} - \theta_ {2})
|
| 395 |
+
$$
|
| 396 |
+
|
| 397 |
+
Due to $P_{ij}(\xi_{ij})(1 - P_{ij}(\xi_{ij})) > 0$ and $\theta_1 < \theta_2$ :
|
| 398 |
+
|
| 399 |
+
$$
|
| 400 |
+
\sum_ {(i, j, W _ {i j}) \in S ^ {\prime}} (R _ {i} - R _ {j}) \left(W _ {i j} - W _ {i j} ^ {\prime}\right) < 0
|
| 401 |
+
$$
|
| 402 |
+
|
| 403 |
+
(2) Because of $0 < P(R_{i}, R_{j} | \theta_{k}) < 1$ and $\theta_{k} < 0$ , for each positive sample $(i, j, k, 1)$ of annotator $k$ , we have $\frac{\partial \ln l}{\partial R_{i}} = \theta_{k} (1 - P(R_{i}, R_{j} | \theta_{k})) < 0$ . Similarly, for each negative sample $(i, j, k, 0)$ of annotator $k$ , we have $\frac{\partial \ln l}{\partial R_{i}} = \theta_{k} (0 - P(R_{i}, R_{j} | \theta_{k})) > 0$ .
|
ICML/2025/am-ELO_ A Stable Framework for Arena-based LLM Evaluation/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:59e2da615701e0132fbcf42af023db238f34d073537baacb219bcf49024aeb10
|
| 3 |
+
size 498703
|
ICML/2025/am-ELO_ A Stable Framework for Arena-based LLM Evaluation/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:046dfb8f81246f541e32c76cf794d0a0e038bb216f859f0af6f1e92be16e2173
|
| 3 |
+
size 504027
|
ICML/2025/any4_ Learned 4-bit Numeric Representation for LLMs/aed8248d-b454-44ef-a338-4bc6daba1424_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:56b75a134546068a62ce0e36969b548874ff39cfd8237a4c639431b820a01e80
|
| 3 |
+
size 117069
|
ICML/2025/any4_ Learned 4-bit Numeric Representation for LLMs/aed8248d-b454-44ef-a338-4bc6daba1424_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f387f235d5ff34fb2718fc92b8f6db0762091071eba735b2ed9d00e962b64ffa
|
| 3 |
+
size 140190
|
ICML/2025/any4_ Learned 4-bit Numeric Representation for LLMs/aed8248d-b454-44ef-a338-4bc6daba1424_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f804dd9c79b0a743ba4407f49fad5d221171a56f98a61ab9b587b103a674060c
|
| 3 |
+
size 521408
|
ICML/2025/any4_ Learned 4-bit Numeric Representation for LLMs/full.md
ADDED
|
@@ -0,0 +1,483 @@
| 1 |
+
Mostafa Elhoushi $^{*1}$ Jeff Johnson $^{*1}$
|
| 2 |
+
|
| 3 |
+
# Abstract
|
| 4 |
+
|
| 5 |
+
We present any4, a learned 4-bit weight quantization solution for large language models (LLMs) providing arbitrary numeric representations without requiring pre-processing of weights or activations. any4 yields higher accuracy compared to other related 4-bit numeric representation types: int4, fp4 and nf4, as evaluated on a range of model sizes, generations and families (Llama 2, Llama 3, Mistral and Mixtral). While any4 does not require preprocessing of weights or activations, it is also competitive with orthogonal techniques that require such preprocessing (e.g., AWQ and GPTQ). We also experiment with any3 and any2 and show competitiveness at lower bits. Additionally, we show that we can calibrate using a single curated diverse sample rather than hundreds of samples from a dataset as done in most quantization approaches. We also open source tinygemm, a latency optimized GPU matrix multiplication library for LLMs, that implements any4 using a GPU-efficient lookup table strategy along with other common quantization methods. We open source our code at https://github.com/facebookresearch/any4.
|
| 6 |
+
|
| 7 |
+
# 1. Introduction
|
| 8 |
+
|
| 9 |
+
Reduced neural network parameter sizes are important for efficient inference, whether at datacenter scale, where accelerators can be provisioned based more upon arithmetic throughput than memory requirements, or on edge devices, where smaller, slower memories could be used, improving battery lifetime while meeting performance constraints. Given training is typically done in high dynamic range floating point arithmetic, techniques to lossily compress weights must deal with the possibility of varying scale factors and outliers. Various weight numeric formats, such
|
| 10 |
+
|
| 11 |
+

|
| 12 |
+
Figure 1: Perplexity by quantizing various Llama3 model sizes. Our proposed any4 is the most accurate across numeric formats.
|
| 13 |
+
|
| 14 |
+
as 4-bit integer (int4), floating point (fp4), or custom distributions such as NormalFloat4 (nf4) (Dettmers et al., 2023), along with quantization grouping (Dai et al., 2021), are used to increase accuracy. Pre-processing weights and/or activations (e.g., AWQ (Lin et al., 2024), GPTQ (Frantar et al., 2023), or weight Hadamard transforms (Ashkboos et al., 2024b; Liu et al., 2024)) can aid with accuracy as well. In this paper, we present a new learned numeric representation, any4, that does not require online or offline modification of weights or activations. any4 quantization accuracy outperforms other numeric representation types, and is competitive with orthogonal quantization algorithms that preprocess weights and/or activations (orthogonality implying that some of these techniques can be applied together with any4 representation). Accuracy was evaluated on a wide range of model sizes, generations and families.
|
| 15 |
+
|
| 16 |
+
# 2. Background
|
| 17 |
+
|
| 18 |
+
Trained neural network weights tend to be roughly Gaussian in nature but with heavier tails (Goodfellow et al., 2016). In attempting to lossily compress weights via quantization (yielding fewer reproduction values than the original domain),
|
| 19 |
+
|
| 20 |
+
being able to closely match the weight distribution with the possible post-quantization reproduction values is important for accuracy.
|
| 21 |
+
|
| 22 |
+
# 2.1. Uniform Integer Quantization
|
| 23 |
+
|
| 24 |
+
Some of the first neural network quantization works concerned uniform integer quantization (Jacob et al., 2018). Given a set of values to quantize, we obtain the maximum absolute value, and set that to the extreme value (e.g., -128 / +127 for int8 and -8 / +7 for int4 quantization), with zero being preserved (int8/int4 zero dequantizes to original domain zero). Each increment between int8/int4 values corresponds to a fixed increment (scale) in the original floating point domain.
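A minimal sketch of this scheme for symmetric int4 (an illustration, not the tinygemm implementation):

```python
import numpy as np

def int4_quantize(w):
    """Map the absolute maximum to +/-7 so that integer 0 reproduces 0.0."""
    scale = np.abs(w).max() / 7.0
    q = np.clip(np.round(w / scale), -8, 7).astype(np.int8)
    return q, scale

def int4_dequantize(q, scale):
    return q.astype(np.float32) * scale    # each integer step is one 'scale'

w = np.random.randn(16).astype(np.float32) * 0.01
q, s = int4_quantize(w)
print(np.abs(int4_dequantize(q, s) - w).max())   # quantization error
```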
|
| 25 |
+
|
| 26 |
+
This allows for more efficient (chip area and power) hardware circuits, as integer multiply-add is much simpler than floating point multiply-add. However, uniform integer quantization is best suited to representing samples from a uniform distribution, which is a mismatch with neural network properties. Increased bitwidth (more dense uniform samples) is needed for accuracy due to the expected distribution mismatch, indicating that there is waste in memory storage.
|
| 27 |
+
|
| 28 |
+
# 2.2. Floating Point Quantization
|
| 29 |
+
|
| 30 |
+
Floating point quantization (reducing fractional precision and dynamic range via rounding) is another mechanism. Unlike integer quantization, reproduction values are now non-uniformly spaced. Floating point arithmetic yields a piecewise linear distribution of values: the steps between floating point exponents are geometric in nature (multiply or divide by 2 each increment), but within a given exponent value, the spacing of reproduction values is linear (as given by the significand bits). This is a slightly closer match, since a Gaussian distribution with zero mean has most of its mass at smaller exponent values, which floating point samples more densely than a linear spacing on the number line, while within an exponent the spacing of values is still linear.
|
| 31 |
+
|
| 32 |
+
Such quantization makes sense with hardware support for reduced bit width floating point types (e.g., fp8 formats with Nvidia's H100 GPU and fp4 with Nvidia's B100 GPU). In lieu of native conversion instructions, bit manipulation can usually convert or round a $n$ -bit fpn value to the nearest standard fp16/bf16 value (thus, fp4 can be emulated on devices with higher bit width floating point support).
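As an illustration of such emulation, the sketch below snaps higher precision values to the nearest entry of an fp4 value table; the E2M1 value set used here is an assumption about the fp4 variant.

```python
import numpy as np

# Assumed fp4 (E2M1) magnitudes: 0, 0.5, 1, 1.5, 2, 3, 4, 6 and their negatives.
FP4_POS = np.array([0.0, 0.5, 1.0, 1.5, 2.0, 3.0, 4.0, 6.0], dtype=np.float32)
FP4_VALUES = np.concatenate([-FP4_POS[::-1], FP4_POS])    # 16 entries (two zeros)

def round_to_fp4(x):
    x = np.asarray(x, dtype=np.float32)
    idx = np.argmin(np.abs(x[..., None] - FP4_VALUES), axis=-1)
    return FP4_VALUES[idx]

print(round_to_fp4([0.26, -2.4, 5.1]))   # nearest fp4 values: 0.5, -2.0, 6.0
```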
|
| 33 |
+
|
| 34 |
+
# 2.3. Grouped Quantization
|
| 35 |
+
|
| 36 |
+
As the bitwidth (and thus the number of possible quantization reproduction values) decreases, it can be useful to introduce metadata pertaining to groups of values to the quantization to improve accuracy, with metadata storage
|
| 37 |
+
|
| 38 |
+
cost amortized across many values (Darvish Rouhani et al., 2020). Grouped quantization is an attempt at this. Instead of forcing a single scalar value itself to be the entire representation, we can define groups of contiguous values along a row or column of the matrix. A common offset and scale factor is defined for a group of values such that the reconstruction error is improved, with typical group sizes in practice being 32 - 256. Other variants include Shared Microexponents (Rouhani et al., 2023), providing a group-wise shared exponent value (multiplicative scale) to adjust per-scalar 4 bit floating point values (MX4) in lieu of a scale and offset.
|
| 39 |
+
|
| 40 |
+
# 2.4. Non-Uniform Quantization
|
| 41 |
+
|
| 42 |
+
Thus far we have discussed uniform (linear) and floating-point (log/linear) distributions. But we can go further and have quantization reproduction values match the seen distributions more closely.
|
| 43 |
+
|
| 44 |
+
NormalFloat4 (nf4) (Dettmers et al., 2023) attempts to do exactly this by having the reproduction values (fixed ahead of time) match a Gaussian distribution exactly. However, with an even number of reproduction values (e.g., $2^{n}$ for $n$ bits), we cannot represent a Gaussian symmetrically if we wish to preserve zero. So nf4 is asymmetric, using one of the 16 values to represent zero. This results in higher accuracy, especially for partially sparse matrices.
|
| 45 |
+
|
| 46 |
+
AbnormalFloat4 (af4) (Yoshida, 2023) is a variant of nf4 which adjusts the distribution based on quantization group size. The larger the quantization group, the larger the expected maximum absolute value of Gaussian distribution samples, but the mass of the distribution would still be close to 0. Mapping the nf4 distribution based on the seen absolute maximum value would result in much of the mass of the distribution (values closer to the mean) not being as accurately represented. af4 adjusts the distribution based on group size to take this into account.
|
| 47 |
+
|
| 48 |
+
# 2.4.1. ARBITRARY NON-UNIFORM QUANTIZATION: ANY4
|
| 49 |
+
|
| 50 |
+
Instead of trying to match an a priori data distribution as nf4/af4 do, we can instead learn the distribution from the seen data itself. This was explored in signal processing (Lloyd, 1982a; Max, 1960) and any4 explores this for LLMs. For each set of values along each row of a matrix, we can perform k-means (Lloyd, 1982b; MacQueen et al., 1967) or neural network-based clustering, so each row of the matrix has its own 4-bit quantization code, providing indices into a per-row codebook or lookup table (LUT) containing arbitrary floating point dequantization values. This adds little overhead to quantization: for each row of a $\mathbf{M} \times 4096$ matrix, any4 will add 16 bfloat16/float16 values, for an overhead of $(16 \times \text{sizeof}([b]\text{float}16) \times 8\text{bits/byte}) / 4096$ columns $= 0.0625$ bits for each matrix entry. Like existing
|
| 51 |
+
|
| 52 |
+
4-bit techniques, for higher accuracy we add quantization groups (e.g., each set of $g$ contiguous row values has a shared 16-bit scale and zero point). Thus, per-scalar quantization group overhead for $g = 128$ in our example would be $((4096 / 128) \times (2 \times 16)) / 4096 = 0.25$ bits, yielding $0.0625 + 0.25 + 4 = 4.3125$ bits for any4 representation. Note that standard int4 grouped quantization is already 4.25 bits/entry here, with extension to any4 only adding 0.0625 bits/entry of LUT overhead.
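A simplified per-row lookup-table quantizer in this spirit can be sketched with plain Lloyd/k-means; note that this omits the group-wise scaling and the activation-aware objective used by the actual any4 algorithm of Section 4.1.

```python
import numpy as np

def lut_quantize_row(row, n_bits=4, iters=20):
    """Cluster one (already scaled) weight row into 2^n_bits reproduction values."""
    k = 2 ** n_bits
    lut = np.quantile(row, np.linspace(0.0, 1.0, k))           # init centroids
    for _ in range(iters):
        codes = np.argmin(np.abs(row[:, None] - lut[None, :]), axis=1)
        for c in range(k):                                      # update non-empty clusters
            if np.any(codes == c):
                lut[c] = row[codes == c].mean()
    return codes.astype(np.uint8), lut                          # 4-bit codes + fp LUT

w = np.random.randn(4, 4096).astype(np.float32)
rows = [lut_quantize_row(r) for r in w]
w_hat = np.stack([lut[codes] for codes, lut in rows])           # dequantize via LUT
print(np.abs(w_hat - w).mean())
```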
|
| 53 |
+
|
| 54 |
+
In addition, likely the most efficient way to implement nf4 and af4 in software is via the same mechanism as any4: using a LUT, as there is no efficient programmatic way to convert a 4-bit integer to an nf4/af4 value using a small number of instructions. To support nf4/af4, our CUDA implementation also allows using a single 16-entry any4 LUT for an entire matrix instead of a LUT per each matrix row. This paper solely evaluates the latter.
|
| 55 |
+
|
| 56 |
+
# 2.5. Quantization Process
|
| 57 |
+
|
| 58 |
+
Vanilla quantization happens in 2 steps: scaling followed by rounding.
|
| 59 |
+
|
| 60 |
+
# 2.5.1. SCALING
|
| 61 |
+
|
| 62 |
+
Numeric formats have different numeric ranges, and high precision numeric formats usually have ranges orders of magnitude larger than low precision numeric formats, e.g., fp32 ranges from $-3.4 \times 10^{38}$ to $+3.4 \times 10^{38}$ while int4 ranges from -8 to +7. Moreover, the numeric range of a given tensor could be orders of magnitude different from that of a low precision format (e.g., most weight values range from -0.01 to +0.01 while int4 ranges from -8 to +7). Hence, directly rounding each element in a tensor to its nearest value in a numeric format will waste most of the bits and lead to high reconstruction error.
|
| 63 |
+
|
| 64 |
+
Instead, most approaches scale a tensor, or a subset of a tensor, to the range of lower precision numeric format. Given a weight tensor $\pmb{w}$ , and an index $i$ , the scaled weight tensor, $\pmb{w}_S$ , can be expressed as:
|
| 65 |
+
|
| 66 |
+
$$
|
| 67 |
+
w _ {S _ {i}} = \frac {w _ {i} - \beta_ {i}}{\alpha_ {i}} \tag {1}
|
| 68 |
+
$$
|
| 69 |
+
|
| 70 |
+
Scale factors $\alpha$ and $\beta$ are high precision scalar values that are calculated for each group of indices, $G$ . For asymmetric quantization<sup>1</sup>:
|
| 71 |
+
|
| 72 |
+
$$
|
| 73 |
+
\begin{array}{l} \alpha_ {j \in G} = \frac {\operatorname* {m a x} \left(w _ {j \in G}\right) - \operatorname* {m i n} \left(w _ {j \in G}\right)}{Q _ {\text {m a x}} - Q _ {\text {m i n}}} \tag {2} \\ \beta_ {j \in G} = \min (w _ {j \in G}) \\ \end{array}
|
| 74 |
+
$$
|
| 75 |
+
|
| 76 |
+
For symmetric quantization:
|
| 77 |
+
|
| 78 |
+
$$
|
| 79 |
+
\alpha_ {j \in G} = \frac {\operatorname* {m a x} \left(\operatorname {a b s} \left(w _ {j \in G}\right)\right)}{Q _ {\text {m a x}}} \tag {3}
|
| 80 |
+
$$
|
| 81 |
+
|
| 82 |
+
$$
|
| 83 |
+
\beta = 0
|
| 84 |
+
$$
|
| 85 |
+
|
| 86 |
+
where $G$ is a set of indices of a tensor, $\alpha$ and $\beta$ are scaling factors, $Q_{min}$ and $Q_{max}$ are the minimum and maximum values of the lower precision numeric format.
|
| 87 |
+
|
| 88 |
+
Scaling could be applied at different granularities:
|
| 89 |
+
|
| 90 |
+
- Tensorwise: where $G$ is the set of all indices of the tensor. Hence, all elements in tensor, $\mathbf{w}$ , share the same scale factors: $\alpha_{i,j} = \alpha$ , $\beta_{i,j} = \beta$ , $\forall i,j$ .
|
| 91 |
+
- Rowwise: where $G$ is the set of all indices of a row. Elements in each row of a tensor share the same scale factors: $\alpha_{i,j} = \alpha_i, \beta_{i,j} = \beta_i, \forall j$ .
|
| 92 |
+
- Columnwise: where $G$ is the set of all indices of a column. Elements in each column of a tensor share the same scale factors: $\alpha_{i,j} = \alpha_j, \beta_{i,j} = \beta_j, \forall i$ .
|
| 93 |
+
- Groupwise: where $G$ is the set of non-overlapping consecutive indices along a row (or column), of size $1 \times g$ , where group size, $g$ , is a scalar hyperparameter. Elements in each group, $G_{k}$ , share the same scale factors: $\alpha_{i,j} = \alpha_{i,G_k}, \beta_{i,j} = \beta_{i,G_k}, \forall j$ s.t. $kg \leq j < (k + 1)g$ . Values of 64 or 128 for $g$ usually provide a sweet spot between accuracy and overhead for 4-bit quantization.
|
| 94 |
+
- Blockwise: where $G$ is the set of indices within a two-dimensional block of size $b \times b$ , where, $b$ , is a scalar hyperparameter. Elements in each block, $G_{k,l}$ , of a tensor share the same scale factors: $\alpha_{i,j} = \alpha_{G_{k,l}}$ , $\beta_{i,j} = \beta_{G_{k,l}}$ , $\forall i, j$ s.t. $kb \leq i < (k + 1)b$ , $lb \leq j < (l + 1)b$ .
|
| 95 |
+
|
| 96 |
+
In our work, we focus on weight-only groupwise quantization (along the reduction dimension) and, unless stated otherwise, use a default group size $g$ of 128.
|
| 97 |
+
|
| 98 |
+
# 2.5.2. ROUNDING
|
| 99 |
+
|
| 100 |
+
After scaling, the next step is to round the scaled value to the nearest value in the low-precision quantization format:
|
| 101 |
+
|
| 102 |
+
$$
|
| 103 |
+
w _ {Q} = \operatorname {r o u n d} _ {Q} (w _ {S}) \tag {4}
|
| 104 |
+
$$
|
| 105 |
+
|
| 106 |
+
And to dequantize: $\mathrm{dequant}(w_{Q}) = \alpha w_{Q} + \beta$ .
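Putting Equations 1, 2 and 4 together, a hedged sketch of asymmetric group-wise quantization and dequantization; the unsigned 4-bit encoding with $Q_{min} = 0$ and $Q_{max} = 15$ is an assumption made for illustration, and signed variants only change $Q_{min}$/$Q_{max}$.

```python
import numpy as np

def quantize_groupwise(w, g=128, q_min=0, q_max=15):
    """Asymmetric group-wise scaling (Eqs. 1-2) followed by rounding (Eq. 4)."""
    n, k = w.shape
    wg = w.reshape(n, k // g, g)
    beta = wg.min(axis=-1, keepdims=True)                               # per-group offset
    alpha = (wg.max(axis=-1, keepdims=True) - beta) / (q_max - q_min)   # per-group scale
    q = np.clip(np.round((wg - beta) / alpha), q_min, q_max)
    return q.astype(np.uint8), alpha, beta

def dequantize_groupwise(q, alpha, beta):
    """dequant(w_Q) = alpha * w_Q + beta, then restore the original row shape."""
    return (q.astype(np.float32) * alpha + beta).reshape(q.shape[0], -1)

w = np.random.randn(8, 512).astype(np.float32) * 0.01
q, a, b = quantize_groupwise(w)
print(np.abs(dequantize_groupwise(q, a, b) - w).max())   # reconstruction error
```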
|
| 107 |
+
|
| 108 |
+
# 3. Related Work
|
| 109 |
+
|
| 110 |
+
Quantization has long been researched to run on CPUs and custom chips (Xie & Jabri, 1992). Various techniques can be categorized into:
|
| 111 |
+
|
| 112 |
+
Weights vs. Activations vs. Gradients vs. Optimizer States Quantization can be applied on weights only (AWQ (Lin et al., 2024), GPTQ (Frantar et al., 2023)), weights and activations (SmoothQuant (Xiao et al., 2023), LLM.int8() (Dettmers et al., 2022a)), KV cache (KVQuant (Hooper et al., 2024)), and can be applied to gradients for training (TinyScript (Fu et al., 2020)) and optimization states (8-bit Optimizers (Dettmers et al., 2022b)). Auto-regressive decoding with batch size 1 and sequence length 1 is a highly memory bound process (a big portion of compute time is spent in loading weights compared to processing activations), thus 4-bit weight only quantization leads to better speedup than 8-bit weight and 8-bit activation quantization (PyTorch, 2024). Moreover, 4-bit weight only quantization leads to a better accuracy-speed tradeoff compared to 4-bit weight and 4-bit activation quantization. In this research, we focus on quantizing weights only.
|
| 113 |
+
|
| 114 |
+
Post-Training Quantization (PTQ) vs. Quantization Aware Training (QAT) PTQ refers to quantization on a trained model without the need for further training. QAT refers to quantization during training, whether training a model from scratch, e.g., FP8-LM (Peng et al., 2023), or continually training or finetuning a trained model, e.g., QLoRA (Dettmers et al., 2023). This work falls under PTQ as it does not require further training of a model.
|
| 115 |
+
|
| 116 |
+
Numeric Representation While integer quantization is the most commonly used numeric representation, other numeric representations, explained above, are also used for inference and/or training: fp8 (Wang et al., 2018), fp6 (Gernigon et al., 2023), fp4 (Sun et al., 2020), nf4, and af4 (Yoshida, 2023).
|
| 117 |
+
|
| 118 |
+
Lookup Table (LUT) Representation While most research quantizes to pre-defined numeric formats, other approaches use a dynamic format that is specified for each tensor, or subset of elements of a tensor, using a look-up table (LUT) (a.k.a. codebook). In scalar quantization techniques, e.g., DeepCompression for CNNs (Han et al., 2016), GOBO for BERT (Zadeh et al., 2020), SqueezeLLM for LLMs (Kim et al., 2023), LUTs map scalar quantized values to scalar high precision values. In vector quantization techniques (Stock et al. for CNNs (Stock et al., 2020), AQLM for LLMs (Egiazarian et al., 2024)), LUTs map vectors of quantized values to vectors of high precision values.
|
| 119 |
+
|
| 120 |
+
Preserving Outlier/Sensitive Values LLM.int8() (Dettmers et al., 2022a) found that keeping $< 0.1\%$ of outlier activations and their corresponding weights in high precision minimizes drop in accuracy. SqueezeLLM (Kim et al., 2023) found that keeping $0.40\%$ outlier weights and an additional $0.05\%$ sensitive weights, determined by a Hessian metric, minimizes accuracy drops. In this work, we quantize all values and keep no outlier/sensitive values in higher precision.
|
| 121 |
+
|
| 122 |
+
Pre-processing Weights and/or Activations While many quantization algorithms simply round each high precision value to a value in the quantized set of possible values (Round to Nearest (RTN), stochastic rounding (Xia et al., 2021), or adaptive rounding (Nagel et al., 2020)), other algorithms perform some offline or online processing of weights and/or activations. Instead of keeping outlier activations or sensitive weights, AWQ (Lin et al., 2024) and SmoothQuant (Xiao et al., 2023) mitigate their effects by dividing outlier channels by a scaling factor and compensating by multiplying weights with the same factor. Other quantization approaches mitigate outliers by applying matrix transformations on weights and activations, e.g., QuIP (Chee et al., 2023), QuaRot (Ashkboos et al., 2024a) and Spin-Quant (Liu et al., 2024). Another line of research follows an iterative procedure of quantizing weights in subsets, modifying unquantized elements to mitigate the errors introduced after quantizing each subset, e.g., GPTQ (Frantar et al., 2023).
|
| 123 |
+
|
| 124 |
+
A common trend is to use a combination of techniques. QuIP cascades incoherence processing with adaptive rounding, QTIP (Tseng et al., 2024) uses Hadamard transforms to remove outliers, vector quantization for numeric representation and other techniques, while SqueezeLLM preserves a portion of outlier/sensitive values in high precision and applies scalar quantization. In this work, we opt for a one-shot quantization algorithm that does not require any online or offline pre-processing or transformations on weights and/or activations, and focus on the aspect of learning quantization from data with efficient inference in hardware, achieving SOTA accuracies compared to other numeric format approaches and is competitive with orthogonal approaches that pre-process weights and activations. We leave it to future work to combine any4 with such orthogonal techniques.
|
| 125 |
+
|
| 126 |
+
# 4. Proposed Solution
|
| 127 |
+
|
| 128 |
+
# 4.1. any4 Algorithm
|
| 129 |
+
|
| 130 |
+
In any4 quantization, we first apply group-wise scaling, then try to find the optimal numeric representation for each row of a weight matrix. Naively applying K-means clustering on scaled weights will lead to a sub-optimal quantization scheme. This is because K-means clustering will minimize the reconstruction error of the weight matrix rather than the output of multiplying weights with sample inputs, and even for weight reconstruction, K-means clustering will minimize the reconstruction error of the scaled weight matrix rather than the original weight matrix.
|
| 131 |
+
|
| 132 |
+
We denote a weight matrix with dimensions of $N\times K$ as $\pmb{w}$ , an input vector with dimensions $M\times K$ , where $M = 1$ without loss of generality, as $\pmb{x}$ , and the output vector with dimensions $M\times N$ as $\pmb{y}$ . Matrix multiplication in high
|
| 133 |
+
|
| 134 |
+
precision can be expressed as:
|
| 135 |
+
|
| 136 |
+
$$
|
| 137 |
+
\boldsymbol {y} = \boldsymbol {w x} \tag {5}
|
| 138 |
+
$$
|
| 139 |
+
|
| 140 |
+
and matrix multiplication with quantized weights as:
|
| 141 |
+
|
| 142 |
+
$$
|
| 143 |
+
\hat {\boldsymbol {y}} = \operatorname {d e q u a n t} \left(\boldsymbol {w} _ {Q}\right) \boldsymbol {x} \tag {6}
|
| 144 |
+
$$
|
| 145 |
+
|
| 146 |
+
For the $i$ th element of output $\pmb{y}$ , this is equivalent to:
|
| 147 |
+
|
| 148 |
+
$$
|
| 149 |
+
y _ {i} = \sum_ {\forall j} w _ {i, j} x _ {j} \tag {7}
|
| 150 |
+
$$
|
| 151 |
+
|
| 152 |
+
$$
|
| 153 |
+
\hat {y} _ {i} = \sum_ {\forall j} \operatorname {d e q u a n t} \left(w _ {Q _ {i, j}}\right) x _ {j} \tag {8}
|
| 154 |
+
$$
|
| 155 |
+
|
| 156 |
+
Our goal is to find the set of $2^{n}$ quantized values for row $i$ :
|
| 157 |
+
|
| 158 |
+
$$
|
| 159 |
+
Q _ {i} = \left\{w _ {Q _ {i} ^ {0}}, w _ {Q _ {i} ^ {1}}, \dots , w _ {Q _ {i} ^ {2 ^ {n} - 1}} \right\} \tag {9}
|
| 160 |
+
$$
|
| 161 |
+
|
| 162 |
+
for $n$ -bit quantization (anyn) that will minimize the expected mean square error in output activations for possible input activations:
|
| 163 |
+
|
| 164 |
+
$$
|
| 165 |
+
\min _ {Q _ {i}} \mathbb {E} \| \hat {\boldsymbol {y}} - \boldsymbol {y} \| \tag {10}
|
| 166 |
+
$$
|
| 167 |
+
|
| 168 |
+
We choose a greedy approach to minimize the mean of Frobenius norm of the error of the output activation vector by minimizing the absolute error of each of its elements:
|
| 169 |
+
|
| 170 |
+
$$
|
| 171 |
+
\begin{array}{l} \min _ {Q _ {i}} \mathbb {E} | \hat {y} _ {i} - y _ {i} | = \min _ {Q _ {i}} \mathbb {E} \left| \sum_ {\forall j} w _ {i, j} x _ {j} - \sum_ {\forall j} \operatorname {d e q u a n t} \left(w _ {Q _ {i, j}}\right) x _ {j} \right| \\ = \min _ {Q _ {i}} \mathbb {E} \left| \sum_ {\forall j} \left(w _ {i, j} - \operatorname {d e q u a n t} \left(w _ {Q _ {i, j}}\right)\right) x _ {j} \right| \tag {11} \\ \end{array}
|
| 172 |
+
$$
|
| 173 |
+
|
| 174 |
+
This way, we can focus on finding the optimal quantization configuration for each row $i$ of the weight matrix. (Note that GPTQ opts to minimize the output activation error in a different way, such that all rows of the weight matrix are co-optimized together.) Expanding the right hand side of the equation:
|
| 175 |
+
|
| 176 |
+
$$
|
| 177 |
+
\min _ {Q _ {i}} \mathbb {E} | \hat {y} _ {i} - y _ {i} | = \min _ {Q _ {i}} \mathbb {E} \left| \sum_ {\forall j} \left(w _ {i, j} - \left(\alpha_ {i, j} w _ {Q _ {i, j}} + \beta_ {i, j}\right)\right) x _ {j} \right| \tag {12}
|
| 178 |
+
$$
|
| 179 |
+
|
| 180 |
+
The high precision weights are mathematically equivalent to applying scaling factors on scaled weights (i.e., re-arrange Eqn. 1 to expand $w_{i,j}$ into $w_{i,j} = \alpha_{i,j}w_{S_{i,j}} + \beta_{i,j}$ ):
|
| 181 |
+
|
| 182 |
+
$$
\begin{array}{l} \min _ {Q _ {i}} \mathbb {E} | \hat {y} _ {i} - y _ {i} | \\ = \min _ {Q _ {i}} \mathbb {E} \left| \sum_ {\forall j} \left(\alpha_ {i, j} w _ {S _ {i, j}} + \beta_ {i, j} - \left(\alpha_ {i, j} w _ {Q _ {i, j}} + \beta_ {i, j}\right)\right) x _ {j} \right| \\ = \min _ {Q _ {i}} \mathbb {E} \left| \sum_ {\forall j} \alpha_ {i, j} \left(w _ {S _ {i, j}} - w _ {Q _ {i, j}}\right) x _ {j} \right| \tag {13} \\ \end{array}
$$
|
| 185 |
+
|
| 186 |
+
The offset factors, $\beta_{i,j}$ , cancel each other out. Hence, we have:
|
| 187 |
+
|
| 188 |
+
$$
|
| 189 |
+
\min _ {Q _ {i}} \mathbb {E} | \hat {y} _ {i} - y _ {i} | = \min _ {Q _ {i}} \mathbb {E} \left| \sum_ {\forall j} \left(\alpha_ {i, j} w _ {S _ {i, j}} x _ {j} - \alpha_ {i, j} w _ {Q _ {i, j}} x _ {j}\right) \right| \tag {14}
|
| 190 |
+
$$
|
| 191 |
+
|
| 192 |
+
We now proceed to solve this by a K-Means-style alternating optimization procedure:
|
| 193 |
+
|
| 194 |
+
0. Initialize: for the $i$th row of a weight matrix, initialize the set $Q_{i}$ to a random set of $2^{n}$ values:
|
| 195 |
+
|
| 196 |
+
$$
|
| 197 |
+
Q _ {i} = \left\{w _ {Q _ {i} ^ {0}}, w _ {Q _ {i} ^ {1}}, \dots , w _ {Q _ {i} ^ {2 ^ {n} - 1}} \right\} \tag {15}
|
| 198 |
+
$$
|
| 199 |
+
|
| 200 |
+
1. E-Step: Given $Q_{i}$ and the row of scaled weights:
|
| 201 |
+
|
| 202 |
+
$$
\{w _ {S _ {i, j}} \} _ {\forall j} = \left\{w _ {S _ {i, 0}}, w _ {S _ {i, 1}}, \dots , w _ {S _ {i, K - 1}} \right\} \tag {16}
$$
|
| 205 |
+
|
| 206 |
+
we would like to deduce the best $w_{Q_{i,j}}$ for each corresponding $w_{S_{i,j}}$ that will minimize the expression defined in Eq. 14. Since the possible values in $Q_{i}$ are fixed in this step and we are merely selecting from a set of discrete values, we apply a local minimization step and rewrite Eq. 14 as:
|
| 207 |
+
|
| 208 |
+
$$
\begin{array}{l} w _ {Q _ {i, j}} = \operatorname*{arg\,min} _ {w _ {Q _ {i, j}} \in Q _ {i}} \left(\alpha_ {i, j} w _ {S _ {i, j}} x _ {j} - \alpha_ {i, j} w _ {Q _ {i, j}} x _ {j}\right) ^ {2} \\ = \operatorname*{arg\,min} _ {w _ {Q _ {i, j}} \in Q _ {i}} \left(\alpha_ {i, j} x _ {j}\right)^{2} \left(w _ {S _ {i, j}} - w _ {Q _ {i, j}}\right) ^ {2} \tag {17} \\ \end{array}
$$
|
| 211 |
+
|
| 212 |
+
Again, since $\alpha_{i,j}x_{j}$ is fixed in this step and independent of $w_{Q_{i,j}}$, we can drop that factor:
|
| 213 |
+
|
| 214 |
+
$$
w _ {Q _ {i, j}} = \operatorname*{arg\,min} _ {w _ {Q _ {i, j}} \in Q _ {i}} \left(w _ {S _ {i, j}} - w _ {Q _ {i, j}}\right) ^ {2} \tag {18}
$$
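For concreteness, the E-Step is a nearest-entry lookup for every scaled weight in the row. The snippet below is a minimal PyTorch sketch of that assignment step (the function and tensor names are illustrative, not part of tinygemm or the paper's code):

```python
import torch

def assign_codes(wS_row: torch.Tensor, lut: torch.Tensor) -> torch.Tensor:
    """E-Step sketch: map each scaled weight in a row to the index of the
    nearest of the 2^n LUT entries (Eq. 18)."""
    # K x 2^n matrix of squared distances, then pick the closest LUT entry.
    dists = (wS_row[:, None] - lut[None, :]) ** 2
    return dists.argmin(dim=1)
```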
|
| 217 |
+
|
| 218 |
+
2. M-Step: After applying the E-Step above, each $w_{Q_{i,j}}$ will be set to one of the $2^n$ values in the set $Q_i$. Within row $i$, we refer to the set of indices $j$ that are assigned to a specific quantized value $w_{Q_i^q}$ as a cluster. We can rewrite Eq. 14 to create a separate sum term for the elements of each cluster:
|
| 219 |
+
|
| 220 |
+
$$
\begin{array}{l} \min _ {Q _ {i}} \mathbb {E} \left| \hat {y} _ {i} - y _ {i} \right| \\ = \min _ {Q _ {i}} \mathbb {E} \left| \sum_ {\forall q} \sum_ {\forall j \in Q _ {i} ^ {q}} \left(\alpha_ {i, j} w _ {S _ {i, j}} x _ {j} - \alpha_ {i, j} w _ {Q _ {i} ^ {q}} x _ {j}\right) \right| \tag {19} \\ \end{array}
$$
|
| 223 |
+
|
| 224 |
+
To minimize this term, we can aim to set the summed difference within each cluster to 0:
|
| 225 |
+
|
| 226 |
+
$$
\mathbb {E} \left| \sum_ {\forall j \in Q _ {i} ^ {q}} \left(\alpha_ {i, j} w _ {S _ {i, j}} x _ {j} - \alpha_ {i, j} w _ {Q _ {i} ^ {q}} x _ {j}\right) \right| = 0 \tag {20}
$$
|
| 229 |
+
|
| 230 |
+
The expression inside the expectation operator is a scalar. Moreover, except for the input activations $x$, all other variables are deterministic and known offline. Hence, the expectation operator only needs to be applied to the input activations:
|
| 231 |
+
|
| 232 |
+
$$
\sum_ {\forall j \in Q _ {i} ^ {q}} \left(\alpha_ {i, j} w _ {S _ {i, j}} \mathbb {E} | x _ {j} | - \alpha_ {i, j} w _ {Q _ {i} ^ {q}} \mathbb {E} | x _ {j} |\right) = 0 \tag {21}
$$
|
| 235 |
+
|
| 236 |
+
Re-writing:
|
| 237 |
+
|
| 238 |
+
$$
\sum_ {\forall j \in Q _ {i} ^ {q}} \alpha_ {i, j} w _ {S _ {i, j}} \mathbb {E} | x _ {j} | = \sum_ {\forall j \in Q _ {i} ^ {q}} \alpha_ {i, j} w _ {Q _ {i} ^ {q}} \mathbb {E} | x _ {j} | = w _ {Q _ {i} ^ {q}} \sum_ {\forall j \in Q _ {i} ^ {q}} \alpha_ {i, j} \mathbb {E} | x _ {j} | \tag {22}
$$
|
| 241 |
+
|
| 242 |
+
Re-arranging:
|
| 243 |
+
|
| 244 |
+
$$
w _ {Q _ {i} ^ {q}} = \frac {\sum_ {\forall j \in Q _ {i} ^ {q}} \alpha_ {i , j} w _ {S _ {i , j}} \mathbb {E} \left| x _ {j} \right|}{\sum_ {\forall j \in Q _ {i} ^ {q}} \alpha_ {i , j} \mathbb {E} \left| x _ {j} \right|} \tag {23}
$$
|
| 247 |
+
|
| 248 |
+
Eqn. 23 states that the optimal value representing a cluster of scaled weights is their weighted average, where each element's weight is the product of its scaling factor and the mean absolute value of the activations it multiplies.
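As a quick numerical check of Eq. 23, consider a hypothetical cluster with two elements, one with $\alpha_{i,1} = 1$, $w_{S_{i,1}} = 3$, $\mathbb{E}|x_1| = 3$ and one with $\alpha_{i,2} = 1$, $w_{S_{i,2}} = 1$, $\mathbb{E}|x_2| = 1$ (values chosen only for illustration):

$$
w_{Q_i^q} = \frac{1 \cdot 3 \cdot 3 + 1 \cdot 1 \cdot 1}{1 \cdot 3 + 1 \cdot 1} = \frac{10}{4} = 2.5
$$

Compared to the unweighted average of 2, the representative value is pulled toward the element whose activations have larger magnitude.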
|
| 249 |
+
|
| 250 |
+
We alternate between the E-Step and M-Step until the values of $Q_{i}$ converge.
|
| 251 |
+
|
| 252 |
+
The E-Step equation is equivalent to the cluster-assignment step of K-means clustering, while the M-Step equation is equivalent to the centroid-update step of weighted K-means. Hence, our mathematical formulation guides us to create the LUT of each row of a scaled weight matrix via the algorithm depicted in Alg. 1, which we also summarize in Fig. 2. We speed up the process by parallelizing the loop over each linear weight's rows, enabling us to quantize Llama3 8B in 10 minutes.
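The per-row procedure can be prototyped with an off-the-shelf weighted K-means implementation. The sketch below assumes scikit-learn's `KMeans` (whose `fit` accepts `sample_weight`) and uses random stand-in data; `w_scaled_row`, `alpha_row`, and `x_abs_mean` are illustrative names for a row of scaled weights, its per-element scales, and $\mathbb{E}|x|$:

```python
import numpy as np
from sklearn.cluster import KMeans

def build_row_lut(w_scaled_row, alpha_row, x_abs_mean, n_bits=4):
    """Sketch of per-row any-n LUT construction: weighted K-means where each
    scaled weight is weighted by alpha_{i,j} * E|x_j| (Eqs. 18 and 23)."""
    km = KMeans(n_clusters=2 ** n_bits, n_init=10)
    km.fit(w_scaled_row.reshape(-1, 1), sample_weight=alpha_row * x_abs_mean)
    lut = km.cluster_centers_.reshape(-1)             # 2^n representative values
    codes = km.predict(w_scaled_row.reshape(-1, 1))   # one 4-bit code per element
    return lut, codes

# Stand-in data for one row of a linear layer with K = 4096 input channels.
rng = np.random.default_rng(0)
lut, codes = build_row_lut(rng.standard_normal(4096), np.ones(4096),
                           np.abs(rng.standard_normal(4096)))
```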
|
| 253 |
+
|
| 254 |
+
While most quantization papers use a dataset like C4 to obtain a set of calibration activations, we hand-curate a single calibration sample, shown in Listing 1, that covers a diverse set of topics, and then take the mean of the absolute value of activations along the channel axis to represent $\mathbb{E}|x|$ (see the sketch after Listing 1).
|
| 255 |
+
|
| 256 |
+
- Fiction: "Once upon a time, a girl named Alice was living alone on an island. One day, she met a wizard ..."
- News: "The United Nations held its General Assembly meeting this year amid multiple world crises and wars. In his speech, the General Secretary called for ..."
- Code: `public static void main(String[] args) { System.out.println("Hello world!"); ...`
- Math: $(5.2 + 2.7) / 0.6 - 1.9 * 2.2 =$
- Facts: "The capital of Egypt is Cairo. It is the largest city in the region and is home to ..."
|
| 262 |
+
|
| 263 |
+

|
| 264 |
+
Listing 1: Calibration sample used to generate LUTs.
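One simple way to collect the $\mathbb{E}|x|$ statistics is a forward hook on every linear layer. The following is an illustrative recipe (not the paper's exact code), assuming a PyTorch model, its tokenizer, and a `calibration_text` string such as Listing 1:

```python
import torch

def collect_abs_mean_inputs(model, tokenizer, calibration_text):
    """Run one calibration prompt and record E|x| per input channel
    for every nn.Linear module."""
    stats, hooks = {}, []

    def make_hook(name):
        def hook(module, inputs, output):
            x = inputs[0].detach().float()  # (..., in_features)
            # Mean of absolute activations along every axis except channels.
            stats[name] = x.abs().mean(dim=tuple(range(x.dim() - 1)))
        return hook

    for name, module in model.named_modules():
        if isinstance(module, torch.nn.Linear):
            hooks.append(module.register_forward_hook(make_hook(name)))

    with torch.no_grad():
        ids = tokenizer(calibration_text, return_tensors="pt").input_ids
        model(ids)

    for h in hooks:
        h.remove()
    return stats  # maps module name -> tensor of shape (in_features,)
```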
|
| 265 |
+
|
| 266 |
+

|
| 267 |
+
|
| 268 |
+

|
| 269 |
+
Figure 2: any4 quantization process
|
| 270 |
+
|
| 271 |
+
# 4.2. tinygemm Library
|
| 272 |
+
|
| 273 |
+
As part of this paper, we present tinygemm, a GEMM library optimized for low-latency LLM inference at small batch sizes (1 to 16) on NVIDIA Ampere-generation and later GPU architectures. For a matrix multiplication $\pmb{y} = \pmb{x}\pmb{w}^T$ where $\pmb{x}$ is of size $M\times K$ and $\pmb{w}$ is of size $N\times K$ ($M$ and $N$ being the outer dimensions and $K$ the reduction dimension), in linear layers the product of batch size and sequence length corresponds to matrix dimension $M$.
|
| 274 |
+
|
| 275 |
+
<table><tr><td colspan="11">Llama3.2 1B</td></tr><tr><td rowspan="2"></td><td colspan="4">Perplexity ↓</td><td colspan="6">Tasks ↑</td></tr><tr><td>WikiText-2</td><td>C4</td><td>PTB</td><td>CodeParrot</td><td>HumanEval Pass@1</td><td>MBPP Pass@1</td><td>MMLU</td><td>HellaSwag</td><td>GSM8K</td><td>BBH</td></tr><tr><td>FP16</td><td>9.76</td><td>12.77</td><td>16.56</td><td>3.49</td><td>16.46%</td><td>21.4%</td><td>36.1%</td><td>47.7%</td><td>6.60%</td><td>31.1%</td></tr><tr><td>INT4</td><td>11.89</td><td>15.74</td><td>20.32</td><td>4.08</td><td>9.76%</td><td>11.4%</td><td>30.1%</td><td>44.7%</td><td>3.18%</td><td>26.2%</td></tr><tr><td>FP4</td><td>13.01</td><td>17.11</td><td>21.89</td><td>4.28</td><td>8.54%</td><td>5.8%</td><td>29.3%</td><td>43.6%</td><td>2.27%</td><td>23.3%</td></tr><tr><td>NF4</td><td>10.99</td><td>14.63</td><td>18.78</td><td>3.82</td><td>13.4%</td><td>13.8%</td><td>33.3%</td><td>45.8%</td><td>2.65%</td><td>26.8%</td></tr><tr><td>ANY4</td><td>10.63</td><td>13.95</td><td>17.94</td><td>3.71</td><td>11.0%</td><td>18.6%</td><td>32.9%</td><td>46.7%</td><td>3.71%</td><td>29.0%</td></tr><tr><td colspan="11">Llama3 8B</td></tr><tr><td>FP16</td><td>6.14</td><td>8.93</td><td>10.59</td><td>2.54</td><td>29.3%</td><td>41.4%</td><td>62.0%</td><td>60.1%</td><td>50.7%</td><td>62.8%</td></tr><tr><td>INT4</td><td>6.87</td><td>9.89</td><td>11.37</td><td>2.83</td><td>23.2%</td><td>35.4%</td><td>59.6%</td><td>58.6%</td><td>40.6%</td><td>58.5%</td></tr><tr><td>FP4</td><td>7.10</td><td>10.22</td><td>11.81</td><td>2.89</td><td>22.0%</td><td>36.8%</td><td>57.1%</td><td>58.5%</td><td>35.0%</td><td>53.2%</td></tr><tr><td>NF4</td><td>6.63</td><td>9.52</td><td>11.14</td><td>2.72</td><td>23.2%</td><td>39.2%</td><td>60.7%</td><td>59.1%</td><td>41.1%</td><td>59.0%</td></tr><tr><td>ANY4</td><td>6.51</td><td>9.40</td><td>11.07</td><td>2.68</td><td>21.3%</td><td>39.2%</td><td>61.0%</td><td>59.5%</td><td>41.7%</td><td>59.2%</td></tr><tr><td colspan="11">Llama3 70B</td></tr><tr><td>FP16</td><td>2.86</td><td>6.77</td><td>8.16</td><td>1.91</td><td>17.7%</td><td>60.8%</td><td>75.4%</td><td>66.3%</td><td>80.6%</td><td>82.4%</td></tr><tr><td>INT4</td><td>3.63</td><td>7.97</td><td>8.86</td><td>2.21</td><td>18.3%</td><td>45.0%</td><td>73.0%</td><td>66.2%</td><td>73.9%</td><td>78.4%</td></tr><tr><td>FP4</td><td>3.94</td><td>7.76</td><td>8.99</td><td>2.17</td><td>22.0%</td><td>50.8%</td><td>71.9%</td><td>65.6%</td><td>75.3%</td><td>77.9%</td></tr><tr><td>NF4</td><td>3.43</td><td>7.67</td><td>8.84</td><td>2.15</td><td>18.9%</td><td>39.6%</td><td>73.7%</td><td>66.1%</td><td>75.9%</td><td>79.3%</td></tr><tr><td>ANY4</td><td>3.20</td><td>7.01</td><td>8.33</td><td>1.99</td><td>17.1%</td><td>57.4%</td><td>75.1%</td><td>66.1%</td><td>78.5%</td><td>81.8%</td></tr></table>
|
| 276 |
+
|
| 277 |
+
Table 1: Quantizing Llama3 models with various numeric formats. Results for Llama2 and Mistral/Mixtral are in the Appendix.
|
| 278 |
+
|
| 279 |
+
At $M \leq 8$, the activation $x$ is itself much smaller than the tensor-core tile sizes ($m = 16, n = 8, k = 16$) of the 16-bit float Ampere+ mma "tensor core" fixed-function matrix multiplication instructions. In this case, each $8 \times 16$ tile of $w$ (weights) is only used once (no data reuse). Thus, the multistage asynchronous pipelining and data-reuse concerns of typical high-performance GPU GEMM kernels are reduced, as the problem is largely memory latency (or bandwidth) limited. Tensor cores still outperform manual (scalar) matrix multiplication at $M = 1$ (GEMV / matrix-vector multiplication) per our analysis. An early version of tinygemm, largely focused on int4 grouped quantization for small batch sizes, has been part of core PyTorch since late 2023, and is used by gpt-fast (PyTorch, 2023), torchao (PyTorch, 2024), and Hugging Face Transformers (Wolf et al., 2020).
|
| 280 |
+
|
| 281 |
+
Many inference works (especially in open source) concentrate on $M = 1$ performance, where latency is a concern. Even in this case, where we would be using only $\frac{1}{8}$ or $\frac{1}{16}$ of tensor core throughput, we improve latency by laying out matrices in main (global) memory in the exact format that mma expects per tile rather than standard row-major / column-major format. Typical tensor core GEMM kernels use shared memory (a small, high-speed user-controllable scratchpad memory) to transpose tiles of matrices into the desired format before multiplication can proceed. We avoid this by performing the transposition in advance, allowing matrix data to pass directly from global memory to registers.
|
| 282 |
+
|
| 283 |
+
As there is little to no weight-reuse opportunity at small batch sizes, and loads into registers can be asynchronous since they generally do not stall execution until the point of first use, tinygemm does not use shared memory in many cases. This strategy improves performance at small batch sizes but is not applicable to larger ones. To improve efficiency, when $M \leq 8$ we keep the weights on the left to use the $16 \times 16$ tile, computing $\mathbf{y} = (\mathbf{w}\mathbf{x}^T)^T$ and flipping the order of the matrices presented to mma with transpositions performed on the fly; when $M > 8$, we keep the weights on the right and use the $8 \times 16$ tile ($\mathbf{y} = \mathbf{x}\mathbf{w}^T$).
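The operand flip relies on the identity $(\mathbf{w}\mathbf{x}^T)^T = \mathbf{x}\mathbf{w}^T$, which a short PyTorch check confirms (shapes are illustrative only):

```python
import torch

x = torch.randn(4, 64)    # M x K activations
w = torch.randn(128, 64)  # N x K weights
assert torch.allclose((w @ x.T).T, x @ w.T, atol=1e-5)  # both are M x N
```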
|
| 284 |
+
|
| 285 |
+
To implement int4, nf4, or any4 GEMM, we dequantize weights on the fly before the mma multiplication. Speed is improved by always ensuring that we can load matrix data using vectorized 16-byte loads in a coalesced and contiguous fashion across the warp from global memory. In cases where a single thread's quantized tile data is less than 16 bytes (an m16n8k16 "B" tensor-core layout with quantized 4-bit values only needs 2 bytes loaded prior to dequantization per CUDA thread per mma), multiple tiles along the reduction dimension ("$k$-tiles" in tinygemm terminology) can be packed together to ensure that wide data loads can be used in all cases.
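As a rough illustration of the storage format (ignoring the tile reordering that tinygemm also performs), two 4-bit codes can be packed per byte so that a run of $k$-consecutive codes occupies half as many bytes. The helpers below are a hypothetical PyTorch sketch, not tinygemm's packing routine:

```python
import torch

def pack_int4_codes(codes: torch.Tensor) -> torch.Tensor:
    """Pack 4-bit codes (values 0..15, shape N x K with K even) two per byte."""
    codes = codes.to(torch.uint8)
    lo, hi = codes[:, 0::2], codes[:, 1::2]
    return lo | (hi << 4)                    # shape N x K/2

def unpack_int4_codes(packed: torch.Tensor) -> torch.Tensor:
    """Inverse of pack_int4_codes."""
    lo, hi = packed & 0xF, packed >> 4
    return torch.stack([lo, hi], dim=-1).flatten(start_dim=1)  # shape N x K
```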
|
| 286 |
+
|
| 287 |
+
Instead of typical int4-to-float dequantization (converting an integer in $[-8, 7]$ to floating point via native instructions or bit manipulation), we can use a 16-entry LUT per row containing arbitrary floating-point values. In tinygemm, this LUT is held in a single register, with lookups performed via GPU warp shuffle functionality using the 4-bit quantization codes as LUT indices.
|
| 288 |
+
|
| 289 |
+
<table><tr><td colspan="9">Llama3 8B</td></tr><tr><td></td><td>Quantization Algorithm</td><td>Numeric Format</td><td>WikiText-2 Perplexity ↓</td><td></td><td>Numeric Format</td><td>WikiText-2 Perplexity ↓</td><td></td><td>Numeric Format WikiText-2 Perplexity ↓</td></tr><tr><td></td><td>FP16</td><td colspan="7">6.1</td></tr><tr><td rowspan="5">4-bits</td><td>RTN</td><td>INT4</td><td>6.9</td><td rowspan="5">3-bits</td><td>INT3</td><td>17.1</td><td rowspan="5">2-bits</td><td>INT2</td></tr><tr><td>GPTQ</td><td>INT4</td><td>6.5</td><td>INT3</td><td>8.2</td><td>INT2</td></tr><tr><td>AWQ</td><td>INT4</td><td>6.6</td><td>INT3</td><td>8.2</td><td>INT2</td></tr><tr><td>QuIP</td><td>INT4</td><td>6.5</td><td>INT3</td><td>7.5</td><td>INT2</td></tr><tr><td>RTN</td><td>ANY4</td><td>6.5</td><td>ANY3</td><td>8.0</td><td>ANY2</td></tr></table>
|
| 290 |
+
|
| 291 |
+
<table><tr><td colspan="10">Llama3 70B</td></tr><tr><td colspan="3">FP16</td><td colspan="7">2.9</td></tr><tr><td rowspan="5">4-bits</td><td>RTN</td><td>INT4</td><td>3.6</td><td rowspan="5">3-bits</td><td>INT3</td><td>11.8</td><td rowspan="5">2-bits</td><td>INT2</td><td>4.6E5</td></tr><tr><td>GPTQ</td><td>INT4</td><td>3.3</td><td>INT3</td><td>5.2</td><td>INT2</td><td>11.9</td></tr><tr><td>AWQ</td><td>INT4</td><td>3.3</td><td>INT3</td><td>4.8</td><td>INT2</td><td>1.7E6</td></tr><tr><td>QuIP</td><td>INT4</td><td>3.4</td><td>INT3</td><td>4.7</td><td>INT2</td><td>13.0</td></tr><tr><td>RTN</td><td>ANY4</td><td>3.2</td><td>ANY3</td><td>4.6</td><td>ANY2</td><td>253.8</td></tr></table>
|
| 292 |
+
|
| 293 |
+
Table 2: Quantizing Llama3 models with various quantization algorithms for different bit widths.
|
| 294 |
+
|
| 295 |
+
An alternative strategy would be to use a shared-memory LUT containing all $16 \times 16 = 256$ possible pairs of any4 reproduction values, so that two packed any4 values (in a byte) can be dequantized per lookup. While this amount of shared-memory usage would likely not affect performance (via occupancy) much, it suffers shared-memory bank-conflict penalties in many circumstances.
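Functionally, any4 dequantization is a per-row table lookup. The reference sketch below (plain PyTorch, emulating what the CUDA warp-shuffle path does in registers) assumes unpacked 4-bit codes and a per-row LUT, and omits group scales:

```python
import torch

def dequant_any4(codes: torch.Tensor, lut: torch.Tensor) -> torch.Tensor:
    """codes: N x K integer tensor with values in [0, 15].
    lut:   N x 16 tensor of learned reproduction values, one row per weight row.
    Returns the dequantized N x K weight matrix."""
    return torch.gather(lut, dim=1, index=codes.long())
```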
|
| 296 |
+
|
| 297 |
+
# 5. Results
|
| 298 |
+
|
| 299 |
+
We quantize the weights of all linear modules in all transformer layers: the key, query, value, and output projections of attention, and the up, down, and gate projections of the feed-forward networks (FFNs). Following most quantization papers, we keep the weights of the embedding and final classification layers in high precision.
|
| 300 |
+
|
| 301 |
+
We evaluate both perplexity and downstream tasks. For perplexity, we ported the GPTQ evaluation implementation for WikiText-2 (Merity et al., 2017), C4 (Raffel et al., 2019), and Penn Treebank (Marcus et al., 1993), which is used by the codebases of other quantization papers. To add a coding domain, we also report perplexity on CodeParrot (CodeParrot).
|
| 302 |
+
|
| 303 |
+
For downstream tasks, we used Eleuther Harness (Gao et al., 2024) for natural language tasks, and BigCode Harness (Ben Allal et al., 2022) for coding tasks. Accuracies on downstream tasks tend to be noisy (Wang et al., 2024), while perplexity is a less noisy indicator of a model's performance.
|
| 304 |
+
|
| 305 |
+
Comparison with Other Numeric Representations We first compare the accuracy of any4 with other numeric formats: int4, fp4, and nf4. We use group-wise scaling with group size 128 and asymmetric scaling for all models, except for Llama3 70B, where we found that symmetric scaling leads to better results.
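For reference, asymmetric group-wise scaling with group size 128 can be sketched as follows; this is a simplified illustration of the scale/offset computation (assuming $K$ is divisible by the group size), not the exact implementation:

```python
import torch

def groupwise_scale(w: torch.Tensor, group_size: int = 128, n_bits: int = 4):
    """Asymmetric group-wise scaling: map each group of weights to [0, 2^n - 1]."""
    N, K = w.shape
    g = w.reshape(N, K // group_size, group_size)
    w_min = g.min(dim=-1, keepdim=True).values
    w_max = g.max(dim=-1, keepdim=True).values
    alpha = ((w_max - w_min) / (2 ** n_bits - 1)).clamp_min(1e-8)  # scale per group
    beta = w_min                                                   # offset per group
    w_scaled = (g - beta) / alpha                                  # in [0, 2^n - 1]
    return w_scaled.reshape(N, K), alpha, beta
```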
|
| 308 |
+
|
| 309 |
+
We ran on different model families (Llama (Touvron et al., 2023a) and Mistral (Jiang et al., 2023)), different generations (Llama2 (Touvron et al., 2023b) and Llama3 (Grattafori et al., 2024)), and different sizes (from 1B all the way to 70B). We provide results of Llama3 in Table 1, Llama2 in Table A1, and Mistral in Table A2. Our results show any4 has the best accuracies across all models.
|
| 310 |
+
|
| 311 |
+
Speed Comparisons We benchmark matrix multiplication of a vector activation with square weight tensors of sizes 1K to 16K on an 80GB A100 GPU using PyTorch 2.3.0 and report the speedups of our tinygemm library in Fig. 3. int4, nf4, and any4 were implemented using our tinygemm library. The int4 kernels have the highest speedup, reaching close to $3\times$. nf4 and any4 speedups reach up to $2\times$, lower than int4 because of the overhead of looking up the LUTs. Nevertheless, any4 has almost the same speedup as nf4, despite the latter requiring a single LUT for a whole tensor and the former requiring a separate LUT for each row of the weight matrix.
|
| 312 |
+
|
| 313 |
+
Comparison with Orthogonal Quantization Techniques As explained in the Related Works section, our work proposes a new numeric representation applied with RTN (round-to-nearest). Although our work is orthogonal to approaches that transform weights and/or activations to make them more rounding- or quantization-friendly, we compare any4 to GPTQ, AWQ, and QuIP (which use int4) in Table 2. Results for AWQ, GPTQ, and QuIP are obtained from (Huang et al., 2024). For 4-bit, the results show that any4 has either the best or competitive performance. For future work, we can evaluate these orthogonal techniques together, replacing the int4 representation with any4.
|
| 314 |
+
|
| 315 |
+
3-bit and 2-bit Quantization Although our main goal was a 4-bit representation, we ran experiments to see how any3 and any2 perform compared to the prior orthogonal quantization techniques (Table 2).
|
| 316 |
+
|
| 317 |
+
<table><tr><td colspan="7">Llama3.2 1B</td></tr><tr><td rowspan="2"></td><td rowspan="2">Calibration Data</td><td rowspan="2">Number of Samples</td><td rowspan="2">Sequence Length per Sample</td><td rowspan="2">WikiText-2</td><td colspan="2">Perplexity ↓</td></tr><tr><td>C4</td><td>PTB</td></tr><tr><td>FP16</td><td></td><td></td><td></td><td>9.76</td><td>12.77</td><td>16.56</td></tr><tr><td>ANY4</td><td>WikiText-2</td><td>128</td><td>2048</td><td>10.70</td><td>14.08</td><td>18.02</td></tr><tr><td>ANY4</td><td>Pile</td><td>128</td><td>2048</td><td>10.70</td><td>13.99</td><td>18.26</td></tr><tr><td>ANY4</td><td>C4</td><td>128</td><td>4096</td><td>10.74</td><td>14.14</td><td>18.10</td></tr><tr><td>ANY4</td><td>C4</td><td>128</td><td>2048</td><td>10.67</td><td>14.05</td><td>17.97</td></tr><tr><td>ANY4</td><td>C4</td><td>128</td><td>512</td><td>10.62</td><td>13.96</td><td>18.03</td></tr><tr><td>ANY4</td><td>Handwritten Prompt</td><td>1</td><td>-</td><td>10.63</td><td>13.95</td><td>17.94</td></tr></table>
|
| 318 |
+
|
| 319 |
+

|
| 320 |
+
Figure 3: Speedup of our tinygemm CUDA kernels on 80GB A100 on matrix multiplication of $1 \times K$ input by $K \times K$ weight, w.r.t PyTorch's bfloat16 implementation.
|
| 321 |
+
|
| 322 |
+
For 3-bit, any3 is either the best or competitive with the other approaches. For 2-bit, QuIP is the best, while any2 is better than AWQ and competitive with GPTQ.
|
| 323 |
+
|
| 324 |
+
# 5.1. Ablation Studies
|
| 325 |
+
|
| 326 |
+
# Calibration Data
|
| 327 |
+
|
| 328 |
+
In Table 3 we ablate the calibration dataset used to calculate the sample weighting in Eqn. 23 of our any4 algorithm. The results show that our proposed handwritten sample performs better than the datasets commonly used in the literature, despite containing significantly fewer tokens. Note that the handwritten sample has a fixed number of words, which translates to a different number of tokens depending on each model's tokenizer. Our prompt has only 88 words, which in the worst case translates to a few hundred tokens. These results may indicate that a single data sample covering diverse topics can be sufficient, or even better, for calibration than many long sample sequences. Our evaluation sequence length is 2048 (following Lin et al., 2024 and Frantar et al., 2023), calibration is on the training split of each dataset, and evaluation is on the validation or test split.
|
| 331 |
+
|
| 332 |
+
Group Size In Table 4 we ablate the quantization group size from 64 to 1024. any4 always has the lowest perplexity among the 4-bit representations across all group sizes. It is noteworthy that fp4 and nf4 perplexity degenerates at the large group size of 1024, while any4's perplexity increases only marginally.
|
| 333 |
+
|
| 334 |
+
Table 3: any4 quantization with different calibration data.
|
| 335 |
+
|
| 336 |
+
<table><tr><td colspan="6">Llama3.2 1B</td></tr><tr><td></td><td colspan="5">Group Size</td></tr><tr><td></td><td>64</td><td>128</td><td>256</td><td>512</td><td>1024</td></tr><tr><td>FP16</td><td></td><td></td><td>12.77</td><td></td><td></td></tr><tr><td>FP4</td><td>16.19</td><td>17.11</td><td>18.12</td><td>20.43</td><td>2.3E6</td></tr><tr><td>NF4</td><td>14.27</td><td>14.63</td><td>14.98</td><td>15.38</td><td>7.8E5</td></tr><tr><td>ANY4</td><td>13.75</td><td>13.95</td><td>14.09</td><td>14.24</td><td>14.34</td></tr></table>
|
| 337 |
+
|
| 338 |
+
Table 4: C4 perplexity after quantizing with different group sizes.
|
| 339 |
+
|
| 340 |
+
# 6. Conclusion & Future Work
|
| 341 |
+
|
| 342 |
+
We have presented any4, an algorithm to find an optimal low-bit numeric representation for each row in a weight matrix, as well as tinygemm, a matrix multiplication library for low-latency, low-bit inference. We have shown that the accuracy of any4 is superior to other 4-bit numeric formats with low memory overhead, and competitive with various orthogonal quantization techniques that involve further preprocessing. We would like to explore combining any4 with these orthogonal techniques in the future.
|
| 343 |
+
|
| 344 |
+
# Acknowledgements
|
| 345 |
+
|
| 346 |
+
We would like to thank Newsha Ardalani for help in running experiments; Daniel Haziza, Francisco Massa, Luca Wehrstedt, Bram Wasti, Steven Li, and Lin Xiao for discussions.
|
| 347 |
+
|
| 348 |
+
# Impact Statement
|
| 349 |
+
|
| 350 |
+
This paper presents work that quantizes pretrained models. The input to the algorithm is a model's pretrained weights, architecture, and a calibration dataset (which in our case was a single hand-written prompt). We have not evaluated whether the quantization algorithm increases or decreases any societal impact of the underlying model. One factor that may introduce bias into the model is the calibration dataset. We leave it to future work to analyze the effect of different calibration datasets (or prompts, in our case) on bias and truthfulness.
|
| 353 |
+
|
| 354 |
+
# References
|
| 355 |
+
|
| 356 |
+
Arthur, D. and Vassilvitskii, S. k-means++: the advantages of careful seeding. In Proceedings of the Eighteenth Annual ACM-SIAM Symposium on Discrete Algorithms, SODA '07, pp. 1027-1035, USA, 2007. Society for Industrial and Applied Mathematics. ISBN 9780898716245.
|
| 357 |
+
Ashkboos, S., Mohtashami, A., Croci, M. L., Li, B., Cameron, P., Jaggi, M., Alistarh, D., Hoefler, T., and Hensman, J. Quarot: Outlier-free 4-bit inference in rotated LLMs. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024a. URL https://openreview.net/forum?id=dfqsW38v1X.
|
| 358 |
+
Ashkboos, S., Mohtashami, A., Croci, M. L., Li, B., Cameron, P., Jaggi, M., Alistarh, D., Hoefler, T., and Hensman, J. Quarot: Outlier-free 4-bit inference in rotated LLMs, 2024b. URL https://arxiv.org/abs/2404.00456.
|
| 359 |
+
Ben Allal, L., Muennighoff, N., Kumar Umapathi, L., Lipkin, B., and von Werra, L. A framework for the evaluation of code generation models. https://github.com/bigcode-project/bigcode-evaluation-harness, 2022.
|
| 360 |
+
Chee, J., Cai, Y., Kuleshov, V., and Sa, C. D. QuIP: 2-bit quantization of large language models with guarantees. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. URL https://openreview.net/forum?id=xrk9g5vcXR.
|
| 361 |
+
CodeParrot. codeparrot/codeparrot-clean. URL https://huggingface.co/datasets/codeparrot/codeparrot-clean.
|
| 362 |
+
Dai, S., Venkatesan, R., Ren, M., Zimmer, B., Dally, W., and Khailany, B. Vs-quant: Per-vector scaled quantization for accurate low-precision neural network inference. In Smola, A., Dimakis, A., and Stoica, I. (eds.), Proceedings of Machine Learning and Systems, volume 3, pp. 873-884, 2021. URL https://proceedings.mlsys.org/paper_files/paper/2021/file/48a6431f04545e11919887748ec5cb52-Paper.pdf.
|
| 363 |
+
|
| 364 |
+
Darvish Rouhani, B., Lo, D., Zhao, R., Liu, M., Fowers, J., Ovtcharov, K., Vinogradsky, A., Massengill, S., Yang, L., Bittner, R., Forin, A., Zhu, H., Na, T., Patel, P., Che, S., Chand Koppaka, L., SONG, X., Som, S., Das, K., T, S., Reinhardt, S., Lanka, S., Chung, E., and Burger, D. Pushing the limits of narrow precision inferencing at cloud scale with microsoft floating point. In Larochelle, H., Ranzato, M., Hadsell, R., Balcan, M., and Lin, H. (eds.), Advances in Neural Information Processing Systems, volume 33, pp. 10271-10281. Curran Associates, Inc., 2020. URL https://proceedings.neurips.cc/paper_files/paper/2020/file/747e32ab0fea7fbd2ad9ec03daa3f840-Paper.pdf.
|
| 365 |
+
Dettmers, T., Lewis, M., Belkada, Y., and Zettlemoyer, L. LLM.int8(): 8-bit matrix multiplication for transformers at scale. In Koyejo, S., Mohamed, S., Agarwal, A., Belgrave, D., Cho, K., and Oh, A. (eds.), Advances in Neural Information Processing Systems, volume 35, pp. 30318-30332. Curran Associates, Inc., 2022a. URL https://proceedings.neurips.cc/paper_files/paper/2022/file/c3ba4962c05c49636d4c6206a97e9c8a-Paper-Conference.pdf.
|
| 366 |
+
Dettmers, T., Lewis, M., Shleifer, S., and Zettlemoyer, L. 8-bit optimizers via block-wise quantization. In International Conference on Learning Representations, 2022b. URL https://openreview.net/forum?id=shpkpVXzo3h.
|
| 367 |
+
Dettmers, T., Pagnoni, A., Holtzman, A., and Zettlemoyer, L. Qlora: Efficient finetuning of quantized llms. In Oh, A., Naumann, T., Globerson, A., Saenko, K., Hardt, M., and Levine, S. (eds.), Advances in Neural Information Processing Systems, volume 36, pp. 10088-10115. Curran Associates, Inc., 2023. URL https://proceedings.neurips.cc/paper_files/paper/2023/file/1feb87871436031bdc0f2beaa62a049b-Paper-Conference.pdf.
|
| 368 |
+
Egiazarian, V., Panferov, A., Kuznedelev, D., Frantar, E., Babenko, A., and Alistarh, D. Extreme compression of large language models via additive quantization. In *Forty-first International Conference on Machine Learning*, 2024. URL https://openreview.net/forum?id=5mCaITRTmo.
|
| 369 |
+
Frantar, E., Ashkboos, S., Hoefler, T., and Alistarh, D. OPTQ: Accurate quantization for generative pre-trained transformers. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=tcbBPnfwxsS.
|
| 370 |
+
Fu, F., Hu, Y., He, Y., Jiang, J., Shao, Y., Zhang, C., and Cui, B. Don't waste your bits! Squeeze activations and gradients for deep neural networks via TinyScript. In III, H. D. and Singh, A. (eds.), Proceedings of the 37th International Conference on Machine Learning, volume 119 of Proceedings of Machine Learning Research, pp. 3304-3314. PMLR, 13-18 Jul 2020. URL https://proceedings.mlr.press/v119/fu20c.html.
|
| 373 |
+
Gao, L., Tow, J., Abbasi, B., Biderman, S., Black, S., DiPofi, A., Foster, C., Golding, L., Hsu, J., Le Noac'h, A., Li, H., McDonell, K., Muennighoff, N., Ociepa, C., Phang, J., Reynolds, L., Schoelkopf, H., Skowron, A., Sutawika, L., Tang, E., Thite, A., Wang, B., Wang, K., and Zou, A. A framework for few-shot language model evaluation, 07 2024. URL https://zenodo.org/records/12608602.
|
| 374 |
+
Gernigon, C., Filip, S.-I., Sentieys, O., Coggiola, C., and Bruno, M. Low-precision floating-point for efficient onboard deep neural network processing, 2023. URL https://arxiv.org/abs/2311.11172.
|
| 375 |
+
Goodfellow, I., Bengio, Y., and Courville, A. Deep Learning. MIT Press, 2016. URL http://www.deeplearningbook.org.
|
| 376 |
+
Grattafiori, A., Dubey, A., Jauhri, A., Pandey, A., Kadian, A., Al-Dahle, A., Letman, A., Mathur, A., Schelten, A., Vaughan, A., et al. The Llama 3 herd of models, 2024. URL https://arxiv.org/abs/2407.21783.
|
| 381 |
+
Han, S., Mao, H., and Dally, W. J. Deep compression: Compressing deep neural networks with pruning, trained quantization and huffman coding. International Conference on Learning Representations (ICLR), 2016.
|
| 382 |
+
Hooper, C. R. C., Kim, S., Mohammadzadeh, H., Mahoney, M. W., Shao, S., Keutzer, K., and Gholami, A. KVQuant: Towards 10 million context length LLM inference with KV cache quantization. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. URL https://openreview.net/forum?id=OLXotew9Du.
|
| 383 |
+
Huang, W., Zheng, X., Ma, X., Qin, H., Lv, C., Chen, H., Luo, J., Qi, X., Liu, X., and Magno, M. An empirical study of llama3 quantization: From llms to mllms, 2024. URL https://arxiv.org/abs/2404.14047.
|
| 384 |
+
Jacob, B., Kligys, S., Chen, B., Zhu, M., Tang, M., Howard, A., Adam, H., and Kalenichenko, D. Quantization and training of neural networks for efficient integer-arithmetic-only inference. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2018.
|
| 387 |
+
Jiang, A. Q., Sablayrolles, A., Mensch, A., Bamford, C., Chaplot, D. S., de las Casas, D., Bressand, F., Lengyel, G., Lample, G., Saulnier, L., Lavaud, L. R., Lachaux, M.-A., Stock, P., Scao, T. L., Lavril, T., Wang, T., Lacroix, T., and Sayed, W. E. Mistral 7b, 2023. URL https://arxiv.org/abs/2310.06825.
|
| 388 |
+
Kim, S., Hooper, C., Gholami, A., Dong, Z., Li, X., Shen, S., Mahoney, M., and Keutzer, K. SqueezeLLM: Dense-and-sparse quantization. arXiv, 2023.
|
| 389 |
+
Lin, J., Tang, J., Tang, H., Yang, S., Chen, W.-M., Wang, W.-C., Xiao, G., Dang, X., Gan, C., and Han, S. Awq: Activation-aware weight quantization for llm compression and acceleration. In MLSys, 2024.
|
| 390 |
+
Liu, Z., Zhao, C., Fedorov, I., Soran, B., Choudhary, D., Krishnamoorthi, R., Chandra, V., Tian, Y., and Blankevoort, T. SpinQuant: LLM quantization with learned rotations, 2024. URL https://arxiv.org/abs/2405.16406.
|
| 391 |
+
Lloyd, S. Least squares quantization in PCM. IEEE Transactions on Information Theory, 28(2):129-137, 1982a. doi: 10.1109/TIT.1982.1056489.
Lloyd, S. Least squares quantization in PCM. IEEE Transactions on Information Theory, 28(2):129-137, 1982b.
|
| 393 |
+
MacQueen, J. et al. Some methods for classification and analysis of multivariate observations. In Proceedings of the fifth Berkeley symposium on mathematical statistics and probability, volume 1, pp. 281-297. Oakland, CA, USA, 1967.
|
| 394 |
+
Marcus, M. P., Marcinkiewicz, M. A., and Santorini, B. Building a large annotated corpus of english: the penn treebank. Comput. Linguist., 19(2):313-330, June 1993. ISSN 0891-2017.
|
| 395 |
+
Max, J. Quantizing for minimum distortion. IRE Transactions on Information Theory, 6(1):7-12, 1960. doi: 10.1109/TIT.1960.1057548.
|
| 396 |
+
Merity, S., Xiong, C., Bradbury, J., and Socher, R. Pointer sentinel mixture models. In International Conference on Learning Representations, 2017. URL https://openreview.net/forum?id=Byj72udxe.
|
| 397 |
+
Nagel, M., Amjad, R. A., Van Baalen, M., Louizos, C., and Blankevoort, T. Up or down? adaptive rounding for post-training quantization. In Proceedings of the 37th International Conference on Machine Learning, ICML'20. JMLR.org, 2020.
|
| 398 |
+
|
| 399 |
+
Pedregosa, F., Varoquaux, G., Gramfort, A., Michel, V., Thirion, B., Grisel, O., Blondel, M., Prettenhofer, P., Weiss, R., Dubourg, V., Vanderplas, J., Passos, A., Cournapeau, D., Brucher, M., Perrot, M., and Duchesnay, E. Scikit-learn: Machine learning in Python. Journal of Machine Learning Research, 12:2825-2830, 2011.
|
| 400 |
+
Peng, H., Wu, K., Wei, Y., Zhao, G., Yang, Y., Liu, Z., Xiong, Y., Yang, Z., Ni, B., Hu, J., Li, R., Zhang, M., Li, C., Ning, J., Wang, R., Zhang, Z., Liu, S., Chau, J., Hu, H., and Cheng, P. Fp8-lm: Training fp8 large language models, 2023.
|
| 401 |
+
PyTorch. gpt-fast, 2023. URL https://github.com/pytorch-labs/gpt-fast.
|
| 402 |
+
PyTorch. torchao, 2024. URL https://github.com/pytorch/ao.
|
| 403 |
+
Raffel, C., Shazeer, N., Roberts, A., Lee, K., Narang, S., Matena, M., Zhou, Y., Li, W., and Liu, P. J. Exploring the limits of transfer learning with a unified text-to-text transformer. arXiv e-prints, 2019.
|
| 404 |
+
Rouhani, B., Zhao, R., Elango, V., Shafipour, R., Hall, M., Mesmakhosroshahi, M., More, A., Melnick, L., Golub, M., Varatkar, G., Shao, L., Kolhe, G., Melts, D., Klar, J., L'Heureux, R., Perry, M., Burger, D., Chung, E., Deng, Z., Naghshineh, S., Park, J., and Naumov, M. With shared microexponents, a little shifting goes a long way, 2023. URL https://arxiv.org/abs/2302.08007.
|
| 405 |
+
Stock, P., Joulin, A., Gribonval, R., Graham, B., and Jégou, H. And the bit goes down: Revisiting the quantization of neural networks. In International Conference on Learning Representations, 2020. URL https://openreview.net/forum?id=rJehVyrKwH.
|
| 406 |
+
Sun, X., Wang, N., Chen, C.-Y., Ni, J., Agrawal, A., Cui, X., Venkataramani, S., El Maghraoui, K., Srinivasan, V. V., and Gopalakrishnan, K. Ultra-low precision 4-bit training of deep neural networks. In Larochelle, H., Ranzato, M., Hadsell, R., Balcan, M., and Lin, H. (eds.), Advances in Neural Information Processing Systems, volume 33, pp. 1796-1807. Curran Associates, Inc., 2020. URL https://proceedings.neurips.cc/paper_files/paper/2020/file/13b919438259814cd5be8cb45877d577-Paper.pdf.
|
| 407 |
+
Touvron, H., Lavril, T., Izacard, G., Martinet, X., Lachaux, M.-A., Lacroix, T., Rozière, B., Goyal, N., Hambro, E., Azhar, F., Rodriguez, A., Joulin, A., Grave, E., and Lample, G. Llama: Open and efficient foundation language models, 2023a. URL https://arxiv.org/abs/2302.13971.
|
| 408 |
+
|
| 409 |
+
Touvron, H., Martin, L., Stone, K., Albert, P., Almahairi, A., Babaei, Y., Bashlykov, N., Batra, S., Bhargava, P., Bhosale, S., Bikel, D., Blecher, L., Ferrer, C. C., Chen, M., Cucurull, G., Esiobu, D., Fernandes, J., Fu, J., Fu, W., Fuller, B., Gao, C., Goswami, V., Goyal, N., Hartshorn, A., Hosseini, S., Hou, R., Inan, H., Kardas, M., Kerkez, V., Khabsa, M., Kloumann, I., Korenev, A., Koura, P. S., Lachaux, M.-A., Lavril, T., Lee, J., Liskovich, D., Lu, Y., Mao, Y., Martinet, X., Mihaylov, T., Mishra, P., Molybog, I., Nie, Y., Poulton, A., Reizenstein, J., Rungta, R., Saladi, K., Schelten, A., Silva, R., Smith, E. M., Subramanian, R., Tan, X. E., Tang, B., Taylor, R., Williams, A., Kuan, J. X., Xu, P., Yan, Z., Zarov, I., Zhang, Y., Fan, A., Kambadur, M., Narang, S., Rodriguez, A., Stojnic, R., Edunov, S., and Scialom, T. Llama 2: Open foundation and fine-tuned chat models, 2023b. URL https://arxiv.org/abs/2307.09288.
|
| 410 |
+
Tseng, A., Sun, Q., Hou, D., and Sa, C. D. QTIP: Quantization with trellises and incoherence processing. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. URL https://openreview.net/forum?id=7sdkLVuYCU.
|
| 411 |
+
Wang, N., Choi, J., Brand, D., Chen, C.-Y., and Gopalakrishnan, K. Training deep neural networks with 8-bit floating point numbers. In Bengio, S., Wallach, H., Larochelle, H., Grauman, K., Cesa-Bianchi, N., and Garnett, R. (eds.), Advances in Neural Information Processing Systems, volume 31. Curran Associates, Inc., 2018. URL https://proceedings.neurips.cc/paper_files/paper/2018/file/335d3d1cd7ef05ec77714a215134914c-Paper.pdf.
|
| 412 |
+
Wang, S. I., Gu, A., Madaan, L., Hupkes, D., Liu, J., Wei, Y., Jain, N., Lai, Y., Sootla, S., Press, O., Rozière, B., and Synnaeve, G. Eval-Arena: noise and errors on llm evaluations. https://github.com/cruz-eval/eval-arena, 2024.
|
| 413 |
+
Wolf, T., Debut, L., Sanh, V., Chaumond, J., Delangue, C., Moi, A., Cistac, P., Rault, T., Louf, R., Funtowicz, M., Davison, J., Shleifer, S., von Platen, P., Ma, C., Jernite, Y., Plu, J., Xu, C., Scao, T. L., Gugger, S., Drame, M., Lhoest, Q., and Rush, A. M. Transformers: State-of-the-art natural language processing. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pp. 38-45, Online, October 2020. Association for Computational Linguistics. URL https://www.aclweb.org/anthology/2020.emnlp-demos.6.
|
| 414 |
+
Xia, L., Anthonissen, M., Hochstenbach, M., and Koren, B. A simple and efficient stochastic rounding method for training neural networks in low precision, 2021. URL https://arxiv.org/abs/2103.13445.
|
| 415 |
+
|
| 416 |
+
Xiao, G., Lin, J., Seznec, M., Wu, H., Demouth, J., and Han, S. SmoothQuant: Accurate and efficient post-training quantization for large language models. In Proceedings of the 40th International Conference on Machine Learning, 2023.
|
| 417 |
+
Xie, Y. and Jabri, M. Analysis of the effects of quantization in multilayer neural networks using a statistical model. IEEE Transactions on Neural Networks, 3(2):334-338, 1992. doi: 10.1109/72.125876.
|
| 418 |
+
Yoshida, D. NF4 isn't information theoretically optimal (and that's good), 2023. URL https://arxiv.org/abs/2306.06965.
|
| 419 |
+
Zadeh, A. H., Edo, I., Awad, O. M., and Moshovos, A. GOBO: Quantizing Attention-Based NLP Models for Low Latency and Energy Efficient Inference. In 2020 53rd Annual IEEE/ACM International Symposium on Microarchitecture (MICRO), pp. 811-824, Los Alamitos, CA, USA, October 2020. IEEE Computer Society. doi: 10.1109/MICRO50266.2020.00071. URL https://doi.ieeecomputersociety.org/10.1109/MICRO50266.2020.00071.
|
| 420 |
+
|
| 421 |
+
# Appendix
|
| 422 |
+
|
| 423 |
+
# A. Solution Details
|
| 424 |
+
|
| 425 |
+
We provide here more details about our proposed any4 algorithm.
|
| 426 |
+
|
| 427 |
+
# A.1. Algorithm
|
| 428 |
+
|
| 429 |
+
We summarize our any4 quantization algorithm in Alg. 1.
|
| 430 |
+
|
| 431 |
+
Algorithm 1 any4 quantization algorithm.
|
| 432 |
+
```python
import torch

# Pseudocode: calibrate(), scale(), and kmeans() are helper routines described
# in the text; calibrate() returns E|x| per input channel for every module.
module2input = calibrate(model, sample_data)

for module in model:                    # iterate over the model's linear modules
    w = module.weight                   # N x K high-precision weight matrix
    wQ = torch.zeros_like(w)
    alphas, betas = [], []
    x_abs_mean = module2input[module]   # E|x_j| for this module's inputs
    for i in range(w.shape[0]):         # each row is quantized independently
        # Group-wise scaling of row i: w[i, :] = alpha_i * wS_i + beta_i
        wS_i, alpha_i, beta_i = scale(w[i, :])
        # Weighted K-means: each element is weighted by alpha_{i,j} * E|x_j|
        wQ[i, :] = kmeans(
            samples=wS_i,
            sample_weight=alpha_i * x_abs_mean,
        )
        alphas.append(alpha_i)
        betas.append(beta_i)
    module.weight.data = wQ
    module.alpha = alphas
    module.beta = betas
```
|
| 452 |
+
|
| 453 |
+
# B. Further Results
|
| 454 |
+
|
| 455 |
+
# B.1. Comparison with Other Numeric Formats
|
| 456 |
+
|
| 457 |
+
We compare our any4 numeric format with other numeric formats for the Llama2 family of models in Table A1, and for Mistral-7B and Mixtral-8x7B in Table A2.
|
| 458 |
+
|
| 459 |
+
<table><tr><td colspan="9">Mistral-7B Instruct v0.2</td></tr><tr><td rowspan="2"></td><td rowspan="2">WikiText-2</td><td colspan="3">Perplexity ↓</td><td colspan="4">Tasks ↑</td></tr><tr><td>C4</td><td>PTB</td><td>CodeParrot</td><td>MMLU</td><td>HellaSwag</td><td>GSM8K</td><td>BigBench</td></tr><tr><td>FP16</td><td>5.95</td><td>8.82</td><td>21.77</td><td>2.63</td><td>58.7%</td><td>66.1%</td><td>41.7%</td><td>51.7%</td></tr><tr><td>INT4</td><td>6.14</td><td>9.03</td><td>22.02</td><td>2.70</td><td>57.1%</td><td>65.1%</td><td>39.7%</td><td>50.4%</td></tr><tr><td>FP4</td><td>6.19</td><td>9.10</td><td>21.62</td><td>2.70</td><td>56.6%</td><td>64.7%</td><td>38.2%</td><td>47.7%</td></tr><tr><td>NF4</td><td>6.06</td><td>8.93</td><td>24.72</td><td>2.66</td><td>58.0%</td><td>65.5%</td><td>38.5%</td><td>51.8%</td></tr><tr><td>ANY4</td><td>6.00</td><td>8.85</td><td>23.24</td><td>2.64</td><td>58.6%</td><td>65.4%</td><td>41.1%</td><td>51.7%</td></tr><tr><td colspan="9">Mixtral-8x7B Instruct v0.1</td></tr><tr><td>FP16</td><td>4.14</td><td>7.18</td><td>16.47</td><td>2.20</td><td>68.2%</td><td>67.6%</td><td>64.8%</td><td>68.1%</td></tr><tr><td>INT4</td><td>4.45</td><td>7.45</td><td>16.84</td><td>2.26</td><td>66.5%</td><td>66.3%</td><td>57.8%</td><td>61.8%</td></tr><tr><td>FP4</td><td>4.46</td><td>7.48</td><td>18.42</td><td>2.27</td><td>66.8%</td><td>66.5%</td><td>59.4%</td><td>62.8%</td></tr><tr><td>NF4</td><td>4.30</td><td>7.32</td><td>15.00</td><td>2.24</td><td>67.6%</td><td>67.2%</td><td>61.0%</td><td>66.5%</td></tr><tr><td>ANY4</td><td>4.27</td><td>7.27</td><td>16.14</td><td>2.22</td><td>67.7%</td><td>67.1%</td><td>62.8%</td><td>65.8%</td></tr></table>
|
| 460 |
+
|
| 461 |
+
Table A2: Quantizing Mistral and Mixtral with various numeric formats.
|
| 462 |
+
|
| 463 |
+
<table><tr><td colspan="11">Llama2 7B</td></tr><tr><td rowspan="2"></td><td colspan="4">Perplexity ↓</td><td colspan="6">Tasks ↑</td></tr><tr><td>WikiText-2</td><td>C4</td><td>PTB</td><td>CodeParrot</td><td>HumanEval Pass@1</td><td>MBPP Pass@1</td><td>MMLU</td><td>HellaSwag</td><td>GSM8K</td><td>BBH</td></tr><tr><td>FP16</td><td>5.47</td><td>6.97</td><td>20.83</td><td>2.54</td><td>17.1%</td><td>20.0%</td><td>41.3%</td><td>57.2%</td><td>13.6%</td><td>39.8%</td></tr><tr><td>INT4</td><td>5.74</td><td>7.30</td><td>24.00</td><td>2.63</td><td>10.4%</td><td>18.2%</td><td>38.1%</td><td>56.4%</td><td>10.6%</td><td>36.5%</td></tr><tr><td>FP4</td><td>5.83</td><td>7.37</td><td>22.57</td><td>2.65</td><td>11.0%</td><td>16.8%</td><td>36.5%</td><td>56.6%</td><td>11.2%</td><td>35.5%</td></tr><tr><td>NF4</td><td>5.66</td><td>7.19</td><td>22.82</td><td>2.60</td><td>11.6%</td><td>19.2%</td><td>37.4%</td><td>56.8%</td><td>12.0%</td><td>36.8%</td></tr><tr><td>ANY4</td><td>5.59</td><td>7.10</td><td>21.23</td><td>2.57</td><td>14.0%</td><td>18.4%</td><td>40.3%</td><td>56.7%</td><td>12.7%</td><td>36.9%</td></tr><tr><td colspan="11">Llama2 13B</td></tr><tr><td>FP16</td><td>4.88</td><td>6.47</td><td>28.93</td><td>2.40</td><td>19.5%</td><td>18.4%</td><td>50.5%</td><td>60.0%</td><td>23.2%</td><td>47.4%</td></tr><tr><td>INT4</td><td>5.05</td><td>6.65</td><td>30.79</td><td>2.45</td><td>15.2%</td><td>16.4%</td><td>48.8%</td><td>59.3%</td><td>20.8%</td><td>44.2%</td></tr><tr><td>FP4</td><td>5.07</td><td>6.67</td><td>30.96</td><td>2.46</td><td>15.2%</td><td>16.2%</td><td>49.5%</td><td>59.3%</td><td>19.3%</td><td>43.0%</td></tr><tr><td>NF4</td><td>4.99</td><td>6.58</td><td>31.17</td><td>2.43</td><td>15.9%</td><td>16.0%</td><td>49.9%</td><td>59.9%</td><td>22.1%</td><td>44.6%</td></tr><tr><td>ANY4</td><td>4.97</td><td>6.55</td><td>28.83</td><td>2.42</td><td>15.2%</td><td>18.0%</td><td>49.3%</td><td>59.5%</td><td>21.6%</td><td>44.6%</td></tr><tr><td colspan="11">Llama2 70B</td></tr><tr><td>FP16</td><td>3.32</td><td>5.52</td><td>14.44</td><td>2.11</td><td>31.7%</td><td>37.4%</td><td>65.2%</td><td>64.8%</td><td>53.3%</td><td>67.1%</td></tr><tr><td>INT4</td><td>3.46</td><td>5.61</td><td>14.71</td><td>2.14</td><td>26.8%</td><td>37.8%</td><td>64.4%</td><td>64.7%</td><td>51.4%</td><td>65.0%</td></tr><tr><td>FP4</td><td>3.53</td><td>5.67</td><td>14.34</td><td>2.16</td><td>28.0%</td><td>30.6%</td><td>64.1%</td><td>64.0%</td><td>51.6%</td><td>65.0%</td></tr><tr><td>NF4</td><td>3.44</td><td>5.61</td><td>14.65</td><td>2.14</td><td>29.9%</td><td>37.2%</td><td>64.5%</td><td>63.9%</td><td>50.6%</td><td>65.4%</td></tr><tr><td>ANY4</td><td>3.40</td><td>5.58</td><td>14.64</td><td>2.13</td><td>26.8%</td><td>35.8%</td><td>64.8%</td><td>64.5%</td><td>51.6%</td><td>66.6%</td></tr></table>
|
| 464 |
+
|
| 465 |
+
# C. Further Ablation Studies
|
| 466 |
+
|
| 467 |
+
# C.1. Minimization Terms
|
| 468 |
+
|
| 469 |
+
In Table A3 we ablate the term that is minimized when learning (using K-means clustering) the LUT of each row in the weight matrix. The first row shows the results of optimizing the weights directly. The other two rows show the results of adding the two additional terms of Equation 14 in our paper, i.e., multiplying by activations and by scales. These results confirm that the derivation that led to all the terms of Equation 14 is essential for optimal accuracy.
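The three rows of Table A3 correspond to three choices of `sample_weight` in the per-row weighted K-means. A hypothetical sketch with random stand-in data (`wS`, `alpha`, and `x_abs_mean` playing the roles of a row of scaled weights, its per-element scales, and $\mathbb{E}|x|$):

```python
import numpy as np
from sklearn.cluster import KMeans

rng = np.random.default_rng(0)
wS, alpha, x_abs_mean = rng.standard_normal(4096), np.ones(4096), rng.random(4096)

def fit_lut(samples, sample_weight=None):
    km = KMeans(n_clusters=16, n_init=10).fit(samples.reshape(-1, 1),
                                              sample_weight=sample_weight)
    return km.cluster_centers_.reshape(-1)

lut_weights_only      = fit_lut(wS)                                    # row 1
lut_weights_x_act     = fit_lut(wS, sample_weight=x_abs_mean)          # row 2
lut_weights_x_act_scl = fit_lut(wS, sample_weight=alpha * x_abs_mean)  # row 3 [ours]
```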
|
| 470 |
+
|
| 471 |
+
Table A1: Quantizing Llama2 models with various numeric formats.
|
| 472 |
+
|
| 473 |
+
<table><tr><td colspan="6">Llama3.2 1B</td></tr><tr><td></td><td>Term to Minimize</td><td colspan="4">Perplexity ↓</td></tr><tr><td></td><td></td><td>WikiText-2</td><td>C4</td><td>PTB</td><td>CodeParrot</td></tr><tr><td>Weights Only</td><td>(wSi,j - wQi,j)</td><td>6.680</td><td>9.619</td><td>11.186</td><td>2.751</td></tr><tr><td>Weights × Activations</td><td>(wSi,j xj - wQi,j xj)</td><td>6.496</td><td>9.375</td><td>11.055</td><td>2.675</td></tr><tr><td>Weights × Activations × Group Scales [Ours]</td><td>(αi,jwSi,jxj - αi,jwQi,jxj)</td><td>6.487</td><td>9.366</td><td>11.034</td><td>2.680</td></tr></table>
|
| 474 |
+
|
| 475 |
+
Table A3: Perplexity after quantizing Llama3.2 1B with LUTs created by minimizing different terms.
|
| 476 |
+
|
| 477 |
+
# C.2. K-Means Initialization
|
| 478 |
+
|
| 479 |
+
We use scikit-learn (Pedregosa et al., 2011) to implement the K-means clustering that is core to any4's quantization algorithm. By default, scikit-learn initializes cluster centroids using the k-means++ algorithm (Arthur & Vassilvitskii, 2007), but it also supports random initialization, as well as initialization from a vector of pre-defined values. In Table A4 we ablate K-means initialization on Llama 3.2 1B by evaluating k-means++ and random initialization, as well as seeding with uniform int4 values (i.e., integer values -8 to 7) and nf4 values (ranging from -1 to +1). We see that k-means++ clearly performs best, while uniform int4 initialization performs worst.
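The four initialization schemes of Table A4 map directly onto scikit-learn's `init` argument. A hypothetical snippet (`wS` is again a stand-in row of scaled weights; the nf4 levels are the approximate values from QLoRA):

```python
import numpy as np
from sklearn.cluster import KMeans

wS = np.random.default_rng(0).standard_normal(4096).reshape(-1, 1)

nf4 = np.array([-1.0, -0.6962, -0.5251, -0.3949, -0.2844, -0.1848, -0.0911, 0.0,
                0.0796, 0.1609, 0.2461, 0.3379, 0.4407, 0.5626, 0.7230, 1.0])
int4 = np.arange(-8, 8, dtype=float)  # uniform int4 levels

inits = {
    "k-means++": "k-means++",
    "random": "random",
    "int4": int4.reshape(-1, 1),
    "nf4": nf4.reshape(-1, 1),
}
luts = {name: KMeans(n_clusters=16, init=init, n_init=1).fit(wS).cluster_centers_
        for name, init in inits.items()}
```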
|
| 480 |
+
|
| 481 |
+
<table><tr><td colspan="5">Llama3.2 1B</td></tr><tr><td></td><td rowspan="2">K-Means Initialization</td><td colspan="3">Perplexity ↓</td></tr><tr><td></td><td>WikiText-2</td><td>C4</td><td>PTB</td></tr><tr><td>FP16</td><td></td><td>9.76</td><td>12.77</td><td>16.56</td></tr><tr><td>ANY4</td><td>k-means++</td><td>10.63</td><td>13.95</td><td>17.94</td></tr><tr><td>ANY4</td><td>random</td><td>10.66</td><td>13.97</td><td>18.17</td></tr><tr><td>ANY4</td><td>int4</td><td>10.83</td><td>14.21</td><td>18.69</td></tr><tr><td>ANY4</td><td>nf4</td><td>10.65</td><td>13.96</td><td>18.21</td></tr></table>
|
| 482 |
+
|
| 483 |
+
Table A4: any4 quantization with K-means clustering initialized with different algorithms and values.
|
ICML/2025/any4_ Learned 4-bit Numeric Representation for LLMs/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f5860df3ba53e3c9c1977c2c5b5e4609fd58057b7ee172c14b25a263be727ccf
|
| 3 |
+
size 797784
|
ICML/2025/any4_ Learned 4-bit Numeric Representation for LLMs/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f60ff2a13d2a1e1c7c4bfebe34685e532077c06b0df7249dff739405c16db758
|
| 3 |
+
size 535826
|
ICML/2025/e-GAI_ e-value-based Generalized $α$-Investing for Online False Discovery Rate Control/dabdc8cf-f6a6-4355-a6eb-b0bb1e491482_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ee247cb4c27eab3a034187e589c4c74e951a7d0b299f45eb2e854fbffc92c7e7
|
| 3 |
+
size 179852
|
ICML/2025/e-GAI_ e-value-based Generalized $α$-Investing for Online False Discovery Rate Control/dabdc8cf-f6a6-4355-a6eb-b0bb1e491482_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f99c13f8272544eb50a5c0226b729f1199a95fa1ba7ff102f95bbba1b8e7a65c
|
| 3 |
+
size 212300
|