andyjzhao committed on
Commit bc1107e · verified · 1 Parent(s): f2db61f

Upload folder using huggingface_hub

Files changed (26)
  1. icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/appendix_chunks.jsonl +156 -0
  2. icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/appendix_text_v3.txt +467 -0
  3. icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/assets.json +123 -0
  4. icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/assets/_page_0_Picture_22.jpeg +3 -0
  5. icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/assets/_page_20_Figure_1.jpeg +3 -0
  6. icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/assets/_page_21_Figure_1.jpeg +3 -0
  7. icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/assets/_page_22_Figure_1.jpeg +3 -0
  8. icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/assets/_page_22_Figure_8.jpeg +3 -0
  9. icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/assets/_page_3_Figure_2.jpeg +3 -0
  10. icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/assets/_page_6_Figure_7.jpeg +3 -0
  11. icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/assets/_page_6_Picture_16.jpeg +3 -0
  12. icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/assets/_page_7_Figure_11.jpeg +3 -0
  13. icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/assets/_page_7_Figure_18.jpeg +3 -0
  14. icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/assets/_page_7_Figure_7.jpeg +3 -0
  15. icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/chunks_v3_anonymized.jsonl +0 -0
  16. icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/dataset_meta.json +63 -0
  17. icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/main_body_chunks.jsonl +118 -0
  18. icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/marker_meta.json +3042 -0
  19. icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/model_text_v3.txt +353 -0
  20. icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/paper.blocks.json +0 -0
  21. icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/paper.md +0 -0
  22. icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/parse_report.json +78 -0
  23. icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/reference_chunks.jsonl +12 -0
  24. icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/reference_text_v3.txt +35 -0
  25. icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/sanitization_report.json +61 -0
  26. icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/sanitized_v3.txt +0 -0
icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/appendix_chunks.jsonl ADDED
@@ -0,0 +1,156 @@
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0130", "section": "Krause Synchronization Transformers", "page_start": 13, "page_end": 13, "type": "ListGroup", "text": "–Appendix Contents– A Additional Discussion and Limitations B Extended Related Work C Multi-Cluster Behavior of Krause Attention C.1 Multi-Cluster Formation under Clustering Emergence Dynamics C.2 Multi-Cluster Formation under Mean-Field Dynamics D Extended Experiment Results D.1 Implementation Details * D.1.1 Vision Transformers * D.1.2 Autoregressive Transformers * D.1.3 LLMs finetuned with LoRA * D.1.4 Evaluation Datasets and Prompts for LLMs D.2 Vision Transformers for Image Recognition * D.2.1 Results on Fashion-MNIST * D.2.2 Qualitative Results of Attention Heatmaps * D.2.3 Ablation Studies * D.2.4 Krause SwinTransformers D.3 Autoregressive Models for Image Generation * D.3.1 Qualitative Examples * D.3.2 Ablation Studies D.4 LLMs for Language Understanding and Reasoning * D.4.1 Results on Krause-Qwen * D.4.2 Extended Results on Krause-Llama * D.4.3 Inference Speed Comparison E Theoretical Perspective on Attention Sink Phenomena F Relation to Competitive Learning and Lateral Interaction Dynamics", "source": "marker_v2", "marker_block_id": "/page/12/ListGroup/364"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0131", "section": "A. Additional Discussion and Limitations", "page_start": 14, "page_end": 14, "type": "Text", "text": "Theoretical Scope. Krause Attention is motivated by bounded-confidence and synchronization dynamics, and our theoretical analysis focuses on idealized continuous-time and mean-field formulations. As with prior dynamical analyses of Transformer models, these results characterize asymptotic behavior and provide qualitative insight into clustering and synchronization phenomena, rather than exact predictions for finite-depth, finite-width networks used in practice.", "source": "marker_v2", "marker_block_id": "/page/13/Text/2"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0132", "section": "A. Additional Discussion and Limitations", "page_start": 14, "page_end": 14, "type": "Text", "text": "Hyperparameter Selection. Krause Attention introduces several task-dependent hyperparameters, most notably the local interaction window size, the top-k sparsity level, and the scale parameter \\sigma of the RBF distance kernel. These parameters jointly control the trade-off between locality, selectivity, and computational efficiency. While the window size and sparsity level directly affect interaction structure, the RBF scale governs the sensitivity of distance-based coupling. In practice, we observe that the scale parameter is not a critical or fragile choice: across all experiments, it reliably converges to a suitable range during training and exhibits stable behavior over a broad initialization interval. Nevertheless, we do not claim the existence of a universally optimal configuration, and systematic strategies for automatic or adaptive hyperparameter selection remain an interesting direction for future work.", "source": "marker_v2", "marker_block_id": "/page/13/Text/3"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0133", "section": "A. Additional Discussion and Limitations", "page_start": 14, "page_end": 14, "type": "Text", "text": "Integration with LLMs. In our LLM experiments, Krause Attention is evaluated as an auxiliary interaction pathway trained via LoRA on top of pretrained models, rather than as a full replacement for standard self-attention. Investigating deeper integration and end-to-end pretraining with Krause Attention remains an important direction for future work.", "source": "marker_v2", "marker_block_id": "/page/13/Text/4"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0134", "section": "B. Extended Related Work", "page_start": 14, "page_end": 14, "type": "Text", "text": "Efficient Transformers. Despite their empirical success, standard Transformers incur quadratic computational and memory complexity with respect to sequence length, which fundamentally limits their scalability to long-context and high-resolution settings. To address this bottleneck, a substantial body of work has investigated sub-quadratic approximations to self-attention. One line of research reduces the cost of self-attention by imposing structured sparsity on the attention pattern. Sparse attention methods (Child et al., 2019) restrict token interactions to predefined subsets, lowering the effective complexity from \\mathcal{O}(N^2d) to \\mathcal{O}(N\\sqrt{N}d) . A second class of approaches focuses on approximating the attention computation itself. Performer (Choromanski et al., 2021) replaces the softmax operation with random feature—based kernel approximations, yielding linear-time \\mathcal{O}(Nd) attention, while Linformer (Wang et al., 2020) exploits low-rank structure in the attention matrix to reduce complexity. Related linear attention formulations (Katharopoulos et al., 2020) further re-express self-attention using kernelized representations. Finally, Reformer (Kitaev et al., 2020) adopts a hashing-based strategy, employing locality-sensitive hashing to cluster similar tokens and obtain \\mathcal{O}(N \\log Nd) complexity.", "source": "marker_v2", "marker_block_id": "/page/13/Text/6"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0135", "section": "B. Extended Related Work", "page_start": 14, "page_end": 14, "type": "Text", "text": "Transformer Alternatives. Beyond the routes we discussed in the main paper, another line of research focuses on architectural alternatives. State-Space Models (SSMs), particularly Structured State-Space Sequences (S4) (Gu et al., 2022) and Mamba (Gu & Dao, 2023; Dao & Gu, 2024), offer linear-time inference and demonstrate superior scaling in language and vision tasks (Zhu et al., 2024; Yang et al., 2024b; Liu et al., 2024; Munir et al., 2025). Hybrid architectures (Lieber et al., 2024; Glorioso et al., 2024; De et al., 2024) attempt to combine the efficiency of SSMs with the expressivity of Transformers. Other designs, such as the Perceiver (Jaegle et al., 2021), utilize latent bottlenecks to process high-dimensional inputs. Despite their efficiency, these models often trade the iterative refinement of dense attention for state compression.", "source": "marker_v2", "marker_block_id": "/page/13/Text/7"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0136", "section": "C. Multi-Cluster Behavior of Krause Attention", "page_start": 14, "page_end": 14, "type": "Text", "text": "This appendix provides two complementary theoretical analyses supporting the multi-cluster behavior of Krause Attention. The first argument adapts the clustering emergence framework of Geshkovski et al. (2023) to our sparse, distance-based attention mechanism. The second analysis leverages the mean-field Wasserstein gradient-flow perspective developed by Chen et al. (2025). Both analyses characterize the dynamics under appropriate separation or bounded-confidence assumptions and provide mechanistic insight into why Krause Attention admits stable multi-cluster configurations.", "source": "marker_v2", "marker_block_id": "/page/13/Text/9"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0137", "section": "C.1. Multi-Cluster Formation under Clustering Emergence Dynamics", "page_start": 14, "page_end": 14, "type": "Text", "text": "We analyze the multi-cluster formation by adopting the interacting-particle formulation of Transformer dynamics introduced by Geshkovski et al. (2023). Let \\{z_i(t)\\}_{i=1}^N \\subset \\mathbb{R}^D denote rescaled token representations evolving according to", "source": "marker_v2", "marker_block_id": "/page/13/Text/11"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0138", "section": "C.1. Multi-Cluster Formation under Clustering Emergence Dynamics", "page_start": 14, "page_end": 14, "type": "Equation", "text": "\\dot{z}_i(t) = \\sum_{z_j \\in \\mathcal{N}_i(t)} a_{i,j}(t) V z_j(t) (13)", "source": "marker_v2", "marker_block_id": "/page/13/Equation/12"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0139", "section": "C.1. Multi-Cluster Formation under Clustering Emergence Dynamics", "page_start": 15, "page_end": 15, "type": "Text", "text": "where the attention weights a_{i,j}(t) are defined by the Krause Attention rule in Eq. (12), incorporating distance-based similarity, local and selective interactions.", "source": "marker_v2", "marker_block_id": "/page/14/Text/1"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0140", "section": "C.1. Multi-Cluster Formation under Clustering Emergence Dynamics", "page_start": 15, "page_end": 15, "type": "Text", "text": "As will be discussed in Sec. C.2, bounded-confidence dynamics naturally evolve toward well-separated clusters. Motivated by this phenomenon, we analyze the particle-level dynamics after such fragmentation has occurred and consider the regime in which tokens form m disconnected groups \\{C_k\\}_{k=1}^m lying outside each other's interaction ranges. This conditional analysis is standard in clustering dynamics and allows us to characterize the resulting multi-cluster behavior precisely.", "source": "marker_v2", "marker_block_id": "/page/14/Text/2"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0141", "section": "C.1. Multi-Cluster Formation under Clustering Emergence Dynamics", "page_start": 15, "page_end": 15, "type": "Text", "text": "Hard Interaction Cutoff. Unlike standard self-attention, Krause Attention imposes an explicit bounded-confidence rule: tokens interact only with neighbors selected by the local top-k operator. Hence, whenever two token groups \\mathcal{G}_1 and \\mathcal{G}_2 are separated such that", "source": "marker_v2", "marker_block_id": "/page/14/Text/3"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0142", "section": "C.1. Multi-Cluster Formation under Clustering Emergence Dynamics", "page_start": 15, "page_end": 15, "type": "Equation", "text": "z_2 \\notin \\xi_1^k \\quad \\text{for all } z_1 \\in \\mathcal{G}_1, \\ z_2 \\in \\mathcal{G}_2, \\tag{14}", "source": "marker_v2", "marker_block_id": "/page/14/Equation/4"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0143", "section": "C.1. Multi-Cluster Formation under Clustering Emergence Dynamics", "page_start": 15, "page_end": 15, "type": "Text", "text": "their pairwise attention weights satisfy", "source": "marker_v2", "marker_block_id": "/page/14/Text/5"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0144", "section": "C.1. Multi-Cluster Formation under Clustering Emergence Dynamics", "page_start": 15, "page_end": 15, "type": "Equation", "text": "a_{1,2}(t) = 0 \\quad \\text{at all times.} \\tag{15}", "source": "marker_v2", "marker_block_id": "/page/14/Equation/6"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0145", "section": "C.1. Multi-Cluster Formation under Clustering Emergence Dynamics", "page_start": 15, "page_end": 15, "type": "Text", "text": "The sparsity and indicator structure therefore enforce a strict cutoff in the interaction graph, preventing any influence between sufficiently distant token groups.", "source": "marker_v2", "marker_block_id": "/page/14/Text/7"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0146", "section": "C.1. Multi-Cluster Formation under Clustering Emergence Dynamics", "page_start": 15, "page_end": 15, "type": "Text", "text": "Block-diagonalization of Attention Dynamics. The transition from global to local dynamics is rooted in the reducibility of the attention matrix. In standard self-attention, the matrix A(t) is strictly positive (A_{ij} > 0) , making it an irreducible stochastic matrix. By the Perron-Frobenius theorem, A(t) has a unique largest eigenvalue \\lambda = 1 with a corresponding strictly positive eigenvector \\Lambda , which facilitates the global convergence toward a single cluster (Geshkovski et al., 2023).", "source": "marker_v2", "marker_block_id": "/page/14/Text/8"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0147", "section": "C.1. Multi-Cluster Formation under Clustering Emergence Dynamics", "page_start": 15, "page_end": 15, "type": "Text", "text": "In contrast, the Krause Attention interaction graph G(t) decomposes into m disjoint connected components \\{C_k\\}_{k=1}^m . Formally, if z_i \\in C_p and z_j \\in C_q (p \\neq q) , the sparsity mechanism ensures A_{ij}(t) = 0 . Consequently, A(t) is a reducible stochastic matrix, and its spectrum is the union of the spectra of its blocks:", "source": "marker_v2", "marker_block_id": "/page/14/Text/9"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0148", "section": "C.1. Multi-Cluster Formation under Clustering Emergence Dynamics", "page_start": 15, "page_end": 15, "type": "Equation", "text": "\\sigma(A(t)) = \\bigcup_{k=1}^{m} \\sigma(A_{\\mathcal{C}_k}(t)). \\tag{16}", "source": "marker_v2", "marker_block_id": "/page/14/Equation/10"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0149", "section": "C.1. Multi-Cluster Formation under Clustering Emergence Dynamics", "page_start": 15, "page_end": 15, "type": "Text", "text": "Since each A_{\\mathcal{C}_k}(t) is itself a row-stochastic matrix, the eigenvalue \\lambda=1 has a multiplicity of at least m. The corresponding eigenspace is spanned by vectors of the clusters \\{\\boldsymbol{\\Lambda}_{\\mathcal{C}_1},\\ldots,\\boldsymbol{\\Lambda}_{\\mathcal{C}_m}\\} , where", "source": "marker_v2", "marker_block_id": "/page/14/Text/11"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0150", "section": "C.1. Multi-Cluster Formation under Clustering Emergence Dynamics", "page_start": 15, "page_end": 15, "type": "Equation", "text": "(\\mathbf{\\Lambda}_{\\mathcal{C}_k})_i = \\begin{cases} 1 & \\text{if } i \\in \\mathcal{C}_k \\\\ 0 & \\text{otherwise} \\end{cases} (17)", "source": "marker_v2", "marker_block_id": "/page/14/Equation/12"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0151", "section": "C.1. Multi-Cluster Formation under Clustering Emergence Dynamics", "page_start": 15, "page_end": 15, "type": "Text", "text": "Then the global attention matrix A(t) naturally takes a block-diagonal form without requiring external constraints:", "source": "marker_v2", "marker_block_id": "/page/14/Text/13"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0152", "section": "C.1. Multi-Cluster Formation under Clustering Emergence Dynamics", "page_start": 15, "page_end": 15, "type": "Equation", "text": "A(t) = \\begin{pmatrix} A_{\\mathcal{C}_1}(t) & 0 & \\dots & 0 \\\\ 0 & A_{\\mathcal{C}_2}(t) & \\dots & 0 \\\\ \\vdots & \\vdots & \\ddots & \\vdots \\\\ 0 & 0 & \\dots & A_{\\mathcal{C}_m}(t) \\end{pmatrix} (18)", "source": "marker_v2", "marker_block_id": "/page/14/Equation/14"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0153", "section": "C.1. Multi-Cluster Formation under Clustering Emergence Dynamics", "page_start": 15, "page_end": 15, "type": "Text", "text": "This multiplicity prevents the global contraction of tokens. Each cluster C_k evolves within its own invariant subspace S_k = \\text{span}\\{\\mathbf{\\Lambda}_{C_k}\\} , and its dynamics are governed independently by:", "source": "marker_v2", "marker_block_id": "/page/14/Text/15"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0154", "section": "C.1. Multi-Cluster Formation under Clustering Emergence Dynamics", "page_start": 15, "page_end": 15, "type": "Equation", "text": "\\dot{Z}_{\\mathcal{C}_k}(t) = A_{\\mathcal{C}_k}(t) Z_{\\mathcal{C}_k}(t) V^{\\top}. \\tag{19}", "source": "marker_v2", "marker_block_id": "/page/14/Equation/16"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0155", "section": "C.1. Multi-Cluster Formation under Clustering Emergence Dynamics", "page_start": 15, "page_end": 15, "type": "Text", "text": "Mathematically, the absence of inter-cluster spectral coupling ensures that the velocity \\dot{z}_i for any i \\in \\mathcal{C}_p is a zero-gradient direction with respect to any z_j \\in \\mathcal{C}_q , thereby rigorously sustaining the multi-cluster geometry.", "source": "marker_v2", "marker_block_id": "/page/14/Text/17"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0156", "section": "C.1. Multi-Cluster Formation under Clustering Emergence Dynamics", "page_start": 15, "page_end": 15, "type": "Text", "text": "Independence from the Spectrum of V. In standard self-attention, the connectivity of the attention matrix implies that the dominant eigenvector of V induces global alignment across all tokens (Geshkovski et al., 2023). If V has a dominant eigenvector v_1 , it tends to pull all z_i towards the line spanned by v_1 . In contrast, Krause Attention decouples the dynamics: even if tokens from different clusters align locally with the same eigenmode of V, there is no coupling force between their centroids. Considering the block dynamics in Eq. (19), the clustering structure is preserved because the cross-cluster entries [A]_{i,j} remain strictly zero for z_i \\in \\mathcal{C}_p, z_j \\in \\mathcal{C}_q ( p \\neq q ). The linear transformations induced by V act only within the subspaces of each cluster and cannot reintroduce cross-cluster interaction.", "source": "marker_v2", "marker_block_id": "/page/14/Text/18"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0157", "section": "C.1. Multi-Cluster Formation under Clustering Emergence Dynamics", "page_start": 15, "page_end": 15, "type": "Text", "text": "Thus, Krause Attention maintains cluster separation dynamically, enabling the persistence of multi-cluster configurations independent of the global spectral dominance of V.", "source": "marker_v2", "marker_block_id": "/page/14/Text/19"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0158", "section": "C.2. Multi-Cluster Formation under Mean-Field Dynamics", "page_start": 16, "page_end": 16, "type": "Text", "text": "We now analyze Krause Attention using the mean-field framework of Chen et al. (2025). Let \\mu_t denote the empirical distribution of token representations on the unit sphere \\mathbb{S}^{d-1} .", "source": "marker_v2", "marker_block_id": "/page/15/Text/2"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0159", "section": "C.2. Multi-Cluster Formation under Mean-Field Dynamics", "page_start": 16, "page_end": 16, "type": "Text", "text": "Truncated Interaction Kernel. Our Krause Attention replaces the global dot-product kernel with a truncated RBF kernel", "source": "marker_v2", "marker_block_id": "/page/15/Text/3"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0160", "section": "C.2. Multi-Cluster Formation under Mean-Field Dynamics", "page_start": 16, "page_end": 16, "type": "Equation", "text": "a_{x,y} = \\exp\\left(-\\frac{\\|Qx - Ky\\|^2}{2\\sigma^2}\\right), \\quad y \\in \\xi_x(y), (20)", "source": "marker_v2", "marker_block_id": "/page/15/Equation/4"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0161", "section": "C.2. Multi-Cluster Formation under Mean-Field Dynamics", "page_start": 16, "page_end": 16, "type": "Text", "text": "where \\xi_x(y) is the selected local neighborhood described in the main text, which enforces bounded-confidence interactions consistent with the classical Krause model. In particular, the sparse neighborhood operator \\xi_x(\\cdot) induces a compact-support interaction kernel: there exists a radius R>0 such that", "source": "marker_v2", "marker_block_id": "/page/15/Text/5"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0162", "section": "C.2. Multi-Cluster Formation under Mean-Field Dynamics", "page_start": 16, "page_end": 16, "type": "Equation", "text": "a_{x,y} = 0 \\quad \\text{whenever } \\|Qx - Ky\\| > R. (21)", "source": "marker_v2", "marker_block_id": "/page/15/Equation/6"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0163", "section": "C.2. Multi-Cluster Formation under Mean-Field Dynamics", "page_start": 16, "page_end": 16, "type": "Text", "text": "The induced mean-field dynamics satisfy the continuity equation:", "source": "marker_v2", "marker_block_id": "/page/15/Text/7"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0164", "section": "C.2. Multi-Cluster Formation under Mean-Field Dynamics", "page_start": 16, "page_end": 16, "type": "Equation", "text": "\\partial_t \\mu_t + \\operatorname{div}(\\mu_t \\mathcal{X}_{\\mu_t}) = 0, \\quad \\mathcal{X}_{\\mu_t}(x) = \\int_{\\mathbb{S}^{d-1}} \\mathcal{P}_x[y] a_{x,y} \\, \\mathrm{d}\\mu_t(y), \\tag{22}", "source": "marker_v2", "marker_block_id": "/page/15/Equation/8"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0165", "section": "C.2. Multi-Cluster Formation under Mean-Field Dynamics", "page_start": 16, "page_end": 16, "type": "Text", "text": "and correspond to a Wasserstein gradient flow maximizing the interaction energy", "source": "marker_v2", "marker_block_id": "/page/15/Text/9"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0166", "section": "C.2. Multi-Cluster Formation under Mean-Field Dynamics", "page_start": 16, "page_end": 16, "type": "Equation", "text": "E_K[\\mu] = \\frac{1}{2\\tilde{\\beta}} \\iint_{\\mathbb{S}^{d-1} \\times \\mathbb{S}^{d-1}} a_{x,y} d\\mu(x) d\\mu(y), \\qquad \\tilde{\\beta} := \\frac{1}{2\\sigma^2}. (23)", "source": "marker_v2", "marker_block_id": "/page/15/Equation/10"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0167", "section": "C.2. Multi-Cluster Formation under Mean-Field Dynamics", "page_start": 16, "page_end": 16, "type": "Text", "text": "Decomposition into Independent Flows. Bounded-confidence dynamics are known to generically evolve toward fragmented configurations consisting of well-separated opinion clusters (Hegselmann & Krause, 2002; Lorenz, 2007; Blondel et al., 2009; Chen et al., 2025). In particular, uniform or sufficiently broad initial measures are linearly unstable and tend to break into clusters with characteristic spacing on the order of 2R, where R denotes the interaction radius (Wang et al., 2017). Motivated by this phenomenon, we focus our analysis on the regime after such fragmentation has taken place. In what follows, we assume there exists a time t_0 \\ge 0 such that the measure \\mu_{t_0} admits a decomposition", "source": "marker_v2", "marker_block_id": "/page/15/Text/11"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0168", "section": "C.2. Multi-Cluster Formation under Mean-Field Dynamics", "page_start": 16, "page_end": 16, "type": "Equation", "text": "\\mu_{t_0} = \\sum_{k=1}^{m} \\pi_k \\mu_{t_0}^{(k)}, \\quad \\text{supp}(\\mu_{t_0}^{(k)}) \\subset \\mathcal{C}_k \\subset \\mathbb{S}^{d-1}, (24)", "source": "marker_v2", "marker_block_id": "/page/15/Equation/12"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0169", "section": "C.2. Multi-Cluster Formation under Mean-Field Dynamics", "page_start": 16, "page_end": 16, "type": "Text", "text": "where the sets \\{\\mathcal{C}_k\\}_{k=1}^m satisfy", "source": "marker_v2", "marker_block_id": "/page/15/Text/13"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0170", "section": "C.2. Multi-Cluster Formation under Mean-Field Dynamics", "page_start": 16, "page_end": 16, "type": "Equation", "text": "\\inf_{x \\in \\mathcal{C}_p, y \\in \\mathcal{C}_q} \\|Qx - Ky\\| > R \\quad \\text{for all } p \\neq q. (25)", "source": "marker_v2", "marker_block_id": "/page/15/Equation/14"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0171", "section": "C.2. Multi-Cluster Formation under Mean-Field Dynamics", "page_start": 16, "page_end": 16, "type": "Text", "text": "Under this condition, the interaction kernel vanishes across clusters,", "source": "marker_v2", "marker_block_id": "/page/15/Text/15"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0172", "section": "C.2. Multi-Cluster Formation under Mean-Field Dynamics", "page_start": 16, "page_end": 16, "type": "Equation", "text": "a_{x,y} = 0 \\quad \\forall x \\in \\mathcal{C}_p, \\ y \\in \\mathcal{C}_q, \\ p \\neq q. (26)", "source": "marker_v2", "marker_block_id": "/page/15/Equation/16"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0173", "section": "C.2. Multi-Cluster Formation under Mean-Field Dynamics", "page_start": 16, "page_end": 16, "type": "Text", "text": "Consequently, for x \\in \\mathcal{C}_p ,", "source": "marker_v2", "marker_block_id": "/page/15/Text/17"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0174", "section": "C.2. Multi-Cluster Formation under Mean-Field Dynamics", "page_start": 16, "page_end": 16, "type": "Equation", "text": "\\mathcal{X}_{\\mu_t}(x) = \\int_{\\mathcal{C}_p} \\mathcal{P}_x[y] a_{x,y} \\, \\mathrm{d}\\mu_t^{(p)}(y), \\tag{27}", "source": "marker_v2", "marker_block_id": "/page/15/Equation/18"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0175", "section": "C.2. Multi-Cluster Formation under Mean-Field Dynamics", "page_start": 16, "page_end": 16, "type": "Text", "text": "which implies that the velocity field is entirely determined by intra-cluster interactions.", "source": "marker_v2", "marker_block_id": "/page/15/Text/19"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0176", "section": "C.2. Multi-Cluster Formation under Mean-Field Dynamics", "page_start": 16, "page_end": 16, "type": "Text", "text": "Moreover, since the velocity field \\mathcal{X}_{\\mu_t} is tangential to \\mathbb{S}^{d-1} and depends only on points within the same cluster, the flow map associated with the continuity equation preserves the supports \\mathcal{C}_k . In particular, no trajectory starting in \\mathcal{C}_p can enter the interaction range of another cluster \\mathcal{C}_q .", "source": "marker_v2", "marker_block_id": "/page/15/Text/20"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0177", "section": "C.2. Multi-Cluster Formation under Mean-Field Dynamics", "page_start": 16, "page_end": 16, "type": "Text", "text": "Therefore, each C_k is forward invariant under the mean-field dynamics, and the global flow decomposes into m independent gradient flows:", "source": "marker_v2", "marker_block_id": "/page/15/Text/21"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0178", "section": "C.2. Multi-Cluster Formation under Mean-Field Dynamics", "page_start": 16, "page_end": 16, "type": "Equation", "text": "\\partial_t \\mu_t^{(k)} + \\operatorname{div}\\left(\\mu_t^{(k)} \\mathcal{X}_{\\mu_t^{(k)}}\\right) = 0, \\quad \\mathcal{X}_{\\mu_t^{(k)}}(x) = \\int_{\\mathbb{S}^{d-1}} \\mathcal{P}_x[y] a_{x,y} \\, \\mathrm{d}\\mu_t^{(k)}(y). \\tag{28}", "source": "marker_v2", "marker_block_id": "/page/15/Equation/22"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0179", "section": "C.2. Multi-Cluster Formation under Mean-Field Dynamics", "page_start": 17, "page_end": 17, "type": "Text", "text": "Exponential Convergence within Clusters. We now analyze the dynamics within a single cluster C_k . Since C_k is isolated, the evolution of \\mu_t^{(k)} is governed by the restriction of the Krause Attention energy E_K to C_k . Along the flow, the energy satisfies", "source": "marker_v2", "marker_block_id": "/page/16/Text/1"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0180", "section": "C.2. Multi-Cluster Formation under Mean-Field Dynamics", "page_start": 17, "page_end": 17, "type": "Equation", "text": "\\frac{\\mathrm{d}}{\\mathrm{d}t} E_K[\\mu^{(k)}] = \\int_{\\mathbb{S}^{d-1}} \\|\\mathcal{X}_{\\mu^{(k)}}(x)\\|_2^2 \\,\\mathrm{d}\\mu^{(k)}(x) \\geqslant 0. (29)", "source": "marker_v2", "marker_block_id": "/page/16/Equation/2"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0181", "section": "C.2. Multi-Cluster Formation under Mean-Field Dynamics", "page_start": 17, "page_end": 17, "type": "Text", "text": "Fix d \\geqslant 2 , \\tilde{\\beta} > 0 , \\alpha \\in [0, \\pi/2) and u \\in \\mathbb{S}^{d-1} . Let S_{\\alpha}^{+(k)}(u) \\subseteq \\mathbb{S}^{d-1} denote the spherical cap", "source": "marker_v2", "marker_block_id": "/page/16/Text/3"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0182", "section": "C.2. Multi-Cluster Formation under Mean-Field Dynamics", "page_start": 17, "page_end": 17, "type": "Equation", "text": "S_{\\alpha}^{+(k)}(u) := \\{ x \\in \\mathcal{C}_k \\mid \\langle x, u \\rangle \\geqslant \\cos \\alpha \\}, \\quad k = 1, \\dots, m. (30)", "source": "marker_v2", "marker_block_id": "/page/16/Equation/4"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0183", "section": "C.2. Multi-Cluster Formation under Mean-Field Dynamics", "page_start": 17, "page_end": 17, "type": "Text", "text": "Assume that \\mu_0^{(k)} is supported on S_{\\alpha}^{+(k)}(u) .", "source": "marker_v2", "marker_block_id": "/page/16/Text/5"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0184", "section": "C.2. Multi-Cluster Formation under Mean-Field Dynamics", "page_start": 17, "page_end": 17, "type": "Text", "text": "If 10(1+\\sqrt{\\tilde{\\beta}})\\tan\\alpha\\leqslant 1 , then following Chen et al. (2025), the interaction energy satisfies a Polyak–Łojasiewicz inequality:", "source": "marker_v2", "marker_block_id": "/page/16/Text/6"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0185", "section": "C.2. Multi-Cluster Formation under Mean-Field Dynamics", "page_start": 17, "page_end": 17, "type": "Equation", "text": "E_K[\\delta_{\\mathcal{L}_k}] - E_K[\\mu^{(k)}] \\le 10e^{-\\tilde{\\beta}} \\int_{S_\\alpha^{+(k)}(u)} \\|\\mathcal{X}_{\\mu^{(k)}}(x)\\|_2^2 d\\mu^{(k)}(x). (31)", "source": "marker_v2", "marker_block_id": "/page/16/Equation/7"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0186", "section": "C.2. Multi-Cluster Formation under Mean-Field Dynamics", "page_start": 17, "page_end": 17, "type": "Text", "text": "As a consequence, the solution \\mu_t^{(k)} converges exponentially fast to a Dirac mass \\delta_{\\mathcal{L}_k} supported at a local consensus point \\mathcal{L}_k \\in S_{\\alpha}^{+(k)}(u) :", "source": "marker_v2", "marker_block_id": "/page/16/Text/8"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0187", "section": "C.2. Multi-Cluster Formation under Mean-Field Dynamics", "page_start": 17, "page_end": 17, "type": "Equation", "text": "W_2(\\mu_t^{(k)}, \\delta_{\\mathcal{L}_k}) \\le 20e^{-\\tilde{\\beta}} e^{-\\frac{e^{\\tilde{\\beta}}}{20}t} \\left( \\int_{S_{\\alpha}^{+(k)}(u)} \\|\\mathcal{X}_{\\mu^{(k)}}(x)\\|_2^2 d\\mu^{(k)}(x) \\right)^{\\frac{1}{2}}. (32)", "source": "marker_v2", "marker_block_id": "/page/16/Equation/9"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0188", "section": "C.2. Multi-Cluster Formation under Mean-Field Dynamics", "page_start": 17, "page_end": 17, "type": "Text", "text": "Equivalently,", "source": "marker_v2", "marker_block_id": "/page/16/Text/10"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0189", "section": "C.2. Multi-Cluster Formation under Mean-Field Dynamics", "page_start": 17, "page_end": 17, "type": "Equation", "text": "W_2(\\mu_t^{(k)}, \\delta_{\\mathcal{L}_k}) \\le C_k e^{-\\lambda_k t},\\tag{33}", "source": "marker_v2", "marker_block_id": "/page/16/Equation/11"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0190", "section": "C.2. Multi-Cluster Formation under Mean-Field Dynamics", "page_start": 17, "page_end": 17, "type": "Text", "text": "with constants depending on \\sigma and the geometry of C_k .", "source": "marker_v2", "marker_block_id": "/page/16/Text/12"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0191", "section": "C.2. Multi-Cluster Formation under Mean-Field Dynamics", "page_start": 17, "page_end": 17, "type": "Text", "text": "Global Multi-cluster Convergence. Since each component measure \\mu_t^{(k)} converges to a Dirac mass \\delta_{\\mathcal{L}_k} in the Wasserstein distance W_2 , the full system converges to a fragmented consensus:", "source": "marker_v2", "marker_block_id": "/page/16/Text/13"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0192", "section": "C.2. Multi-Cluster Formation under Mean-Field Dynamics", "page_start": 17, "page_end": 17, "type": "Equation", "text": "\\mu_t = \\sum_{k=1}^m \\pi_k \\mu_t^{(k)} \\xrightarrow{W_2} \\sum_{k=1}^m \\pi_k \\delta_{\\mathcal{L}_k}. (34)", "source": "marker_v2", "marker_block_id": "/page/16/Equation/14"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0193", "section": "C.2. Multi-Cluster Formation under Mean-Field Dynamics", "page_start": 17, "page_end": 17, "type": "Text", "text": "This establishes the existence of stable multi-cluster equilibria under Krause Attention.", "source": "marker_v2", "marker_block_id": "/page/16/Text/15"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0194", "section": "C.2. Multi-Cluster Formation under Mean-Field Dynamics", "page_start": 17, "page_end": 17, "type": "Text", "text": "Both the clustering-emergence mechanism and the mean-field analysis demonstrate that Krause Attention fundamentally alters Transformer dynamics: global synchronization is replaced by structured bounded-confidence interactions, encouraging stable multi-cluster representations. This behavior contrasts sharply with the global consensus dynamics induced by standard self-attention and provides a principled theoretical explanation for the empirical robustness of Krause Attention.", "source": "marker_v2", "marker_block_id": "/page/16/Text/16"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0195", "section": "D.1.1. VISION TRANSFORMERS", "page_start": 17, "page_end": 17, "type": "Text", "text": "All models are trained from scratch under the same training protocol. The detailed training recipes are provided in Table 7 for CIFAR-10/100 , in Table 8 for Fashion-MNIST , and in Table 9 for ImageNet-1K .", "source": "marker_v2", "marker_block_id": "/page/16/Text/20"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0196", "section": "D.1.2. AUTOREGRESSIVE TRANSFORMERS", "page_start": 17, "page_end": 17, "type": "Text", "text": "All models are trained from scratch under the same training protocol for each dataset. Table 10 presents the detailed configurations.", "source": "marker_v2", "marker_block_id": "/page/16/Text/22"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0197", "section": "D.1.2. AUTOREGRESSIVE TRANSFORMERS", "page_start": 18, "page_end": 18, "type": "Caption", "text": "Table 7. Implementation details of Vision Transformers on CIFAR-10/100 .", "source": "marker_v2", "marker_block_id": "/page/17/Caption/1"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0204", "section": "D.1.2. AUTOREGRESSIVE TRANSFORMERS", "page_start": 18, "page_end": 18, "type": "Caption", "text": "Hyperparameters Models (K)ViT-T (K)ViT-S (K)ViT-B Batch Size 256 256 256 Optimizer AdamW AdamW AdamW LR 3 \\cdot 10^{-4} 3 \\cdot 10^{-4} 1 \\cdot 10^{-4} LR Decay cosine cosine cosine Weight Decay 0.05 0.05 0.05 Warmup Epochs 10 10 10 Epochs 300 300 300 GPUs 1xH100 1xH100 1xH100 Sigma \\sigma 2.5 2.5 2.5 Dropout X X X Drop Path 0.1/X 0.1/X 0.1/X H. flip ✓ ✓ ✓ Mixup Alpha 0.8 0.8 0.8 Cutmix Alpha 1.0 1.0 1.0 Label Smoothing 0.1 0.1 0.1 Mixup Prob. 0.8 0.8 0.8", "source": "marker_v2", "marker_block_id": "/page/17/Caption/2"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0205", "section": "D.1.2. AUTOREGRESSIVE TRANSFORMERS", "page_start": 18, "page_end": 18, "type": "TableGroup", "text": "Table 8. Implementation details of Vision Transformers on Fashion-MNIST . Hyperparameters Models (K)ViT-T (K)ViT-S (K)ViT-B Batch Size 256 256 256 Optimizer AdamW AdamW AdamW LR 3 \\cdot 10^{-4} 3 \\cdot 10^{-4} 1 \\cdot 10^{-4} LR Decay cosine cosine cosine Weight Decay 0.05 0.05 0.05 Warmup Epochs 10 10 10 Epochs 300 300 300 GPUs 1xH100 1xH100 1xH100 Sigma \\sigma 2.5 2.5 2.5 Dropout X X X Drop Path 0.1/X 0.1/X 0.1/X H. flip ✓ ✓ ✓ Mixup Alpha 0.8 0.8 0.8 Cutmix Alpha 1.0 1.0 1.0 Label Smoothing 0.1 0.1 0.1 Mixup Prob. 0.8 0.8 0.8", "source": "marker_v2", "marker_block_id": "/page/17/TableGroup/422"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0206", "section": "D.1.3. LLMs finetuned with Lora", "page_start": 18, "page_end": 18, "type": "Text", "text": "The original LLMs as well as Krause-LLMs are finetuned under the same set of LoRA hyperparameters. We finetune the Q, K, V, O projection matrices and MLP modules for both attention mechanisms. For Krause-LLMs, at the skip connection of every layer, Krause attention is combined with standard attention with a learnable mixing ratio, which is initialized with 0.8:0.2 across all models. Under this setup, LoRA finetuning increases the parameter count of the baseline LLMs by 0.52%, while Krause-LLMs incur a slightly higher overhead of 0.59%. The detailed settings are shown in Table 11.", "source": "marker_v2", "marker_block_id": "/page/17/Text/6"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0207", "section": "D.1.4. EVALUATION DATASETS AND PROMPTS FOR LLMS", "page_start": 18, "page_end": 18, "type": "Text", "text": "Evaluation Datasets. We conduct a comprehensive evaluation of Krause-LLMs and LLMs across five critical linguistic dimensions, including reading comprehension (BoolQ), logical inference (CB, ANLI (R1-R3), and MNLI), commonsense reasoning (PIQA), expert-level knowledge (MMLU-Pro), and instruction-following evaluations (IFEval). For the IFEval benchmark, we adopt Strict Acc. as the reported evaluation metric.", "source": "marker_v2", "marker_block_id": "/page/17/Text/8"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0208", "section": "D.1.4. EVALUATION DATASETS AND PROMPTS FOR LLMS", "page_start": 18, "page_end": 18, "type": "Text", "text": "Prompts for LLMs. We present our prompts used for LoRA finetuning experiments in Table 20 at the end of Appendix.", "source": "marker_v2", "marker_block_id": "/page/17/Text/9"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0209", "section": "D.2.1. RESULTS ON FASHION-MNIST", "page_start": 18, "page_end": 18, "type": "Text", "text": "We further assess KViTs on Fashion-MNIST, adopting the same local neighborhood and top-k settings used for CIFAR-10/100. The results in Table 12 show that Krause Attention yields consistent accuracy improvements across all ViT variants, with negligible changes in parameter counts and a reduction in computational cost. These results further support the robustness and generalization benefits of Krause Attention across image datasets with different visual characteristics.", "source": "marker_v2", "marker_block_id": "/page/17/Text/12"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0210", "section": "D.2.2. QUALITATIVE RESULTS OF ATTENTION HEATMAPS", "page_start": 18, "page_end": 18, "type": "Text", "text": "We visualize attention heatmaps from KViTs on representative samples in Fig. 8. The results show that bounded-confidence interactions naturally give rise to distinct multi-cluster synchronization patterns across attention heads. These visualizations provide an intuitive illustration of how Krause Attention induces multiple coherent token groups rather than global mixing within Transformer layers.", "source": "marker_v2", "marker_block_id": "/page/17/Text/14"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0211", "section": "D.2.2. QUALITATIVE RESULTS OF ATTENTION HEATMAPS", "page_start": 18, "page_end": 18, "type": "Text", "text": "In Fig. 9, we further examine the evolution of attention patterns across layers. The right panel shows that standard self-attention progressively converges to nearly identical attention maps in deeper layers, a behavior often associated with representational collapse or attention sink formation. In contrast, the left panel demonstrates that Krause Attention maintains diverse, structured attention patterns throughout the network depth. This qualitative difference visually confirms the ability", "source": "marker_v2", "marker_block_id": "/page/17/Text/15"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0212", "section": "D.2.2. QUALITATIVE RESULTS OF ATTENTION HEATMAPS", "page_start": 19, "page_end": 19, "type": "Caption", "text": "Table 9. Implementation details of Vision Transformers on ImageNet-1K .", "source": "marker_v2", "marker_block_id": "/page/18/Caption/2"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0213", "section": "D.2.2. QUALITATIVE RESULTS OF ATTENTION HEATMAPS", "page_start": 19, "page_end": 19, "type": "Caption", "text": "Table 10. Implementation details of autoregressive Transformers.", "source": "marker_v2", "marker_block_id": "/page/18/Caption/3"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0214", "section": "D.2.2. QUALITATIVE RESULTS OF ATTENTION HEATMAPS", "page_start": 19, "page_end": 19, "type": "Table", "text": "Hyperparameters Models (K)ViT-S-16/32 (K)ViT-B-16/32 Batch Size 1024 1024 Optimizer AdamW AdamW LR 3 \\cdot 10^{-4} 2/3 \\cdot 10^{-4} LR Decay cosine cosine Weight Decay 0.05 0.05 Warmup Epochs 10 10 Epochs 300 300 GPUs 4xH100 4xH100 Sigma \\sigma 4.0 4.0 Dropout X X Drop Path 0.1/X 0.1/X H. flip ✓ ✓ Mixup Alpha 0.8 0.8 Cutmix Alpha 1.0 1.0 Label Smoothing 0.1 0.1 Rand Augment 2&9 2&9 Random Erasing 0.25 0.25 Mixup Prob. 0.8 0.8", "source": "marker_v2", "marker_block_id": "/page/18/Table/4"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0215", "section": "D.2.2. QUALITATIVE RESULTS OF ATTENTION HEATMAPS", "page_start": 19, "page_end": 19, "type": "Table", "text": "Hyperparameters Models MNIST CIFAR-10 Training Batch Size 64 8 Optimizer AdamW AdamW LR 1 \\cdot 10^{-3} 1 \\cdot 10^{-3} LR Decay cosine cosine Warmup Ratio 0.05 0.05 Epochs 30 30 GPUs 4xH100 4xH100 Sigma \\sigma 2.5 2.5 Dropout 0.1 0.1 Hidden Dimension 256 512 Heads 8 8 Layers 12 16 Temperature 1.0 1.0 Generation Batch Size 2000 200", "source": "marker_v2", "marker_block_id": "/page/18/Table/5"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0216", "section": "D.2.2. QUALITATIVE RESULTS OF ATTENTION HEATMAPS", "page_start": 19, "page_end": 19, "type": "Text", "text": "of Krause Attention to preserve multi-cluster structure and avoid excessive concentration of attention mass.", "source": "marker_v2", "marker_block_id": "/page/18/Text/6"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0217", "section": "D.2.3. ABLATION STUDIES", "page_start": 19, "page_end": 19, "type": "Text", "text": "Table 13 displays the detailed ablation studies of each component under two training settings. As can be observed, the RBF-based distance kernel used in Krause Attention already provides a substantial improvement over standard ViT-S, even in the absence of local interaction constraints or top-k sparsification. This observation holds consistently across both training settings, indicating that replacing dot-product similarity with distance-based aggregation constitutes a strong inductive bias on its own.", "source": "marker_v2", "marker_block_id": "/page/18/Text/8"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0218", "section": "D.2.3. ABLATION STUDIES", "page_start": 19, "page_end": 19, "type": "Text", "text": "When models are trained without Mixup and CutMix, introducing either local interaction or top-k selection leads to additional accuracy gains, and combining both mechanisms yields the best overall performance. This suggests that, in the absence of heavy data mixing, selective and localized interactions help the model focus on semantically coherent token groups and reinforce meaningful cluster formation.", "source": "marker_v2", "marker_block_id": "/page/18/Text/9"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0219", "section": "D.2.3. ABLATION STUDIES", "page_start": 19, "page_end": 19, "type": "Text", "text": "In contrast, when Mixup and CutMix are applied, the benefit of top-k selection becomes less pronounced. Since these augmentations partially mix image patches from different categories, the resulting token representations are less semantically localized, making selective interactions harder to exploit for improved discrimination. In this regime, enforcing top-k sparsity mainly contributes to computational efficiency rather than accuracy, while local interaction remains beneficial for both performance and FLOP reduction.", "source": "marker_v2", "marker_block_id": "/page/18/Text/10"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0220", "section": "D.2.3. ABLATION STUDIES", "page_start": 19, "page_end": 19, "type": "Text", "text": "Across all ablation settings, KViTs maintain essentially identical parameter counts to ViT-S. Enforcing local interaction consistently reduces FLOPs, and even the dense variant of Krause Attention remains more efficient than standard dot-product attention. Overall, these ablations disentangle the roles of the RBF kernel, locality, and selective sparsity, and highlight how their contributions interact with data augmentation strategies in shaping both performance and efficiency.", "source": "marker_v2", "marker_block_id": "/page/18/Text/11"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0221", "section": "D.2.4. KRAUSE SWINTRANSFORMERS", "page_start": 19, "page_end": 19, "type": "Text", "text": "SwinTransformers incorporate strong locality priors through hierarchical, window-based self-attention and represent a widely adopted alternative to global Vision Transformers for image recognition (Liu et al., 2021). To isolate the contribution of the proposed bounded-confidence interaction dynamics from architectural locality alone, we replace the vanilla attention modules in Swin-T and Swin-S with Krause Attention and compare the resulting models (KSwin-T and KSwin-S) against their original counterparts under identical training protocols.", "source": "marker_v2", "marker_block_id": "/page/18/Text/13"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0222", "section": "D.2.4. KRAUSE SWINTRANSFORMERS", "page_start": 19, "page_end": 19, "type": "Text", "text": "Since SwinTransformers already operate on small fixed windows, we do not introduce additional locality constraints or", "source": "marker_v2", "marker_block_id": "/page/18/Text/14"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0223", "section": "D.2.4. KRAUSE SWINTRANSFORMERS", "page_start": 20, "page_end": 20, "type": "Caption", "text": "Table 11. Implementation details of finetuning with LoRA, where Q_K, K_K, V_K, O_K is the parameter set of Krause Attention.", "source": "marker_v2", "marker_block_id": "/page/19/Caption/1"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0224", "section": "D.2.4. KRAUSE SWINTRANSFORMERS", "page_start": 20, "page_end": 20, "type": "TableGroup", "text": "Hyperparameters Value LoRA Rank (r) 16 LoRA Alpha (α) 32 LoRA Dropout 0.1 (Krause) Sigma σ 5.5 Target Modules Q, K, V, O projs, Gate, Up, Down projs (Krause) Target Modules Q, K, V, O projs, Gate, Up, Down projs, Q_K, K_K, V_K, O_K projs Task Type Causal LM Optimizer AdamW Learning Rate (LR) 5 \\cdot 10^{-5} LR Scheduler cosine Weight Decay 0.01 Warmup Ratio 0.1 Total Epochs 2 Per-device Batch Size 4 Gradient Accumulation 8 Mixed Precision BF16 Max Gradient Norm 1.0 Gradient Checkpointing ✓ GPUs 4xH100 Dataloader Workers 8 Logging Steps 5 Evaluation Strategy steps (150)", "source": "marker_v2", "marker_block_id": "/page/19/TableGroup/391"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0225", "section": "D.2.4. KRAUSE SWINTRANSFORMERS", "page_start": 20, "page_end": 20, "type": "Table", "text": "Table 12. Image classification results on Fashion-MNIST. Models Accuracy (%) # Parameters FLOPs ViT-T 93.47 5,353,738 0.28G KViT-T 94.65 5,353,750 0.19G ViT-S 94.11 21,324,298 1.09G KViT-S 94.71 21,324,310 0.73G ViT-B 95.74 85,115,914 4.30G KViT-B 96.10 85,115,926 2.89G", "source": "marker_v2", "marker_block_id": "/page/19/Table/4"}
97
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0226", "section": "D.2.4. KRAUSE SWINTRANSFORMERS", "page_start": 20, "page_end": 20, "type": "Text", "text": "top-k sparsification in this setting. Instead, we focus exclusively on replacing dot-product similarity with the RBF-based distance kernel that underlies Krause Attention. This allows us to evaluate whether the bounded-confidence interaction rule provides benefits beyond the locality bias already present in Swin architectures.", "source": "marker_v2", "marker_block_id": "/page/19/Text/5"}
98
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0227", "section": "D.2.4. KRAUSE SWINTRANSFORMERS", "page_start": 20, "page_end": 20, "type": "Text", "text": "As shown in Table 14, Krause SwinTransformers consistently outperform their vanilla counterparts on CIFAR-10 across both model scales. Despite nearly identical parameter counts, KSwin-T and KSwin-S achieve higher classification accuracy while substantially reducing FLOPs. The efficiency gain arises from the kernel formulation itself. Compared to dot-product attention, the RBF-based distance kernel decomposes into simpler element-wise operations and avoids expensive inner products and softmax normalization, leading to lower computational cost even within identical attention windows. These results indicate that the performance gains of Krause Attention do not arise merely from enforcing locality, but from the distance-based interaction dynamics themselves.", "source": "marker_v2", "marker_block_id": "/page/19/Text/6"}
99
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0228", "section": "D.2.4. KRAUSE SWINTRANSFORMERS", "page_start": 20, "page_end": 20, "type": "Text", "text": "Notably, even in architectures where local attention is already hard-coded, replacing similarity-based aggregation with bounded-confidence, distance-aware interactions improves both efficiency and predictive performance. This further supports the view that Krause Attention introduces a complementary and robust inductive bias that extends beyond architectural locality, reinforcing its general applicability across different Transformer backbones.", "source": "marker_v2", "marker_block_id": "/page/19/Text/7"}
100
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0229", "section": "D.2.4. KRAUSE SWINTRANSFORMERS", "page_start": 21, "page_end": 21, "type": "FigureGroup", "text": "Figure 8. Qualitative comparisons of Krause Attention and standard attention on ImageNet-1K.", "source": "marker_v2", "marker_block_id": "/page/20/FigureGroup/389"}
101
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0230", "section": "D.2.4. KRAUSE SWINTRANSFORMERS", "page_start": 21, "page_end": 21, "type": "TableGroup", "text": "Table 13. Image classification results under ablation studies on CIFAR-10. Models w/o Mixup & Cutmix w/ Mixup & Cutmix Accuracy(%) # Parameters FLOPs Accuracy(%) # Parameters FLOPs KViT-S (w/o top-k) 91.85 21,342,358 0.97G 95.59 21,342,358 0.97G KViT-S (w/o local) 87.55 21,342,358 1.41G 93.96 21,342,358 1.41G KViT-S (w/o local&top-k) 87.28 21,342,358 1.41G 94.06 21,342,358 1.41G KViT-S 92.16 21,342,358 0.97G 95.20 21,342,358 0.97G ViT-S 82.33 21,342,346 1.43G 93.33 21,342,346 1.43G", "source": "marker_v2", "marker_block_id": "/page/20/TableGroup/390"}
102
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0231", "section": "D.3.1. QUALITATIVE EXAMPLES", "page_start": 21, "page_end": 21, "type": "Text", "text": "We evaluate conditional generation through image completion on MNIST and CIFAR-10, where the top half of each image is masked. The model is conditioned on the visible portion and tasked with generating the remaining pixels. Representative results are shown in Fig. 10. For each input image, five completion samples are produced. These results demonstrate that KARMs generate coherent and diverse completions consistent with the underlying digit/object structure.", "source": "marker_v2", "marker_block_id": "/page/20/Text/7"}
103
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0232", "section": "D.3.2. ABLATION STUDIES", "page_start": 21, "page_end": 21, "type": "Text", "text": "Varying Window Size and Sparsity Level. As shown in Table 15, reducing the local window size substantially increases inference throughput, with only a mild gradual degradation in generation quality. This behavior highlights a controllable trade-off between efficiency and fidelity in KARMs: smaller windows yield faster generation at the cost of slightly higher BPD, allowing practitioners to adjust the window and top-k parameters according to application-specific requirements.", "source": "marker_v2", "marker_block_id": "/page/20/Text/9"}
104
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0233", "section": "D.3.2. ABLATION STUDIES", "page_start": 21, "page_end": 21, "type": "Text", "text": "Varying Individual Components. Table 16 presents an ablation study that isolates the effects of the local window and top-k selection in KARMs. Removing the local window improves the likelihood estimation but substantially increases per-step computation, resulting in slower overall generation. In contrast, disabling top-k selection increases throughput by eliminating the selection overhead, but leads to degraded generation quality. Notably, the lowest BPD is achieved when both locality and sparsity constraints are removed, highlighting the intrinsic modeling strength of the underlying distance-based Krause Attention itself. This observation is consistent with the behavior observed in the Krause SwinTransformer experiments.", "source": "marker_v2", "marker_block_id": "/page/20/Text/10"}
105
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0234", "section": "D.3.2. ABLATION STUDIES", "page_start": 21, "page_end": 21, "type": "Text", "text": "Taken together, these results demonstrate that Krause Attention provides a modular and flexible design, where locality and sparsity can be selectively enabled to trade off performance and efficiency under different resource constraints.", "source": "marker_v2", "marker_block_id": "/page/20/Text/11"}
106
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0235", "section": "D.3.2. ABLATION STUDIES", "page_start": 22, "page_end": 22, "type": "FigureGroup", "text": "Figure 9. Evolution of attention scores across layers in KViTs/ViTs. Krause Attention (left) achieves stable multi-cluster formation, while standard attention (right) progressively converges to a single global consensus.", "source": "marker_v2", "marker_block_id": "/page/21/FigureGroup/321"}
107
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0236", "section": "D.3.2. ABLATION STUDIES", "page_start": 22, "page_end": 22, "type": "TableGroup", "text": "Table 14. Image classification results of Krause SwinTransformer on CIFAR-10. Models Accuracy(%) # Parameters FLOPs Swin-T 90.73 27,510,484 0.20G KSwin-T 91.09 27,510,622 0.09G Swin-S 90.21 48,811,108 0.38G KSwin-S 91.13 48,811,390 0.18G", "source": "marker_v2", "marker_block_id": "/page/21/TableGroup/322"}
108
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0237", "section": "D.4.1. RESULTS ON KRAUSE-QWEN", "page_start": 22, "page_end": 22, "type": "Text", "text": "Qwen1.5-7B is an intermediate release in the Qwen family and shares the same architectural configuration and tokenizer as Qwen2 (Yang et al., 2024a) . We evaluate Krause-Qwen1.5-7B under a zero-shot setting across the same language understanding and reasoning benchmarks used in the main text, while systematically varying the key Krause Attention hyperparameters, namely the local window size and top-k sparsity level. The results under different parameter configurations are summarized in Table 17. Overall, Krause-Qwen1.5-7B consistently matches or improves upon the LoRA-finetuned baseline across a wide range of tasks, including BoolQ, PIQA, MNLI, ANLI, MMLU-Pro, and IFEval. Performance remains robust across different window and sparsity settings, indicating that Krause Attention is not overly sensitive to precise hyperparameter choices. Notably, on the ANLI benchmarks, LoRA finetuning of Qwen1.5-7B often leads to performance degradation. Krause-Qwen1.5-7B shows a similar trend, but with noticeably reduced degradation, indicating improved robustness of Krause attention even under potentially harmful finetuning.", "source": "marker_v2", "marker_block_id": "/page/21/Text/7"}
109
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0238", "section": "D.4.1. RESULTS ON KRAUSE-QWEN", "page_start": 22, "page_end": 22, "type": "Text", "text": "Beyond task performance, Krause-Qwen1.5-7B also exhibits improved attention dynamics. Consistent with observations for Krause-Llama-3-8B, Krause-Qwen1.5-7B substantially alleviates attention sink behavior. As illustrated in Fig. 11, the vanilla Qwen1.5-7B model begins to show pronounced attention concentration on the first token starting around layer 9. In contrast, Krause-Qwen1.5-7B suppresses this collapse at comparable depths, maintaining stable and well-distributed attention scores. While recent methods such as gated attention (Qiu et al., 2025) explicitly modify attention parameterization", "source": "marker_v2", "marker_block_id": "/page/21/Text/8"}
110
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0239", "section": "D.4.1. RESULTS ON KRAUSE-QWEN", "page_start": 23, "page_end": 23, "type": "FigureGroup", "text": "Figure 10. Samples completed by KARMs on MNIST (left) and CIFAR-10 (right).", "source": "marker_v2", "marker_block_id": "/page/22/FigureGroup/416"}
111
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0240", "section": "D.4.1. RESULTS ON KRAUSE-QWEN", "page_start": 23, "page_end": 23, "type": "Caption", "text": "Table 15. Image generation performance of KARMs on MNIST with varying window sizes and sparsity levels. Inference throughput is measured on a single NVIDIA H100 GPU. Window and sparsity settings are reported as window size/top-k.", "source": "marker_v2", "marker_block_id": "/page/22/Caption/3"}
112
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0241", "section": "D.4.1. RESULTS ON KRAUSE-QWEN", "page_start": 23, "page_end": 23, "type": "TableGroup", "text": "Models BPD (↓) Images/sec (↑) KARM(128/96) 0.5652 105.6037 KARM(96/64) 0.5704 127.3919 KARM(64/48) 0.5853 169.5279 Table 16. Ablation study of KARMs on MNIST image generation. Inference throughput is measured on a single NVIDIA H100 GPU.", "source": "marker_v2", "marker_block_id": "/page/22/TableGroup/417"}
113
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0242", "section": "D.4.1. RESULTS ON KRAUSE-QWEN", "page_start": 23, "page_end": 23, "type": "Table", "text": "Models BPD (↓) Images/sec (↑) KARM 0.5652 105.6037 KARM(w/o window) 0.5635 42.6981 KARM(w/o top-k) 0.5674 127.7694 KARM(w/o window&top-k) 0.5593 45.9693", "source": "marker_v2", "marker_block_id": "/page/22/Table/6"}
114
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0243", "section": "D.4.1. RESULTS ON KRAUSE-QWEN", "page_start": 23, "page_end": 23, "type": "Text", "text": "to address attention sinks, Krause Attention approaches the problem from a complementary perspective by reshaping the underlying interaction dynamics through bounded-confidence synchronization.", "source": "marker_v2", "marker_block_id": "/page/22/Text/7"}
115
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0244", "section": "D.4.1. RESULTS ON KRAUSE-QWEN", "page_start": 23, "page_end": 23, "type": "FigureGroup", "text": "Figure 11. First token attention scores of Krause-Qwen1.5-7B (right) and Qwen1.5-7B (left).", "source": "marker_v2", "marker_block_id": "/page/22/FigureGroup/418"}
116
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0245", "section": "D.4.2. EXTENDED RESULTS ON KRAUSE-LLAMA", "page_start": 23, "page_end": 23, "type": "Text", "text": "In addition to the results reported in the main text, we further evaluate the zero-shot performance of Krause-Llama3-8B under a range of Krause Attention configurations. Specifically, we vary the local window size and top-k sparsity level while keeping all other settings fixed. The results are summarized in Table 18. Across a broad set of language understanding and reasoning benchmarks, Krause-Llama3-8B consistently outperforms the LoRA-finetuned Llama3-8B baseline, while exhibiting strong robustness across different parameter choices. Performance gains are particularly pronounced on challenging benchmarks such as ANLI and MNLI, where Krause Attention yields higher accuracy and macro-F1 scores. These results further demonstrate that the benefits of Krause Attention extend beyond a single configuration and are stable across a range of sparsity and locality hyperparameters. We also provide two examples of IFEval responses which are produced by Llama3-8B (finetuned w/ LoRA) and Krause-Llama3-8B in Table 21 at the end of Appendix.", "source": "marker_v2", "marker_block_id": "/page/22/Text/11"}
117
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0246", "section": "D.4.2. EXTENDED RESULTS ON KRAUSE-LLAMA", "page_start": 24, "page_end": 24, "type": "TableGroup", "text": "Table 17. Language understanding and reasoning results of Krause-Qwen1.5-7B. NLI results are reported in Acc. / Macro-F1, while other tasks are reported in Acc. (%). Parameter settings are reported in window size/top-k. Models BoolQ CB PIQA MNLI ANLI-R1 ANLI-R2 ANLI-R3 MMLU-Pro IFEval Qwen1.5-7B Qwen1.5-7B (finetuned w/ LoRA) 84.08 87.50/78.57 72.45 82.73/81.47 40.40/37.34 38.70/36.45 41.33/38.87 83.80 76.79/53.73 49.57 57.10/46.36 47.30/38.84 45.40/39.14 46.67/38.05 30.56 36.11 26.99 26.43 Krause-Qwen1.5-7B (48/24) Krause-Qwen1.5-7B (32/16) Krause-Qwen1.5-7B (18/12) 84.66 87.50/78.62 73.42 83.83/82.82 40.90/37.94 38.20/36.18 41.17/38.99 84.78 85.71/70.09 73.70 83.49/82.41 41.30/37.88 39.80/37.25 41.17/38.62 84.63 85.71/70.06 72.66 83.40/82.27 41.20/37.64 39.10/36.50 42.33/39.59 34.72 37.50 37.50 31.79 32.16 32.17 Table 18. Language understanding and reasoning results of Krause-Llama3-8B. NLI results are reported in Acc. / Macro-F1, while other tasks are reported in Acc. (%). Parameter settings are reported in window size/top-k.", "source": "marker_v2", "marker_block_id": "/page/23/TableGroup/615"}
118
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0247", "section": "D.4.2. EXTENDED RESULTS ON KRAUSE-LLAMA", "page_start": 24, "page_end": 24, "type": "Table", "text": "Models BoolQ CB PIQA MNLI ANLI-R1 ANLI-R2 ANLI-R3 MMLU-Pro IFEval Llama3-8B 76.13 41.07/19.41 51.52 35.45/18.11 33.40/16.69 33.40/16.69 33.50/17.04 37.50 22.18 Llama3-8B (finetuned w/ LoRA) 80.41 60.71/47.81 75.16 59.53/55.29 38.70/30.62 39.90/33.37 44.92/39.57 Krause-Llama3-8B (48/24) 82.89 66.07/49.60 77.61 62.85/56.91 43.20/39.03 41.30/37.90 45.83/42.41 41.67 40.28 32.72 36.60 Krause-Llama3-8B (32/16) 80.59 64.29/48.04 77.77 63.27/53.72 40.30/33.01 40.50/34.27 45.67/39.84 41.67 34.01 Krause-Llama3-8B (18/12) 82.12 66.07/47.36 77.50 62.00/54.85 39.50/32.25 40.30/34.03 44.83/39.64 38.89 34.57", "source": "marker_v2", "marker_block_id": "/page/23/Table/4"}
119
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0248", "section": "D.4.3. INFERENCE SPEED COMPARISON", "page_start": 24, "page_end": 24, "type": "Text", "text": "We benchmark the inference throughput of standard LLMs and Krause-LLMs on the BoolQ dataset. As can be observed in Table 19, due to the introduction of a parallel Krause Attention pathway and shortcut connections during inference, Krause-LLMs incur a modest overhead compared to their backbone models. Nevertheless, the observed throughput reduction is minimal, and Krause-LLMs maintain inference speeds that are highly comparable to both the base and LoRA-finetuned baselines across all configurations. These results indicate that the proposed attention modification introduces negligible runtime overhead while delivering consistent performance gains in language understanding tasks.", "source": "marker_v2", "marker_block_id": "/page/23/Text/6"}
120
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0249", "section": "D.4.3. INFERENCE SPEED COMPARISON", "page_start": 24, "page_end": 24, "type": "TableGroup", "text": "Table 19. Inference speed of LLMs and Krause-LLMs on BoolQ. All results are measured on a single NVIDIA H100 GPU. Models Tokens/sec Models Tokens/sec Llama3-8B 110.99 Qwen1.5-7B 110.40 Llama3-8B (LoRA) 110.88 Qwen1.5-7B (LoRA) 109.87 Krause-Llama3-8B (48/24) 110.10 Krause-Qwen1.5-7B (48/24) 109.72 Krause-Llama3-8B (32/16) 110.26 Krause-Qwen1.5-7B (32/16) 109.87 Krause-Llama3-8B (18/12) 110.64 Krause-Qwen1.5-7B (18/12) 109.84", "source": "marker_v2", "marker_block_id": "/page/23/TableGroup/616"}
121
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0250", "section": "E. Theoretical Perspective on Attention Sink Phenomena", "page_start": 24, "page_end": 24, "type": "Text", "text": "Recent empirical studies have documented the emergence of attention sinks in deep Transformer models (Xiao et al., 2024; Qiu et al., 2025) , in which a small subset of tokens persistently accumulates a disproportionate fraction of attention mass across layers and heads. While this phenomenon is typically characterized at the level of attention distributions, it admits a natural interpretation from the dynamical systems viewpoint of self-attention. In particular, attention sinks can be viewed as a structural consequence of the global synchronization dynamics induced by standard self-attention.", "source": "marker_v2", "marker_block_id": "/page/23/Text/10"}
122
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0251", "section": "E. Theoretical Perspective on Attention Sink Phenomena", "page_start": 24, "page_end": 24, "type": "Text", "text": "Prior theoretical analyses (Geshkovski et al., 2023; Chen et al., 2025) model self-attention as a globally coupled interacting particle system, where token representations evolve under repeated, globally normalized aggregation. Under dot-product attention, this interaction induces a contraction in representation space. In the mean-field limit and as depth increases, the system exhibits a strong tendency toward global synchronization, with token representations converging toward a single dominant mode. This contraction progressively reduces representational diversity and can lead to representational collapse.", "source": "marker_v2", "marker_block_id": "/page/23/Text/11"}
123
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0252", "section": "E. Theoretical Perspective on Attention Sink Phenomena", "page_start": 24, "page_end": 24, "type": "Text", "text": "From this viewpoint, attention sinks arise as a concrete manifestation of the same collapse dynamics. Global softmax", "source": "marker_v2", "marker_block_id": "/page/23/Text/12"}
124
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0253", "section": "E. Theoretical Perspective on Attention Sink Phenomena", "page_start": 25, "page_end": 25, "type": "Text", "text": "normalization amplifies small initial asymmetries among tokens, which may originate from positional bias, frequency effects, or stochastic initialization. Tokens that receive slightly higher attention early on become increasingly influential, shaping subsequent representations and reinforcing their dominance through repeated attention updates. This positive feedback loop translates representation synchronization into extreme concentration of attention mass, yielding a small number of persistent sink tokens. Attention sinks therefore do not constitute an independent mechanism, but emerge naturally from the global coupling and contraction dynamics of standard self-attention.", "source": "marker_v2", "marker_block_id": "/page/24/Text/1"}
125
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0254", "section": "E. Theoretical Perspective on Attention Sink Phenomena", "page_start": 25, "page_end": 25, "type": "Text", "text": "Krause Attention directly intervenes in this dynamical pathway. By replacing global dot-product similarity with boundedconfidence, distance-based interactions and enforcing locality and selective sparsity, Krause Attention prevents unrestricted global coupling. This modification breaks the positive feedback loop responsible for global synchronization and representation collapse, instead promoting stable multi-cluster dynamics. Consequently, no single token can dominate as a global attractor, providing a principled theoretical explanation for the empirically observed mitigation of attention sink behavior.", "source": "marker_v2", "marker_block_id": "/page/24/Text/2"}
126
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0255", "section": "F. Relation to Competitive Learning and Lateral Interaction Dynamics", "page_start": 25, "page_end": 25, "type": "Text", "text": "The clustering behavior induced by Krause Attention is closely related to classical ideas from competitive learning and lateral interaction models developed in neuroscience and early machine learning. In competitive learning frameworks, neurons or units compete to represent inputs, leading to the emergence of a small number of dominant prototypes or clusters. Canonical examples include vector quantization and self-organizing maps (SOMs), where learning dynamics drive representations toward a partitioned organization of the input space (Gray, 1984; Rumelhart & Zipser, 1985; Kohonen, 2002) .", "source": "marker_v2", "marker_block_id": "/page/24/Text/4"}
127
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0256", "section": "F. Relation to Competitive Learning and Lateral Interaction Dynamics", "page_start": 25, "page_end": 25, "type": "Text", "text": "In biological neural systems, such competition is often implemented through lateral interactions within a layer. A well-studied mechanism is the Mexican-hat interaction pattern, which combines short-range excitation with longer-range inhibition (Amari, 1977; Ermentrout, 1998) . At a high level, the activity u i of a neuron can be described by a lateral interaction rule:", "source": "marker_v2", "marker_block_id": "/page/24/Text/5"}
128
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0257", "section": "F. Relation to Competitive Learning and Lateral Interaction Dynamics", "page_start": 25, "page_end": 25, "type": "Equation", "text": "u_i \\leftarrow \\sum_j K(d_{i,j}) u_j \\tag{35}", "source": "marker_v2", "marker_block_id": "/page/24/Equation/6"}
129
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0258", "section": "F. Relation to Competitive Learning and Lateral Interaction Dynamics", "page_start": 25, "page_end": 25, "type": "Text", "text": "where di,j denotes the distance in feature or cortical space, and the kernel K(·) is positive at short range and negative at long range. This center–surround structure causes nearby neurons to synchronize and form local clusters, while distant neurons are suppressed. As a result, lateral inhibition prevents unrestricted global synchronization and instead promotes specialization, diversity, and the emergence of multiple stable activity patterns (Grossberg, 1988) .", "source": "marker_v2", "marker_block_id": "/page/24/Text/7"}
130
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0259", "section": "F. Relation to Competitive Learning and Lateral Interaction Dynamics", "page_start": 25, "page_end": 25, "type": "Text", "text": "Krause bounded-confidence dynamics (Krause et al., 2000; Hegselmann & Krause, 2002) can be viewed as a simplified and analytically tractable abstraction of this principle. Rather than explicitly introducing inhibitory interactions, Krause models restrict interactions to agents within a confidence radius. Agents synchronize locally with nearby neighbors, while long-range interactions are entirely removed. This implicit form of inhibition is sufficient to prevent global consensus and leads instead to robust multi-cluster equilibria, a hallmark of competitive self-organization in interacting particle systems.", "source": "marker_v2", "marker_block_id": "/page/24/Text/8"}
131
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0260", "section": "F. Relation to Competitive Learning and Lateral Interaction Dynamics", "page_start": 25, "page_end": 25, "type": "Text", "text": "Krause Attention instantiates this idea within Transformer architectures. Instead of computing attention weights from global query–key similarity, it defines interactions through query–key distances and restricts aggregation to a bounded local neighborhood. In its simplest form, Krause Attention computes", "source": "marker_v2", "marker_block_id": "/page/24/Text/9"}
132
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0261", "section": "F. Relation to Competitive Learning and Lateral Interaction Dynamics", "page_start": 25, "page_end": 25, "type": "Equation", "text": "a_{i,j} \\propto \\exp(-||Qx_k - Kx_j||^2), \\quad j \\in \\xi_i, \\tag{36}", "source": "marker_v2", "marker_block_id": "/page/24/Equation/10"}
133
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0262", "section": "F. Relation to Competitive Learning and Lateral Interaction Dynamics", "page_start": 25, "page_end": 25, "type": "Text", "text": "followed by normalization over a local support ξ i and optional top-k selection. Nearby tokens reinforce one another through smooth, distance-aware coupling, while distant tokens are excluded from interaction altogether.", "source": "marker_v2", "marker_block_id": "/page/24/Text/11"}
134
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0263", "section": "F. Relation to Competitive Learning and Lateral Interaction Dynamics", "page_start": 25, "page_end": 25, "type": "Text", "text": "Although Krause Attention does not introduce explicit repulsive forces as in classical Mexican-hat kernels, it implements an implicit form of lateral inhibition. Long-range interactions are suppressed through bounded-confidence constraints, while normalization and sparsity restrict competition to local neighborhoods. As a result, distant tokens neither attract nor compete with one another, which functionally mirrors the role of inhibition in preventing global synchronization in lateral-interaction models. From a dynamical viewpoint, each Krause Attention layer performs a single discrete update of a bounded-confidence interaction process, and stacking layers corresponds to iterating a competitive self-organization dynamic, analogous to repeatedly equilibrating lateral interactions in classical competitive learning systems. This interpretation aligns naturally with recent views of Transformers as interacting particle systems (Geshkovski et al., 2023; Chen et al., 2025) and clarifies why Krause Attention supports stable multi-cluster equilibria rather than collapsing toward a single global mode.", "source": "marker_v2", "marker_block_id": "/page/24/Text/12"}
135
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0264", "section": "Krause Synchronization Transformers", "page_start": 26, "page_end": 26, "type": "Text", "text": "These observations situate Krause Attention within a broader class of competitive self-organization models. Analogous dynamics appear in lateral-interaction systems, where local excitation and long-range inhibition yield multi-cluster equilibria. Krause Attention captures this principle in an abstract form, realizing inhibitory effects implicitly through boundedconfidence, distance-based interactions rather than explicit repulsion.", "source": "marker_v2", "marker_block_id": "/page/25/Text/1"}
136
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0265", "section": "Krause Synchronization Transformers", "page_start": 26, "page_end": 26, "type": "Text", "text": "This perspective also provides insight into the mitigation of attention sink phenomena. Attention sinks can be viewed as an extreme outcome of unrestricted global competition under repeated globally normalized attention updates (Xiao et al., 2024) . In contrast, Krause Attention confines competition to local neighborhoods, preventing any single token from emerging as a global attractor. In this sense, bounded-confidence dynamics play a stabilizing role analogous to lateral inhibition in competitive learning, preserving representational diversity while enabling structured and coherent coordination.", "source": "marker_v2", "marker_block_id": "/page/25/Text/2"}
137
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0266", "section": "BoolQ Prompt Example", "page_start": 27, "page_end": 27, "type": "Text", "text": "Passage: Ethanol fuel – All biomass goes through at least some of these steps: it needs to be grown, collected, dried, fermented, distilled, and burned. All of these steps require resources and an infrastructure. The total amount of energy input into the process compared to the energy released by burning the resulting ethanol fuel is known as the energy balance (or \"energy returned on energy invested\"). Figures compiled in a 2007 report by National Geographic Magazine point to modest results for corn ethanol produced in the US: one unit of fossil-fuel energy is required to create 1.3 energy units from the resulting ethanol. The energy balance for sugarcane ethanol produced in Brazil is more favorable, with one unit of fossil-fuel energy required to create 8 from the ethanol. Energy balance estimates are not easily produced, thus numerous such reports have been generated that are contradictory. For instance, a separate survey reports that production of ethanol from sugarcane, which requires a tropical climate to grow productively, returns from 8 to 9 units of energy for each unit expended, as compared to corn, which only returns about 1.34 units of fuel energy for each unit of energy expended. A 2006 University of California Berkeley study, after analyzing six separate studies, concluded that producing ethanol from corn uses much less petroleum than producing gasoline.", "source": "marker_v2", "marker_block_id": "/page/26/Text/29"}
138
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0267", "section": "BoolQ Prompt Example", "page_start": 27, "page_end": 27, "type": "Text", "text": "Question: does ethanol take more energy make that produces?", "source": "marker_v2", "marker_block_id": "/page/26/Text/30"}
139
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0268", "section": "BoolQ Prompt Example", "page_start": 27, "page_end": 27, "type": "Text", "text": "### Response: [Model predicts: Yes/No]", "source": "marker_v2", "marker_block_id": "/page/26/Text/31"}
140
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0269", "section": "NLI Prompt Example", "page_start": 27, "page_end": 27, "type": "Text", "text": "Premise: Valence the void-brain, Valence the virtuous valet. Why couldn't the figger choose his own portion of titanic anatomy to shaft? Did he think he was helping?", "source": "marker_v2", "marker_block_id": "/page/26/Text/33"}
141
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0270", "section": "NLI Prompt Example", "page_start": 27, "page_end": 27, "type": "Text", "text": "Hypothesis: Valence was helping A. entailment B. contradiction C. neutral", "source": "marker_v2", "marker_block_id": "/page/26/Text/34"}
142
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0271", "section": "NLI Prompt Example", "page_start": 27, "page_end": 27, "type": "Text", "text": "### Response: [Model predicts token: A, B, or C]", "source": "marker_v2", "marker_block_id": "/page/26/Text/35"}
143
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0272", "section": "PIQA Prompt Example", "page_start": 27, "page_end": 27, "type": "Text", "text": "Question: How do I ready a guinea pig cage for it's new occupants?", "source": "marker_v2", "marker_block_id": "/page/26/Text/37"}
144
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0273", "section": "PIQA Prompt Example", "page_start": 27, "page_end": 27, "type": "Text", "text": "A. Provide the guinea pig with a cage full of a few inches of bedding made of ripped paper strips, you will also need to supply it with a water bottle and a food dish.", "source": "marker_v2", "marker_block_id": "/page/26/Text/38"}
145
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0274", "section": "PIQA Prompt Example", "page_start": 27, "page_end": 27, "type": "Text", "text": "B. Provide the guinea pig with a cage full of a few inches of bedding made of ripped jeans material, you will also need to supply it with a water bottle and a food dish.", "source": "marker_v2", "marker_block_id": "/page/26/Text/39"}
146
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0275", "section": "PIQA Prompt Example", "page_start": 27, "page_end": 27, "type": "Text", "text": "### Response: [Model predicts token: A, or B]", "source": "marker_v2", "marker_block_id": "/page/26/Text/40"}
147
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0276", "section": "MMLU-Pro Prompt Example", "page_start": 27, "page_end": 27, "type": "Text", "text": "Question: The symmetric group S n has n elements, hence it is not true that S 10 has 10 elements. Find the characteristic of the ring 2Z.", "source": "marker_v2", "marker_block_id": "/page/26/Text/42"}
148
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0277", "section": "MMLU-Pro Prompt Example", "page_start": 27, "page_end": 27, "type": "Text", "text": "Choices:", "source": "marker_v2", "marker_block_id": "/page/26/Text/43"}
149
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0278", "section": "MMLU-Pro Prompt Example", "page_start": 27, "page_end": 27, "type": "Text", "text": "A. 0 B. 30 C. 3 D. 10 E. 12 F. 50 G. 2 H. 100 I. 20 J. 5", "source": "marker_v2", "marker_block_id": "/page/26/Text/44"}
150
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0279", "section": "MMLU-Pro Prompt Example", "page_start": 27, "page_end": 27, "type": "Text", "text": "### Response: [Model predicts token: A-J]", "source": "marker_v2", "marker_block_id": "/page/26/Text/45"}
151
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0280", "section": "IFEval Prompt Example", "page_start": 27, "page_end": 27, "type": "Text", "text": "Given the sentence \"Two young boys with toy guns and horns.\" can you ask a question? Please ensure that your response is in English, and in all lowercase letters. No capital letters are allowed.", "source": "marker_v2", "marker_block_id": "/page/26/Text/47"}
152
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0281", "section": "IFEval Prompt Example", "page_start": 27, "page_end": 27, "type": "Text", "text": "### Response: [Model generates free-form text response]", "source": "marker_v2", "marker_block_id": "/page/26/Text/48"}
153
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0282", "section": "IFEval Prompt Example", "page_start": 27, "page_end": 27, "type": "Text", "text": "Table 20. Prompt examples used for Krause-LLMs and LLMs.", "source": "marker_v2", "marker_block_id": "/page/26/Text/49"}
154
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0283", "section": "IFEval Answers", "page_start": 28, "page_end": 28, "type": "Text", "text": "Instruction: Write a casual blog post about how the outer solar system is different from the inner solar system, and what that means for the possibility of life. Wrap your entire response with double quotation marks. Your response should contain 17 or more sentences.", "source": "marker_v2", "marker_block_id": "/page/27/Text/18"}
155
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0284", "section": "IFEval Answers", "page_start": 28, "page_end": 28, "type": "Text", "text": "Llama3-8B (finetuned w/ LoRA) Response: \"The outer solar system has different characteristics than the inner solar system. The inner solar system has a small mass, but the outer solar system has a large mass. The inner solar system has short periods, but the outer solar system has long periods. The inner solar system has small eccentricities, but the outer solar system has large eccentricities. The inner solar system has small inclinations, but the outer solar system has large inclinations. The inner solar system has low densities, but the outer solar system has high densities. The inner solar system has small semimajor axes, but the outer solar system has large semimajor axes. The inner solar system has small semiminor axes, but the outer solar system has large semiminor axes. The inner solar system has small mean anomalies, but the outer solar system has large mean anomalies. The inner solar system has small longitudes of the ascending nodes, but the outer solar system has large longitudes of the ascending nodes. The inner solar system has small arguments of perihelion, but the outer solar system has large arguments of perihelion. The inner solar system has small periods of perihelion, but the outer solar system has large periods of perihelion. The inner solar system has small semiaxes of perihelion, but the outer solar system has large semiaxes of perihelion. The inner solar system has small semiaxes of aphelion, but the outer solar system has large semiaxes of aphelion. The inner solar system has small periods of aphelion, but the outer solar system has large periods of aphelion. The inner solar system has small semimajor axes of perihelion, but the outer solar system has large semimajor axes of perihelion. The inner solar system has small semimajor axes of aphelion, but the outer solar system has large semimajor axes of aphelion. The inner solar system has small semiminor axes of perihelion, but the outer solar system has large semiminor axes of perihelion. The inner solar system has small semiminor axes of aphelion, but the outer solar system has large semiminor axes of aphelion. The inner solar system has small mean anomalies of perihelion, but the outer solar system has large mean anomalies of perihelion. The inner solar system has small mean anomalies of aphelion, but the outer solar system\"", "source": "marker_v2", "marker_block_id": "/page/27/Text/19"}
156
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0285", "section": "IFEval Answers", "page_start": 28, "page_end": 28, "type": "Text", "text": "Krause-Llama3-8B Response: \"The outer solar system is different from the inner solar system in that it has a much greater abundance of ices. This is due to the fact that the outer solar system is much farther away from the Sun. The outer solar system also has a much greater abundance of volatile materials, which are materials that can easily be vaporized or melted. This is due to the fact that the outer solar system is much colder than the inner solar system. The possibility of life in the outer solar system is much greater than in the inner solar system. This is due to the fact that the outer solar system has a much greater abundance of ices and volatile materials. These materials are necessary for life as we know it. The outer solar system is also much colder than the inner solar system, which makes it much more hospitable for life. The outer solar system is also much farther away from the Sun, which means that there is much less radiation from the Sun. This makes it much safer for life. Overall, the outer solar system is a much more hospitable place for life than the inner solar system. This is due to the fact that it has a much greater abundance of ices and volatile materials, and it is much colder and farther away from the Sun. This makes it much safer for life. The possibility of life in the outer solar system is much greater than in the inner solar system. This is due to the fact that the outer solar system has a much greater abundance of ices and volatile materials, and it is much colder and farther away from the Sun. This makes it much safer for life. The outer solar system is also much more hospitable for life than the inner solar system. This is due to the fact that it has a much greater abundance of ices and volatile materials, and it is much colder and farther away from the Sun. This makes it much safer for life. The possibility of life in the outer solar system is much greater than in the inner solar system. This is due to the fact that the outer solar system has a much greater abundance of ices and volatile materials, and it is much colder and farther away from the Sun. This makes it much safer for life. The outer solar system is also much more hospitable for life than the inner solar system. This is due to the fact that it has a much greater abundance of ices and volatile materials, and it is much colder and farther away from the Sun. This makes it much safer for life. The possibility of life in the outer solar system is much greater than\"", "source": "marker_v2", "marker_block_id": "/page/27/Text/20"}
icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/appendix_text_v3.txt ADDED
@@ -0,0 +1,467 @@
1
+ [p. 13 | section: Krause Synchronization Transformers | type: ListGroup]
2
+ –Appendix Contents– A Additional Discussion and Limitations B Extended Related Work C Multi-Cluster Behavior of Krause Attention C.1 Multi-Cluster Formation under Clustering Emergence Dynamics C.2 Multi-Cluster Formation under Mean-Field Dynamics D Extended Experiment Results D.1 Implementation Details * D.1.1 Vision Transformers * D.1.2 Autoregressive Transformers * D.1.3 LLMs finetuned with LoRA * D.1.4 Evaluation Datasets and Prompts for LLMs D.2 Vision Transformers for Image Recognition * D.2.1 Results on Fashion-MNIST * D.2.2 Qualitative Results of Attention Heatmaps * D.2.3 Ablation Studies * D.2.4 Krause SwinTransformers D.3 Autoregressive Models for Image Generation * D.3.1 Qualitative Examples * D.3.2 Ablation Studies D.4 LLMs for Language Understanding and Reasoning * D.4.1 Results on Krause-Qwen * D.4.2 Extended Results on Krause-Llama * D.4.3 Inference Speed Comparison E Theoretical Perspective on Attention Sink Phenomena F Relation to Competitive Learning and Lateral Interaction Dynamics
3
+
4
+ [p. 14 | section: A. Additional Discussion and Limitations | type: Text]
5
+ Theoretical Scope. Krause Attention is motivated by bounded-confidence and synchronization dynamics, and our theoretical analysis focuses on idealized continuous-time and mean-field formulations. As with prior dynamical analyses of Transformer models, these results characterize asymptotic behavior and provide qualitative insight into clustering and synchronization phenomena, rather than exact predictions for finite-depth, finite-width networks used in practice.
6
+
7
+ [p. 14 | section: A. Additional Discussion and Limitations | type: Text]
8
+ Hyperparameter Selection. Krause Attention introduces several task-dependent hyperparameters, most notably the local interaction window size, the top-k sparsity level, and the scale parameter \sigma of the RBF distance kernel. These parameters jointly control the trade-off between locality, selectivity, and computational efficiency. While the window size and sparsity level directly affect interaction structure, the RBF scale governs the sensitivity of distance-based coupling. In practice, we observe that the scale parameter is not a critical or fragile choice: across all experiments, it reliably converges to a suitable range during training and exhibits stable behavior over a broad initialization interval. Nevertheless, we do not claim the existence of a universally optimal configuration, and systematic strategies for automatic or adaptive hyperparameter selection remain an interesting direction for future work.
9
+
10
+ [p. 14 | section: A. Additional Discussion and Limitations | type: Text]
11
+ Integration with LLMs. In our LLM experiments, Krause Attention is evaluated as an auxiliary interaction pathway trained via LoRA on top of pretrained models, rather than as a full replacement for standard self-attention. Investigating deeper integration and end-to-end pretraining with Krause Attention remains an important direction for future work.
12
+
13
+ [p. 14 | section: B. Extended Related Work | type: Text]
14
+ Efficient Transformers. Despite their empirical success, standard Transformers incur quadratic computational and memory complexity with respect to sequence length, which fundamentally limits their scalability to long-context and high-resolution settings. To address this bottleneck, a substantial body of work has investigated sub-quadratic approximations to self-attention. One line of research reduces the cost of self-attention by imposing structured sparsity on the attention pattern. Sparse attention methods (Child et al., 2019) restrict token interactions to predefined subsets, lowering the effective complexity from \mathcal{O}(N^2d) to \mathcal{O}(N\sqrt{N}d). A second class of approaches focuses on approximating the attention computation itself. Performer (Choromanski et al., 2021) replaces the softmax operation with random feature-based kernel approximations, yielding linear-time \mathcal{O}(Nd) attention, while Linformer (Wang et al., 2020) exploits low-rank structure in the attention matrix to reduce complexity. Related linear attention formulations (Katharopoulos et al., 2020) further re-express self-attention using kernelized representations. Finally, Reformer (Kitaev et al., 2020) adopts a hashing-based strategy, employing locality-sensitive hashing to cluster similar tokens and obtain \mathcal{O}(Nd \log N) complexity.
15
+
16
+ [p. 14 | section: B. Extended Related Work | type: Text]
17
+ Transformer Alternatives. Beyond the routes we discussed in the main paper, another line of research focuses on architectural alternatives. State-Space Models (SSMs), particularly Structured State-Space Sequences (S4) (Gu et al., 2022) and Mamba (Gu & Dao, 2023; Dao & Gu, 2024), offer linear-time inference and demonstrate superior scaling in language and vision tasks (Zhu et al., 2024; Yang et al., 2024b; Liu et al., 2024; Munir et al., 2025). Hybrid architectures (Lieber et al., 2024; Glorioso et al., 2024; De et al., 2024) attempt to combine the efficiency of SSMs with the expressivity of Transformers. Other designs, such as the Perceiver (Jaegle et al., 2021), utilize latent bottlenecks to process high-dimensional inputs. Despite their efficiency, these models often trade the iterative refinement of dense attention for state compression.
18
+
19
+ [p. 14 | section: C. Multi-Cluster Behavior of Krause Attention | type: Text]
20
+ This appendix provides two complementary theoretical analyses supporting the multi-cluster behavior of Krause Attention. The first argument adapts the clustering emergence framework of Geshkovski et al. (2023) to our sparse, distance-based attention mechanism. The second analysis leverages the mean-field Wasserstein gradient-flow perspective developed by Chen et al. (2025). Both analyses characterize the dynamics under appropriate separation or bounded-confidence assumptions and provide mechanistic insight into why Krause Attention admits stable multi-cluster configurations.
21
+
22
+ [p. 14 | section: C.1. Multi-Cluster Formation under Clustering Emergence Dynamics | type: Text]
23
+ We analyze the multi-cluster formation by adopting the interacting-particle formulation of Transformer dynamics introduced by Geshkovski et al. (2023). Let \{z_i(t)\}_{i=1}^N \subset \mathbb{R}^D denote rescaled token representations evolving according to
24
+
25
+ [p. 14 | section: C.1. Multi-Cluster Formation under Clustering Emergence Dynamics | type: Equation]
26
+ \dot{z}_i(t) = \sum_{z_j \in \mathcal{N}_i(t)} a_{i,j}(t) V z_j(t) \tag{13}
27
+
28
+ [p. 15 | section: C.1. Multi-Cluster Formation under Clustering Emergence Dynamics | type: Text]
29
+ where the attention weights a_{i,j}(t) are defined by the Krause Attention rule in Eq. (12), which combines distance-based similarity with local, selective interactions.
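These dynamics are easy to probe numerically. The forward-Euler sketch below integrates Eq. (13) with Krause-style weights; for readability it applies the confidence radius directly in representation space (i.e., with identity query/key maps), which is a simplification of the full rule in Eq. (12).

```python
import numpy as np

def simulate(Z, V, steps=500, dt=0.05, R=1.0, topk=3, sigma=0.5):
    Z = Z.copy()
    for _ in range(steps):
        D2 = np.sum((Z[:, None, :] - Z[None, :, :]) ** 2, axis=-1)
        A = np.exp(-D2 / (2 * sigma**2)) * (D2 <= R**2)   # bounded confidence
        kth = -np.sort(-A, axis=1)[:, topk - 1 : topk]    # k-th largest weight per row
        A = np.where(A >= kth, A, 0.0)                    # keep only top-k neighbors
        A /= A.sum(axis=1, keepdims=True)
        Z = Z + dt * (A @ Z @ V.T)                        # Euler step of Eq. (13)
    return Z

rng = np.random.default_rng(0)
Z_final = simulate(rng.normal(size=(32, 2)), np.eye(2))
# groups farther apart than R never interact, so they evolve independently
```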
30
+
31
+ [p. 15 | section: C.1. Multi-Cluster Formation under Clustering Emergence Dynamics | type: Text]
32
+ As will be discussed in Sec. C.2, bounded-confidence dynamics naturally evolve toward well-separated clusters. Motivated by this phenomenon, we analyze the particle-level dynamics after such fragmentation has occurred and consider the regime in which tokens form m disconnected groups \{C_k\}_{k=1}^m lying outside each other's interaction ranges. This conditional analysis is standard in clustering dynamics and allows us to characterize the resulting multi-cluster behavior precisely.
33
+
34
+ [p. 15 | section: C.1. Multi-Cluster Formation under Clustering Emergence Dynamics | type: Text]
35
+ Hard Interaction Cutoff. Unlike standard self-attention, Krause Attention imposes an explicit bounded-confidence rule: tokens interact only with neighbors selected by the local top-k operator. Hence, whenever two token groups \mathcal{G}_1 and \mathcal{G}_2 are separated such that
36
+
37
+ [p. 15 | section: C.1. Multi-Cluster Formation under Clustering Emergence Dynamics | type: Equation]
38
+ z_2 \notin \xi_1^k \quad \text{for all } z_1 \in \mathcal{G}_1, \ z_2 \in \mathcal{G}_2, \tag{14}
39
+
40
+ [p. 15 | section: C.1. Multi-Cluster Formation under Clustering Emergence Dynamics | type: Text]
41
+ their pairwise attention weights satisfy
42
+
43
+ [p. 15 | section: C.1. Multi-Cluster Formation under Clustering Emergence Dynamics | type: Equation]
44
+ a_{1,2}(t) = 0 \quad \text{at all times.} \tag{15}
45
+
46
+ [p. 15 | section: C.1. Multi-Cluster Formation under Clustering Emergence Dynamics | type: Text]
47
+ The sparsity and indicator structure therefore enforce a strict cutoff in the interaction graph, preventing any influence between sufficiently distant token groups.
48
+
49
+ [p. 15 | section: C.1. Multi-Cluster Formation under Clustering Emergence Dynamics | type: Text]
50
+ Block-diagonalization of Attention Dynamics. The transition from global to local dynamics is rooted in the reducibility of the attention matrix. In standard self-attention, the matrix A(t) is strictly positive (A_{ij} > 0), making it an irreducible stochastic matrix. By the Perron-Frobenius theorem, A(t) has a unique largest eigenvalue \lambda = 1 with a corresponding strictly positive eigenvector \Lambda, which facilitates the global convergence toward a single cluster (Geshkovski et al., 2023).
51
+
52
+ [p. 15 | section: C.1. Multi-Cluster Formation under Clustering Emergence Dynamics | type: Text]
53
+ In contrast, the Krause Attention interaction graph G(t) decomposes into m disjoint connected components \{C_k\}_{k=1}^m. Formally, if z_i \in C_p and z_j \in C_q (p \neq q), the sparsity mechanism ensures A_{ij}(t) = 0. Consequently, A(t) is a reducible stochastic matrix, and its spectrum is the union of the spectra of its blocks:
54
+
55
+ [p. 15 | section: C.1. Multi-Cluster Formation under Clustering Emergence Dynamics | type: Equation]
56
+ \sigma(A(t)) = \bigcup_{k=1}^{m} \sigma(A_{\mathcal{C}_k}(t)). \tag{16}
57
+
58
+ [p. 15 | section: C.1. Multi-Cluster Formation under Clustering Emergence Dynamics | type: Text]
59
+ Since each A_{\mathcal{C}_k}(t) is itself a row-stochastic matrix, the eigenvalue \lambda=1 has a multiplicity of at least m. The corresponding eigenspace is spanned by the cluster indicator vectors \{\boldsymbol{\Lambda}_{\mathcal{C}_1},\ldots,\boldsymbol{\Lambda}_{\mathcal{C}_m}\} , where
60
+
61
+ [p. 15 | section: C.1. Multi-Cluster Formation under Clustering Emergence Dynamics | type: Equation]
62
+ (\mathbf{\Lambda}_{\mathcal{C}_k})_i = \begin{cases} 1 & \text{if } i \in \mathcal{C}_k \\ 0 & \text{otherwise} \end{cases} \tag{17}
63
+
64
+ [p. 15 | section: C.1. Multi-Cluster Formation under Clustering Emergence Dynamics | type: Text]
65
+ Then the global attention matrix A(t) naturally takes a block-diagonal form without requiring external constraints:
66
+
67
+ [p. 15 | section: C.1. Multi-Cluster Formation under Clustering Emergence Dynamics | type: Equation]
68
+ A(t) = \begin{pmatrix} A_{\mathcal{C}_1}(t) & 0 & \dots & 0 \\ 0 & A_{\mathcal{C}_2}(t) & \dots & 0 \\ \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & \dots & A_{\mathcal{C}_m}(t) \end{pmatrix} \tag{18}
69
+
70
+ [p. 15 | section: C.1. Multi-Cluster Formation under Clustering Emergence Dynamics | type: Text]
71
+ This multiplicity prevents the global contraction of tokens. Each cluster C_k evolves within its own invariant subspace S_k = \text{span}\{\mathbf{\Lambda}_{C_k}\}, and its dynamics are governed independently by:
72
+
73
+ [p. 15 | section: C.1. Multi-Cluster Formation under Clustering Emergence Dynamics | type: Equation]
74
+ \dot{Z}_{\mathcal{C}_k}(t) = A_{\mathcal{C}_k}(t) Z_{\mathcal{C}_k}(t) V^{\top}. \tag{19}
75
+
76
+ [p. 15 | section: C.1. Multi-Cluster Formation under Clustering Emergence Dynamics | type: Text]
77
+ Mathematically, the absence of inter-cluster spectral coupling ensures that the velocity \dot{z}_i for any i \in \mathcal{C}_p is a zero-gradient direction with respect to any z_j \in \mathcal{C}_q, thereby rigorously sustaining the multi-cluster geometry.
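The spectral claim in Eqs. (16)-(18) is easy to check numerically: assembling a block-diagonal matrix from positive row-stochastic blocks yields the eigenvalue 1 with multiplicity m. The sketch below is such a sanity check, not part of the paper's analysis.

```python
import numpy as np

rng = np.random.default_rng(0)

def row_stochastic(n):
    M = rng.random((n, n))
    return M / M.sum(axis=1, keepdims=True)

sizes = (3, 4, 5)                      # m = 3 clusters
A = np.zeros((sum(sizes), sum(sizes)))
i = 0
for n in sizes:                        # assemble the block-diagonal form of Eq. (18)
    A[i:i + n, i:i + n] = row_stochastic(n)
    i += n

eigs = np.linalg.eigvals(A)
print(int(np.sum(np.abs(eigs - 1.0) < 1e-8)))   # -> 3, i.e. multiplicity m
```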
78
+
79
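+ [p. 15 | section: C.1. Multi-Cluster Formation under Clustering Emergence Dynamics | type: Text]
+ As a quick numerical check of Eqs. (16)-(18), the following NumPy sketch (our own illustration, not code from the paper) assembles a reducible row-stochastic matrix from m = 3 strictly positive blocks and verifies that the eigenvalue \lambda = 1 has multiplicity m:
+
+ import numpy as np
+
+ rng = np.random.default_rng(0)
+
+ def random_stochastic(n):
+     # A strictly positive row-stochastic block: its Perron eigenvalue 1 is simple.
+     M = rng.random((n, n)) + 0.1
+     return M / M.sum(axis=1, keepdims=True)
+
+ # Block-diagonal attention matrix over 3 disjoint clusters, as in Eq. (18).
+ sizes = (4, 3, 5)
+ A = np.zeros((sum(sizes), sum(sizes)))
+ i = 0
+ for n in sizes:
+     A[i:i + n, i:i + n] = random_stochastic(n)
+     i += n
+
+ eigvals = np.linalg.eigvals(A)
+ print(np.sum(np.isclose(eigvals, 1.0)))  # -> 3: one unit eigenvalue per cluster
+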
+ [p. 15 | section: C.1. Multi-Cluster Formation under Clustering Emergence Dynamics | type: Text]
80
+ Independence from the Spectrum of V. In standard self-attention, the connectivity of the attention matrix implies that the dominant eigenvector of V induces global alignment across all tokens (Geshkovski et al., 2023). If V has a dominant eigenvector v_1 , it tends to pull all z_i towards the line spanned by v_1 . In contrast, Krause Attention decouples the dynamics: even if tokens from different clusters align locally with the same eigenmode of V, there is no coupling force between their centroids. Considering the block dynamics in Eq. (19), the clustering structure is preserved because the cross-cluster entries [A]_{i,j} remain strictly zero for z_i \in \mathcal{C}_p, z_j \in \mathcal{C}_q ( p \neq q ). The linear transformations induced by V act only within the subspaces of each cluster and cannot reintroduce cross-cluster interaction.
81
+
82
+ [p. 15 | section: C.1. Multi-Cluster Formation under Clustering Emergence Dynamics | type: Text]
83
+ Thus, Krause Attention maintains cluster separation dynamically, enabling the persistence of multi-cluster configurations independent of the global spectral dominance of V.
84
+
85
+ [p. 16 | section: C.2. Multi-Cluster Formation under Mean-Field Dynamics | type: Text]
86
+ We now analyze Krause Attention using the mean-field framework of Chen et al. (2025). Let \mu_t denote the empirical distribution of token representations on the unit sphere \mathbb{S}^{d-1} .
87
+
88
+ [p. 16 | section: C.2. Multi-Cluster Formation under Mean-Field Dynamics | type: Text]
89
+ Truncated Interaction Kernel. Our Krause Attention replaces the global dot-product kernel with a truncated RBF kernel
90
+
91
+ [p. 16 | section: C.2. Multi-Cluster Formation under Mean-Field Dynamics | type: Equation]
92
+ a_{x,y} = \exp\left(-\frac{\|Qx - Ky\|^2}{2\sigma^2}\right), \quad y \in \xi_x, (20)
93
+
94
+ [p. 16 | section: C.2. Multi-Cluster Formation under Mean-Field Dynamics | type: Text]
95
+ where \xi_x denotes the selected local neighborhood of x described in the main text, which enforces bounded-confidence interactions consistent with the classical Krause model. In particular, the sparse neighborhood operator x \mapsto \xi_x induces a compact-support interaction kernel: there exists a radius R>0 such that
96
+
97
+ [p. 16 | section: C.2. Multi-Cluster Formation under Mean-Field Dynamics | type: Equation]
98
+ a_{x,y} = 0 \quad \text{whenever} \quad \|Qx - Ky\| > R. (21)
99
+
100
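+ [p. 16 | section: C.2. Multi-Cluster Formation under Mean-Field Dynamics | type: Text]
+ A minimal sketch of the truncated kernel in Eqs. (20)-(21), assuming Euclidean token features and illustrative values of \sigma and R of our own choosing (not the paper's code):
+
+ import numpy as np
+
+ def truncated_rbf_weights(Qx, Ky, sigma=1.0, R=1.5):
+     # Pairwise squared distances ||Qx_i - Ky_j||^2, shape (n, n).
+     d2 = ((Qx[:, None, :] - Ky[None, :, :]) ** 2).sum(-1)
+     a = np.exp(-d2 / (2.0 * sigma ** 2))
+     a[d2 > R ** 2] = 0.0  # compact support: a_{x,y} = 0 whenever ||Qx - Ky|| > R
+     return a / a.sum(axis=1, keepdims=True)  # the self term keeps every row nonzero
+
+ Z = np.random.default_rng(1).normal(size=(8, 4))
+ A = truncated_rbf_weights(Z, Z)
+ print((A == 0).mean(), "of the entries are truncated")
+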
+ [p. 16 | section: C.2. Multi-Cluster Formation under Mean-Field Dynamics | type: Text]
101
+ The induced mean-field dynamics satisfy the continuity equation:
102
+
103
+ [p. 16 | section: C.2. Multi-Cluster Formation under Mean-Field Dynamics | type: Equation]
104
+ \partial_t \mu_t + \operatorname{div}(\mu_t \mathcal{X}_{\mu_t}) = 0, \quad \mathcal{X}_{\mu_t}(x) = \int_{\mathbb{S}^{d-1}} \mathcal{P}_x[y] a_{x,y} \, \mathrm{d}\mu_t(y), \tag{22}
105
+
106
+ [p. 16 | section: C.2. Multi-Cluster Formation under Mean-Field Dynamics | type: Text]
107
+ and correspond to a Wasserstein gradient flow maximizing the interaction energy
108
+
109
+ [p. 16 | section: C.2. Multi-Cluster Formation under Mean-Field Dynamics | type: Equation]
110
+ E_K[\mu] = \frac{1}{2\tilde{\beta}} \iint_{\mathbb{S}^{d-1} \times \mathbb{S}^{d-1}} a_{x,y} d\mu(x) d\mu(y), \qquad \tilde{\beta} := \frac{1}{2\sigma^2}. (23)
111
+
112
+ [p. 16 | section: C.2. Multi-Cluster Formation under Mean-Field Dynamics | type: Text]
113
+ Decomposition into Independent Flows. Bounded-confidence dynamics are known to generically evolve toward fragmented configurations consisting of well-separated opinion clusters (Hegselmann & Krause, 2002; Lorenz, 2007; Blondel et al., 2009; Chen et al., 2025). In particular, uniform or sufficiently broad initial measures are linearly unstable and tend to break into clusters with characteristic spacing on the order of 2R, where R denotes the interaction radius (Wang et al., 2017). Motivated by this phenomenon, we focus our analysis on the regime after such fragmentation has taken place. In what follows, we assume there exists a time t_0 \ge 0 such that the measure \mu_{t_0} admits a decomposition
114
+
115
+ [p. 16 | section: C.2. Multi-Cluster Formation under Mean-Field Dynamics | type: Equation]
116
+ \mu_{t_0} = \sum_{k=1}^{m} \pi_k \mu_{t_0}^{(k)}, \quad \text{supp}(\mu_{t_0}^{(k)}) \subset \mathcal{C}_k \subset \mathbb{S}^{d-1}, (24)
117
+
118
+ [p. 16 | section: C.2. Multi-Cluster Formation under Mean-Field Dynamics | type: Text]
119
+ where the sets \{\mathcal{C}_k\}_{k=1}^m satisfy
120
+
121
+ [p. 16 | section: C.2. Multi-Cluster Formation under Mean-Field Dynamics | type: Equation]
122
+ \inf_{x \in \mathcal{C}_p,\, y \in \mathcal{C}_q} \|Qx - Ky\| > R \quad \text{for all } p \neq q. (25)
123
+
124
+ [p. 16 | section: C.2. Multi-Cluster Formation under Mean-Field Dynamics | type: Text]
125
+ Under this condition, the interaction kernel vanishes across clusters,
126
+
127
+ [p. 16 | section: C.2. Multi-Cluster Formation under Mean-Field Dynamics | type: Equation]
128
+ a_{x,y} = 0 \quad \forall x \in \mathcal{C}_p, \ y \in \mathcal{C}_q, \ p \neq q. (26)
129
+
130
+ [p. 16 | section: C.2. Multi-Cluster Formation under Mean-Field Dynamics | type: Text]
131
+ Consequently, for x \in \mathcal{C}_p ,
132
+
133
+ [p. 16 | section: C.2. Multi-Cluster Formation under Mean-Field Dynamics | type: Equation]
134
+ \mathcal{X}_{\mu_t}(x) = \int_{\mathcal{C}_p} \mathcal{P}_x[y] a_{x,y} \, \mathrm{d}\mu_t^{(p)}(y), \tag{27}
135
+
136
+ [p. 16 | section: C.2. Multi-Cluster Formation under Mean-Field Dynamics | type: Text]
137
+ which implies that the velocity field is entirely determined by intra-cluster interactions.
138
+
139
+ [p. 16 | section: C.2. Multi-Cluster Formation under Mean-Field Dynamics | type: Text]
140
+ Moreover, since the velocity field \mathcal{X}_{\mu_t} is tangential to \mathbb{S}^{d-1} and depends only on points within the same cluster, the flow map associated with the continuity equation preserves the supports \mathcal{C}_k . In particular, no trajectory starting in \mathcal{C}_p can enter the interaction range of another cluster \mathcal{C}_q .
141
+
142
+ [p. 16 | section: C.2. Multi-Cluster Formation under Mean-Field Dynamics | type: Text]
143
+ Therefore, each C_k is forward invariant under the mean-field dynamics, and the global flow decomposes into m independent gradient flows:
144
+
145
+ [p. 16 | section: C.2. Multi-Cluster Formation under Mean-Field Dynamics | type: Equation]
146
+ \partial_t \mu_t^{(k)} + \operatorname{div}\left(\mu_t^{(k)} \mathcal{X}_{\mu_t^{(k)}}\right) = 0, \quad \mathcal{X}_{\mu_t^{(k)}}(x) = \int_{\mathbb{S}^{d-1}} \mathcal{P}_x[y] a_{x,y} \, \mathrm{d}\mu_t^{(k)}(y). \tag{28}
147
+
148
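+ [p. 16 | section: C.2. Multi-Cluster Formation under Mean-Field Dynamics | type: Text]
+ The fragmentation assumed above can be reproduced with the classical one-dimensional Hegselmann-Krause update, shown here as a self-contained toy with parameters of our own choosing: a broad initial opinion profile breaks into well-separated clusters with spacing on the order of 2R.
+
+ import numpy as np
+
+ rng = np.random.default_rng(2)
+ x = rng.uniform(0.0, 1.0, size=200)  # initial opinions
+ R = 0.08                             # confidence radius
+ for _ in range(50):
+     within = np.abs(x[:, None] - x[None, :]) <= R  # bounded-confidence neighbors
+     x = (within * x[None, :]).sum(axis=1) / within.sum(axis=1)  # local averaging
+ print(np.unique(np.round(x, 3)))  # a handful of cluster centers, roughly 2R apart
+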
+ [p. 17 | section: C.2. Multi-Cluster Formation under Mean-Field Dynamics | type: Text]
149
+ Exponential Convergence within Clusters. We now analyze the dynamics within a single cluster C_k . Since C_k is isolated, the evolution of \mu_t^{(k)} is governed by the restriction of the Krause Attention energy E_K to C_k . Along the flow, the energy satisfies
150
+
151
+ [p. 17 | section: C.2. Multi-Cluster Formation under Mean-Field Dynamics | type: Equation]
152
+ \frac{\mathrm{d}}{\mathrm{d}t} E_K[\mu^{(k)}] = \int_{\mathbb{S}^{d-1}} \|\mathcal{X}_{\mu^{(k)}}(x)\|_2^2 \,\mathrm{d}\mu^{(k)}(x) \geqslant 0. (29)
153
+
154
+ [p. 17 | section: C.2. Multi-Cluster Formation under Mean-Field Dynamics | type: Text]
155
+ Fix d \geqslant 2 , \tilde{\beta} > 0 , \alpha \in [0, \pi/2) and u \in \mathbb{S}^{d-1} . Let S_{\alpha}^{+(k)}(u) \subseteq \mathbb{S}^{d-1} denote the spherical cap
156
+
157
+ [p. 17 | section: C.2. Multi-Cluster Formation under Mean-Field Dynamics | type: Equation]
158
+ S_{\alpha}^{+(k)}(u) := \{ x \in \mathcal{C}_k \mid \langle x, u \rangle \geqslant \cos \alpha \}, \quad k = 1, \dots, m. (30)
159
+
160
+ [p. 17 | section: C.2. Multi-Cluster Formation under Mean-Field Dynamics | type: Text]
161
+ Assume that \mu_0^{(k)} is supported on S_{\alpha}^{+(k)}(u) .
162
+
163
+ [p. 17 | section: C.2. Multi-Cluster Formation under Mean-Field Dynamics | type: Text]
164
+ If 10(1+\sqrt{\tilde{\beta}})\tan\alpha\leqslant 1 , then following Chen et al. (2025), the interaction energy satisfies a Polyak–Łojasiewicz inequality:
165
+
166
+ [p. 17 | section: C.2. Multi-Cluster Formation under Mean-Field Dynamics | type: Equation]
167
+ E_K[\delta_{\mathcal{L}_k}] - E_K[\mu^{(k)}] \le 10e^{-\tilde{\beta}} \int_{S_\alpha^{+(k)}(u)} \|\mathcal{X}_{\mu^{(k)}}(x)\|_2^2 d\mu^{(k)}(x). (31)
168
+
169
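+ [p. 17 | section: C.2. Multi-Cluster Formation under Mean-Field Dynamics | type: Text]
+ For completeness, the passage from Eq. (29) and Eq. (31) to exponential convergence is a standard Gronwall argument, sketched here. Writing h(t) := E_K[\delta_{\mathcal{L}_k}] - E_K[\mu_t^{(k)}] for the energy gap, Eqs. (29) and (31) combine to \dot{h}(t) \le -\frac{e^{\tilde{\beta}}}{10} h(t) , hence h(t) \le h(0)\, e^{-e^{\tilde{\beta}} t/10} ; passing from the energy gap to the Wasserstein distance costs a square root, which accounts for the halved rate e^{\tilde{\beta}}/20 appearing in Eq. (32).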
+ [p. 17 | section: C.2. Multi-Cluster Formation under Mean-Field Dynamics | type: Text]
170
+ As a consequence, the solution \mu_t^{(k)} converges exponentially fast to a Dirac mass \delta_{\mathcal{L}_k} supported at a local consensus point \mathcal{L}_k \in S_{\alpha}^{+(k)}(u) :
171
+
172
+ [p. 17 | section: C.2. Multi-Cluster Formation under Mean-Field Dynamics | type: Equation]
173
+ W_2(\mu_t^{(k)}, \delta_{\mathcal{L}_k}) \le 20e^{-\tilde{\beta}} e^{-\frac{e^{\tilde{\beta}}}{20}t} \left( \int_{S_{\alpha}^{+(k)}(u)} \|\mathcal{X}_{\mu^{(k)}}(x)\|_2^2 d\mu^{(k)}(x) \right)^{\frac{1}{2}}. (32)
174
+
175
+ [p. 17 | section: C.2. Multi-Cluster Formation under Mean-Field Dynamics | type: Text]
176
+ Equivalently,
177
+
178
+ [p. 17 | section: C.2. Multi-Cluster Formation under Mean-Field Dynamics | type: Equation]
179
+ W_2(\mu_t^{(k)}, \delta_{\mathcal{L}_k}) \le C_k e^{-\lambda_k t},\tag{33}
180
+
181
+ [p. 17 | section: C.2. Multi-Cluster Formation under Mean-Field Dynamics | type: Text]
182
+ with constants depending on \sigma and the geometry of C_k .
183
+
184
+ [p. 17 | section: C.2. Multi-Cluster Formation under Mean-Field Dynamics | type: Text]
185
+ Global Multi-cluster Convergence. Since each component measure \mu_t^{(k)} converges to a Dirac mass \delta_{\mathcal{L}_k} in the Wasserstein distance W_2 , the full system converges to a fragmented consensus:
186
+
187
+ [p. 17 | section: C.2. Multi-Cluster Formation under Mean-Field Dynamics | type: Equation]
188
+ \mu_t = \sum_{k=1}^m \pi_k \mu_t^{(k)} \xrightarrow{W_2} \sum_{k=1}^m \pi_k \delta_{\mathcal{L}_k}. (34)
189
+
190
+ [p. 17 | section: C.2. Multi-Cluster Formation under Mean-Field Dynamics | type: Text]
191
+ This establishes the existence of stable multi-cluster equilibria under Krause Attention.
192
+
193
+ [p. 17 | section: C.2. Multi-Cluster Formation under Mean-Field Dynamics | type: Text]
194
+ Both the clustering-emergence mechanism and the mean-field analysis demonstrate that Krause Attention fundamentally alters Transformer dynamics: global synchronization is replaced by structured bounded-confidence interactions, encouraging stable multi-cluster representations. This behavior contrasts sharply with the global consensus dynamics induced by standard self-attention and provides a principled theoretical explanation for the empirical robustness of Krause Attention.
195
+
196
+ [p. 17 | section: D.1.1. VISION TRANSFORMERS | type: Text]
197
+ All models are trained from scratch under the same training protocol. The detailed training recipes are given in Table 7 for CIFAR-10/100, in Table 8 for Fashion-MNIST, and in Table 9 for ImageNet-1K.
198
+
199
+ [p. 17 | section: D.1.2. AUTOREGRESSIVE TRANSFORMERS | type: Text]
200
+ All models are trained from scratch under the same training protocol for each dataset. Table 10 presents the detailed configurations.
201
+
202
+ [p. 18 | section: D.1.2. AUTOREGRESSIVE TRANSFORMERS | type: Caption]
203
+ Table 7. Implementation details of Vision Transformers on CIFAR-10/100.
204
+
205
+ [p. 18 | section: D.1.2. AUTOREGRESSIVE TRANSFORMERS | type: Table]
+ Hyperparameters Models (K)ViT-T (K)ViT-S (K)ViT-B Batch Size 256 256 256 Optimizer AdamW AdamW AdamW LR 3 \cdot 10^{-4} 3 \cdot 10^{-4} 1 \cdot 10^{-4} LR Decay cosine cosine cosine Weight Decay 0.05 0.05 0.05 Warmup Epochs 10 10 10 Epochs 300 300 300 GPUs 1xH100 1xH100 1xH100 Sigma \sigma 2.5 2.5 2.5 Dropout X X X Drop Path 0.1/X 0.1/X 0.1/X H. flip ✓ ✓ ✓ Mixup Alpha 0.8 0.8 0.8 Cutmix Alpha 1.0 1.0 1.0 Label Smoothing 0.1 0.1 0.1 Mixup Prob. 0.8 0.8 0.8
225
+
226
+ [p. 18 | section: D.1.2. AUTOREGRESSIVE TRANSFORMERS | type: TableGroup]
227
+ Table 8. Implementation details of Vision Transformers on Fashion-MNIST . Hyperparameters Models (K)ViT-T (K)ViT-S (K)ViT-B Batch Size 256 256 256 Optimizer AdamW AdamW AdamW LR 3 \cdot 10^{-4} 3 \cdot 10^{-4} 1 \cdot 10^{-4} LR Decay cosine cosine cosine Weight Decay 0.05 0.05 0.05 Warmup Epochs 10 10 10 Epochs 300 300 300 GPUs 1xH100 1xH100 1xH100 Sigma \sigma 2.5 2.5 2.5 Dropout X X X Drop Path 0.1/X 0.1/X 0.1/X H. flip ✓ ✓ ✓ Mixup Alpha 0.8 0.8 0.8 Cutmix Alpha 1.0 1.0 1.0 Label Smoothing 0.1 0.1 0.1 Mixup Prob. 0.8 0.8 0.8
228
+
229
+ [p. 18 | section: D.1.3. LLMs finetuned with Lora | type: Text]
230
+ The original LLMs as well as Krause-LLMs are finetuned under the same set of LoRA hyperparameters. We finetune the Q, K, V, O projection matrices and MLP modules for both attention mechanisms. For Krause-LLMs, at the skip connection of every layer, Krause Attention is combined with standard attention via a learnable mixing ratio, initialized at 0.8:0.2 across all models. Under this setup, LoRA finetuning increases the parameter count of the baseline LLMs by 0.52%, while Krause-LLMs incur a slightly higher overhead of 0.59%. The detailed settings are shown in Table 11.
231
+
232
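+ [p. 18 | section: D.1.3. LLMs finetuned with Lora | type: Text]
+ A minimal sketch of the per-layer combination described above (our own illustration; in the actual models the mixing coefficient is a learnable scalar, and we assume here that the larger initial weight sits on the Krause branch):
+
+ import numpy as np
+
+ def mixed_layer(x, std_attn, krause_attn, alpha=0.8):
+     # Convex mix of the two attention outputs at the skip connection,
+     # initialized at a 0.8:0.2 ratio; `alpha` is trained in practice.
+     mixed = alpha * krause_attn(x) + (1.0 - alpha) * std_attn(x)
+     return x + mixed  # residual/skip connection
+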
+ [p. 18 | section: D.1.4. EVALUATION DATASETS AND PROMPTS FOR LLMS | type: Text]
233
+ Evaluation Datasets. We conduct a comprehensive evaluation of Krause-LLMs and LLMs across five critical linguistic dimensions, including reading comprehension (BoolQ), logical inference (CB, ANLI (R1-R3), and MNLI), commonsense reasoning (PIQA), expert-level knowledge (MMLU-Pro), and instruction-following evaluations (IFEval). For the IFEval benchmark, we adopt Strict Acc. as the reported evaluation metric.
234
+
235
+ [p. 18 | section: D.1.4. EVALUATION DATASETS AND PROMPTS FOR LLMS | type: Text]
236
+ Prompts for LLMs. We present our prompts used for LoRA finetuning experiments in Table 20 at the end of Appendix.
237
+
238
+ [p. 18 | section: D.2.1. RESULTS ON FASHION-MNIST | type: Text]
239
+ We further assess KViTs on Fashion-MNIST, adopting the same local neighborhood and top-k settings used for CIFAR-10/100. The results in Table 12 show that Krause Attention yields consistent accuracy improvements across all ViT variants, with negligible changes in parameter counts and a reduction in computational cost. These results further support the robustness and generalization benefits of Krause Attention across image datasets with different visual characteristics.
240
+
241
+ [p. 18 | section: D.2.2. QUALITATIVE RESULTS OF ATTENTION HEATMAPS | type: Text]
242
+ We visualize attention heatmaps from KViTs on representative samples in Fig. 8. The results show that bounded-confidence interactions naturally give rise to distinct multi-cluster synchronization patterns across attention heads. These visualizations provide an intuitive illustration of how Krause Attention induces multiple coherent token groups rather than global mixing within Transformer layers.
243
+
244
+ [p. 18 | section: D.2.2. QUALITATIVE RESULTS OF ATTENTION HEATMAPS | type: Text]
245
+ In Fig. 9, we further examine the evolution of attention patterns across layers. The right panel shows that standard self-attention progressively converges to nearly identical attention maps in deeper layers, a behavior often associated with representational collapse or attention sink formation. In contrast, the left panel demonstrates that Krause Attention maintains diverse, structured attention patterns throughout the network depth. This qualitative difference visually confirms the ability
246
+
247
+ [p. 19 | section: D.2.2. QUALITATIVE RESULTS OF ATTENTION HEATMAPS | type: Caption]
248
+ Table 9. Implementation details of Vision Transformers on ImageNet-1K.
249
+
250
+ [p. 19 | section: D.2.2. QUALITATIVE RESULTS OF ATTENTION HEATMAPS | type: Caption]
251
+ Table 10. Implementation details of autoregressive Transformers.
252
+
253
+ [p. 19 | section: D.2.2. QUALITATIVE RESULTS OF ATTENTION HEATMAPS | type: Table]
254
+ Hyperparameters Models (K)ViT-S-16/32 (K)ViT-B-16/32 Batch Size 1024 1024 Optimizer AdamW AdamW LR 3 \cdot 10^{-4} 2/3 \cdot 10^{-4} LR Decay cosine cosine Weight Decay 0.05 0.05 Warmup Epochs 10 10 Epochs 300 300 GPUs 4xH100 4xH100 Sigma \sigma 4.0 4.0 Dropout X X Drop Path 0.1/X 0.1/X H. flip ✓ ✓ Mixup Alpha 0.8 0.8 Cutmix Alpha 1.0 1.0 Label Smoothing 0.1 0.1 Rand Augment 2&9 2&9 Random Erasing 0.25 0.25 Mixup Prob. 0.8 0.8
255
+
256
+ [p. 19 | section: D.2.2. QUALITATIVE RESULTS OF ATTENTION HEATMAPS | type: Table]
257
+ Hyperparameters Models MNIST CIFAR-10 Training Batch Size 64 8 Optimizer AdamW AdamW LR 1 \cdot 10^{-3} 1 \cdot 10^{-3} LR Decay cosine cosine Warmup Ratio 0.05 0.05 Epochs 30 30 GPUs 4xH100 4xH100 Sigma \sigma 2.5 2.5 Dropout 0.1 0.1 Hidden Dimension 256 512 Heads 8 8 Layers 12 16 Temperature 1.0 1.0 Generation Batch Size 2000 200
258
+
259
+ [p. 19 | section: D.2.2. QUALITATIVE RESULTS OF ATTENTION HEATMAPS | type: Text]
260
+ of Krause Attention to preserve multi-cluster structure and avoid excessive concentration of attention mass.
261
+
262
+ [p. 19 | section: D.2.3. ABLATION STUDIES | type: Text]
263
+ Table 13 displays the detailed ablation studies of each component under two training settings. As can be observed, the RBF-based distance kernel used in Krause Attention already provides a substantial improvement over standard ViT-S, even in the absence of local interaction constraints or top-k sparsification. This observation holds consistently across both training settings, indicating that replacing dot-product similarity with distance-based aggregation constitutes a strong inductive bias on its own.
264
+
265
+ [p. 19 | section: D.2.3. ABLATION STUDIES | type: Text]
266
+ When models are trained without Mixup and CutMix, introducing either local interaction or top-k selection leads to additional accuracy gains, and combining both mechanisms yields the best overall performance. This suggests that, in the absence of heavy data mixing, selective and localized interactions help the model focus on semantically coherent token groups and reinforce meaningful cluster formation.
267
+
268
+ [p. 19 | section: D.2.3. ABLATION STUDIES | type: Text]
269
+ In contrast, when Mixup and CutMix are applied, the benefit of top-k selection becomes less pronounced. Since these augmentations partially mix image patches from different categories, the resulting token representations are less semantically localized, making selective interactions harder to exploit for improved discrimination. In this regime, enforcing top-k sparsity mainly contributes to computational efficiency rather than accuracy, while local interaction remains beneficial for both performance and FLOP reduction.
270
+
271
+ [p. 19 | section: D.2.3. ABLATION STUDIES | type: Text]
272
+ Across all ablation settings, KViTs maintain essentially identical parameter counts to ViT-S. Enforcing local interaction consistently reduces FLOPs, and even the dense variant of Krause Attention remains more efficient than standard dot-product attention. Overall, these ablations disentangle the roles of the RBF kernel, locality, and selective sparsity, and highlight how their contributions interact with data augmentation strategies in shaping both performance and efficiency.
273
+
274
+ [p. 19 | section: D.2.4. KRAUSE SWINTRANSFORMERS | type: Text]
275
+ SwinTransformers incorporate strong locality priors through hierarchical, window-based self-attention and represent a widely adopted alternative to global Vision Transformers for image recognition (Liu et al., 2021). To isolate the contribution of the proposed bounded-confidence interaction dynamics from architectural locality alone, we replace the vanilla attention modules in Swin-T and Swin-S with Krause Attention and compare the resulting models (KSwin-T and KSwin-S) against their original counterparts under identical training protocols.
276
+
277
+ [p. 19 | section: D.2.4. KRAUSE SWINTRANSFORMERS | type: Text]
278
+ Since SwinTransformers already operate on small fixed windows, we do not introduce additional locality constraints or
279
+
280
+ [p. 20 | section: D.2.4. KRAUSE SWINTRANSFORMERS | type: Caption]
281
+ Table 11. Implementation details of finetuning with LoRA, where Q_K, K_K, V_K, O_K denote the parameter set of Krause Attention.
282
+
283
+ [p. 20 | section: D.2.4. KRAUSE SWINTRANSFORMERS | type: TableGroup]
284
+ Hyperparameters Value LoRA Rank (r) 16 LoRA Alpha (α) 32 LoRA Dropout 0.1 (Krause) Sigma σ 5.5 Target Modules Q, K, V, O projs, Gate, Up, Down projs (Krause) Target Modules Q, K, V, O projs, Gate, Up, Down projs, Q_K, K_K, V_K, O_K projs Task Type Causal LM Optimizer AdamW Learning Rate (LR) 5 \cdot 10^{-5} LR Scheduler cosine Weight Decay 0.01 Warmup Ratio 0.1 Total Epochs 2 Per-device Batch Size 4 Gradient Accumulation 8 Mixed Precision BF16 Max Gradient Norm 1.0 Gradient Checkpointing ✓ GPUs 4xH100 Dataloader Workers 8 Logging Steps 5 Evaluation Strategy steps (150)
+
+ [p. 20 | section: D.2.4. KRAUSE SWINTRANSFORMERS | type: Caption]
+ Table 12. Image classification results on Fashion-MNIST.
285
+
286
+ [p. 20 | section: D.2.4. KRAUSE SWINTRANSFORMERS | type: Table]
287
+ Models Accuracy (%) # Parameters FLOPs ViT-T 93.47 5,353,738 0.28G KViT-T 94.65 5,353,750 0.19G ViT-S 94.11 21,324,298 1.09G KViT-S 94.71 21,324,310 0.73G ViT-B 95.74 85,115,914 4.30G KViT-B 96.10 85,115,926 2.89G
288
+
289
+ [p. 20 | section: D.2.4. KRAUSE SWINTRANSFORMERS | type: Text]
290
+ top-k sparsification in this setting. Instead, we focus exclusively on replacing dot-product similarity with the RBF-based distance kernel that underlies Krause Attention. This allows us to evaluate whether the bounded-confidence interaction rule provides benefits beyond the locality bias already present in Swin architectures.
291
+
292
+ [p. 20 | section: D.2.4. KRAUSE SWINTRANSFORMERS | type: Text]
293
+ As shown in Table 14, Krause SwinTransformers consistently outperform their vanilla counterparts on CIFAR-10 across both model scales. Despite nearly identical parameter counts, KSwin-T and KSwin-S achieve higher classification accuracy while substantially reducing FLOPs. The efficiency gain arises from the kernel formulation itself. Compared to dot-product attention, the RBF-based distance kernel decomposes into simpler element-wise operations and avoids expensive inner products and softmax normalization, leading to lower computational cost even within identical attention windows. These results indicate that the performance gains of Krause Attention do not arise merely from enforcing locality, but from the distance-based interaction dynamics themselves.
294
+
295
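+ [p. 20 | section: D.2.4. KRAUSE SWINTRANSFORMERS | type: Text]
+ A sketch of the drop-in kernel swap inside a fixed attention window (our own illustration, not the released code): the weights are built from elementwise squared differences and a plain sum-normalization, with no logit matrix followed by a max-shifted softmax.
+
+ import numpy as np
+
+ def rbf_window_attention(q, k, v, sigma=4.0):
+     # q, k, v: (w, d) tokens of one window; replaces q @ k.T + softmax.
+     d2 = ((q[:, None, :] - k[None, :, :]) ** 2).sum(-1)  # elementwise ops
+     w_ = np.exp(-d2 / (2 * sigma ** 2))
+     w_ /= w_.sum(axis=1, keepdims=True)                  # simple normalization
+     return w_ @ v
+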
+ [p. 20 | section: D.2.4. KRAUSE SWINTRANSFORMERS | type: Text]
296
+ Notably, even in architectures where local attention is already hard-coded, replacing similarity-based aggregation with bounded-confidence, distance-aware interactions improves both efficiency and predictive performance. This further supports the view that Krause Attention introduces a complementary and robust inductive bias that extends beyond architectural locality, reinforcing its general applicability across different Transformer backbones.
297
+
298
+ [p. 21 | section: D.2.4. KRAUSE SWINTRANSFORMERS | type: FigureGroup]
299
+ Figure 8. Qualitative comparisons of Krause Attention and standard attention on ImageNet-1K.
300
+
301
+ [p. 21 | section: D.2.4. KRAUSE SWINTRANSFORMERS | type: TableGroup]
302
+ Table 13. Image classification results under ablation studies on CIFAR-10. Models w/o Mixup & Cutmix w/ Mixup & Cutmix Accuracy(%) # Parameters FLOPs Accuracy(%) # Parameters FLOPs KViT-S (w/o top-k) 91.85 21,342,358 0.97G 95.59 21,342,358 0.97G KViT-S (w/o local) 87.55 21,342,358 1.41G 93.96 21,342,358 1.41G KViT-S (w/o local&top-k) 87.28 21,342,358 1.41G 94.06 21,342,358 1.41G KViT-S 92.16 21,342,358 0.97G 95.20 21,342,358 0.97G ViT-S 82.33 21,342,346 1.43G 93.33 21,342,346 1.43G
303
+
304
+ [p. 21 | section: D.3.1. QUALITATIVE EXAMPLES | type: Text]
305
+ We evaluate conditional generation through image completion on MNIST and CIFAR-10, where the top half of each image is masked. The model is conditioned on the visible portion and tasked with generating the remaining pixels. Representative results are shown in Fig. 10. For each input image, five completion samples are produced. These results demonstrate that KARMs generate coherent and diverse completions consistent with the underlying digit/object structure.
306
+
307
+ [p. 21 | section: D.3.2. ABLATION STUDIES | type: Text]
308
+ Varying Window Size and Sparsity Level. As shown in Table 15, reducing the local window size substantially increases inference throughput, with only a mild gradual degradation in generation quality. This behavior highlights a controllable trade-off between efficiency and fidelity in KARMs: smaller windows yield faster generation at the cost of slightly higher BPD, allowing practitioners to adjust the window and top-k parameters according to application-specific requirements.
309
+
310
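+ [p. 21 | section: D.3.2. ABLATION STUDIES | type: Text]
+ A back-of-envelope cost model (ours, ignoring constants and the hidden dimension) makes this trade-off concrete: per generation step, windowed attention scores roughly w cached tokens instead of all n, so score computation scales like w/n of full attention.
+
+ n = 784  # e.g. a 28x28 MNIST pixel sequence (assumed)
+ for w in (128, 96, 64):
+     print(f"window {w}: ~{w / n:.2f}x the per-step score cost of full attention")
+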
+ [p. 21 | section: D.3.2. ABLATION STUDIES | type: Text]
311
+ Varying Individual Components. Table 16 presents an ablation study that isolates the effects of the local window and top-k selection in KARMs. Removing the local window improves the likelihood estimation but substantially increases per-step computation, resulting in slower overall generation. In contrast, disabling top-k selection increases throughput by eliminating the selection overhead, but leads to degraded generation quality. Notably, the lowest BPD is achieved when both locality and sparsity constraints are removed, highlighting the intrinsic modeling strength of the underlying distance-based Krause Attention itself. This observation is consistent with the behavior observed in the Krause SwinTransformer experiments.
312
+
313
+ [p. 21 | section: D.3.2. ABLATION STUDIES | type: Text]
314
+ Taken together, these results demonstrate that Krause Attention provides a modular and flexible design, where locality and sparsity can be selectively enabled to trade off performance and efficiency under different resource constraints.
315
+
316
+ [p. 22 | section: D.3.2. ABLATION STUDIES | type: FigureGroup]
317
+ Figure 9. Evolution of attention scores across layers in KViTs/ViTs. Krause Attention (left) achieves stable multi-cluster formation, while standard attention (right) progressively converges to a single global consensus.
318
+
319
+ [p. 22 | section: D.3.2. ABLATION STUDIES | type: TableGroup]
320
+ Table 14. Image classification results of Krause SwinTransformer on CIFAR-10. Models Accuracy(%) # Parameters FLOPs Swin-T 90.73 27,510,484 0.20G KSwin-T 91.09 27,510,622 0.09G Swin-S 90.21 48,811,108 0.38G KSwin-S 91.13 48,811,390 0.18G
321
+
322
+ [p. 22 | section: D.4.1. RESULTS ON KRAUSE-QWEN | type: Text]
323
+ Qwen1.5-7B is an intermediate release in the Qwen family and shares the same architectural configuration and tokenizer as Qwen2 (Yang et al., 2024a) . We evaluate Krause-Qwen1.5-7B under a zero-shot setting across the same language understanding and reasoning benchmarks used in the main text, while systematically varying the key Krause Attention hyperparameters, namely the local window size and top-k sparsity level. The results under different parameter configurations are summarized in Table 17. Overall, Krause-Qwen1.5-7B consistently matches or improves upon the LoRA-finetuned baseline across a wide range of tasks, including BoolQ, PIQA, MNLI, ANLI, MMLU-Pro, and IFEval. Performance remains robust across different window and sparsity settings, indicating that Krause Attention is not overly sensitive to precise hyperparameter choices. Notably, on the ANLI benchmarks, LoRA finetuning of Qwen1.5-7B often leads to performance degradation. Krause-Qwen1.5-7B shows a similar trend, but with noticeably reduced degradation, indicating improved robustness of Krause attention even under potentially harmful finetuning.
324
+
325
+ [p. 22 | section: D.4.1. RESULTS ON KRAUSE-QWEN | type: Text]
326
+ Beyond task performance, Krause-Qwen1.5-7B also exhibits improved attention dynamics. Consistent with observations for Krause-Llama-3-8B, Krause-Qwen1.5-7B substantially alleviates attention sink behavior. As illustrated in Fig. 11, the vanilla Qwen1.5-7B model begins to show pronounced attention concentration on the first token starting around layer 9. In contrast, Krause-Qwen1.5-7B suppresses this collapse at comparable depths, maintaining stable and well-distributed attention scores. While recent methods such as gated attention (Qiu et al., 2025) explicitly modify attention parameterization
327
+
328
+ [p. 23 | section: D.4.1. RESULTS ON KRAUSE-QWEN | type: FigureGroup]
329
+ Figure 10. Samples completed by KARMs on MNIST (left) and CIFAR-10 (right).
330
+
331
+ [p. 23 | section: D.4.1. RESULTS ON KRAUSE-QWEN | type: Caption]
332
+ Table 15. Image generation performance of KARMs on MNIST with varying window sizes and sparsity levels. Inference throughput is measured on a single NVIDIA H100 GPU. Window and sparsity settings are reported as window size/top-k.
333
+
334
+ [p. 23 | section: D.4.1. RESULTS ON KRAUSE-QWEN | type: TableGroup]
335
+ Models BPD (↓) Images/sec (↑) KARM(128/96) 0.5652 105.6037 KARM(96/64) 0.5704 127.3919 KARM(64/48) 0.5853 169.5279
+
+ [p. 23 | section: D.4.1. RESULTS ON KRAUSE-QWEN | type: Caption]
+ Table 16. Ablation study of KARMs on MNIST image generation. Inference throughput is measured on a single NVIDIA H100 GPU.
336
+
337
+ [p. 23 | section: D.4.1. RESULTS ON KRAUSE-QWEN | type: Table]
338
+ Models BPD (↓) Images/sec (↑) KARM 0.5652 105.6037 KARM(w/o window) 0.5635 42.6981 KARM(w/o top-k) 0.5674 127.7694 KARM(w/o window&top-k) 0.5593 45.9693
339
+
340
+ [p. 23 | section: D.4.1. RESULTS ON KRAUSE-QWEN | type: Text]
341
+ to address attention sinks, Krause Attention approaches the problem from a complementary perspective by reshaping the underlying interaction dynamics through bounded-confidence synchronization.
342
+
343
+ [p. 23 | section: D.4.1. RESULTS ON KRAUSE-QWEN | type: FigureGroup]
344
+ Figure 11. First token attention scores of Krause-Qwen1.5-7B (right) and Qwen1.5-7B (left).
345
+
346
+ [p. 23 | section: D.4.2. EXTENDED RESULTS ON KRAUSE-LLAMA | type: Text]
347
+ In addition to the results reported in the main text, we further evaluate the zero-shot performance of Krause-Llama3-8B under a range of Krause Attention configurations. Specifically, we vary the local window size and top-k sparsity level while keeping all other settings fixed. The results are summarized in Table 18. Across a broad set of language understanding and reasoning benchmarks, Krause-Llama3-8B consistently outperforms the LoRA-finetuned Llama3-8B baseline, while exhibiting strong robustness across different parameter choices. Performance gains are particularly pronounced on challenging benchmarks such as ANLI and MNLI, where Krause Attention yields higher accuracy and macro-F1 scores. These results further demonstrate that the benefits of Krause Attention extend beyond a single configuration and are stable across a range of sparsity and locality hyperparameters. We also provide two examples of IFEval responses which are produced by Llama3-8B (finetuned w/ LoRA) and Krause-Llama3-8B in Table 21 at the end of Appendix.
348
+
349
+ [p. 24 | section: D.4.2. EXTENDED RESULTS ON KRAUSE-LLAMA | type: TableGroup]
350
+ Table 17. Language understanding and reasoning results of Krause-Qwen1.5-7B. NLI results are reported in Acc. / Macro-F1, while other tasks are reported in Acc. (%). Parameter settings are reported in window size/top-k. Models BoolQ CB PIQA MNLI ANLI-R1 ANLI-R2 ANLI-R3 MMLU-Pro IFEval Qwen1.5-7B Qwen1.5-7B (finetuned w/ LoRA) 84.08 87.50/78.57 72.45 82.73/81.47 40.40/37.34 38.70/36.45 41.33/38.87 83.80 76.79/53.73 49.57 57.10/46.36 47.30/38.84 45.40/39.14 46.67/38.05 30.56 36.11 26.99 26.43 Krause-Qwen1.5-7B (48/24) Krause-Qwen1.5-7B (32/16) Krause-Qwen1.5-7B (18/12) 84.66 87.50/78.62 73.42 83.83/82.82 40.90/37.94 38.20/36.18 41.17/38.99 84.78 85.71/70.09 73.70 83.49/82.41 41.30/37.88 39.80/37.25 41.17/38.62 84.63 85.71/70.06 72.66 83.40/82.27 41.20/37.64 39.10/36.50 42.33/39.59 34.72 37.50 37.50 31.79 32.16 32.17
+
+ [p. 24 | section: D.4.2. EXTENDED RESULTS ON KRAUSE-LLAMA | type: Caption]
+ Table 18. Language understanding and reasoning results of Krause-Llama3-8B. NLI results are reported in Acc. / Macro-F1, while other tasks are reported in Acc. (%). Parameter settings are reported in window size/top-k.
351
+
352
+ [p. 24 | section: D.4.2. EXTENDED RESULTS ON KRAUSE-LLAMA | type: Table]
353
+ Models BoolQ CB PIQA MNLI ANLI-R1 ANLI-R2 ANLI-R3 MMLU-Pro IFEval Llama3-8B 76.13 41.07/19.41 51.52 35.45/18.11 33.40/16.69 33.40/16.69 33.50/17.04 37.50 22.18 Llama3-8B (finetuned w/ LoRA) 80.41 60.71/47.81 75.16 59.53/55.29 38.70/30.62 39.90/33.37 44.92/39.57 Krause-Llama3-8B (48/24) 82.89 66.07/49.60 77.61 62.85/56.91 43.20/39.03 41.30/37.90 45.83/42.41 41.67 40.28 32.72 36.60 Krause-Llama3-8B (32/16) 80.59 64.29/48.04 77.77 63.27/53.72 40.30/33.01 40.50/34.27 45.67/39.84 41.67 34.01 Krause-Llama3-8B (18/12) 82.12 66.07/47.36 77.50 62.00/54.85 39.50/32.25 40.30/34.03 44.83/39.64 38.89 34.57
354
+
355
+ [p. 24 | section: D.4.3. INFERENCE SPEED COMPARISON | type: Text]
356
+ We benchmark the inference throughput of standard LLMs and Krause-LLMs on the BoolQ dataset. As can be observed in Table 19, due to the introduction of a parallel Krause Attention pathway and shortcut connections during inference, Krause-LLMs incur a modest overhead compared to their backbone models. Nevertheless, the observed throughput reduction is minimal, and Krause-LLMs maintain inference speeds that are highly comparable to both the base and LoRA-finetuned baselines across all configurations. These results indicate that the proposed attention modification introduces negligible runtime overhead while delivering consistent performance gains in language understanding tasks.
357
+
358
+ [p. 24 | section: D.4.3. INFERENCE SPEED COMPARISON | type: TableGroup]
359
+ Table 19. Inference speed of LLMs and Krause-LLMs on BoolQ. All results are measured on a single NVIDIA H100 GPU. Models Tokens/sec Models Tokens/sec Llama3-8B 110.99 Qwen1.5-7B 110.40 Llama3-8B (LoRA) 110.88 Qwen1.5-7B (LoRA) 109.87 Krause-Llama3-8B (48/24) 110.10 Krause-Qwen1.5-7B (48/24) 109.72 Krause-Llama3-8B (32/16) 110.26 Krause-Qwen1.5-7B (32/16) 109.87 Krause-Llama3-8B (18/12) 110.64 Krause-Qwen1.5-7B (18/12) 109.84
360
+
361
+ [p. 24 | section: E. Theoretical Perspective on Attention Sink Phenomena | type: Text]
362
+ Recent empirical studies have documented the emergence of attention sinks in deep Transformer models (Xiao et al., 2024; Qiu et al., 2025) , in which a small subset of tokens persistently accumulates a disproportionate fraction of attention mass across layers and heads. While this phenomenon is typically characterized at the level of attention distributions, it admits a natural interpretation from the dynamical systems viewpoint of self-attention. In particular, attention sinks can be viewed as a structural consequence of the global synchronization dynamics induced by standard self-attention.
363
+
364
+ [p. 24 | section: E. Theoretical Perspective on Attention Sink Phenomena | type: Text]
365
+ Prior theoretical analyses (Geshkovski et al., 2023; Chen et al., 2025) model self-attention as a globally coupled interacting particle system, where token representations evolve under repeated, globally normalized aggregation. Under dot-product attention, this interaction induces a contraction in representation space. In the mean-field limit and as depth increases, the system exhibits a strong tendency toward global synchronization, with token representations converging toward a single dominant mode. This contraction progressively reduces representational diversity and can lead to representational collapse.
366
+
367
+ [p. 24 | section: E. Theoretical Perspective on Attention Sink Phenomena | type: Text]
368
+ From this viewpoint, attention sinks arise as a concrete manifestation of the same collapse dynamics. Global softmax
369
+
370
+ [p. 25 | section: E. Theoretical Perspective on Attention Sink Phenomena | type: Text]
371
+ normalization amplifies small initial asymmetries among tokens, which may originate from positional bias, frequency effects, or stochastic initialization. Tokens that receive slightly higher attention early on become increasingly influential, shaping subsequent representations and reinforcing their dominance through repeated attention updates. This positive feedback loop translates representation synchronization into extreme concentration of attention mass, yielding a small number of persistent sink tokens. Attention sinks therefore do not constitute an independent mechanism, but emerge naturally from the global coupling and contraction dynamics of standard self-attention.
372
+
373
+ [p. 25 | section: E. Theoretical Perspective on Attention Sink Phenomena | type: Text]
374
+ Krause Attention directly intervenes in this dynamical pathway. By replacing global dot-product similarity with bounded-confidence, distance-based interactions and enforcing locality and selective sparsity, Krause Attention prevents unrestricted global coupling. This modification breaks the positive feedback loop responsible for global synchronization and representation collapse, instead promoting stable multi-cluster dynamics. Consequently, no single token can dominate as a global attractor, providing a principled theoretical explanation for the empirically observed mitigation of attention sink behavior.
375
+
376
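+ [p. 25 | section: E. Theoretical Perspective on Attention Sink Phenomena | type: Text]
+ The contrast can be seen in a toy particle simulation (ours, not the paper's code): iterating globally coupled softmax updates contracts all tokens toward a single consensus point, while the same iteration with a bounded-confidence kernel retains several separated clusters.
+
+ import numpy as np
+
+ def update(Z, R=None):
+     d2 = ((Z[:, None] - Z[None, :]) ** 2).sum(-1)
+     if R is None:
+         A = np.exp(Z @ Z.T)               # global dot-product coupling
+     else:
+         A = np.exp(-d2) * (d2 <= R ** 2)  # bounded-confidence coupling
+     A /= A.sum(axis=1, keepdims=True)
+     return A @ Z
+
+ Z0 = np.random.default_rng(3).normal(size=(64, 4))
+ Z_std, Z_kr = Z0.copy(), Z0.copy()
+ for _ in range(30):
+     Z_std, Z_kr = update(Z_std), update(Z_kr, R=1.5)
+ spread = lambda Z: np.linalg.norm(Z - Z.mean(0), axis=1).max()
+ print(spread(Z_std), spread(Z_kr))  # ~0 (collapse) vs. clearly positive
+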
+ [p. 25 | section: F. Relation to Competitive Learning and Lateral Interaction Dynamics | type: Text]
377
+ The clustering behavior induced by Krause Attention is closely related to classical ideas from competitive learning and lateral interaction models developed in neuroscience and early machine learning. In competitive learning frameworks, neurons or units compete to represent inputs, leading to the emergence of a small number of dominant prototypes or clusters. Canonical examples include vector quantization and self-organizing maps (SOMs), where learning dynamics drive representations toward a partitioned organization of the input space (Gray, 1984; Rumelhart & Zipser, 1985; Kohonen, 2002) .
378
+
379
+ [p. 25 | section: F. Relation to Competitive Learning and Lateral Interaction Dynamics | type: Text]
380
+ In biological neural systems, such competition is often implemented through lateral interactions within a layer. A well-studied mechanism is the Mexican-hat interaction pattern, which combines short-range excitation with longer-range inhibition (Amari, 1977; Ermentrout, 1998). At a high level, the activity u_i of a neuron can be described by a lateral interaction rule:
381
+
382
+ [p. 25 | section: F. Relation to Competitive Learning and Lateral Interaction Dynamics | type: Equation]
383
+ u_i \leftarrow \sum_j K(d_{i,j}) u_j \tag{35}
384
+
385
+ [p. 25 | section: F. Relation to Competitive Learning and Lateral Interaction Dynamics | type: Text]
386
+ where d_{i,j} denotes the distance in feature or cortical space, and the kernel K(·) is positive at short range and negative at long range. This center–surround structure causes nearby neurons to synchronize and form local clusters, while distant neurons are suppressed. As a result, lateral inhibition prevents unrestricted global synchronization and instead promotes specialization, diversity, and the emergence of multiple stable activity patterns (Grossberg, 1988).
387
+
388
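+ [p. 25 | section: F. Relation to Competitive Learning and Lateral Interaction Dynamics | type: Text]
+ A compact simulation of Eq. (35) with a difference-of-Gaussians (Mexican-hat) kernel, using illustrative parameters of our own choosing: rectified iteration sharpens a noisy activity profile into a few separated bumps.
+
+ import numpy as np
+
+ def mexican_hat(d, sig_e=1.0, sig_i=3.0, c=0.5):
+     # Short-range excitation minus longer-range inhibition.
+     return np.exp(-d**2 / (2 * sig_e**2)) - c * np.exp(-d**2 / (2 * sig_i**2))
+
+ pos = np.arange(100)
+ K = mexican_hat(np.abs(pos[:, None] - pos[None, :]))
+ u = np.random.default_rng(4).random(100)
+ for _ in range(50):
+     u = np.maximum(K @ u, 0.0)  # u_i <- sum_j K(d_ij) u_j, rectified
+     u /= u.max() + 1e-12        # keep activity bounded
+ print(np.flatnonzero(u > 0.5))  # indices of the surviving activity bumps
+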
+ [p. 25 | section: F. Relation to Competitive Learning and Lateral Interaction Dynamics | type: Text]
389
+ Krause bounded-confidence dynamics (Krause et al., 2000; Hegselmann & Krause, 2002) can be viewed as a simplified and analytically tractable abstraction of this principle. Rather than explicitly introducing inhibitory interactions, Krause models restrict interactions to agents within a confidence radius. Agents synchronize locally with nearby neighbors, while long-range interactions are entirely removed. This implicit form of inhibition is sufficient to prevent global consensus and leads instead to robust multi-cluster equilibria, a hallmark of competitive self-organization in interacting particle systems.
390
+
391
+ [p. 25 | section: F. Relation to Competitive Learning and Lateral Interaction Dynamics | type: Text]
392
+ Krause Attention instantiates this idea within Transformer architectures. Instead of computing attention weights from global query–key similarity, it defines interactions through query–key distances and restricts aggregation to a bounded local neighborhood. In its simplest form, Krause Attention computes
393
+
394
+ [p. 25 | section: F. Relation to Competitive Learning and Lateral Interaction Dynamics | type: Equation]
395
+ a_{i,j} \propto \exp(-\|Qx_i - Kx_j\|^2), \quad j \in \xi_i, \tag{36}
396
+
397
+ [p. 25 | section: F. Relation to Competitive Learning and Lateral Interaction Dynamics | type: Text]
398
+ followed by normalization over a local support \xi_i and optional top-k selection. Nearby tokens reinforce one another through smooth, distance-aware coupling, while distant tokens are excluded from interaction altogether.
399
+
400
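+ [p. 25 | section: F. Relation to Competitive Learning and Lateral Interaction Dynamics | type: Text]
+ Putting Eq. (36), the local support, and top-k selection together gives the following single-head sketch (our own illustration; \xi_i is taken to be a positional window here, and all sizes are toy values rather than the paper's settings):
+
+ import numpy as np
+
+ def krause_attention(Q, K, V, window=4, topk=2, sigma=1.0):
+     n = Q.shape[0]
+     d2 = ((Q[:, None, :] - K[None, :, :]) ** 2).sum(-1)    # ||Qx_i - Kx_j||^2
+     w = np.exp(-d2 / (2 * sigma ** 2))
+     idx = np.arange(n)
+     w[np.abs(idx[:, None] - idx[None, :]) > window] = 0.0  # local support xi_i
+     kth = np.sort(w, axis=1)[:, -topk][:, None]            # k-th largest per row
+     w[w < kth] = 0.0                                       # optional top-k selection
+     return (w / w.sum(axis=1, keepdims=True)) @ V          # normalize over xi_i
+
+ X = np.random.default_rng(5).normal(size=(10, 6))
+ out = krause_attention(X, X, X)
+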
+ [p. 25 | section: F. Relation to Competitive Learning and Lateral Interaction Dynamics | type: Text]
401
+ Although Krause Attention does not introduce explicit repulsive forces as in classical Mexican-hat kernels, it implements an implicit form of lateral inhibition. Long-range interactions are suppressed through bounded-confidence constraints, while normalization and sparsity restrict competition to local neighborhoods. As a result, distant tokens neither attract nor compete with one another, which functionally mirrors the role of inhibition in preventing global synchronization in lateral-interaction models. From a dynamical viewpoint, each Krause Attention layer performs a single discrete update of a bounded-confidence interaction process, and stacking layers corresponds to iterating a competitive self-organization dynamic, analogous to repeatedly equilibrating lateral interactions in classical competitive learning systems. This interpretation aligns naturally with recent views of Transformers as interacting particle systems (Geshkovski et al., 2023; Chen et al., 2025) and clarifies why Krause Attention supports stable multi-cluster equilibria rather than collapsing toward a single global mode.
402
+
403
+ [p. 26 | section: Krause Synchronization Transformers | type: Text]
404
+ These observations situate Krause Attention within a broader class of competitive self-organization models. Analogous dynamics appear in lateral-interaction systems, where local excitation and long-range inhibition yield multi-cluster equilibria. Krause Attention captures this principle in an abstract form, realizing inhibitory effects implicitly through bounded-confidence, distance-based interactions rather than explicit repulsion.
405
+
406
+ [p. 26 | section: Krause Synchronization Transformers | type: Text]
407
+ This perspective also provides insight into the mitigation of attention sink phenomena. Attention sinks can be viewed as an extreme outcome of unrestricted global competition under repeated globally normalized attention updates (Xiao et al., 2024) . In contrast, Krause Attention confines competition to local neighborhoods, preventing any single token from emerging as a global attractor. In this sense, bounded-confidence dynamics play a stabilizing role analogous to lateral inhibition in competitive learning, preserving representational diversity while enabling structured and coherent coordination.
408
+
409
+ [p. 27 | section: BoolQ Prompt Example | type: Text]
410
+ Passage: Ethanol fuel – All biomass goes through at least some of these steps: it needs to be grown, collected, dried, fermented, distilled, and burned. All of these steps require resources and an infrastructure. The total amount of energy input into the process compared to the energy released by burning the resulting ethanol fuel is known as the energy balance (or "energy returned on energy invested"). Figures compiled in a 2007 report by National Geographic Magazine point to modest results for corn ethanol produced in the US: one unit of fossil-fuel energy is required to create 1.3 energy units from the resulting ethanol. The energy balance for sugarcane ethanol produced in Brazil is more favorable, with one unit of fossil-fuel energy required to create 8 from the ethanol. Energy balance estimates are not easily produced, thus numerous such reports have been generated that are contradictory. For instance, a separate survey reports that production of ethanol from sugarcane, which requires a tropical climate to grow productively, returns from 8 to 9 units of energy for each unit expended, as compared to corn, which only returns about 1.34 units of fuel energy for each unit of energy expended. A 2006 University of California Berkeley study, after analyzing six separate studies, concluded that producing ethanol from corn uses much less petroleum than producing gasoline.
411
+
412
+ [p. 27 | section: BoolQ Prompt Example | type: Text]
413
+ Question: does ethanol take more energy make that produces?
414
+
415
+ [p. 27 | section: BoolQ Prompt Example | type: Text]
416
+ ### Response: [Model predicts: Yes/No]
417
+
418
+ [p. 27 | section: NLI Prompt Example | type: Text]
419
+ Premise: Valence the void-brain, Valence the virtuous valet. Why couldn't the figger choose his own portion of titanic anatomy to shaft? Did he think he was helping?
420
+
421
+ [p. 27 | section: NLI Prompt Example | type: Text]
422
+ Hypothesis: Valence was helping A. entailment B. contradiction C. neutral
423
+
424
+ [p. 27 | section: NLI Prompt Example | type: Text]
425
+ ### Response: [Model predicts token: A, B, or C]
426
+
427
+ [p. 27 | section: PIQA Prompt Example | type: Text]
428
+ Question: How do I ready a guinea pig cage for it's new occupants?
429
+
430
+ [p. 27 | section: PIQA Prompt Example | type: Text]
431
+ A. Provide the guinea pig with a cage full of a few inches of bedding made of ripped paper strips, you will also need to supply it with a water bottle and a food dish.
432
+
433
+ [p. 27 | section: PIQA Prompt Example | type: Text]
434
+ B. Provide the guinea pig with a cage full of a few inches of bedding made of ripped jeans material, you will also need to supply it with a water bottle and a food dish.
435
+
436
+ [p. 27 | section: PIQA Prompt Example | type: Text]
437
+ ### Response: [Model predicts token: A, or B]
438
+
439
+ [p. 27 | section: MMLU-Pro Prompt Example | type: Text]
440
+ Question: The symmetric group S_n has n! elements, hence it is not true that S_10 has 10 elements. Find the characteristic of the ring 2Z.
441
+
442
+ [p. 27 | section: MMLU-Pro Prompt Example | type: Text]
443
+ Choices:
444
+
445
+ [p. 27 | section: MMLU-Pro Prompt Example | type: Text]
446
+ A. 0 B. 30 C. 3 D. 10 E. 12 F. 50 G. 2 H. 100 I. 20 J. 5
447
+
448
+ [p. 27 | section: MMLU-Pro Prompt Example | type: Text]
449
+ ### Response: [Model predicts token: A-J]
450
+
451
+ [p. 27 | section: IFEval Prompt Example | type: Text]
452
+ Given the sentence "Two young boys with toy guns and horns." can you ask a question? Please ensure that your response is in English, and in all lowercase letters. No capital letters are allowed.
453
+
454
+ [p. 27 | section: IFEval Prompt Example | type: Text]
455
+ ### Response: [Model generates free-form text response]
456
+
457
+ [p. 27 | section: IFEval Prompt Example | type: Text]
458
+ Table 20. Prompt examples used for Krause-LLMs and LLMs.
459
+
460
+ [p. 28 | section: IFEval Answers | type: Text]
461
+ Instruction: Write a casual blog post about how the outer solar system is different from the inner solar system, and what that means for the possibility of life. Wrap your entire response with double quotation marks. Your response should contain 17 or more sentences.
462
+
463
+ [p. 28 | section: IFEval Answers | type: Text]
464
+ Llama3-8B (finetuned w/ LoRA) Response: "The outer solar system has different characteristics than the inner solar system. The inner solar system has a small mass, but the outer solar system has a large mass. The inner solar system has short periods, but the outer solar system has long periods. The inner solar system has small eccentricities, but the outer solar system has large eccentricities. The inner solar system has small inclinations, but the outer solar system has large inclinations. The inner solar system has low densities, but the outer solar system has high densities. The inner solar system has small semimajor axes, but the outer solar system has large semimajor axes. The inner solar system has small semiminor axes, but the outer solar system has large semiminor axes. The inner solar system has small mean anomalies, but the outer solar system has large mean anomalies. The inner solar system has small longitudes of the ascending nodes, but the outer solar system has large longitudes of the ascending nodes. The inner solar system has small arguments of perihelion, but the outer solar system has large arguments of perihelion. The inner solar system has small periods of perihelion, but the outer solar system has large periods of perihelion. The inner solar system has small semiaxes of perihelion, but the outer solar system has large semiaxes of perihelion. The inner solar system has small semiaxes of aphelion, but the outer solar system has large semiaxes of aphelion. The inner solar system has small periods of aphelion, but the outer solar system has large periods of aphelion. The inner solar system has small semimajor axes of perihelion, but the outer solar system has large semimajor axes of perihelion. The inner solar system has small semimajor axes of aphelion, but the outer solar system has large semimajor axes of aphelion. The inner solar system has small semiminor axes of perihelion, but the outer solar system has large semiminor axes of perihelion. The inner solar system has small semiminor axes of aphelion, but the outer solar system has large semiminor axes of aphelion. The inner solar system has small mean anomalies of perihelion, but the outer solar system has large mean anomalies of perihelion. The inner solar system has small mean anomalies of aphelion, but the outer solar system"
465
+
466
+ [p. 28 | section: IFEval Answers | type: Text]
467
+ Krause-Llama3-8B Response: "The outer solar system is different from the inner solar system in that it has a much greater abundance of ices. This is due to the fact that the outer solar system is much farther away from the Sun. The outer solar system also has a much greater abundance of volatile materials, which are materials that can easily be vaporized or melted. This is due to the fact that the outer solar system is much colder than the inner solar system. The possibility of life in the outer solar system is much greater than in the inner solar system. This is due to the fact that the outer solar system has a much greater abundance of ices and volatile materials. These materials are necessary for life as we know it. The outer solar system is also much colder than the inner solar system, which makes it much more hospitable for life. The outer solar system is also much farther away from the Sun, which means that there is much less radiation from the Sun. This makes it much safer for life. Overall, the outer solar system is a much more hospitable place for life than the inner solar system. This is due to the fact that it has a much greater abundance of ices and volatile materials, and it is much colder and farther away from the Sun. This makes it much safer for life. The possibility of life in the outer solar system is much greater than in the inner solar system. This is due to the fact that the outer solar system has a much greater abundance of ices and volatile materials, and it is much colder and farther away from the Sun. This makes it much safer for life. The outer solar system is also much more hospitable for life than the inner solar system. This is due to the fact that it has a much greater abundance of ices and volatile materials, and it is much colder and farther away from the Sun. This makes it much safer for life. The possibility of life in the outer solar system is much greater than in the inner solar system. This is due to the fact that the outer solar system has a much greater abundance of ices and volatile materials, and it is much colder and farther away from the Sun. This makes it much safer for life. The outer solar system is also much more hospitable for life than the inner solar system. This is due to the fact that it has a much greater abundance of ices and volatile materials, and it is much colder and farther away from the Sun. This makes it much safer for life. The possibility of life in the outer solar system is much greater than"
icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/assets.json ADDED
@@ -0,0 +1,123 @@
+ [
+   {
+     "filename": "_page_0_Picture_22.jpeg",
+     "path": "data/processed_papers/icml26_20260429_1952_duequeue/marker_raw/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/marker_markdown/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/_page_0_Picture_22.jpeg",
+     "bytes": 42769,
+     "width": 587,
+     "height": 447,
+     "aspect_ratio": 1.313199105145414,
+     "keep": true,
+     "reject_reason": null,
+     "model_path": "assets/_page_0_Picture_22.jpeg"
+   },
+   {
+     "filename": "_page_20_Figure_1.jpeg",
+     "path": "data/processed_papers/icml26_20260429_1952_duequeue/marker_raw/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/marker_markdown/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/_page_20_Figure_1.jpeg",
+     "bytes": 109234,
+     "width": 1337,
+     "height": 524,
+     "aspect_ratio": 2.551526717557252,
+     "keep": true,
+     "reject_reason": null,
+     "model_path": "assets/_page_20_Figure_1.jpeg"
+   },
+   {
+     "filename": "_page_21_Figure_1.jpeg",
+     "path": "data/processed_papers/icml26_20260429_1952_duequeue/marker_raw/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/marker_markdown/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/_page_21_Figure_1.jpeg",
+     "bytes": 137564,
+     "width": 1403,
+     "height": 798,
+     "aspect_ratio": 1.7581453634085213,
+     "keep": true,
+     "reject_reason": null,
+     "model_path": "assets/_page_21_Figure_1.jpeg"
+   },
+   {
+     "filename": "_page_22_Figure_1.jpeg",
+     "path": "data/processed_papers/icml26_20260429_1952_duequeue/marker_raw/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/marker_markdown/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/_page_22_Figure_1.jpeg",
+     "bytes": 71462,
+     "width": 1225,
+     "height": 367,
+     "aspect_ratio": 3.337874659400545,
+     "keep": true,
+     "reject_reason": null,
+     "model_path": "assets/_page_22_Figure_1.jpeg"
+   },
+   {
+     "filename": "_page_22_Figure_8.jpeg",
+     "path": "data/processed_papers/icml26_20260429_1952_duequeue/marker_raw/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/marker_markdown/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/_page_22_Figure_8.jpeg",
+     "bytes": 62690,
+     "width": 1399,
+     "height": 473,
+     "aspect_ratio": 2.9577167019027484,
+     "keep": true,
+     "reject_reason": null,
+     "model_path": "assets/_page_22_Figure_8.jpeg"
+   },
+   {
+     "filename": "_page_3_Figure_2.jpeg",
+     "path": "data/processed_papers/icml26_20260429_1952_duequeue/marker_raw/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/marker_markdown/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/_page_3_Figure_2.jpeg",
+     "bytes": 63436,
+     "width": 1171,
+     "height": 435,
+     "aspect_ratio": 2.6919540229885057,
+     "keep": true,
+     "reject_reason": null,
+     "model_path": "assets/_page_3_Figure_2.jpeg"
+   },
+   {
+     "filename": "_page_6_Figure_7.jpeg",
+     "path": "data/processed_papers/icml26_20260429_1952_duequeue/marker_raw/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/marker_markdown/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/_page_6_Figure_7.jpeg",
+     "bytes": 28202,
+     "width": 549,
+     "height": 291,
+     "aspect_ratio": 1.8865979381443299,
+     "keep": true,
+     "reject_reason": null,
+     "model_path": "assets/_page_6_Figure_7.jpeg"
+   },
+   {
+     "filename": "_page_6_Picture_16.jpeg",
+     "path": "data/processed_papers/icml26_20260429_1952_duequeue/marker_raw/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/marker_markdown/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/_page_6_Picture_16.jpeg",
+     "bytes": 22089,
+     "width": 566,
+     "height": 217,
+     "aspect_ratio": 2.608294930875576,
+     "keep": true,
+     "reject_reason": null,
+     "model_path": "assets/_page_6_Picture_16.jpeg"
+   },
+   {
+     "filename": "_page_7_Figure_11.jpeg",
+     "path": "data/processed_papers/icml26_20260429_1952_duequeue/marker_raw/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/marker_markdown/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/_page_7_Figure_11.jpeg",
+     "bytes": 11580,
+     "width": 534,
+     "height": 195,
+     "aspect_ratio": 2.7384615384615385,
+     "keep": true,
+     "reject_reason": null,
+     "model_path": "assets/_page_7_Figure_11.jpeg"
+   },
+   {
+     "filename": "_page_7_Figure_18.jpeg",
+     "path": "data/processed_papers/icml26_20260429_1952_duequeue/marker_raw/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/marker_markdown/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/_page_7_Figure_18.jpeg",
+     "bytes": 21691,
+     "width": 579,
+     "height": 301,
+     "aspect_ratio": 1.9235880398671097,
+     "keep": true,
+     "reject_reason": null,
+     "model_path": "assets/_page_7_Figure_18.jpeg"
+   },
+   {
+     "filename": "_page_7_Figure_7.jpeg",
+     "path": "data/processed_papers/icml26_20260429_1952_duequeue/marker_raw/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/marker_markdown/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/_page_7_Figure_7.jpeg",
+     "bytes": 32568,
+     "width": 578,
+     "height": 345,
+     "aspect_ratio": 1.6753623188405797,
+     "keep": true,
+     "reject_reason": null,
+     "model_path": "assets/_page_7_Figure_7.jpeg"
+   }
+ ]
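The manifest above is internally consistent: each entry's aspect_ratio equals width/height, and model_path mirrors the original filename under assets/. A minimal sketch of that check, assuming a local checkout of this folder (the path below is illustrative):

```python
import json

# Sanity-check the asset manifest rendered above. The path is illustrative,
# assuming the repository folder has been downloaded locally.
with open("icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/assets.json") as f:
    assets = json.load(f)

for entry in assets:
    # aspect_ratio is stored redundantly alongside width/height.
    assert abs(entry["aspect_ratio"] - entry["width"] / entry["height"]) < 1e-9
    # model_path mirrors the original filename under assets/.
    assert entry["model_path"] == "assets/" + entry["filename"]
    # keep and reject_reason should agree (all 11 entries are kept here).
    assert entry["keep"] == (entry["reject_reason"] is None)

print(len(assets), "assets validated")
```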
icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/assets/_page_0_Picture_22.jpeg ADDED

Git LFS Details

  • SHA256: b58b4d154aad149145fcf36c7afd1e64787d4a7bee47e144516125ddcce62daa
  • Pointer size: 130 Bytes
  • Size of remote file: 42.8 kB
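The JPEG assets in this commit are stored through Git LFS, so the repository tracks a small text pointer rather than the image bytes; the "Git LFS Details" blocks summarize those pointers. Reconstructed from the SHA256 reported above and the 42,769-byte size recorded in assets.json, the pointer for this file would follow the standard LFS v1 layout; that three-line text is exactly 130 bytes, matching the reported pointer size:

```
version https://git-lfs.github.com/spec/v1
oid sha256:b58b4d154aad149145fcf36c7afd1e64787d4a7bee47e144516125ddcce62daa
size 42769
```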
icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/assets/_page_20_Figure_1.jpeg ADDED

Git LFS Details

  • SHA256: ce7e003e3908aa876975519ac4e638377ec1488ce42e171baf3e64d4c98b1ab9
  • Pointer size: 131 Bytes
  • Size of remote file: 109 kB
icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/assets/_page_21_Figure_1.jpeg ADDED

Git LFS Details

  • SHA256: 91ef34ac2ce6640239b542a3ecf3c6c4a18d455feccc87970589cb6dbbf79f66
  • Pointer size: 131 Bytes
  • Size of remote file: 138 kB
icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/assets/_page_22_Figure_1.jpeg ADDED

Git LFS Details

  • SHA256: 664a768abbaeb6a786dc75ef7f6459df45beb4138c97e1793b09bb64d3fc0caf
  • Pointer size: 130 Bytes
  • Size of remote file: 71.5 kB
icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/assets/_page_22_Figure_8.jpeg ADDED

Git LFS Details

  • SHA256: 028d760059284ea3a38d5393d6b628be513badb3a7ef25ab73fbb9feaf6efaaf
  • Pointer size: 130 Bytes
  • Size of remote file: 62.7 kB
icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/assets/_page_3_Figure_2.jpeg ADDED

Git LFS Details

  • SHA256: 50bfa81e9dc993cce85bc35b08e56e59ae27fcc5fc631ca44f4d1b5e707b4aa7
  • Pointer size: 130 Bytes
  • Size of remote file: 63.4 kB
icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/assets/_page_6_Figure_7.jpeg ADDED

Git LFS Details

  • SHA256: be55c80203b5c147a76988a1cb94d0e882edb2065d33bf1a4b823af69bcd1295
  • Pointer size: 130 Bytes
  • Size of remote file: 28.2 kB
icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/assets/_page_6_Picture_16.jpeg ADDED

Git LFS Details

  • SHA256: eeae0c9040a8f5f0fa2cdd52620a0c4c0ecc13c93602caa8ed732aeca110906f
  • Pointer size: 130 Bytes
  • Size of remote file: 22.1 kB
icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/assets/_page_7_Figure_11.jpeg ADDED

Git LFS Details

  • SHA256: dc844b52744ddbe5fee6bc2a8d77839c9a43519c156af9299279fbbd1d52ed77
  • Pointer size: 130 Bytes
  • Size of remote file: 11.6 kB
icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/assets/_page_7_Figure_18.jpeg ADDED

Git LFS Details

  • SHA256: 685cc0bc53fdff1abf2f8f16f096b614d979834872bfd4256716a3a014bda084
  • Pointer size: 130 Bytes
  • Size of remote file: 21.7 kB
icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/assets/_page_7_Figure_7.jpeg ADDED

Git LFS Details

  • SHA256: 38df1159d8b24f52be90abbc54875111e3d5aee2531aa96244c00b151777cac7
  • Pointer size: 130 Bytes
  • Size of remote file: 32.6 kB
icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/chunks_v3_anonymized.jsonl ADDED
The diff for this file is too large to render. See raw diff
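Since chunks_v3_anonymized.jsonl is too large for the diff view, it is best consumed one record at a time. A small streaming sketch, assuming the record schema matches the sibling chunk files shown elsewhere in this commit:

```python
import json
from collections import Counter

def iter_chunks(path):
    """Yield one JSONL chunk record at a time without loading the whole file."""
    with open(path, encoding="utf-8") as f:
        for line in f:
            if line.strip():
                yield json.loads(line)

# Count chunks per section; assumes the same fields as the
# main_body/appendix chunk files rendered in this commit.
path = "icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/chunks_v3_anonymized.jsonl"
print(Counter(c["section"] for c in iter_chunks(path)).most_common(5))
```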
 
icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/dataset_meta.json ADDED
@@ -0,0 +1,63 @@
+ {
+   "paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7",
+   "pipeline": "Paper2Markdown-V3",
+   "ok": true,
+   "page_count": 28,
+   "chunk_count": 286,
+   "main_body_chunk_count": 118,
+   "appendix_chunk_count": 156,
+   "reference_chunk_count": 12,
+   "model_text_chars": 44118,
+   "raw_markdown_chars": 133833,
+   "sanitized_chars": 116859,
+   "page_provenance": {
+     "min_page": 1,
+     "max_page": 28,
+     "invalid_count": 0
+   },
+   "marker_block_type_counts": {
+     "Caption": 6,
+     "Code": 1,
+     "Equation": 40,
+     "FigureGroup": 9,
+     "Footnote": 2,
+     "ListGroup": 10,
+     "PageFooter": 29,
+     "PageHeader": 53,
+     "PictureGroup": 2,
+     "SectionHeader": 69,
+     "Table": 5,
+     "TableGroup": 12,
+     "TableOfContents": 1,
+     "Text": 643
+   },
+   "asset_count_raw": 11,
+   "asset_count_model_kept": 11,
+   "asset_count_rejected": 0,
+   "asset_reject_reasons": {
+     "kept": 11
+   },
+   "artifact_leak_audit": {
+     "ok": true,
+     "hits": {
+       "Anonymous Authors": [],
+       "ACKNOWLEDGMENT": [],
+       "OpenReview": [],
+       "\"accept_label\"": [],
+       "\"decision\"": [],
+       "\"decision_tier\"": [],
+       "\"source_status\"": [],
+       "Meta-review": [],
+       "Official Review": [],
+       "official_reviews": [],
+       "meta_reviews": [],
+       "suggested_verdict_score": []
+     },
+     "artifact_count": 2
+   },
+   "default_model_input": "model_text_v3.txt",
+   "appendix_input": "appendix_text_v3.txt",
+   "reference_input": "reference_text_v3.txt",
+   "source": "koala_icml26_due_queue",
+   "run_name": "icml26_20260429_1952_duequeue"
+ }
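The counters in this file cross-check against each other: the three chunk splits partition the full chunk set (118 + 156 + 12 = 286), and the asset tallies are consistent (11 kept + 0 rejected = 11 raw). A short sketch of those invariants, with an illustrative local path:

```python
import json

# Cross-check the bookkeeping fields of dataset_meta.json against each other.
# The path is illustrative; the field names come from the file above.
with open("icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/dataset_meta.json") as f:
    meta = json.load(f)

# The three splits partition the chunk set: 118 + 156 + 12 = 286.
assert (meta["main_body_chunk_count"] + meta["appendix_chunk_count"]
        + meta["reference_chunk_count"]) == meta["chunk_count"]

# Asset accounting: every raw asset is either kept or rejected (11 + 0 = 11).
assert (meta["asset_count_model_kept"]
        + meta["asset_count_rejected"]) == meta["asset_count_raw"]

# Page provenance stays within the 28-page document, with no invalid hits.
prov = meta["page_provenance"]
assert 1 <= prov["min_page"] <= prov["max_page"] <= meta["page_count"]
assert prov["invalid_count"] == 0
```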
icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/main_body_chunks.jsonl ADDED
@@ -0,0 +1,118 @@
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0000", "section": "Abstract", "page_start": 1, "page_end": 1, "type": "Text", "text": "Self-attention in Transformers relies on globally normalized softmax weights, causing all tokens to compete for influence at every layer. When composed across depth, this interaction pattern induces strong synchronization dynamics that favor convergence toward a dominant mode, a behavior associated with representation collapse and attention sink phenomena. We introduce Krause Attention, a principled attention mechanism inspired by bounded-confidence consensus dynamics. Krause Attention replaces similaritybased global aggregation with distance-based, localized, and selectively sparse interactions, promoting structured local synchronization instead of global mixing. We relate this behavior to recent theory modeling Transformer dynamics as interacting particle systems, and show how bounded-confidence interactions naturally moderate attention concentration and alleviate attention sinks. Restricting interactions to local neighborhoods also reduces runtime complexity from quadratic to linear in sequence length. Experiments across vision (ViT on CIFAR/ImageNet), autoregressive generation (MNIST/CIFAR-10), and large language models (Llama/Qwen) demonstrate consistent gains with substantially reduced computation, highlighting bounded-confidence dynamics as a scalable and effective inductive bias for attention. Project page is available at io/krause-sync-transformers/ .", "source": "marker_v2", "marker_block_id": "/page/0/Text/17"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0001", "section": "1. Introduction", "page_start": 1, "page_end": 1, "type": "Text", "text": "Transformers (Vaswani et al., 2017) have become the de facto architecture across a wide range of domains, including vision, language, and generative modeling. Their success is", "source": "marker_v2", "marker_block_id": "/page/0/Text/19"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0002", "section": "1. Introduction", "page_start": 1, "page_end": 1, "type": "PictureGroup", "text": "Figure 1. Krause Attention, grounded in bounded-confidence interactions, promotes localized multi-cluster synchronization (top). In contrast, standard self-attention tends to induce globally coupled dynamics that concentrate attention onto a dominant mode, often manifesting as attention sinks (Xiao et al., 2024) (bottom).", "source": "marker_v2", "marker_block_id": "/page/0/PictureGroup/449"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0003", "section": "1. Introduction", "page_start": 1, "page_end": 1, "type": "Text", "text": "primarily attributed to the self-attention mechanism, which enables flexible, content-dependent interactions among tokens. Despite their immense success, it remains an open question to deeply understand why self-attention is so effective, and how its design can be systematically improved.", "source": "marker_v2", "marker_block_id": "/page/0/Text/24"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0004", "section": "1. Introduction", "page_start": 1, "page_end": 1, "type": "Text", "text": "Recent theoretical work characterizes self-attention dynamics as interacting particle systems, such as how tokens as particles evolve through the layers (Geshkovski et al., 2023) . Global normalization induces globally coupled dynamics that exhibit strong synchronization tendencies: in the meanfield limit, token representations contract toward a single dominant mode (Chen et al., 2025) . This dominant mode can be connected to attention sink phenomena, where attention mass collapses onto few tokens (Xiao et al., 2024) . Together, these findings suggest that globally normalized attention can induce undesirable coordination effects when iterated across depth, motivating a fundamental question:", "source": "marker_v2", "marker_block_id": "/page/0/Text/25"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0005", "section": "1. Introduction", "page_start": 1, "page_end": 1, "type": "Text", "text": "Should globally normalized similarity govern token interactions by default?", "source": "marker_v2", "marker_block_id": "/page/0/Text/26"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0006", "section": "1. Introduction", "page_start": 1, "page_end": 1, "type": "Text", "text": "To tackle this question, we propose a different principle: instead of global similarity-based aggregation, we explicitly model token interactions through bounded-confidence synchronization dynamics . Our approach draws inspira-", "source": "marker_v2", "marker_block_id": "/page/0/Text/27"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0007", "section": "1. Introduction", "page_start": 2, "page_end": 2, "type": "Text", "text": "tion from the Krause consensus model (Krause et al., 2000; Hegselmann & Krause, 2002) , where interacting agents only influence neighbors within a bounded confidence radius, leading to stable multi-cluster structures rather than global consensus. We introduce Krause Attention (Fig. 1) as a Transformer-compatible instantiation of this idea. Krause Attention replaces query–key dot-product similarity with query–key distance , which is mapped through a radial basis function (RBF) kernel to produce smooth, well-conditioned interaction weights. These distance-based affinities are further constrained by a local neighborhood and selective top-k sparsity, enforcing a bounded-confidence interaction pattern in which tokens primarily interact only with nearby representations, while distant tokens exert little or no influence.", "source": "marker_v2", "marker_block_id": "/page/1/Text/1"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0008", "section": "1. Introduction", "page_start": 2, "page_end": 2, "type": "Text", "text": "This design has direct implications for the resulting dynamics. Theoretically, we connect Krause Attention to recent analyses of Transformer clustering dynamics (Geshkovski et al., 2023; Chen et al., 2025) . While standard self-attention induces globally coupled dynamics that favor convergence toward a single consensus, the bounded-confidence structure of Krause Attention supports stable multi-cluster forma tions . This naturally moderates attention concentration and mitigates attention sink behavior. Locality further reduces computational cost from O(N2d) to O(NW d), where W is the local window size, and d the token dimension.", "source": "marker_v2", "marker_block_id": "/page/1/Text/2"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0009", "section": "1. Introduction", "page_start": 2, "page_end": 2, "type": "Text", "text": "Empirically, Krause Attention delivers consistent and substantial gains across vision, generation, and language modeling tasks. For image classification, Krause Vision Transformers (ViTs) consistently outperform standard ViTs (Dosovitskiy, 2021) on CIFAR-10/100 and ImageNet-1K, achieving an average accuracy improvement of +3.7% while reducing FLOPs by approximately 30% across model scales. In autoregressive image generation (Parmar et al., 2018) , Krause-based models achieve lower negative log-likelihood than standard Transformers while enabling more than 2× faster inference. For LLMs (Yang et al., 2024a; Grattafiori et al., 2024) , integrating Krause Attention as an auxiliary pathway consistently improves zero-shot evaluation performance over LoRAfinetuned baselines (Hu et al., 2022) on a broad suite of challenging language reasoning benchmarks, indicating improved robustness to attention concentration effects. Together, these results demonstrate that bounded-confidence dynamics provide a scalable, computationally efficient, and practically effective inductive bias for self-attention mechanisms across diverse modalities and model regimes.", "source": "marker_v2", "marker_block_id": "/page/1/Text/3"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0010", "section": "2. Related Work", "page_start": 2, "page_end": 2, "type": "Text", "text": "Transformer Architectures. Since their introduction, Transformers (Vaswani et al., 2017) have become the prevailing architectural paradigms across a wide range of machine learning domains. Their success stems from the scalable", "source": "marker_v2", "marker_block_id": "/page/1/Text/5"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0011", "section": "2. Related Work", "page_start": 2, "page_end": 2, "type": "Text", "text": "self-attention mechanisms and flexible token representations, which have enabled consistent performance improvements with increasing model and data size. In visual recognition, Vision Transformers (Dosovitskiy, 2021) and subsequent variants (Liu et al., 2021; Touvron et al., 2021a; Rao et al., 2021; Yuan et al., 2021; Han et al., 2021; Wang et al., 2021; Touvron et al., 2022; Heo et al., 2024) have demonstrated that Transformer-based models can match or surpass convolutional networks when trained under comparable large-scale settings. Beyond discriminative tasks, Transformer-based architectures also underpin a broad spectrum of generative modeling frameworks, including autoregressive models (Parmar et al., 2018; Chen et al., 2020; Yu et al., 2022; Cao et al., 2021; Touvron et al., 2021b) and diffusion models built on Transformer backbones (He et al., 2022; Chang et al., 2022; Peebles & Xie, 2023; Wang et al., 2025) . In language modeling, decoder-only Transformers scaled to hundreds of billions of parameters form the foundation of modern LLMs (Achiam et al., 2023; Grattafiori et al., 2024; Yang et al., 2024a; Liu et al., 2025; Comanici et al., 2025) , further reinforcing the central role of Transformers.", "source": "marker_v2", "marker_block_id": "/page/1/Text/6"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0012", "section": "2. Related Work", "page_start": 2, "page_end": 2, "type": "Text", "text": "A large body of work seeks to reduce the quadratic cost of standard self-attention. Sparse attention architectures (Child et al., 2019) restrict interactions to predefined patterns, while low-rank or kernel-based approaches such as Linformer (Wang et al., 2020) , Performer (Choromanski et al., 2021) , and other linear-attention variants (Katharopoulos et al., 2020) approximate attention through low-dimensional projections or random feature kernels. Hashing-based methods like Reformer (Kitaev et al., 2020) further achieve subquadratic complexity by clustering tokens. While these approaches improve scalability, they often weaken finegrained, content-dependent interactions. Our work is orthogonal to this efficiency-oriented line of research. Instead of approximating softmax attention or imposing fixed sparsity patterns, we redesign the interaction rule itself by drawing on Krause bounded-confidence dynamics. Krause Attention introduces distance-based, content-adaptive sparsity that promotes localized multi-cluster structures. Although the use of local neighborhoods reduces the computational cost to O(NW d), our primary goal is to endow attention with an explicit and theoretically grounded synchronization prior, which is distinct from prior sub-quadratic approximations.", "source": "marker_v2", "marker_block_id": "/page/1/Text/7"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0013", "section": "2. Related Work", "page_start": 2, "page_end": 2, "type": "Text", "text": "Understanding Transformers. Substantial progress has been made toward interpreting Transformers through physical and mathematical lenses (Bruno et al., 2025b; Ren et al., 2025; Rigollet, 2025) . Self-attention mechanisms have been connected to kernel methods (Tsai et al., 2019) , continuous Hopfield networks (Ramsauer et al., 2021) , and energybased associative memories (Yang et al., 2022; Hoover et al., 2023; Hu et al., 2024) . Complementary studies have examined relationships with convolution operators (Cordonnier et al., 2020) and non-convex optimization landscapes (Hu", "source": "marker_v2", "marker_block_id": "/page/1/Text/8"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0014", "section": "2. Related Work", "page_start": 3, "page_end": 3, "type": "Text", "text": "et al., 2024; Abbe et al., 2024). From a dynamical system perspective, the evolution of token representations is often modeled as an interacting particle system (Geshkovski et al., 2023), with links to global synchronization phenomena such as Kuramoto models (Kuramoto, 2005; Acebrón et al., 2005). Recent analyses further establish quantitative bounds on convergence toward mean-field limits (Chen et al., 2025; Rigollet, 2025; Bruno et al., 2025a). Inspired by Geshkovski et al. (2023), we adopt Krause opinion dynamics (Krause et al., 2000; Hegselmann & Krause, 2002) as a principled mechanism for structuring attention. Incorporating bounded confidence into the attention kernel enforces localized, query-key distance-aware token interactions rather than global mixing. As a result, the induced dynamics align with theoretical analyses showing that restricting global coupling can instead support the emergence of multiple coherent clusters.", "source": "marker_v2", "marker_block_id": "/page/2/Text/1"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0015", "section": "3. Preliminaries", "page_start": 3, "page_end": 3, "type": "Text", "text": "127128", "source": "marker_v2", "marker_block_id": "/page/2/Text/41"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0016", "section": "3.1. Self-attention Mechanism", "page_start": 3, "page_end": 3, "type": "Text", "text": "Self-attention is the central component underlying the Transformer architecture. Given an input sequence X \\in \\mathbb{R}^{N \\times d} , where N denotes the sequence length and d the embedding dimension, the input is linearly projected into queries, keys, and values through learnable weight matrices:", "source": "marker_v2", "marker_block_id": "/page/2/Text/4"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0017", "section": "3.1. Self-attention Mechanism", "page_start": 3, "page_end": 3, "type": "Equation", "text": "Q = XW_Q, \\quad K = XW_K, \\quad V = XW_V \\tag{1}", "source": "marker_v2", "marker_block_id": "/page/2/Equation/5"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0018", "section": "3.1. Self-attention Mechanism", "page_start": 3, "page_end": 3, "type": "Equation", "text": "\\mathbf{z}_{i} = \\sum_{j=1}^{N} \\operatorname{Softmax} \\left( \\frac{\\mathbf{q}_{i} \\mathbf{k}_{j}^{T}}{\\sqrt{d_{k}}} \\right) \\cdot \\mathbf{v}_{j} (2)", "source": "marker_v2", "marker_block_id": "/page/2/Equation/7"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0019", "section": "3.2. Krause Consensus Model", "page_start": 3, "page_end": 3, "type": "Text", "text": "The Krause consensus model describes a non-linear, discrete-time dynamical system over a population of N", "source": "marker_v2", "marker_block_id": "/page/2/Text/10"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0020", "section": "3.2. Krause Consensus Model", "page_start": 3, "page_end": 3, "type": "Text", "text": "agents, where interaction strengths depend on the agents' current states. Let x(t) \\in \\mathbb{R}^N denotes the vector of agent states at iteration t. The system evolves according to:", "source": "marker_v2", "marker_block_id": "/page/2/Text/11"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0021", "section": "3.2. Krause Consensus Model", "page_start": 3, "page_end": 3, "type": "Equation", "text": "x(t+1) = A(x(t),t) x(t) (3)", "source": "marker_v2", "marker_block_id": "/page/2/Equation/12"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0022", "section": "3.2. Krause Consensus Model", "page_start": 3, "page_end": 3, "type": "Text", "text": "where A\\big(x(t),t\\big)\\in\\mathbb{R}^{N\\times N} is a row-stochastic influence matrix whose entries a_{i,j}\\big(x(t),t\\big) quantify the influence of agent j on agent i. A defining feature of the model is the bounded confidence mechanism, which restricts interactions to agents whose states lie within a confidence radius \\epsilon . For each agent i, the active neighbor set is defined as", "source": "marker_v2", "marker_block_id": "/page/2/Text/13"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0023", "section": "3.2. Krause Consensus Model", "page_start": 3, "page_end": 3, "type": "Equation", "text": "I(i, x(t)) = \\{ j \\in 1, \\dots, N \\mid |x_i(t) - x_j(t)| \\le \\epsilon \\} (4)", "source": "marker_v2", "marker_block_id": "/page/2/Equation/14"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0024", "section": "3.2. Krause Consensus Model", "page_start": 3, "page_end": 3, "type": "Text", "text": "The influence weights are assigned uniformly within this active neighborhood, with a_{i,j}\\big(x(t),t\\big)=|I\\big(i,x(t)\\big)|^{-1} if j\\in I\\big(i,x(t)\\big) and 0 otherwise.", "source": "marker_v2", "marker_block_id": "/page/2/Text/15"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0025", "section": "3.2. Krause Consensus Model", "page_start": 3, "page_end": 3, "type": "Text", "text": "This state-dependent nonlinearity drives the system toward a partitioned equilibrium. In the limit, agents self-organize into disjoint, invariant clusters, within which local consensus emerges while distinct clusters remain separated.", "source": "marker_v2", "marker_block_id": "/page/2/Text/16"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0026", "section": "3.2. Krause Consensus Model", "page_start": 3, "page_end": 3, "type": "Text", "text": "Our Krause Attention mechanism instantiates the Krause consensus model within the Transformer architecture by treating tokens as agents and their value embeddings as agent states. Under this mapping, attention weights define state-dependent influence matrices that govern how token representations are updated, mirroring the bounded-confidence interactions of the classical Krause model.", "source": "marker_v2", "marker_block_id": "/page/2/Text/17"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0027", "section": "4. Krause Attention", "page_start": 3, "page_end": 3, "type": "Text", "text": "This section introduces Krause Attention (see Fig. 2), the core mechanism of Krause Synchronization Transformers. Secs. 4.1 and 4.2 detail the key components of the method, while Sec. 4.3 connects Krause Attention to clustering and synchronization analyses of Transformer models.", "source": "marker_v2", "marker_block_id": "/page/2/Text/19"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0028", "section": "4.1. Distance-Aware Query-Key Interactions", "page_start": 3, "page_end": 3, "type": "Text", "text": "To translate the Krause consensus model into an attention mechanism, a central requirement is a principled notion of similarity between Transformer tokens, which are interpreted as interacting agents. In our framework, token interactions are governed by a distance-based measure of opinion similarity. Instead of the dot-product similarity used in standard Transformers, we explicitly quantify dissimilarity using the Euclidean distance between query and key representations. Formally, given query vectors q_i and key vectors k_i , we define the pairwise query–key distance as", "source": "marker_v2", "marker_block_id": "/page/2/Text/21"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0029", "section": "4.1. Distance-Aware Query-Key Interactions", "page_start": 3, "page_end": 3, "type": "Equation", "text": "\\Delta_{i,j} = ||q_i - k_j|| = \\sqrt{\\sum_{n=1}^{d_k} (q_{i,n} - k_{j,n})^2} (5)", "source": "marker_v2", "marker_block_id": "/page/2/Equation/22"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0030", "section": "4.1. Distance-Aware Query-Key Interactions", "page_start": 4, "page_end": 4, "type": "FigureGroup", "text": "Figure~2. Krause Attention computes RBF affinity scores, restricts updates to local neighborhoods, and applies top-k selective interactions.", "source": "marker_v2", "marker_block_id": "/page/3/FigureGroup/288"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0031", "section": "4.1. Distance-Aware Query-Key Interactions", "page_start": 4, "page_end": 4, "type": "Text", "text": "which serves as a measure of opinion dissimilarity between tokens i and j. To map distances into interaction strengths, we apply a radial basis function (RBF) kernel to the squared distances, yielding unnormalized affinity scores", "source": "marker_v2", "marker_block_id": "/page/3/Text/4"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0032", "section": "4.1. Distance-Aware Query-Key Interactions", "page_start": 4, "page_end": 4, "type": "Equation", "text": "s_{i,j} = \\exp(-\\frac{\\Delta_{i,j}^2}{2\\sigma^2}) \\tag{6}", "source": "marker_v2", "marker_block_id": "/page/3/Equation/5"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0033", "section": "4.1. Distance-Aware Query-Key Interactions", "page_start": 4, "page_end": 4, "type": "Text", "text": "where \\sigma is a learnable scale parameter which controls the effective interaction range. This kernel assigns higher affinity to tokens with similar query–key representations, while naturally suppressing interactions between distant tokens. Importantly, the RBF distance admits a separable formulation with respect to queries and keys. This distance-based structure induces localized, distance-aware interactions and naturally facilitates structured sparsity (see Sec. 4.2).", "source": "marker_v2", "marker_block_id": "/page/3/Text/6"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0034", "section": "4.1. Distance-Aware Query-Key Interactions", "page_start": 4, "page_end": 4, "type": "Text", "text": "The final dense attention weights are obtained by normalizing these affinities across the sequence dimension:", "source": "marker_v2", "marker_block_id": "/page/3/Text/7"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0035", "section": "4.1. Distance-Aware Query-Key Interactions", "page_start": 4, "page_end": 4, "type": "Equation", "text": "a_{i,j} = \\frac{s_{i,j}}{\\sum_{n=1}^{N} s_{i,n}},\\tag{7}", "source": "marker_v2", "marker_block_id": "/page/3/Equation/8"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0036", "section": "4.1. Distance-Aware Query-Key Interactions", "page_start": 4, "page_end": 4, "type": "Text", "text": "which ensures a valid probability distribution over tokens and induces global interactions across all tokens.", "source": "marker_v2", "marker_block_id": "/page/3/Text/9"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0037", "section": "4.1. Distance-Aware Query-Key Interactions", "page_start": 4, "page_end": 4, "type": "Text", "text": "Notably, unlike standard attention, we do not apply an additional softmax operation to the RBF scores. The exponential nonlinearity and temperature-like scaling introduced by softmax are already captured by the RBF kernel through its exponential form and variance parameter \\sigma^2 . As a result, this formulation produces distance-aware, smoothly decaying attention weights that naturally align with the bounded-confidence interactions characteristic of Krause dynamics.", "source": "marker_v2", "marker_block_id": "/page/3/Text/10"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0038", "section": "4.2. Local and Selective Interactions", "page_start": 4, "page_end": 4, "type": "Text", "text": "The dense attention in Eq. (7) enables unrestricted global information exchange, allowing every token to interact with all others. While effective in standard Transformers, such fully coupled mixing induces strong global competition and", "source": "marker_v2", "marker_block_id": "/page/3/Text/12"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0039", "section": "4.2. Local and Selective Interactions", "page_start": 4, "page_end": 4, "type": "Text", "text": "repeated averaging, which can obscure local structure and amplify dominant tokens across layers. To address this, we introduce local and selective interaction constraints, which are central to bounded-confidence consensus models.", "source": "marker_v2", "marker_block_id": "/page/3/Text/13"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0040", "section": "4.2. Local and Selective Interactions", "page_start": 4, "page_end": 4, "type": "Text", "text": "Local Interactions. We introduce a local interaction mechanism that restricts attention to a pre-defined neighborhood \\mathcal{N}_i for each token i. Instead of normalizing attention weights over the full sequence, the resultant normalization is performed only within this neighborhood:", "source": "marker_v2", "marker_block_id": "/page/3/Text/14"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0041", "section": "4.2. Local and Selective Interactions", "page_start": 4, "page_end": 4, "type": "Equation", "text": "\\tilde{a}_{i,j} = \\frac{s_{i,j}}{\\sum_{\\ell \\in \\mathcal{N}_i} s_{i,\\ell}}, \\qquad j \\in \\mathcal{N}_i, (8)", "source": "marker_v2", "marker_block_id": "/page/3/Equation/15"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0042", "section": "4.2. Local and Selective Interactions", "page_start": 4, "page_end": 4, "type": "Text", "text": "so that token i aggregates information exclusively from locally admissible neighbors. This explicit locality constraint enforces a hard interaction cutoff analogous to bounded confidence in Krause dynamics, suppressing global averaging while preserving meaningful local interactions.", "source": "marker_v2", "marker_block_id": "/page/3/Text/16"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0043", "section": "4.2. Local and Selective Interactions", "page_start": 4, "page_end": 4, "type": "Text", "text": "The neighborhood \\mathcal{N}_i is defined according to the underlying task structure. In vision models, tokens are associated with spatial locations, and \\mathcal{N}_i corresponds to a local spatial window centered at token i. In autoregressive settings, including image generation and language modeling, \\mathcal{N}_i is instead specified as a causal window that limits attention to a fixed number of preceding tokens.", "source": "marker_v2", "marker_block_id": "/page/3/Text/17"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0044", "section": "4.2. Local and Selective Interactions", "page_start": 4, "page_end": 4, "type": "Text", "text": "Selective Interactions. While local interaction enforces spatial or temporal locality, bounded-confidence dynamics further imply that influence should be both limited and competitive, with only a small subset of neighbors exerting meaningful impact. To operationalize this principle, we introduce a mechanism that sparsifies attention within each local neighborhood. Specifically, for each token i, we select the top-k neighbors in \\mathcal{N}_i according to their similarity scores. Let \\xi_i^k \\subseteq \\mathcal{N}_i, |\\xi_i^k| = k denote the set of indices corresponding to the k largest values of s_{i,j} for j \\in \\mathcal{N}_i .", "source": "marker_v2", "marker_block_id": "/page/3/Text/18"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0045", "section": "4.2. Local and Selective Interactions", "page_start": 5, "page_end": 5, "type": "Text", "text": "Attention weights are then normalized over this reduced set:", "source": "marker_v2", "marker_block_id": "/page/4/Text/1"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0046", "section": "4.2. Local and Selective Interactions", "page_start": 5, "page_end": 5, "type": "Equation", "text": "\\tilde{a}_{i,j}^* = \\frac{s_{i,j}}{\\sum_{\\ell \\in \\xi_i^k} s_{i,\\ell}}, \\qquad j \\in \\xi_i^k, (9)", "source": "marker_v2", "marker_block_id": "/page/4/Equation/2"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0047", "section": "4.2. Local and Selective Interactions", "page_start": 5, "page_end": 5, "type": "Text", "text": "ensuring that token i interacts only with its most relevant local neighbors. This top-k formulation constitutes the final attention rule used in Krause Attention. The output representation is then computed as z_i = \\sum_{j \\in \\mathcal{N}_i} \\tilde{a}_{i,j}^* \\cdot v_j .", "source": "marker_v2", "marker_block_id": "/page/4/Text/3"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0048", "section": "4.2. Local and Selective Interactions", "page_start": 5, "page_end": 5, "type": "Text", "text": "This selective interaction mechanism induces a functionally sparse-attention pattern, in which each token aggregates information from a small, adaptively chosen subset of neighbors. By combining local interaction with the selective topk selection, our Krause Attention departs from the dense, fully connected interaction graph of standard self-attention and instead recovers the characteristic local clustering with global coherence behavior of Krause consensus dynamics.", "source": "marker_v2", "marker_block_id": "/page/4/Text/4"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0049", "section": "4.2. Local and Selective Interactions", "page_start": 5, "page_end": 5, "type": "Text", "text": "Time Complexity and Training Algorithm. Krause Attention restricts interactions to a local neighborhood and further prunes them via top-k selective sparsity. As a result, each token attends only to W nearby candidates and retains k < W of them during selective mixing. This yields an overall time complexity \\mathcal{O}(NWd) , which is strictly lower than the \\mathcal{O}(N^2) cost of self-attention while preserving contentadaptive token interactions. Algorithm 1 summarizes the forward pass of a Krause Attention layer. The procedure follows three steps: (1) compute query-key distances and transform them using an RBF kernel, (2) apply locality masking followed by top-k selection within the local region, and (3) normalize affinities and aggregate values.", "source": "marker_v2", "marker_block_id": "/page/4/Text/5"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0050", "section": "Algorithm 1 Forward pass of a Krause Attention layer", "page_start": 5, "page_end": 5, "type": "Code", "text": "1: Input: token sequence x, local neighborhoods \\{\\mathcal{N}_i\\}, sparsity level k, kernel scales \\{\\sigma_h\\}", "source": "marker_v2", "marker_block_id": "/page/4/Code/7"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0051", "section": "Algorithm 1 Forward pass of a Krause Attention layer", "page_start": 5, "page_end": 5, "type": "Text", "text": "2: Output: updated token sequence z", "source": "marker_v2", "marker_block_id": "/page/4/Text/8"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0052", "section": "Algorithm 1 Forward pass of a Krause Attention layer", "page_start": 5, "page_end": 5, "type": "Text", "text": "3: Compute projections: (q, k, v) \\leftarrow \\text{Linear}(x)", "source": "marker_v2", "marker_block_id": "/page/4/Text/9"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0053", "section": "Algorithm 1 Forward pass of a Krause Attention layer", "page_start": 5, "page_end": 5, "type": "Text", "text": "4: for each head h = 1, \\ldots, H do", "source": "marker_v2", "marker_block_id": "/page/4/Text/10"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0054", "section": "Algorithm 1 Forward pass of a Krause Attention layer", "page_start": 5, "page_end": 5, "type": "Text", "text": "(1) Distance-based affinity", "source": "marker_v2", "marker_block_id": "/page/4/Text/11"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0055", "section": "Algorithm 1 Forward pass of a Krause Attention layer", "page_start": 5, "page_end": 5, "type": "Equation", "text": "6: \\Delta_{i,j} \\leftarrow \\|q_i - k_j\\|^2 Eq. (5) 7: s_{i,j} \\leftarrow \\exp\\left(-\\Delta_{i,j}/(2\\sigma_h^2)\\right) Eq. (6)", "source": "marker_v2", "marker_block_id": "/page/4/Equation/12"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0056", "section": "Algorithm 1 Forward pass of a Krause Attention layer", "page_start": 5, "page_end": 5, "type": "Equation", "text": "7: s_{i,j} \\leftarrow \\exp\\left(-\\Delta_{i,j}/(2\\sigma_h^2)\\right) Eq. (6)", "source": "marker_v2", "marker_block_id": "/page/4/Equation/13"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0057", "section": "Algorithm 1 Forward pass of a Krause Attention layer", "page_start": 5, "page_end": 5, "type": "Text", "text": "(2) Apply locality", "source": "marker_v2", "marker_block_id": "/page/4/Text/14"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0058", "section": "Algorithm 1 Forward pass of a Krause Attention layer", "page_start": 5, "page_end": 5, "type": "Equation", "text": "263 9: s_{i,j} \\leftarrow 0 if j \\notin \\mathcal{N}_i Eq. (8)", "source": "marker_v2", "marker_block_id": "/page/4/Equation/15"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0059", "section": "Algorithm 1 Forward pass of a Krause Attention layer", "page_start": 5, "page_end": 5, "type": "Text", "text": "(3) Selective top-k sparsity", "source": "marker_v2", "marker_block_id": "/page/4/Text/16"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0060", "section": "Algorithm 1 Forward pass of a Krause Attention layer", "page_start": 5, "page_end": 5, "type": "Text", "text": "\\xi_i^k \\leftarrow \\operatorname{TopK}\\left(s_{i,\\mathcal{N}_i}, \\hat{k}\\right) s_{i,j} \\leftarrow 0 \\quad \\text{if } j \\notin \\xi_i^k", "source": "marker_v2", "marker_block_id": "/page/4/Text/17"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0061", "section": "Algorithm 1 Forward pass of a Krause Attention layer", "page_start": 5, "page_end": 5, "type": "Text", "text": "(4) Normalization and aggregation", "source": "marker_v2", "marker_block_id": "/page/4/Text/19"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0062", "section": "Algorithm 1 Forward pass of a Krause Attention layer", "page_start": 5, "page_end": 5, "type": "Equation", "text": "14: a_{i,j} \\leftarrow s_{i,j} / \\sum_{\\ell \\in \\xi_i^k} s_{i,\\ell} Eq. (9) 15: o_h[i] \\leftarrow \\sum_{j \\in \\xi_i^k} a_{i,j} v_j", "source": "marker_v2", "marker_block_id": "/page/4/Equation/20"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0063", "section": "Algorithm 1 Forward pass of a Krause Attention layer", "page_start": 5, "page_end": 5, "type": "Text", "text": "16: end for", "source": "marker_v2", "marker_block_id": "/page/4/Text/22"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0064", "section": "Algorithm 1 Forward pass of a Krause Attention layer", "page_start": 5, "page_end": 5, "type": "Text", "text": "17: z \\leftarrow \\text{Linear}_{out}(\\text{Concat}(o_1, \\dots, o_H))", "source": "marker_v2", "marker_block_id": "/page/4/Text/23"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0065", "section": "Algorithm 1 Forward pass of a Krause Attention layer", "page_start": 5, "page_end": 5, "type": "Text", "text": "18: return z", "source": "marker_v2", "marker_block_id": "/page/4/Text/24"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0066", "section": "4.3. Connection to Transformer Clustering Dynamics", "page_start": 5, "page_end": 5, "type": "Text", "text": "This subsection situates Krause Attention within recent theoretical analyses of Transformers' clustering behavior. Within this perspective, Krause Attention can be understood as a modification of standard self-attention flows, converting the fragile, architecture-dependent clustering behavior into a more explicit and controllable mechanism.", "source": "marker_v2", "marker_block_id": "/page/4/Text/26"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0067", "section": "4.3. Connection to Transformer Clustering Dynamics", "page_start": 5, "page_end": 5, "type": "Text", "text": "Background: Clustering in Self-attention. Recent work has established that self-attention layers can be interpreted as dynamical systems acting on token representations. In particular, Geshkovski et al. (2023) model the evolution of token states \\{x_i(t)\\}_{i=1}^N as a continuous-time flow", "source": "marker_v2", "marker_block_id": "/page/4/Text/27"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0068", "section": "4.3. Connection to Transformer Clustering Dynamics", "page_start": 5, "page_end": 5, "type": "Equation", "text": "V\\dot{x}_{i}(t) = \\sum_{j=1}^{N} a_{i,j}(t) Vx_{j}(t), (10)", "source": "marker_v2", "marker_block_id": "/page/4/Equation/28"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0069", "section": "4.3. Connection to Transformer Clustering Dynamics", "page_start": 5, "page_end": 5, "type": "Text", "text": "where the attention weights a_{i,j}(t) are typically defined through dot-product similarity,", "source": "marker_v2", "marker_block_id": "/page/4/Text/29"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0070", "section": "4.3. Connection to Transformer Clustering Dynamics", "page_start": 5, "page_end": 5, "type": "Equation", "text": "a_{i,j}(t) = \\frac{\\exp(\\langle Qx_i(t), Kx_j(t)\\rangle)}{\\sum_{\\ell=1}^{N} \\exp(\\langle Qx_i(t), Kx_\\ell(t)\\rangle)}. (11)", "source": "marker_v2", "marker_block_id": "/page/4/Equation/30"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0071", "section": "4.3. Connection to Transformer Clustering Dynamics", "page_start": 5, "page_end": 5, "type": "Text", "text": "Under this formulation, tokens behave as interacting particles whose trajectories are mediated by globally normalized similarity scores. While such systems may exhibit clustering behavior under specific spectral conditions on the value matrix V, such behavior is highly sensitive to initialization and architectural details (Geshkovski et al., 2023).", "source": "marker_v2", "marker_block_id": "/page/4/Text/31"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0072", "section": "4.3. Connection to Transformer Clustering Dynamics", "page_start": 5, "page_end": 5, "type": "Text", "text": "More generally, global normalization induces a strong tendency toward synchronization. In the mean-field limit N \\to \\infty , Chen et al. (2025) show that dot-product attention corresponds to a Wasserstein gradient flow that contracts exponentially toward a single consensus state. Consequently, the empirical token distribution \\mu_t = \\frac{1}{N} \\sum_{i=1}^{N} \\delta_{x_i(t)} tends toward a dominant mode, reflecting global synchronization and potential representational collapse (Xiao et al., 2024). From this dynamical perspective, attention sink behavior can be interpreted as a discrete manifestation of this global synchronization and representation contraction process.", "source": "marker_v2", "marker_block_id": "/page/4/Text/32"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0073", "section": "Bounded-confidence Dynamics in Krause Attention.", "page_start": 5, "page_end": 5, "type": "Text", "text": "Krause Attention fundamentally modifies this dynamical system by replacing global dot-product coupling with distance-based, bounded-confidence interactions. Concretely, the attention weights take the form", "source": "marker_v2", "marker_block_id": "/page/4/Text/34"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0074", "section": "Bounded-confidence Dynamics in Krause Attention.", "page_start": 5, "page_end": 5, "type": "Equation", "text": "a_{i,j}(t) = \\frac{\\exp\\left(-\\frac{\\|Qx_i(t) - Kx_j(t)\\|^2}{2\\sigma^2}\\right)}{\\sum_{\\ell \\in \\xi_i^k} \\exp\\left(-\\frac{\\|Qx_i(t) - Kx_\\ell(t)\\|^2}{2\\sigma^2}\\right)}, \\qquad j \\in \\xi_i^k, (12)", "source": "marker_v2", "marker_block_id": "/page/4/Equation/35"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0075", "section": "Bounded-confidence Dynamics in Krause Attention.", "page_start": 5, "page_end": 5, "type": "Text", "text": "where \\xi_i^k denotes a selected sparse, local neighborhood associated with token i.", "source": "marker_v2", "marker_block_id": "/page/4/Text/36"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0076", "section": "Bounded-confidence Dynamics in Krause Attention.", "page_start": 5, "page_end": 5, "type": "Text", "text": "This construction induces a bounded-confidence interaction structure: tokens influence each other only when they are", "source": "marker_v2", "marker_block_id": "/page/4/Text/37"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0077", "section": "Bounded-confidence Dynamics in Krause Attention.", "page_start": 6, "page_end": 6, "type": "Text", "text": "sufficiently close in representation space and belong to a shared local support. As a result, the time evolution of token states no longer follows a globally coupled flow, but instead decomposes into locally interacting subsystems.", "source": "marker_v2", "marker_block_id": "/page/5/Text/13"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0078", "section": "Bounded-confidence Dynamics in Krause Attention.", "page_start": 6, "page_end": 6, "type": "Text", "text": "Asymptotic Multi-cluster Behavior. This structural modification influences the long-term behavior of the dynamics. By attenuating long-range interactions, Krause Attention reduces the global synchronization effects characteristic of standard self-attention and instead encourages the formation of multiple locally coherent groups. Under suitable choices of the scale parameter \\sigma and sparsity support \\xi_i^k , the empirical distribution \\mu_t can tend\\ toward a multi-atomic structure \\mu_t \\rightharpoonup \\sum_{k=1}^m \\pi_k \\delta_{\\mathcal{L}_k} where \\mathcal{L}_k denote stable cluster representatives and \\pi_k their relative masses. This behavior closely parallels the classical Krause consensus model, where agents self-organize into multiple invariant opinion groups instead of converging to a global average. Crucially, in Krause Attention this multi-cluster tendency is induced by the architectural constraints of the attention mechanism itself, rather than any implicit biases arising from depth, initialization, or optimization-specific effects.", "source": "marker_v2", "marker_block_id": "/page/5/Text/14"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0079", "section": "Bounded-confidence Dynamics in Krause Attention.", "page_start": 6, "page_end": 6, "type": "Text", "text": "Implications. By encoding locality and selective interactions into the design, Krause Attention turns clustering from a fragile, emergent phenomenon into a more stable architectural inductive bias. This helps preserve token diversity and improve robustness against representation collapse.", "source": "marker_v2", "marker_block_id": "/page/5/Text/15"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0080", "section": "Bounded-confidence Dynamics in Krause Attention.", "page_start": 6, "page_end": 6, "type": "Text", "text": "Appendix C provides supporting theoretical analyses that illuminate this multi-cluster behavior from two complementary viewpoints: clustering-emergence dynamics (Geshkovski et al., 2023) and mean-field Wasserstein gradient flows (Chen et al., 2025). These analyses characterize the behavior of Krause Attention under appropriate bounded-confidence and separation assumptions, offering mechanistic insight into why multiple clusters can arise.", "source": "marker_v2", "marker_block_id": "/page/5/Text/16"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0081", "section": "5.1. Experimental Setup", "page_start": 6, "page_end": 6, "type": "Text", "text": "Image Recognition. We adopt the Vision Transformer (Dosovitskiy, 2021; Touvron et al., 2021a; Liu et al., 2021) with Krause Attention replacing the self-attention, and assess performance on CIFAR-10/100 (Krizhevsky et al., 2009), Fashion-MNIST (Xiao et al., 2017), and ImageNet-1K (Deng et al., 2009).", "source": "marker_v2", "marker_block_id": "/page/5/Text/19"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0082", "section": "5.1. Experimental Setup", "page_start": 6, "page_end": 6, "type": "Text", "text": "Image Generation. We integrate Krause Attention into autoregressive Transformers (Parmar et al., 2018) and evaluate on MNIST (LeCun et al., 2002) and CIFAR-10.", "source": "marker_v2", "marker_block_id": "/page/5/Text/20"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0083", "section": "5.1. Experimental Setup", "page_start": 6, "page_end": 6, "type": "Text", "text": "Language Modeling. We incorporate Krause Attention into Qwen (Yang et al., 2024a) and Llama (Grattafiori et al., 2024) as an auxiliary shortcut rather than a replacement", "source": "marker_v2", "marker_block_id": "/page/5/Text/21"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0084", "section": "5.1. Experimental Setup", "page_start": 6, "page_end": 6, "type": "Text", "text": "for self-attention, and trained via LoRA (Hu et al., 2022).<sup>1</sup> We evaluate these hybrid models on established language understanding benchmarks (Clark et al., 2019; Bisk et al., 2020; Wang et al., 2024; De Marneffe et al., 2019; Williams et al., 2018; Zhou et al., 2023; Nie et al., 2020).", "source": "marker_v2", "marker_block_id": "/page/5/Text/22"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0085", "section": "5.1. Experimental Setup", "page_start": 6, "page_end": 6, "type": "Text", "text": "Additional Details. Across all tasks, standard Transformers serve as primary baselines. Krause Attention is mainly controlled by two task-dependent hyperparameters: the local neighborhood size \\mathcal{N}_i and the top-k sparsity level. Additional experimental results, implementation details, and ablation studies are provided in Appendix D.", "source": "marker_v2", "marker_block_id": "/page/5/Text/23"}
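The two knobs above fully determine the sparsity pattern. As a minimal, hedged sketch (a reconstruction from the paper's description, not the released implementation), the following PyTorch function combines a distance-aware score, a local window, and top-k selection; the negative squared-distance score and the symmetric window are assumptions consistent with the distance-aware and local/selective interaction sections.

import torch

def krause_attention(q, k, v, window: int, topk: int):
    # Minimal sketch of a bounded-confidence (Krause-style) attention step.
    # q, k, v: (N, d). Assumed design: distance-aware scores (closer key
    # gives higher score), a symmetric local window, and per-query top-k.
    n, d = q.shape
    scores = -torch.cdist(q, k) ** 2 / d ** 0.5               # (N, N)
    idx = torch.arange(n, device=q.device)
    local = (idx[None, :] - idx[:, None]).abs() <= window     # locality mask
    scores = scores.masked_fill(~local, float("-inf"))
    kth = scores.topk(min(topk, n), dim=-1).values[:, -1:]    # k-th best score
    scores = scores.masked_fill(scores < kth, float("-inf"))  # keep only top-k
    return torch.softmax(scores, dim=-1) @ v

Each query always retains itself (distance zero, inside the window), so no softmax row is empty; entries outside the window or below the k-th score receive zero weight.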
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0086", "section": "5.2. Vision Transformers for Image Recognition", "page_start": 6, "page_end": 6, "type": "Text", "text": "Krause Vision Transformers. We construct Krause Vision Transformers (KViTs) by replacing self-attention in standard ViT architectures with Krause Attention, leaving all other architectural components unchanged. For small- and medium-scale datasets, we evaluate ViT-Tiny/Small/Base backbones. For ImageNet-1K, we consider the commonly used ViT-S/B families. We additionally study Krause Attention within SwinTransformers. Results for KViTs on CIFAR-10/100 and ImageNet-1K are reported here.", "source": "marker_v2", "marker_block_id": "/page/5/Text/25"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0087", "section": "5.2. Vision Transformers for Image Recognition", "page_start": 6, "page_end": 6, "type": "Text", "text": "Results on CIFAR-10/100. On CIFAR-10 and CIFAR-100, we configure Krause Attention with a local spatial neighborhood corresponding to a 4-neighbor grid. The topk interaction parameter is scheduled linearly from 2 to 4 across layers. Tables 1 and 2 report classification accuracy, parameter counts, and FLOPs. Across all backbones and datasets, KViTs consistently improve accuracy while reducing FLOPs. Parameter counts remain effectively unchanged, confirming that the performance gains stem from the Krause Attention mechanism itself rather than the model capacity.", "source": "marker_v2", "marker_block_id": "/page/5/Text/26"}
89
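The linear top-k schedule mentioned above can be written as a one-line interpolation per layer. The helper below is hypothetical (the rounding rule is an assumption), with the CIFAR endpoints k=2 and k=4 taken from the text.

def topk_schedule(num_layers: int, k_min: int = 2, k_max: int = 4):
    # Linearly interpolate the top-k level from k_min (first layer)
    # to k_max (last layer); rounding choice is an assumption.
    if num_layers == 1:
        return [k_min]
    return [round(k_min + (k_max - k_min) * l / (num_layers - 1))
            for l in range(num_layers)]

# e.g. topk_schedule(12) -> [2, 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4]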
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0088", "section": "5.2. Vision Transformers for Image Recognition", "page_start": 6, "page_end": 6, "type": "TableGroup", "text": "Table 1. Image classification results on CIFAR-10 . Models Accuracy(%) # Parameters FLOPs ViT-T 90.75 5,362,762 0.37G KViT-T 93.81 5,362,774 0.25G ViT-S 93.33 21,342,346 1.43G KViT-S 95.20 21,342,358 0.97G ViT-B 92.45 85,152,010 5.61G KViT-B 95.35 85,152,022 3.77G", "source": "marker_v2", "marker_block_id": "/page/5/TableGroup/352"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0089", "section": "5.2. Vision Transformers for Image Recognition", "page_start": 6, "page_end": 6, "type": "Text", "text": "Results on ImageNet-1K. We further evaluate KViTs on ImageNet-1K to assess performance at scale. The local interaction neighborhood is expanded to 25 tokens, and the top-kselection parameter increases linearly from 8 to 16 across", "source": "marker_v2", "marker_block_id": "/page/5/Text/29"}
91
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0090", "section": "5.2. Vision Transformers for Image Recognition", "page_start": 6, "page_end": 6, "type": "Footnote", "text": "Since long-range connections are known to be highly valuable in language modeling, this setting is intended to examine the compatibility and complementary benefits of Krause Attention in LLMs rather than to position it as a full architectural alternative.", "source": "marker_v2", "marker_block_id": "/page/5/Footnote/30"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0091", "section": "5.2. Vision Transformers for Image Recognition", "page_start": 7, "page_end": 7, "type": "TableGroup", "text": "Table 2. Image classification results on CIFAR-100. Models Accuracy(%) # Parameters FLOPs ViT-T 66.07 5,380,132 0.37G KViT-T 74.34 5,380,144 0.25G ViT-S 71.74 21,376,996 1.43G KViT-S 77.05 21,377,008 0.97G ViT-B 72.28 85,221,220 5.61G KViT-B 78.03 85,221,232 3.77G", "source": "marker_v2", "marker_block_id": "/page/6/TableGroup/590"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0092", "section": "5.2. Vision Transformers for Image Recognition", "page_start": 7, "page_end": 7, "type": "Text", "text": "layers to accommodate the larger spatial resolution. Table 3 shows that KViTs consistently outperform ViT counterparts while reducing FLOPs. These trends hold across model sizes and patch resolutions, indicating that Krause Attention scales effectively to large-scale vision tasks. Fig. 3 further illustrates this behavior, showing that Krause Attention preserves diverse, multi-cluster attention patterns across heads, in contrast to the more homogeneous attention in ViTs.", "source": "marker_v2", "marker_block_id": "/page/6/Text/4"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0093", "section": "5.2. Vision Transformers for Image Recognition", "page_start": 7, "page_end": 7, "type": "TableGroup", "text": "Table 3. Image classification results on ImageNet-1K. Models Accuracy(%) # Parameters FLOPs ViT-S-16 74.04 22,050,664 4.62G KViT-S-16 75.69 22,050,676 3.22G ViT-S-32 68.78 22,878,952 1.15G KViT-S-32 71.09 22,878,964 0.79G ViT-B-16 72.19 86,567,656 17.61G KViT-B-16 74.50 86,567,668 12.03G ViT-B-32 67.20 88,224,232 4.42G KViT-B-32 70.44 88,224,244 3.00G", "source": "marker_v2", "marker_block_id": "/page/6/TableGroup/591"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0094", "section": "5.2. Vision Transformers for Image Recognition", "page_start": 7, "page_end": 7, "type": "FigureGroup", "text": "Figure 3. Krause Attention yields more diverse attention heads.", "source": "marker_v2", "marker_block_id": "/page/6/FigureGroup/592"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0095", "section": "5.3. Autoregressive Models for Image Generation", "page_start": 7, "page_end": 7, "type": "Text", "text": "Krause Autoregressive Models. We further evaluate Krause Attention in the context of autoregressive image generation. Specifically, we construct Krause Autoregressive Models (KARMs) by replacing the self-attention layers in standard autoregressive Transformers with Krause Attention. We compare KARMs against two baselines:", "source": "marker_v2", "marker_block_id": "/page/6/Text/10"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0096", "section": "5.3. Autoregressive Models for Image Generation", "page_start": 7, "page_end": 7, "type": "Text", "text": "(i) standard autoregressive models with full self-attention (ARM), and (ii) autoregressive models with linear attention (LARM) (Katharopoulos et al., 2020) . Performance is measured using bits-per-dimension (BPD) for likelihood quality and images-per-second for inference throughput.", "source": "marker_v2", "marker_block_id": "/page/6/Text/11"}
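For reference, bits-per-dimension is the model's negative log-likelihood converted from nats to bits and averaged over the pixel dimensions; this is the standard definition, not something specific to this paper. A minimal sketch of the conversion:

import math

def bits_per_dim(nll_nats: float, num_dims: int) -> float:
    # Summed negative log-likelihood (in nats) for one image -> bits/dim.
    # e.g. num_dims = 784 for MNIST, 3072 for CIFAR-10.
    return nll_nats / (num_dims * math.log(2))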
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0097", "section": "5.3. Autoregressive Models for Image Generation", "page_start": 7, "page_end": 7, "type": "TableGroup", "text": "Table 4. Image generation results of KARMs on MNIST. Inference speed is measured on a single NVIDIA H100 GPU. Models BPD (↓) Images/sec (↑) Time Complexity ARM 0.5685 83.5772 O(N2d) LARM 0.5855 499.3672 O(N d2 ) KARM 0.5652 105.6037 O(NW d)", "source": "marker_v2", "marker_block_id": "/page/6/TableGroup/593"}
99
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0098", "section": "5.3. Autoregressive Models for Image Generation", "page_start": 7, "page_end": 7, "type": "Text", "text": "Results on MNIST. We first conduct experiments on the MNIST dataset, where each image is represented as a sequence of length 784. For KARMs, we employ a causal local window of size 128 and set the top-k interaction parameter to 96 to balance locality and expressiveness. We generate 50K samples to measure inference throughout.", "source": "marker_v2", "marker_block_id": "/page/6/Text/14"}
100
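In the autoregressive setting the locality constraint becomes causal: position i may attend only to itself and nearby preceding positions. A hedged sketch of that mask with the MNIST configuration above (whether the window count includes the current token is an assumption):

import torch

def causal_window_mask(n: int, window: int) -> torch.Tensor:
    # Boolean (n, n) mask: True where query i may attend to key j,
    # restricted to the causal local window 0 <= i - j < window.
    i = torch.arange(n)[:, None]
    j = torch.arange(n)[None, :]
    return (i - j >= 0) & (i - j < window)

mask = causal_window_mask(784, 128)  # MNIST: sequence length 784, window 128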
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0099", "section": "5.3. Autoregressive Models for Image Generation", "page_start": 7, "page_end": 7, "type": "Text", "text": "As shown in Table 4, KARM achieves the best likelihood (lowest BPD) among all models while also generating images substantially faster than the full-attention ARM baseline. Although KARM is slower than the linear-attention LARM, it offers markedly better likelihood, matching expectations from the intermediate time complexity O(NW d). Fig. 4 presents unconditional samples generated by KARM, illustrating the visual quality of the generated digits.", "source": "marker_v2", "marker_block_id": "/page/6/Text/15"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0100", "section": "5.3. Autoregressive Models for Image Generation", "page_start": 7, "page_end": 7, "type": "PictureGroup", "text": "Figure 4. Unconditional samples generated by KARM on MNIST.", "source": "marker_v2", "marker_block_id": "/page/6/PictureGroup/594"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0101", "section": "5.3. Autoregressive Models for Image Generation", "page_start": 7, "page_end": 7, "type": "Text", "text": "Results on CIFAR-10. We next evaluate on CIFAR-10, which corresponds to a longer sequence length of 3072. We expand the window size to 256 and set the top-k parameter to 192. Inference speed is evaluated on 10K samples.", "source": "marker_v2", "marker_block_id": "/page/6/Text/18"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0102", "section": "5.3. Autoregressive Models for Image Generation", "page_start": 7, "page_end": 7, "type": "TableGroup", "text": "Table 5. Image generation results of KARMs on CIFAR-10. Inference speed is measured on a single NVIDIA H100 GPU. Models BPD (↓) Images/sec (↑) Time Complexity ARM 3.0224 1.8933 O(N2d) LARM 3.1836 14.4032 O(N d2 ) KARM 3.0032 4.5240 O(NW d)", "source": "marker_v2", "marker_block_id": "/page/6/TableGroup/595"}
104
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0103", "section": "5.3. Autoregressive Models for Image Generation", "page_start": 7, "page_end": 7, "type": "Text", "text": "Quantitative results are summarized in Table 5. As in the MNIST experiments, KARMs achieve the lowest BPD", "source": "marker_v2", "marker_block_id": "/page/6/Text/21"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0104", "section": "5.3. Autoregressive Models for Image Generation", "page_start": 8, "page_end": 8, "type": "Text", "text": "among all evaluated methods while significantly outperforming standard ARMs in generation speed. Although LARMs remain the fastest, they exhibit inferior likelihood performance compared to KARMs, highlighting the tradeoff between efficiency and modeling accuracy.", "source": "marker_v2", "marker_block_id": "/page/7/Text/6"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0105", "section": "5.3. Autoregressive Models for Image Generation", "page_start": 8, "page_end": 8, "type": "FigureGroup", "text": "Figure 5. Samples completed by KARMs on CIFAR-10.", "source": "marker_v2", "marker_block_id": "/page/7/FigureGroup/512"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0106", "section": "5.3. Autoregressive Models for Image Generation", "page_start": 8, "page_end": 8, "type": "Text", "text": "We also evaluate image completion on CIFAR-10 by masking half of each input image and autoregressively generating the missing pixels. Fig. 5 shows five completion samples generated for masked input. These results indicate that our KARMs produce semantically consistent and visually plausible completions despite the increased dataset complexity.", "source": "marker_v2", "marker_block_id": "/page/7/Text/9"}
108
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0107", "section": "5.4. LLMs for Language Understanding and Reasoning", "page_start": 8, "page_end": 8, "type": "FigureGroup", "text": "Figure 6. Our Krause-LLMs add a Krause Attention shortcut at each layer of the original LLM architecture.", "source": "marker_v2", "marker_block_id": "/page/7/FigureGroup/513"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0108", "section": "5.4. LLMs for Language Understanding and Reasoning", "page_start": 8, "page_end": 8, "type": "Text", "text": "Krause-LLMs. To explore whether Krause Attention can also improve language modeling, we integrate it into LLMs by adding a Krause-attention shortcut at every layer (Fig. 6) . Both the Krause pathway and the original attention modules are trained via LoRA adaptation. We compare this hybrid model against the base LLMs and their LoRA-finetuned variants. All models are trained on a 50K-sample subset of Flan-v2 (Longpre et al., 2023) and evaluated under a zero-shot setting on a suite of language benchmarks.", "source": "marker_v2", "marker_block_id": "/page/7/Text/13"}
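The per-layer shortcut of Fig. 6 can be pictured as a thin residual wrapper around each existing decoder block. The sketch below is an illustrative reconstruction, not the authors' code; the zero-initialized gate is an assumption that makes the added pathway a no-op at the start of LoRA finetuning.

import torch
import torch.nn as nn

class KrauseShortcutBlock(nn.Module):
    # Hypothetical wrapper: keep the original LLM layer and add a parallel
    # Krause Attention pathway to its output, as sketched in Fig. 6.
    def __init__(self, base_block: nn.Module, krause_attn: nn.Module):
        super().__init__()
        self.base_block = base_block    # original layer (LoRA-adapted)
        self.krause_attn = krause_attn  # causal windowed top-k attention
        self.gate = nn.Parameter(torch.zeros(1))  # starts as a no-op

    def forward(self, x, **kwargs):
        h = self.base_block(x, **kwargs)
        return h + self.gate * self.krause_attn(x)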
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0109", "section": "5.4. LLMs for Language Understanding and Reasoning", "page_start": 8, "page_end": 8, "type": "Text", "text": "Results on Krause-Llama. For Llama3-8B, the Krause Attention is configured with a causal window size of 32 and a top-k sparsity value of 16. Table 6 presents the performance of Krause-Llama3-8B. Compared to both the base Llama3-8B and its LoRA finetuned variant, Krause-Llama3- 8B achieves consistent gains on most datasets, including BoolQ, CB, PIQA, MNLI, ANLI (R1–R3), and IFEval. Performance on MMLU-Pro also matches the LoRA baseline. These results suggest that incorporating a Krause-attention", "source": "marker_v2", "marker_block_id": "/page/7/Text/14"}
111
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0110", "section": "5.4. LLMs for Language Understanding and Reasoning", "page_start": 8, "page_end": 8, "type": "Text", "text": "shortcut can provide complementary inductive bias for language tasks without degrading overall model quality.", "source": "marker_v2", "marker_block_id": "/page/7/Text/15"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0111", "section": "5.4. LLMs for Language Understanding and Reasoning", "page_start": 8, "page_end": 8, "type": "Text", "text": "Alleviating Attention Sinks. LLMs often suffer from the at tention sink effect (Xiao et al., 2024) , where the softmax normalization allocates disproportionately high attention scores on early tokens, regardless of their semantic relevance. This behavior introduces positional bias, reduces model expressivity, and weakens representation robustness.", "source": "marker_v2", "marker_block_id": "/page/7/Text/16"}
113
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0112", "section": "5.4. LLMs for Language Understanding and Reasoning", "page_start": 8, "page_end": 8, "type": "Text", "text": "Krause Attention provides a complementary, boundedconfidence mechanism for mitigating this issue. By restricting attention to the local neighborhood, distant tokens can no longer allocate weight to the initial positions once they fall outside the receptive field. As shown in Fig. 7, the base Llama model exhibits large oscillations and persistent peaks across layers, whereas Krause-LLMs produce remarkably more stable attention curves. This stabilization indicates that Krause Attention reduces reliance on fixed positional anchors and supports more robust representation learning.", "source": "marker_v2", "marker_block_id": "/page/7/Text/17"}
114
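The curves in Fig. 7 can be reproduced in spirit with a simple diagnostic: average, per layer, the attention mass that all queries place on the first key position. The tensor layout below is an assumption ((batch, heads, query, key), as returned by many Transformer implementations):

def first_token_attention(attn_per_layer):
    # attn_per_layer: list of (B, H, N, N) attention maps, one per layer.
    # Returns, for each layer, the mean attention assigned to key position 0,
    # the quantity plotted per layer in Fig. 7.
    return [a[..., 0].mean().item() for a in attn_per_layer]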
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0113", "section": "5.4. LLMs for Language Understanding and Reasoning", "page_start": 8, "page_end": 8, "type": "FigureGroup", "text": "Figure 7. Layer dynamics of first-token attentions on Llama3-8B.", "source": "marker_v2", "marker_block_id": "/page/7/FigureGroup/514"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0114", "section": "6. Conclusions", "page_start": 8, "page_end": 8, "type": "Text", "text": "This paper introduces Krause Attention, a distance-based, bounded-confidence attention mechanism that embeds an explicit multi-cluster synchronization prior into Transformer architectures. By enforcing localized and selectively sparse interactions, Krause Attention promotes structured multicluster token coordination, contrasting with the global synchronization tendencies observed in standard self-attention. Theoretical analyses relate Krause Attention to recent analyses of attention dynamics and to classical boundedconfidence models, offering mechanistic insight into why such architectures support multi-cluster behavior. Empirical evaluations across vision recognition, autoregressive image generation, and LLMs demonstrate consistent performance gains together with efficient inference. Overall, Krause Attention provides a principled and scalable inductive bias for structured token coordination, opening new avenues for consensus-driven and dynamics-aware Transformer design.", "source": "marker_v2", "marker_block_id": "/page/7/Text/21"}
116
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0115", "section": "6. Conclusions", "page_start": 9, "page_end": 9, "type": "Text", "text": "Models BoolQ CB PIQA MNLI ANLI-R1 ANLI-R2 ANLI-R3 MMLU-Pro IFEval Llama3-8B 76.13 41.07/19.41 51.52 35.45/18.11 33.40/16.69 33.40/16.69 33.50/17.04 37.50 22.18 Llama3-8B (finetuned w/ LoRA) 80.41 60.71/47.81 75.16 59.53/55.29 38.70/30.62 39.90/33.37 44.92/39.57 41.67 32.72 Krause-Llama3-8B 80.59 64.29/48.04 77.77 63.27/53.72 40.30/33.01 40.50/34.27 45.67/39.84 41.67 34.01", "source": "marker_v2", "marker_block_id": "/page/8/Text/40"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0116", "section": "6. Conclusions", "page_start": 9, "page_end": 9, "type": "Text", "text": "the field of Machine Learning by proposing a new attention mechanism grounded in bounded-confidence dynamics. Krause Attention modifies internal model interactions but does not introduce application-specific decision rules or domain-specific interventions. As with most advances in Transformer architectures, the broader societal impacts depend on the tasks and systems into which such models are deployed. While our method may contribute to improved efficiency and stability in future models, we do not identify any unique ethical concerns or societal implications beyond those already well established for research in deep learning.", "source": "marker_v2", "marker_block_id": "/page/8/Text/19"}
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0117", "section": "6. Conclusions", "page_start": 9, "page_end": 9, "type": "Text", "text": "This paper presents work whose primary goal is to advance", "source": "marker_v2", "marker_block_id": "/page/8/Text/42"}
icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/marker_meta.json ADDED
@@ -0,0 +1,3042 @@
+ {
+ "table_of_contents": [
+ {"title": "001", "heading_level": null, "page_id": 0, "polygon": [[25.400390625, 80.05078125], [40.49589920043945, 80.05078125], [40.49589920043945, 162.80859375], [25.400390625, 162.80859375]]},
+ {"title": "002\n003\n004\n005\n006", "heading_level": null, "page_id": 0, "polygon": [[24.802734375, 83.91796875], [40.939453125, 83.91796875], [40.939453125, 157.39453125], [24.802734375, 157.39453125]]},
+ {"title": "007\n008", "heading_level": null, "page_id": 0, "polygon": [[23.90625, 112.53515625], [41.8359375, 112.53515625], [41.8359375, 179.82421875], [23.90625, 179.82421875]]},
+ {"title": "009\n010\n011\n012\n013\n014\n015", "heading_level": null, "page_id": 0, "polygon": [[22.7109375, 170.9296875], [41.8359375, 170.9296875], [41.8359375, 262.1953125], [22.7109375, 262.1953125]]},
+ {"title": "016\n017\n018\n019\n020\n021", "heading_level": null, "page_id": 0, "polygon": [[23.30859375, 248.2734375], [44.82421875, 248.2734375], [44.82421875, 339.5390625], [23.30859375, 339.5390625]]},
+ {"title": "022\n023\n024\n025\n026\n027", "heading_level": null, "page_id": 0, "polygon": [[20.619140625, 327.55078125], [40.49589920043945, 327.55078125], [40.49589920043945, 408.76171875], [20.619140625, 408.76171875]]},
+ {"title": "028\n029\n030\n031\n032", "heading_level": null, "page_id": 0, "polygon": [[22.7109375, 396.38671875], [41.8359375, 396.38671875], [41.8359375, 463.67578125], [22.7109375, 463.67578125]]},
+ {"title": "033\n034\n035", "heading_level": null, "page_id": 0, "polygon": [[22.412109375, 432.73828125], [40.49589920043945, 432.73828125], [40.49589920043945, 501.57421875], [22.412109375, 501.57421875]]},
+ {"title": "036\n037\n038\n039", "heading_level": null, "page_id": 0, "polygon": [[23.009765625, 493.06640625], [40.49589920043945, 493.06640625], [40.49589920043945, 552.62109375], [23.009765625, 552.62109375]]},
+ {"title": "040\n041", "heading_level": null, "page_id": 0, "polygon": [[23.607421875, 546.8203125], [40.49589920043945, 546.8203125], [40.49589920043945, 591.6796875], [23.607421875, 591.6796875]]},
+ {"title": "042\n043\n044\n045", "heading_level": null, "page_id": 0, "polygon": [[24.802734375, 571.2864379882812], [40.49589920043945, 571.2864379882812], [40.49589920043945, 617.1140441894531], [24.802734375, 617.1140441894531]]},
+ {"title": "046\n047\n048\n049", "heading_level": null, "page_id": 0, "polygon": [[24.205078125, 619.1064453125], [40.49589920043945, 619.1064453125], [40.49589920043945, 674.05078125], [24.205078125, 674.05078125]]},
+ {"title": "Krause Synchronization Transformers", "heading_level": null, "page_id": 0, "polygon": [[180.52999877929688, 89.9119873046875], [418.359375, 89.9119873046875], [418.359375, 106.34765625], [180.52999877929688, 106.34765625]]},
+ {"title": "Anonymous Authors1", "heading_level": null, "page_id": 0, "polygon": [[246.989990234375, 141.74615478515625], [338.2734375, 141.74615478515625], [338.2734375, 152.96612548828125], [246.989990234375, 152.96612548828125]]},
+ {"title": "Abstract", "heading_level": null, "page_id": 0, "polygon": [[150.19796752929688, 175.18359375], [196.03125, 175.18359375], [196.03125, 188.015380859375], [150.19796752929688, 188.015380859375]]},
+ {"title": "1. Introduction", "heading_level": null, "page_id": 0, "polygon": [[55.283203125, 593.7791595458984], [132.27606201171875, 593.7791595458984], [132.27606201171875, 605.7343597412109], [55.283203125, 605.7343597412109]]},
+ {"title": "2. Related Work", "heading_level": null, "page_id": 1, "polygon": [[53.7890625, 645.43359375], [138.65625, 645.43359375], [138.65625, 658.3053588867188], [53.7890625, 658.3053588867188]]},
+ {"title": "3. Preliminaries", "heading_level": null, "page_id": 2, "polygon": [[53.19140625, 288.0], [136.5, 288.0], [136.5, 297.0], [53.19140625, 297.0]]},
+ {"title": "3.1. Self-attention Mechanism", "heading_level": null, "page_id": 2, "polygon": [[52.59375, 308.25], [183.0, 308.25], [183.0, 317.25], [52.59375, 317.25]]},
+ {"title": "3.2. Krause Consensus Model", "heading_level": null, "page_id": 2, "polygon": [[51.99609375, 676.37109375], [181.5, 676.37109375], [181.5, 685.5], [51.99609375, 685.5]]},
+ {"title": "4. Krause Attention", "heading_level": null, "page_id": 2, "polygon": [[306.0, 441.24609375], [408.0, 441.24609375], [408.0, 451.5], [306.0, 451.5]]},
+ {"title": "4.1. Distance-Aware Query-Key Interactions", "heading_level": null, "page_id": 2, "polygon": [[304.20703125, 534.75], [496.5, 534.75], [496.5, 545.66015625], [304.20703125, 545.66015625]]},
+ {"title": "4.2. Local and Selective Interactions", "heading_level": null, "page_id": 3, "polygon": [[52.59375, 651.62109375], [208.5, 651.62109375], [208.5, 660.75], [52.59375, 660.75]]},
+ {"title": "Algorithm 1 Forward pass of a Krause Attention layer", "heading_level": null, "page_id": 4, "polygon": [[54.0, 456.71484375], [275.51953125, 456.71484375], [275.51953125, 466.5], [54.0, 466.5]]},
+ {"title": "4.3. Connection to Transformer Clustering Dynamics", "heading_level": null, "page_id": 4, "polygon": [[306.75, 69.0], [534.3046875, 69.0], [534.3046875, 78.0], [306.75, 78.0]]},
+ {"title": "Bounded-confidence Dynamics in Krause Attention.", "heading_level": null, "page_id": 4, "polygon": [[302.4140625, 549.0], [544.5, 549.0], [544.5, 558.03515625], [302.4140625, 558.03515625]]},
+ {"title": "5. Experiments", "heading_level": null, "page_id": 5, "polygon": [[52.892578125, 510.0], [132.75, 510.0], [132.75, 520.91015625], [52.892578125, 520.91015625]]},
+ {"title": "5.1. Experimental Setup", "heading_level": null, "page_id": 5, "polygon": [[52.892578125, 529.5], [158.25, 529.5], [158.25, 539.47265625], [52.892578125, 539.47265625]]},
+ {"title": "5.2. Vision Transformers for Image Recognition", "heading_level": null, "page_id": 5, "polygon": [[306.0, 219.65625], [510.0, 219.65625], [510.0, 229.5], [306.0, 229.5]]},
+ {"title": "5.3. Autoregressive Models for Image Generation", "heading_level": null, "page_id": 6, "polygon": [[51.3984375, 628.9865417480469], [263.5885925292969, 628.9865417480469], [263.5885925292969, 638.9491424560547], [51.3984375, 638.9491424560547]]},
+ {"title": "5.4. LLMs for Language Understanding and Reasoning", "heading_level": null, "page_id": 7, "polygon": [[52.59375, 374.4825134277344], [289.4446716308594, 374.4825134277344], [289.4446716308594, 384.4450988769531], [52.59375, 384.4450988769531]]},
+ {"title": "6. Conclusions", "heading_level": null, "page_id": 7, "polygon": [[307.1953125, 495.2291564941406], [381.3046875, 495.2291564941406], [381.3046875, 507.1843566894531], [307.1953125, 507.1843566894531]]},
+ {"title": "References", "heading_level": null, "page_id": 8, "polygon": [[52.59375, 345.7341613769531], [110.9838638305664, 345.7341613769531], [110.9838638305664, 357.6893615722656], [52.59375, 357.6893615722656]]},
+ {"title": "Krause Synchronization Transformers", "heading_level": null, "page_id": 12, "polygon": [[52.59375, 67.84710693359375], [253.40625, 67.84710693359375], [253.40625, 79.80230712890625], [52.59375, 79.80230712890625]]},
+ {"title": "A. Additional Discussion and Limitations", "heading_level": null, "page_id": 13, "polygon": [[52.59375, 67.5], [266.25, 67.5], [266.25, 77.25], [52.59375, 77.25]]},
+ {"title": "B. Extended Related Work", "heading_level": null, "page_id": 13, "polygon": [[53.7890625, 295.5], [192.75, 295.5], [192.75, 305.25], [53.7890625, 305.25]]},
+ {"title": "C. Multi-Cluster Behavior of Krause Attention", "heading_level": null, "page_id": 13, "polygon": [[54.0, 552.0], [294.75, 552.0], [294.75, 561.75], [54.0, 561.75]]},
+ {"title": "C.1. Multi-Cluster Formation under Clustering Emergence Dynamics", "heading_level": null, "page_id": 13, "polygon": [[54.0, 645.43359375], [352.5, 645.43359375], [352.5, 654.75], [54.0, 654.75]]},
+ {"title": "C.2. Multi-Cluster Formation under Mean-Field Dynamics", "heading_level": null, "page_id": 15, "polygon": [[51.99609375, 69.0], [306.0, 69.0], [306.0, 78.890625], [51.99609375, 78.890625]]},
+ {"title": "D. Extended Experiment Results", "heading_level": null, "page_id": 16, "polygon": [[53.7890625, 583.5], [222.75, 583.5], [222.75, 594.0], [53.7890625, 594.0]]},
+ {"title": "D.1. Implementation Details", "heading_level": null, "page_id": 16, "polygon": [[53.19140625, 603.0], [174.75, 603.0], [174.75, 613.72265625], [53.19140625, 613.72265625]]},
+ {"title": "D.1.1. VISION TRANSFORMERS", "heading_level": null, "page_id": 16, "polygon": [[53.7890625, 620.68359375], [190.0546875, 620.68359375], [190.0546875, 630.75], [53.7890625, 630.75]]},
+ {"title": "D.1.2. AUTOREGRESSIVE TRANSFORMERS", "heading_level": null, "page_id": 16, "polygon": [[54.0, 676.37109375], [237.0, 676.37109375], [237.0, 685.5], [54.0, 685.5]]},
+ {"title": "D.1.3. LLMs finetuned with Lora", "heading_level": null, "page_id": 17, "polygon": [[54.0, 314.25], [219.75, 314.25], [219.75, 323.25], [54.0, 323.25]]},
+ {"title": "D.1.4. EVALUATION DATASETS AND PROMPTS FOR LLMS", "heading_level": null, "page_id": 17, "polygon": [[53.7890625, 404.12109375], [302.25, 404.12109375], [302.25, 412.62890625], [53.7890625, 412.62890625]]},
+ {"title": "D.2. Vision Transformers for Image Recognition", "heading_level": null, "page_id": 17, "polygon": [[54.0, 501.75], [261.0, 501.75], [261.0, 511.62890625], [54.0, 511.62890625]]},
+ {"title": "D.2.1. RESULTS ON FASHION-MNIST", "heading_level": null, "page_id": 17, "polygon": [[54.0, 518.58984375], [218.25, 518.58984375], [218.25, 528.75], [54.0, 528.75]]},
+ {"title": "D.2.2. QUALITATIVE RESULTS OF ATTENTION HEATMAPS", "heading_level": null, "page_id": 17, "polygon": [[54.0, 598.5], [302.25, 598.5], [302.25, 607.53515625], [54.0, 607.53515625]]},
+ {"title": "D.2.3. ABLATION STUDIES", "heading_level": null, "page_id": 18, "polygon": [[54.0, 359.25], [171.0, 359.25], [171.0, 368.25], [54.0, 368.25]]},
+ {"title": "D.2.4. KRAUSE SWINTRANSFORMERS", "heading_level": null, "page_id": 18, "polygon": [[53.19140625, 622.5], [218.25, 622.5], [218.25, 632.28515625], [53.19140625, 632.28515625]]},
+ {"title": "D.3. Autoregressive Models for Image Generation", "heading_level": null, "page_id": 20, "polygon": [[55.44000244140625, 447.43359375], [265.6010437011719, 447.43359375], [265.6010437011719, 458.39910888671875], [55.44000244140625, 458.39910888671875]]},
+ {"title": "D.3.1. QUALITATIVE EXAMPLES", "heading_level": null, "page_id": 20, "polygon": [[54.685546875, 465.99609375], [193.40028381347656, 465.99609375], [193.40028381347656, 477.1430969238281], [54.685546875, 477.1430969238281]]},
+ {"title": "D.3.2. ABLATION STUDIES", "heading_level": null, "page_id": 20, "polygon": [[53.19140625, 545.3904571533203], [170.33880615234375, 545.3904571533203], [170.33880615234375, 555.3530578613281], [53.19140625, 555.3530578613281]]},
+ {"title": "D.4. LLMs for Language Understanding and Reasoning", "heading_level": null, "page_id": 21, "polygon": [[52.59375, 496.759521484375], [292.4601745605469, 496.759521484375], [292.4601745605469, 506.72210693359375], [52.59375, 506.72210693359375]]},
+ {"title": "D.4.1. RESULTS ON KRAUSE-QWEN", "heading_level": null, "page_id": 21, "polygon": [[53.490234375, 515.49609375], [208.33421325683594, 515.49609375], [208.33421325683594, 525.465087890625], [53.490234375, 525.465087890625]]},
+ {"title": "D.4.2. EXTENDED RESULTS ON KRAUSE-LLAMA", "heading_level": null, "page_id": 22, "polygon": [[53.7890625, 592.83984375], [261.8395080566406, 592.83984375], [261.8395080566406, 603.1739959716797], [53.7890625, 603.1739959716797]]},
+ {"title": "D.4.3. INFERENCE SPEED COMPARISON", "heading_level": null, "page_id": 23, "polygon": [[53.7890625, 317.8828125], [224.92001342773438, 317.8828125], [224.92001342773438, 328.3390808105469], [53.7890625, 328.3390808105469]]},
+ {"title": "E. Theoretical Perspective on Attention Sink Phenomena", "heading_level": null, "page_id": 23, "polygon": [[53.19140625, 555.0051879882812], [344.64825439453125, 555.0051879882812], [344.64825439453125, 566.9603881835938], [53.19140625, 566.9603881835938]]},
+ {"title": "F. Relation to Competitive Learning and Lateral Interaction Dynamics", "heading_level": null, "page_id": 24, "polygon": [[52.59375, 221.942138671875], [415.96875, 221.942138671875], [415.96875, 233.8973388671875], [52.59375, 233.8973388671875]]},
+ {"title": "Krause Synchronization Transformers", "heading_level": null, "page_id": 25, "polygon": [[223.5234375, 46.40625], [372.13568115234375, 46.40625], [372.13568115234375, 56.1944580078125], [223.5234375, 56.1944580078125]]},
+ {"title": "", "heading_level": null, "page_id": 26, "polygon": [[28.08984375, 104.4140625], [47.21484375, 104.4140625], [47.21484375, 126.0703125], [28.08984375, 126.0703125]]},
+ {"title": "BoolQ Prompt Example:", "heading_level": null, "page_id": 26, "polygon": [[69.029296875, 122.52349853515625], [175.44583129882812, 122.52349853515625], [175.44583129882812, 132.64453125], [69.029296875, 132.64453125]]},
+ {"title": "NLI Prompt Example:", "heading_level": null, "page_id": 26, "polygon": [[69.328125, 286.55859375], [166.1484375, 286.55859375], [166.1484375, 298.16015625], [69.328125, 298.16015625]]},
+ {"title": "PIQA Prompt Example:", "heading_level": null, "page_id": 26, "polygon": [[69.029296875, 381.55255126953125], [173.2241668701172, 381.55255126953125], [173.2241668701172, 391.74609375], [69.029296875, 391.74609375]]},
+ {"title": "MMLU-Pro Prompt Example:", "heading_level": null, "page_id": 26, "polygon": [[69.626953125, 487.156494140625], [199.5951690673828, 487.156494140625], [199.5951690673828, 497.11907958984375], [69.626953125, 497.11907958984375]]},
+ {"title": "IFEval Prompt Example:", "heading_level": null, "page_id": 26, "polygon": [[69.91200256347656, 580.46484375], [177.5579071044922, 580.46484375], [177.5579071044922, 590.7681274414062], [69.91200256347656, 590.7681274414062]]},
+ {"title": "1485", "heading_level": null, "page_id": 27, "polygon": [[25.552000045776367, 66.515625], [45.47719955444336, 66.515625], [45.47719955444336, 83.53125], [25.552000045776367, 83.53125]]},
+ {"title": "1486\n1487", "heading_level": null, "page_id": 27, "polygon": [[25.552000045776367, 72.703125], [45.47719955444336, 72.703125], [45.47719955444336, 103.04205322265625], [25.552000045776367, 103.04205322265625]]},
+ {"title": "IFEval Answers:", "heading_level": null, "page_id": 27, "polygon": [[68.73046875, 82.24456787109375], [141.4833221435547, 82.24456787109375], [141.4833221435547, 92.2071533203125], [68.73046875, 92.2071533203125]]}
+ ],
+ "page_stats": [
+ {"page_id": 0, "text_extraction_method": "pdftext", "block_counts": [["Span", 286], ["Line", 133], ["SectionHeader", 16], ["Text", 9], ["PageHeader", 1], ["Footnote", 1], ["Picture", 1], ["Caption", 1], ["PageFooter", 1], ["PictureGroup", 1], ["Reference", 1]], "block_metadata": {"llm_request_count": 0, "llm_error_count": 0, "llm_tokens_used": 0, "previous_text": "", "previous_type": "", "previous_order": 0}},
+ {"page_id": 1, "text_extraction_method": "pdftext", "block_counts": [["Span", 525], ["Line", 156], ["Text", 11], ["PageHeader", 1], ["SectionHeader", 1], ["PageFooter", 1]], "block_metadata": {"llm_request_count": 0, "llm_error_count": 0, "llm_tokens_used": 0, "previous_text": "", "previous_type": "", "previous_order": 0}},
+ {"page_id": 2, "text_extraction_method": "surya", "block_counts": [["Line", 216], ["Span", 138], ["Text", 62], ["SectionHeader", 5], ["Equation", 5], ["Reference", 2], ["PageHeader", 1], ["PageFooter", 1]], "block_metadata": {"llm_request_count": 0, "llm_error_count": 0, "llm_tokens_used": 0, "previous_text": "", "previous_type": "", "previous_order": 0}},
+ {"page_id": 3, "text_extraction_method": "surya", "block_counts": [["Line", 147], ["Span", 112], ["Text", 20], ["Reference", 5], ["Equation", 3], ["PageHeader", 2], ["Figure", 1], ["Caption", 1], ["SectionHeader", 1], ["PageFooter", 1], ["FigureGroup", 1]], "block_metadata": {"llm_request_count": 0, "llm_error_count": 0, "llm_tokens_used": 0, "previous_text": "", "previous_type": "", "previous_order": 0}},
+ {"page_id": 4, "text_extraction_method": "surya", "block_counts": [["Span", 176], ["Line", 145], ["Text", 73], ["Equation", 8], ["Reference", 4], ["SectionHeader", 3], ["PageHeader", 1], ["Code", 1], ["PageFooter", 1]], "block_metadata": {"llm_request_count": 0, "llm_error_count": 0, "llm_tokens_used": 0, "previous_text": "", "previous_type": "", "previous_order": 0}},
+ {"page_id": 5, "text_extraction_method": "surya", "block_counts": [["Span", 171], ["Line", 145], ["Text", 28], ["TableCell", 28], ["SectionHeader", 3], ["Reference", 2], ["PageHeader", 1], ["Caption", 1], ["Table", 1], ["Footnote", 1], ["PageFooter", 1], ["TableGroup", 1]], "block_metadata": {"llm_request_count": 0, "llm_error_count": 0, "llm_tokens_used": 0, "previous_text": "", "previous_type": "", "previous_order": 0}},
+ {"page_id": 6, "text_extraction_method": "pdftext", "block_counts": [["Span", 444], ["Line", 122], ["TableCell", 96], ["Text", 8], ["Caption", 6], ["Reference", 6], ["Table", 4], ["TableGroup", 4], ["PageHeader", 2], ["Figure", 1], ["SectionHeader", 1], ["Picture", 1], ["PageFooter", 1], ["FigureGroup", 1], ["PictureGroup", 1]], "block_metadata": {"llm_request_count": 0, "llm_error_count": 0, "llm_tokens_used": 0, "previous_text": "", "previous_type": "", "previous_order": 0}},
+ {"page_id": 7, "text_extraction_method": "pdftext", "block_counts": [["Span", 343], ["Line", 142], ["Text", 12], ["PageHeader", 6], ["Figure", 3], ["Caption", 3], ["FigureGroup", 3], ["Reference", 3], ["SectionHeader", 2], ["PageFooter", 1]], "block_metadata": {"llm_request_count": 0, "llm_error_count": 0, "llm_tokens_used": 0, "previous_text": "", "previous_type": "", "previous_order": 0}},
+ {"page_id": 8, "text_extraction_method": "pdftext", "block_counts": [["Span", 441], ["Line", 141], ["PageHeader", 19], ["ListItem", 16], ["Reference", 16], ["Text", 6], ["ListGroup", 2], ["SectionHeader", 1], ["PageFooter", 1]], "block_metadata": {"llm_request_count": 0, "llm_error_count": 0, "llm_tokens_used": 0, "previous_text": "", "previous_type": "", "previous_order": 0}},
+ {"page_id": 9, "text_extraction_method": "pdftext", "block_counts": [["Span", 375], ["Line", 138], ["ListItem", 22], ["Reference", 22], ["Text", 10], ["ListGroup", 3], ["PageHeader", 1], ["PageFooter", 1]], "block_metadata": {"llm_request_count": 0, "llm_error_count": 0, "llm_tokens_used": 0, "previous_text": "", "previous_type": "", "previous_order": 0}},
+ {"page_id": 10, "text_extraction_method": "pdftext", "block_counts": [["Span", 389], ["Line", 138], ["ListItem", 22], ["Reference", 22], ["Text", 10], ["ListGroup", 2], ["PageHeader", 1], ["PageFooter", 1]], "block_metadata": {"llm_request_count": 0, "llm_error_count": 0, "llm_tokens_used": 0, "previous_text": "", "previous_type": "", "previous_order": 0}},
+ {"page_id": 11, "text_extraction_method": "pdftext", "block_counts": [["Span", 408], ["Line", 147], ["ListItem", 22], ["Reference", 22], ["Text", 3], ["ListGroup", 2], ["PageHeader", 1], ["PageFooter", 1]], "block_metadata": {"llm_request_count": 0, "llm_error_count": 0, "llm_tokens_used": 0, "previous_text": "", "previous_type": "", "previous_order": 0
+ }
2219
+ },
2220
+ {
2221
+ "page_id": 12,
2222
+ "text_extraction_method": "pdftext",
2223
+ "block_counts": [
2224
+ [
2225
+ "Span",
2226
+ 255
2227
+ ],
2228
+ [
2229
+ "TableCell",
2230
+ 100
2231
+ ],
2232
+ [
2233
+ "Line",
2234
+ 78
2235
+ ],
2236
+ [
2237
+ "ListItem",
2238
+ 26
2239
+ ],
2240
+ [
2241
+ "PageHeader",
2242
+ 1
2243
+ ],
2244
+ [
2245
+ "TableOfContents",
2246
+ 1
2247
+ ],
2248
+ [
2249
+ "SectionHeader",
2250
+ 1
2251
+ ],
2252
+ [
2253
+ "PageFooter",
2254
+ 1
2255
+ ],
2256
+ [
2257
+ "Text",
2258
+ 1
2259
+ ],
2260
+ [
2261
+ "ListGroup",
2262
+ 1
2263
+ ]
2264
+ ],
2265
+ "block_metadata": {
2266
+ "llm_request_count": 0,
2267
+ "llm_error_count": 0,
2268
+ "llm_tokens_used": 0,
2269
+ "previous_text": "",
2270
+ "previous_type": "",
2271
+ "previous_order": 0
2272
+ }
2273
+ },
2274
+ {
2275
+ "page_id": 13,
2276
+ "text_extraction_method": "surya",
2277
+ "block_counts": [
2278
+ [
2279
+ "Line",
2280
+ 161
2281
+ ],
2282
+ [
2283
+ "Span",
2284
+ 79
2285
+ ],
2286
+ [
2287
+ "Text",
2288
+ 53
2289
+ ],
2290
+ [
2291
+ "SectionHeader",
2292
+ 4
2293
+ ],
2294
+ [
2295
+ "Reference",
2296
+ 4
2297
+ ],
2298
+ [
2299
+ "PageHeader",
2300
+ 1
2301
+ ],
2302
+ [
2303
+ "Equation",
2304
+ 1
2305
+ ],
2306
+ [
2307
+ "PageFooter",
2308
+ 1
2309
+ ]
2310
+ ],
2311
+ "block_metadata": {
2312
+ "llm_request_count": 0,
2313
+ "llm_error_count": 0,
2314
+ "llm_tokens_used": 0,
2315
+ "previous_text": "",
2316
+ "previous_type": "",
2317
+ "previous_order": 0
2318
+ }
2319
+ },
2320
+ {
2321
+ "page_id": 14,
2322
+ "text_extraction_method": "surya",
2323
+ "block_counts": [
2324
+ [
2325
+ "Line",
2326
+ 168
2327
+ ],
2328
+ [
2329
+ "Span",
2330
+ 159
2331
+ ],
2332
+ [
2333
+ "Text",
2334
+ 55
2335
+ ],
2336
+ [
2337
+ "Equation",
2338
+ 6
2339
+ ],
2340
+ [
2341
+ "PageHeader",
2342
+ 1
2343
+ ],
2344
+ [
2345
+ "PageFooter",
2346
+ 1
2347
+ ],
2348
+ [
2349
+ "Reference",
2350
+ 1
2351
+ ]
2352
+ ],
2353
+ "block_metadata": {
2354
+ "llm_request_count": 0,
2355
+ "llm_error_count": 0,
2356
+ "llm_tokens_used": 0,
2357
+ "previous_text": "",
2358
+ "previous_type": "",
2359
+ "previous_order": 0
2360
+ }
2361
+ },
2362
+ {
2363
+ "page_id": 15,
2364
+ "text_extraction_method": "surya",
2365
+ "block_counts": [
2366
+ [
2367
+ "Line",
2368
+ 161
2369
+ ],
2370
+ [
2371
+ "Span",
2372
+ 104
2373
+ ],
2374
+ [
2375
+ "Text",
2376
+ 51
2377
+ ],
2378
+ [
2379
+ "Equation",
2380
+ 9
2381
+ ],
2382
+ [
2383
+ "PageHeader",
2384
+ 1
2385
+ ],
2386
+ [
2387
+ "SectionHeader",
2388
+ 1
2389
+ ],
2390
+ [
2391
+ "PageFooter",
2392
+ 1
2393
+ ],
2394
+ [
2395
+ "Reference",
2396
+ 1
2397
+ ]
2398
+ ],
2399
+ "block_metadata": {
2400
+ "llm_request_count": 0,
2401
+ "llm_error_count": 0,
2402
+ "llm_tokens_used": 0,
2403
+ "previous_text": "",
2404
+ "previous_type": "",
2405
+ "previous_order": 0
2406
+ }
2407
+ },
2408
+ {
2409
+ "page_id": 16,
2410
+ "text_extraction_method": "surya",
2411
+ "block_counts": [
2412
+ [
2413
+ "Line",
2414
+ 155
2415
+ ],
2416
+ [
2417
+ "Span",
2418
+ 113
2419
+ ],
2420
+ [
2421
+ "Text",
2422
+ 44
2423
+ ],
2424
+ [
2425
+ "Equation",
2426
+ 6
2427
+ ],
2428
+ [
2429
+ "SectionHeader",
2430
+ 4
2431
+ ],
2432
+ [
2433
+ "Reference",
2434
+ 4
2435
+ ],
2436
+ [
2437
+ "PageHeader",
2438
+ 1
2439
+ ],
2440
+ [
2441
+ "PageFooter",
2442
+ 1
2443
+ ]
2444
+ ],
2445
+ "block_metadata": {
2446
+ "llm_request_count": 0,
2447
+ "llm_error_count": 0,
2448
+ "llm_tokens_used": 0,
2449
+ "previous_text": "",
2450
+ "previous_type": "",
2451
+ "previous_order": 0
2452
+ }
2453
+ },
2454
+ {
2455
+ "page_id": 17,
2456
+ "text_extraction_method": "surya",
2457
+ "block_counts": [
2458
+ [
2459
+ "Line",
2460
+ 215
2461
+ ],
2462
+ [
2463
+ "Span",
2464
+ 146
2465
+ ],
2466
+ [
2467
+ "TableCell",
2468
+ 72
2469
+ ],
2470
+ [
2471
+ "Text",
2472
+ 50
2473
+ ],
2474
+ [
2475
+ "Reference",
2476
+ 6
2477
+ ],
2478
+ [
2479
+ "SectionHeader",
2480
+ 5
2481
+ ],
2482
+ [
2483
+ "Caption",
2484
+ 3
2485
+ ],
2486
+ [
2487
+ "PageHeader",
2488
+ 1
2489
+ ],
2490
+ [
2491
+ "Table",
2492
+ 1
2493
+ ],
2494
+ [
2495
+ "PageFooter",
2496
+ 1
2497
+ ],
2498
+ [
2499
+ "TableGroup",
2500
+ 1
2501
+ ]
2502
+ ],
2503
+ "block_metadata": {
2504
+ "llm_request_count": 0,
2505
+ "llm_error_count": 0,
2506
+ "llm_tokens_used": 0,
2507
+ "previous_text": "",
2508
+ "previous_type": "",
2509
+ "previous_order": 0
2510
+ }
2511
+ },
2512
+ {
2513
+ "page_id": 18,
2514
+ "text_extraction_method": "surya",
2515
+ "block_counts": [
2516
+ [
2517
+ "Line",
2518
+ 114
2519
+ ],
2520
+ [
2521
+ "TableCell",
2522
+ 110
2523
+ ],
2524
+ [
2525
+ "Span",
2526
+ 63
2527
+ ],
2528
+ [
2529
+ "Text",
2530
+ 26
2531
+ ],
2532
+ [
2533
+ "Reference",
2534
+ 3
2535
+ ],
2536
+ [
2537
+ "PageHeader",
2538
+ 2
2539
+ ],
2540
+ [
2541
+ "Caption",
2542
+ 2
2543
+ ],
2544
+ [
2545
+ "Table",
2546
+ 2
2547
+ ],
2548
+ [
2549
+ "SectionHeader",
2550
+ 2
2551
+ ],
2552
+ [
2553
+ "PageFooter",
2554
+ 1
2555
+ ]
2556
+ ],
2557
+ "block_metadata": {
2558
+ "llm_request_count": 0,
2559
+ "llm_error_count": 0,
2560
+ "llm_tokens_used": 0,
2561
+ "previous_text": "",
2562
+ "previous_type": "",
2563
+ "previous_order": 0
2564
+ }
2565
+ },
2566
+ {
2567
+ "page_id": 19,
2568
+ "text_extraction_method": "pdftext",
2569
+ "block_counts": [
2570
+ [
2571
+ "Span",
2572
+ 280
2573
+ ],
2574
+ [
2575
+ "Line",
2576
+ 93
2577
+ ],
2578
+ [
2579
+ "TableCell",
2580
+ 74
2581
+ ],
2582
+ [
2583
+ "Text",
2584
+ 12
2585
+ ],
2586
+ [
2587
+ "Caption",
2588
+ 2
2589
+ ],
2590
+ [
2591
+ "Table",
2592
+ 2
2593
+ ],
2594
+ [
2595
+ "Reference",
2596
+ 2
2597
+ ],
2598
+ [
2599
+ "PageHeader",
2600
+ 1
2601
+ ],
2602
+ [
2603
+ "PageFooter",
2604
+ 1
2605
+ ],
2606
+ [
2607
+ "TableGroup",
2608
+ 1
2609
+ ]
2610
+ ],
2611
+ "block_metadata": {
2612
+ "llm_request_count": 0,
2613
+ "llm_error_count": 0,
2614
+ "llm_tokens_used": 0,
2615
+ "previous_text": "",
2616
+ "previous_type": "",
2617
+ "previous_order": 0
2618
+ }
2619
+ },
2620
+ {
2621
+ "page_id": 20,
2622
+ "text_extraction_method": "pdftext",
2623
+ "block_counts": [
2624
+ [
2625
+ "Span",
2626
+ 276
2627
+ ],
2628
+ [
2629
+ "Line",
2630
+ 99
2631
+ ],
2632
+ [
2633
+ "TableCell",
2634
+ 45
2635
+ ],
2636
+ [
2637
+ "Text",
2638
+ 5
2639
+ ],
2640
+ [
2641
+ "Reference",
2642
+ 5
2643
+ ],
2644
+ [
2645
+ "SectionHeader",
2646
+ 3
2647
+ ],
2648
+ [
2649
+ "Caption",
2650
+ 2
2651
+ ],
2652
+ [
2653
+ "PageHeader",
2654
+ 1
2655
+ ],
2656
+ [
2657
+ "Figure",
2658
+ 1
2659
+ ],
2660
+ [
2661
+ "Table",
2662
+ 1
2663
+ ],
2664
+ [
2665
+ "PageFooter",
2666
+ 1
2667
+ ],
2668
+ [
2669
+ "FigureGroup",
2670
+ 1
2671
+ ],
2672
+ [
2673
+ "TableGroup",
2674
+ 1
2675
+ ]
2676
+ ],
2677
+ "block_metadata": {
2678
+ "llm_request_count": 0,
2679
+ "llm_error_count": 0,
2680
+ "llm_tokens_used": 0,
2681
+ "previous_text": "",
2682
+ "previous_type": "",
2683
+ "previous_order": 0
2684
+ }
2685
+ },
2686
+ {
2687
+ "page_id": 21,
2688
+ "text_extraction_method": "pdftext",
2689
+ "block_counts": [
2690
+ [
2691
+ "Span",
2692
+ 226
2693
+ ],
2694
+ [
2695
+ "Line",
2696
+ 83
2697
+ ],
2698
+ [
2699
+ "TableCell",
2700
+ 20
2701
+ ],
2702
+ [
2703
+ "Text",
2704
+ 4
2705
+ ],
2706
+ [
2707
+ "Reference",
2708
+ 4
2709
+ ],
2710
+ [
2711
+ "Caption",
2712
+ 2
2713
+ ],
2714
+ [
2715
+ "SectionHeader",
2716
+ 2
2717
+ ],
2718
+ [
2719
+ "PageHeader",
2720
+ 1
2721
+ ],
2722
+ [
2723
+ "Figure",
2724
+ 1
2725
+ ],
2726
+ [
2727
+ "Table",
2728
+ 1
2729
+ ],
2730
+ [
2731
+ "PageFooter",
2732
+ 1
2733
+ ],
2734
+ [
2735
+ "FigureGroup",
2736
+ 1
2737
+ ],
2738
+ [
2739
+ "TableGroup",
2740
+ 1
2741
+ ]
2742
+ ],
2743
+ "block_metadata": {
2744
+ "llm_request_count": 0,
2745
+ "llm_error_count": 0,
2746
+ "llm_tokens_used": 0,
2747
+ "previous_text": "",
2748
+ "previous_type": "",
2749
+ "previous_order": 0
2750
+ }
2751
+ },
2752
+ {
2753
+ "page_id": 22,
2754
+ "text_extraction_method": "pdftext",
2755
+ "block_counts": [
2756
+ [
2757
+ "Span",
2758
+ 302
2759
+ ],
2760
+ [
2761
+ "Line",
2762
+ 98
2763
+ ],
2764
+ [
2765
+ "TableCell",
2766
+ 27
2767
+ ],
2768
+ [
2769
+ "Text",
2770
+ 5
2771
+ ],
2772
+ [
2773
+ "Caption",
2774
+ 4
2775
+ ],
2776
+ [
2777
+ "Reference",
2778
+ 4
2779
+ ],
2780
+ [
2781
+ "Figure",
2782
+ 2
2783
+ ],
2784
+ [
2785
+ "Table",
2786
+ 2
2787
+ ],
2788
+ [
2789
+ "FigureGroup",
2790
+ 2
2791
+ ],
2792
+ [
2793
+ "PageHeader",
2794
+ 1
2795
+ ],
2796
+ [
2797
+ "SectionHeader",
2798
+ 1
2799
+ ],
2800
+ [
2801
+ "PageFooter",
2802
+ 1
2803
+ ],
2804
+ [
2805
+ "TableGroup",
2806
+ 1
2807
+ ]
2808
+ ],
2809
+ "block_metadata": {
2810
+ "llm_request_count": 0,
2811
+ "llm_error_count": 0,
2812
+ "llm_tokens_used": 0,
2813
+ "previous_text": "",
2814
+ "previous_type": "",
2815
+ "previous_order": 0
2816
+ }
2817
+ },
2818
+ {
2819
+ "page_id": 23,
2820
+ "text_extraction_method": "pdftext",
2821
+ "block_counts": [
2822
+ [
2823
+ "Span",
2824
+ 497
2825
+ ],
2826
+ [
2827
+ "Line",
2828
+ 101
2829
+ ],
2830
+ [
2831
+ "TableCell",
2832
+ 96
2833
+ ],
2834
+ [
2835
+ "Text",
2836
+ 7
2837
+ ],
2838
+ [
2839
+ "Reference",
2840
+ 5
2841
+ ],
2842
+ [
2843
+ "Caption",
2844
+ 3
2845
+ ],
2846
+ [
2847
+ "Table",
2848
+ 3
2849
+ ],
2850
+ [
2851
+ "SectionHeader",
2852
+ 2
2853
+ ],
2854
+ [
2855
+ "TableGroup",
2856
+ 2
2857
+ ],
2858
+ [
2859
+ "PageHeader",
2860
+ 1
2861
+ ],
2862
+ [
2863
+ "PageFooter",
2864
+ 1
2865
+ ]
2866
+ ],
2867
+ "block_metadata": {
2868
+ "llm_request_count": 0,
2869
+ "llm_error_count": 0,
2870
+ "llm_tokens_used": 0,
2871
+ "previous_text": "",
2872
+ "previous_type": "",
2873
+ "previous_order": 0
2874
+ }
2875
+ },
2876
+ {
2877
+ "page_id": 24,
2878
+ "text_extraction_method": "pdftext",
2879
+ "block_counts": [
2880
+ [
2881
+ "Span",
2882
+ 311
2883
+ ],
2884
+ [
2885
+ "Line",
2886
+ 107
2887
+ ],
2888
+ [
2889
+ "Text",
2890
+ 10
2891
+ ],
2892
+ [
2893
+ "Equation",
2894
+ 2
2895
+ ],
2896
+ [
2897
+ "PageHeader",
2898
+ 1
2899
+ ],
2900
+ [
2901
+ "SectionHeader",
2902
+ 1
2903
+ ],
2904
+ [
2905
+ "PageFooter",
2906
+ 1
2907
+ ],
2908
+ [
2909
+ "Reference",
2910
+ 1
2911
+ ]
2912
+ ],
2913
+ "block_metadata": {
2914
+ "llm_request_count": 0,
2915
+ "llm_error_count": 0,
2916
+ "llm_tokens_used": 0,
2917
+ "previous_text": "",
2918
+ "previous_type": "",
2919
+ "previous_order": 0
2920
+ }
2921
+ },
2922
+ {
2923
+ "page_id": 25,
2924
+ "text_extraction_method": "pdftext",
2925
+ "block_counts": [
2926
+ [
2927
+ "Span",
2928
+ 136
2929
+ ],
2930
+ [
2931
+ "Line",
2932
+ 66
2933
+ ],
2934
+ [
2935
+ "Text",
2936
+ 3
2937
+ ],
2938
+ [
2939
+ "SectionHeader",
2940
+ 1
2941
+ ],
2942
+ [
2943
+ "PageFooter",
2944
+ 1
2945
+ ]
2946
+ ],
2947
+ "block_metadata": {
2948
+ "llm_request_count": 0,
2949
+ "llm_error_count": 0,
2950
+ "llm_tokens_used": 0,
2951
+ "previous_text": "",
2952
+ "previous_type": "",
2953
+ "previous_order": 0
2954
+ }
2955
+ },
2956
+ {
2957
+ "page_id": 26,
2958
+ "text_extraction_method": "pdftext",
2959
+ "block_counts": [
2960
+ [
2961
+ "Span",
2962
+ 227
2963
+ ],
2964
+ [
2965
+ "Line",
2966
+ 95
2967
+ ],
2968
+ [
2969
+ "Text",
2970
+ 45
2971
+ ],
2972
+ [
2973
+ "SectionHeader",
2974
+ 6
2975
+ ],
2976
+ [
2977
+ "PageHeader",
2978
+ 1
2979
+ ],
2980
+ [
2981
+ "PageFooter",
2982
+ 1
2983
+ ],
2984
+ [
2985
+ "Reference",
2986
+ 1
2987
+ ]
2988
+ ],
2989
+ "block_metadata": {
2990
+ "llm_request_count": 0,
2991
+ "llm_error_count": 0,
2992
+ "llm_tokens_used": 0,
2993
+ "previous_text": "",
2994
+ "previous_type": "",
2995
+ "previous_order": 0
2996
+ }
2997
+ },
2998
+ {
2999
+ "page_id": 27,
3000
+ "text_extraction_method": "pdftext",
3001
+ "block_counts": [
3002
+ [
3003
+ "Span",
3004
+ 202
3005
+ ],
3006
+ [
3007
+ "Line",
3008
+ 99
3009
+ ],
3010
+ [
3011
+ "Text",
3012
+ 20
3013
+ ],
3014
+ [
3015
+ "SectionHeader",
3016
+ 3
3017
+ ],
3018
+ [
3019
+ "PageFooter",
3020
+ 2
3021
+ ],
3022
+ [
3023
+ "PageHeader",
3024
+ 1
3025
+ ],
3026
+ [
3027
+ "Reference",
3028
+ 1
3029
+ ]
3030
+ ],
3031
+ "block_metadata": {
3032
+ "llm_request_count": 0,
3033
+ "llm_error_count": 0,
3034
+ "llm_tokens_used": 0,
3035
+ "previous_text": "",
3036
+ "previous_type": "",
3037
+ "previous_order": 0
3038
+ }
3039
+ }
3040
+ ],
3041
+ "debug_data_path": "debug_data/4c97921d-90ed-40e8-a5e2-c99a0f2081e7"
3042
+ }
icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/model_text_v3.txt ADDED
@@ -0,0 +1,353 @@
+ [p. 1 | section: Abstract | type: Text]
+ Self-attention in Transformers relies on globally normalized softmax weights, causing all tokens to compete for influence at every layer. When composed across depth, this interaction pattern induces strong synchronization dynamics that favor convergence toward a dominant mode, a behavior associated with representation collapse and attention sink phenomena. We introduce Krause Attention, a principled attention mechanism inspired by bounded-confidence consensus dynamics. Krause Attention replaces similarity-based global aggregation with distance-based, localized, and selectively sparse interactions, promoting structured local synchronization instead of global mixing. We relate this behavior to recent theory modeling Transformer dynamics as interacting particle systems, and show how bounded-confidence interactions naturally moderate attention concentration and alleviate attention sinks. Restricting interactions to local neighborhoods also reduces runtime complexity from quadratic to linear in sequence length. Experiments across vision (ViT on CIFAR/ImageNet), autoregressive generation (MNIST/CIFAR-10), and large language models (Llama/Qwen) demonstrate consistent gains with substantially reduced computation, highlighting bounded-confidence dynamics as a scalable and effective inductive bias for attention. Project page is available at io/krause-sync-transformers/.
+
+ [p. 1 | section: 1. Introduction | type: Text]
+ Transformers (Vaswani et al., 2017) have become the de facto architecture across a wide range of domains, including vision, language, and generative modeling. Their success is
+
+ [p. 1 | section: 1. Introduction | type: PictureGroup]
+ Figure 1. Krause Attention, grounded in bounded-confidence interactions, promotes localized multi-cluster synchronization (top). In contrast, standard self-attention tends to induce globally coupled dynamics that concentrate attention onto a dominant mode, often manifesting as attention sinks (Xiao et al., 2024) (bottom).
+
+ [p. 1 | section: 1. Introduction | type: Text]
+ primarily attributed to the self-attention mechanism, which enables flexible, content-dependent interactions among tokens. Despite their immense success, it remains an open question to deeply understand why self-attention is so effective, and how its design can be systematically improved.
+
+ [p. 1 | section: 1. Introduction | type: Text]
+ Recent theoretical work characterizes self-attention dynamics as interacting particle systems, modeling tokens as particles that evolve through the layers (Geshkovski et al., 2023). Global normalization induces globally coupled dynamics that exhibit strong synchronization tendencies: in the mean-field limit, token representations contract toward a single dominant mode (Chen et al., 2025). This dominant mode can be connected to attention sink phenomena, where attention mass collapses onto a few tokens (Xiao et al., 2024). Together, these findings suggest that globally normalized attention can induce undesirable coordination effects when iterated across depth, motivating a fundamental question:
+
+ [p. 1 | section: 1. Introduction | type: Text]
+ Should globally normalized similarity govern token interactions by default?
+
+ [p. 1 | section: 1. Introduction | type: Text]
+ To tackle this question, we propose a different principle: instead of global similarity-based aggregation, we explicitly model token interactions through bounded-confidence synchronization dynamics. Our approach draws inspiration
+
+ [p. 2 | section: 1. Introduction | type: Text]
+ from the Krause consensus model (Krause et al., 2000; Hegselmann & Krause, 2002), where interacting agents only influence neighbors within a bounded confidence radius, leading to stable multi-cluster structures rather than global consensus. We introduce Krause Attention (Fig. 1) as a Transformer-compatible instantiation of this idea. Krause Attention replaces query–key dot-product similarity with query–key distance, which is mapped through a radial basis function (RBF) kernel to produce smooth, well-conditioned interaction weights. These distance-based affinities are further constrained by a local neighborhood and selective top-k sparsity, enforcing a bounded-confidence interaction pattern in which tokens primarily interact only with nearby representations, while distant tokens exert little or no influence.
+
+ [p. 2 | section: 1. Introduction | type: Text]
+ This design has direct implications for the resulting dynamics. Theoretically, we connect Krause Attention to recent analyses of Transformer clustering dynamics (Geshkovski et al., 2023; Chen et al., 2025). While standard self-attention induces globally coupled dynamics that favor convergence toward a single consensus, the bounded-confidence structure of Krause Attention supports stable multi-cluster formations. This naturally moderates attention concentration and mitigates attention sink behavior. Locality further reduces computational cost from O(N^2 d) to O(NWd), where W is the local window size and d the token dimension.
+
+ [p. 2 | section: 1. Introduction | type: Text]
+ Empirically, Krause Attention delivers consistent and substantial gains across vision, generation, and language modeling tasks. For image classification, Krause Vision Transformers (ViTs) consistently outperform standard ViTs (Dosovitskiy, 2021) on CIFAR-10/100 and ImageNet-1K, achieving an average accuracy improvement of +3.7% while reducing FLOPs by approximately 30% across model scales. In autoregressive image generation (Parmar et al., 2018), Krause-based models achieve lower negative log-likelihood than standard Transformers while enabling more than 2× faster inference. For LLMs (Yang et al., 2024a; Grattafiori et al., 2024), integrating Krause Attention as an auxiliary pathway consistently improves zero-shot evaluation performance over LoRA-finetuned baselines (Hu et al., 2022) on a broad suite of challenging language reasoning benchmarks, indicating improved robustness to attention concentration effects. Together, these results demonstrate that bounded-confidence dynamics provide a scalable, computationally efficient, and practically effective inductive bias for self-attention mechanisms across diverse modalities and model regimes.
+
+ [p. 2 | section: 2. Related Work | type: Text]
+ Transformer Architectures. Since their introduction, Transformers (Vaswani et al., 2017) have become the prevailing architectural paradigm across a wide range of machine learning domains. Their success stems from the scalable
+
+ [p. 2 | section: 2. Related Work | type: Text]
+ self-attention mechanisms and flexible token representations, which have enabled consistent performance improvements with increasing model and data size. In visual recognition, Vision Transformers (Dosovitskiy, 2021) and subsequent variants (Liu et al., 2021; Touvron et al., 2021a; Rao et al., 2021; Yuan et al., 2021; Han et al., 2021; Wang et al., 2021; Touvron et al., 2022; Heo et al., 2024) have demonstrated that Transformer-based models can match or surpass convolutional networks when trained under comparable large-scale settings. Beyond discriminative tasks, Transformer-based architectures also underpin a broad spectrum of generative modeling frameworks, including autoregressive models (Parmar et al., 2018; Chen et al., 2020; Yu et al., 2022; Cao et al., 2021; Touvron et al., 2021b) and diffusion models built on Transformer backbones (He et al., 2022; Chang et al., 2022; Peebles & Xie, 2023; Wang et al., 2025). In language modeling, decoder-only Transformers scaled to hundreds of billions of parameters form the foundation of modern LLMs (Achiam et al., 2023; Grattafiori et al., 2024; Yang et al., 2024a; Liu et al., 2025; Comanici et al., 2025), further reinforcing the central role of Transformers.
+
+ [p. 2 | section: 2. Related Work | type: Text]
+ A large body of work seeks to reduce the quadratic cost of standard self-attention. Sparse attention architectures (Child et al., 2019) restrict interactions to predefined patterns, while low-rank or kernel-based approaches such as Linformer (Wang et al., 2020), Performer (Choromanski et al., 2021), and other linear-attention variants (Katharopoulos et al., 2020) approximate attention through low-dimensional projections or random feature kernels. Hashing-based methods like Reformer (Kitaev et al., 2020) further achieve subquadratic complexity by clustering tokens. While these approaches improve scalability, they often weaken fine-grained, content-dependent interactions. Our work is orthogonal to this efficiency-oriented line of research. Instead of approximating softmax attention or imposing fixed sparsity patterns, we redesign the interaction rule itself by drawing on Krause bounded-confidence dynamics. Krause Attention introduces distance-based, content-adaptive sparsity that promotes localized multi-cluster structures. Although the use of local neighborhoods reduces the computational cost to O(NWd), our primary goal is to endow attention with an explicit and theoretically grounded synchronization prior, which is distinct from prior sub-quadratic approximations.
+
+ [p. 2 | section: 2. Related Work | type: Text]
+ Understanding Transformers. Substantial progress has been made toward interpreting Transformers through physical and mathematical lenses (Bruno et al., 2025b; Ren et al., 2025; Rigollet, 2025). Self-attention mechanisms have been connected to kernel methods (Tsai et al., 2019), continuous Hopfield networks (Ramsauer et al., 2021), and energy-based associative memories (Yang et al., 2022; Hoover et al., 2023; Hu et al., 2024). Complementary studies have examined relationships with convolution operators (Cordonnier et al., 2020) and non-convex optimization landscapes (Hu et al., 2024; Abbe et al., 2024).
+
+ [p. 3 | section: 2. Related Work | type: Text]
+ From a dynamical systems perspective, the evolution of token representations is often modeled as an interacting particle system (Geshkovski et al., 2023), with links to global synchronization phenomena such as Kuramoto models (Kuramoto, 2005; Acebrón et al., 2005). Recent analyses further establish quantitative bounds on convergence toward mean-field limits (Chen et al., 2025; Rigollet, 2025; Bruno et al., 2025a). Inspired by Geshkovski et al. (2023), we adopt Krause opinion dynamics (Krause et al., 2000; Hegselmann & Krause, 2002) as a principled mechanism for structuring attention. Incorporating bounded confidence into the attention kernel enforces localized, query-key distance-aware token interactions rather than global mixing. As a result, the induced dynamics align with theoretical analyses showing that restricting global coupling can instead support the emergence of multiple coherent clusters.
+
+ [p. 3 | section: 3.1. Self-attention Mechanism | type: Text]
+ Self-attention is the central component underlying the Transformer architecture. Given an input sequence X \in \mathbb{R}^{N \times d} , where N denotes the sequence length and d the embedding dimension, the input is linearly projected into queries, keys, and values through learnable weight matrices:
+
+ [p. 3 | section: 3.1. Self-attention Mechanism | type: Equation]
+ Q = XW_Q, \quad K = XW_K, \quad V = XW_V \tag{1}
+
+ [p. 3 | section: 3.1. Self-attention Mechanism | type: Equation]
+ \mathbf{z}_{i} = \sum_{j=1}^{N} \operatorname{Softmax} \left( \frac{\mathbf{q}_{i} \mathbf{k}_{j}^{T}}{\sqrt{d_{k}}} \right) \cdot \mathbf{v}_{j} \tag{2}
+
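+ As a minimal sketch of Eqs. (1)-(2), assuming a single head and using NumPy with random stand-ins for the learned projections:
+
+ import numpy as np
+
+ N, d, d_k = 16, 32, 32
+ rng = np.random.default_rng(0)
+ X = rng.standard_normal((N, d))
+ W_Q, W_K, W_V = (rng.standard_normal((d, d_k)) for _ in range(3))
+
+ Q, K, V = X @ W_Q, X @ W_K, X @ W_V            # Eq. (1): learned linear projections
+ logits = Q @ K.T / np.sqrt(d_k)                # scaled dot-product scores
+ A = np.exp(logits - logits.max(axis=-1, keepdims=True))
+ A /= A.sum(axis=-1, keepdims=True)             # row-wise softmax: globally normalized weights
+ Z = A @ V                                      # Eq. (2): z_i = sum_j a_ij v_j
+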
+ [p. 3 | section: 3.2. Krause Consensus Model | type: Text]
+ The Krause consensus model describes a non-linear, discrete-time dynamical system over a population of N
+
+ [p. 3 | section: 3.2. Krause Consensus Model | type: Text]
+ agents, where interaction strengths depend on the agents' current states. Let x(t) \in \mathbb{R}^N denote the vector of agent states at iteration t. The system evolves according to:
+
+ [p. 3 | section: 3.2. Krause Consensus Model | type: Equation]
+ x(t+1) = A(x(t),t) x(t) \tag{3}
+
+ [p. 3 | section: 3.2. Krause Consensus Model | type: Text]
+ where A\big(x(t),t\big)\in\mathbb{R}^{N\times N} is a row-stochastic influence matrix whose entries a_{i,j}\big(x(t),t\big) quantify the influence of agent j on agent i. A defining feature of the model is the bounded confidence mechanism, which restricts interactions to agents whose states lie within a confidence radius \epsilon . For each agent i, the active neighbor set is defined as
+
+ [p. 3 | section: 3.2. Krause Consensus Model | type: Equation]
+ I(i, x(t)) = \{ j \in \{1, \dots, N\} \mid |x_i(t) - x_j(t)| \le \epsilon \} \tag{4}
+
+ [p. 3 | section: 3.2. Krause Consensus Model | type: Text]
+ The influence weights are assigned uniformly within this active neighborhood, with a_{i,j}\big(x(t),t\big)=|I\big(i,x(t)\big)|^{-1} if j\in I\big(i,x(t)\big) and 0 otherwise.
+
+ [p. 3 | section: 3.2. Krause Consensus Model | type: Text]
+ This state-dependent nonlinearity drives the system toward a partitioned equilibrium. In the limit, agents self-organize into disjoint, invariant clusters, within which local consensus emerges while distinct clusters remain separated. A simulation of this behavior is sketched below.
+
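+ A small NumPy simulation of Eqs. (3)-(4); the confidence radius and iteration count are arbitrary illustrative choices:
+
+ import numpy as np
+
+ rng = np.random.default_rng(0)
+ x = rng.uniform(0.0, 1.0, size=50)    # initial opinions of N = 50 agents
+ eps = 0.1                             # confidence radius epsilon
+
+ for _ in range(30):
+     mask = np.abs(x[:, None] - x[None, :]) <= eps            # neighbor sets I(i, x(t)), Eq. (4)
+     x = (mask * x[None, :]).sum(axis=1) / mask.sum(axis=1)   # uniform-weight update, Eq. (3)
+
+ print(np.unique(np.round(x, 3)))      # a handful of separated cluster values
+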
+ [p. 3 | section: 3.2. Krause Consensus Model | type: Text]
+ Our Krause Attention mechanism instantiates the Krause consensus model within the Transformer architecture by treating tokens as agents and their value embeddings as agent states. Under this mapping, attention weights define state-dependent influence matrices that govern how token representations are updated, mirroring the bounded-confidence interactions of the classical Krause model.
+
+ [p. 3 | section: 4. Krause Attention | type: Text]
+ This section introduces Krause Attention (see Fig. 2), the core mechanism of Krause Synchronization Transformers. Secs. 4.1 and 4.2 detail the key components of the method, while Sec. 4.3 connects Krause Attention to clustering and synchronization analyses of Transformer models.
+
+ [p. 3 | section: 4.1. Distance-Aware Query-Key Interactions | type: Text]
+ To translate the Krause consensus model into an attention mechanism, a central requirement is a principled notion of similarity between Transformer tokens, which are interpreted as interacting agents. In our framework, token interactions are governed by a distance-based measure of opinion similarity. Instead of the dot-product similarity used in standard Transformers, we explicitly quantify dissimilarity using the Euclidean distance between query and key representations. Formally, given query vectors q_i and key vectors k_j , we define the pairwise query–key distance as
+
+ [p. 3 | section: 4.1. Distance-Aware Query-Key Interactions | type: Equation]
+ \Delta_{i,j} = \|q_i - k_j\| = \sqrt{\sum_{n=1}^{d_k} (q_{i,n} - k_{j,n})^2} \tag{5}
+
+ [p. 4 | section: 4.1. Distance-Aware Query-Key Interactions | type: FigureGroup]
+ Figure 2. Krause Attention computes RBF affinity scores, restricts updates to local neighborhoods, and applies top-k selective interactions.
+
+ [p. 4 | section: 4.1. Distance-Aware Query-Key Interactions | type: Text]
+ which serves as a measure of opinion dissimilarity between tokens i and j. To map distances into interaction strengths, we apply a radial basis function (RBF) kernel to the squared distances, yielding unnormalized affinity scores
+
+ [p. 4 | section: 4.1. Distance-Aware Query-Key Interactions | type: Equation]
+ s_{i,j} = \exp\left(-\frac{\Delta_{i,j}^2}{2\sigma^2}\right) \tag{6}
+
+ [p. 4 | section: 4.1. Distance-Aware Query-Key Interactions | type: Text]
+ where \sigma is a learnable scale parameter which controls the effective interaction range. This kernel assigns higher affinity to tokens with similar query–key representations, while naturally suppressing interactions between distant tokens. Importantly, the RBF distance admits a separable formulation with respect to queries and keys (see the sketch below). This distance-based structure induces localized, distance-aware interactions and naturally facilitates structured sparsity (see Sec. 4.2).
+
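+ As a concrete check of that separability (shapes and values illustrative), the identity \|q_i - k_j\|^2 = \|q_i\|^2 - 2 q_i^T k_j + \|k_j\|^2 lets the full distance matrix be formed from a single matrix product, with no (N, N, d) intermediate:
+
+ import numpy as np
+
+ rng = np.random.default_rng(0)
+ q, k = rng.standard_normal((16, 32)), rng.standard_normal((16, 32))
+ d2 = (q**2).sum(-1)[:, None] - 2.0 * q @ k.T + (k**2).sum(-1)[None, :]
+ ref = ((q[:, None, :] - k[None, :, :]) ** 2).sum(-1)   # direct pairwise computation
+ assert np.allclose(d2, ref)
+ s = np.exp(-d2 / (2.0 * 1.0**2))                       # RBF affinities, Eq. (6), sigma = 1
+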
+ [p. 4 | section: 4.1. Distance-Aware Query-Key Interactions | type: Text]
+ The final dense attention weights are obtained by normalizing these affinities across the sequence dimension:
+
+ [p. 4 | section: 4.1. Distance-Aware Query-Key Interactions | type: Equation]
+ a_{i,j} = \frac{s_{i,j}}{\sum_{n=1}^{N} s_{i,n}}, \tag{7}
+
+ [p. 4 | section: 4.1. Distance-Aware Query-Key Interactions | type: Text]
+ which ensures a valid probability distribution over tokens and induces global interactions across all tokens.
+
+ [p. 4 | section: 4.1. Distance-Aware Query-Key Interactions | type: Text]
+ Notably, unlike standard attention, we do not apply an additional softmax operation to the RBF scores. The exponential nonlinearity and temperature-like scaling introduced by softmax are already captured by the RBF kernel through its exponential form and variance parameter \sigma^2 . As a result, this formulation produces distance-aware, smoothly decaying attention weights that naturally align with the bounded-confidence interactions characteristic of Krause dynamics.
+
+ [p. 4 | section: 4.2. Local and Selective Interactions | type: Text]
+ The dense attention in Eq. (7) enables unrestricted global information exchange, allowing every token to interact with all others. While effective in standard Transformers, such fully coupled mixing induces strong global competition and
+
+ [p. 4 | section: 4.2. Local and Selective Interactions | type: Text]
+ repeated averaging, which can obscure local structure and amplify dominant tokens across layers. To address this, we introduce local and selective interaction constraints, which are central to bounded-confidence consensus models.
+
+ [p. 4 | section: 4.2. Local and Selective Interactions | type: Text]
+ Local Interactions. We introduce a local interaction mechanism that restricts attention to a pre-defined neighborhood \mathcal{N}_i for each token i. Instead of normalizing attention weights over the full sequence, normalization is performed only within this neighborhood:
+
+ [p. 4 | section: 4.2. Local and Selective Interactions | type: Equation]
+ \tilde{a}_{i,j} = \frac{s_{i,j}}{\sum_{\ell \in \mathcal{N}_i} s_{i,\ell}}, \qquad j \in \mathcal{N}_i, \tag{8}
+
+ [p. 4 | section: 4.2. Local and Selective Interactions | type: Text]
+ so that token i aggregates information exclusively from locally admissible neighbors. This explicit locality constraint enforces a hard interaction cutoff analogous to bounded confidence in Krause dynamics, suppressing global averaging while preserving meaningful local interactions.
+
+ [p. 4 | section: 4.2. Local and Selective Interactions | type: Text]
+ The neighborhood \mathcal{N}_i is defined according to the underlying task structure. In vision models, tokens are associated with spatial locations, and \mathcal{N}_i corresponds to a local spatial window centered at token i. In autoregressive settings, including image generation and language modeling, \mathcal{N}_i is instead specified as a causal window that limits attention to a fixed number of preceding tokens. Both constructions are sketched below.
+
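+ A minimal NumPy sketch of the two neighborhood types as boolean masks (window sizes arbitrary): a causal window over the W preceding tokens, and a square spatial window over a token grid:
+
+ import numpy as np
+
+ N, W = 10, 3
+ i = np.arange(N)
+ # Causal window: token i attends to itself and the W-1 preceding tokens.
+ causal = (i[None, :] <= i[:, None]) & (i[:, None] - i[None, :] < W)
+
+ H = 4                                    # tokens laid out on a 4x4 spatial grid
+ r, c = np.divmod(np.arange(H * H), H)    # row/column coordinates of each token
+ # Spatial window: neighbors within a 3x3 box centered at token i.
+ spatial = (np.abs(r[:, None] - r[None, :]) <= 1) & (np.abs(c[:, None] - c[None, :]) <= 1)
+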
+ [p. 4 | section: 4.2. Local and Selective Interactions | type: Text]
+ Selective Interactions. While local interaction enforces spatial or temporal locality, bounded-confidence dynamics further imply that influence should be both limited and competitive, with only a small subset of neighbors exerting meaningful impact. To operationalize this principle, we introduce a mechanism that sparsifies attention within each local neighborhood. Specifically, for each token i, we select the top-k neighbors in \mathcal{N}_i according to their similarity scores. Let \xi_i^k \subseteq \mathcal{N}_i, |\xi_i^k| = k denote the set of indices corresponding to the k largest values of s_{i,j} for j \in \mathcal{N}_i .
+
+ [p. 5 | section: 4.2. Local and Selective Interactions | type: Text]
+ Attention weights are then normalized over this reduced set:
+
+ [p. 5 | section: 4.2. Local and Selective Interactions | type: Equation]
+ \tilde{a}_{i,j}^* = \frac{s_{i,j}}{\sum_{\ell \in \xi_i^k} s_{i,\ell}}, \qquad j \in \xi_i^k, \tag{9}
+
+ [p. 5 | section: 4.2. Local and Selective Interactions | type: Text]
+ ensuring that token i interacts only with its most relevant local neighbors. This top-k formulation constitutes the final attention rule used in Krause Attention. The output representation is then computed as z_i = \sum_{j \in \xi_i^k} \tilde{a}_{i,j}^* \cdot v_j .
+
+ [p. 5 | section: 4.2. Local and Selective Interactions | type: Text]
+ This selective interaction mechanism induces a functionally sparse attention pattern, in which each token aggregates information from a small, adaptively chosen subset of neighbors. By combining local interaction with selective top-k selection, our Krause Attention departs from the dense, fully connected interaction graph of standard self-attention and instead recovers the characteristic local clustering with global coherence behavior of Krause consensus dynamics.
+
+ [p. 5 | section: 4.2. Local and Selective Interactions | type: Text]
+ Time Complexity and Training Algorithm. Krause Attention restricts interactions to a local neighborhood and further prunes them via top-k selective sparsity. As a result, each token attends only to W nearby candidates and retains k < W of them during selective mixing. This yields an overall time complexity \mathcal{O}(NWd) , which is strictly lower than the \mathcal{O}(N^2 d) cost of self-attention while preserving content-adaptive token interactions. Algorithm 1 summarizes the forward pass of a Krause Attention layer. The procedure follows three steps: (1) compute query-key distances and transform them using an RBF kernel, (2) apply locality masking followed by top-k selection within the local region, and (3) normalize affinities and aggregate values.
+
+ [p. 5 | section: Algorithm 1 Forward pass of a Krause Attention layer | type: Code]
+ 1: Input: token sequence x, local neighborhoods \{\mathcal{N}_i\}, sparsity level k, kernel scales \{\sigma_h\}
+ 2: Output: updated token sequence z
+ 3: Compute projections: (q, k, v) \leftarrow \text{Linear}(x)
+ 4: for each head h = 1, \ldots, H do
+ 5:    (1) Distance-based affinity
+ 6:    \Delta_{i,j} \leftarrow \|q_i - k_j\|^2   // Eq. (5)
+ 7:    s_{i,j} \leftarrow \exp\left(-\Delta_{i,j}/(2\sigma_h^2)\right)   // Eq. (6)
+ 8:    (2) Apply locality
+ 9:    s_{i,j} \leftarrow 0 \quad \text{if } j \notin \mathcal{N}_i   // Eq. (8)
+ 10:   (3) Selective top-k sparsity
+ 11:   \xi_i^k \leftarrow \operatorname{TopK}\left(s_{i,\mathcal{N}_i}, k\right)
+ 12:   s_{i,j} \leftarrow 0 \quad \text{if } j \notin \xi_i^k
+ 13:   (4) Normalization and aggregation
+ 14:   a_{i,j} \leftarrow s_{i,j} / \sum_{\ell \in \xi_i^k} s_{i,\ell}   // Eq. (9)
+ 15:   o_h[i] \leftarrow \sum_{j \in \xi_i^k} a_{i,j} v_j
+ 16: end for
+ 17: z \leftarrow \text{Linear}_{out}(\text{Concat}(o_1, \dots, o_H))
+ 18: return z
+
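+ Below is a runnable single-head PyTorch sketch of Algorithm 1, assuming a causal local window; the function name and the default window, top-k, and sigma values are illustrative rather than the released implementation:
+
+ import torch
+
+ def krause_attention(q, k, v, window=8, topk=4, sigma=1.0):
+     # q, k, v: (N, d) projected token representations.
+     N = q.shape[0]
+     # (1) Distance-based affinity: s_ij = exp(-||q_i - k_j||^2 / (2 sigma^2)), Eqs. (5)-(6).
+     s = torch.exp(-torch.cdist(q, k) ** 2 / (2 * sigma**2))
+     # (2) Locality: N_i is the causal window of the `window` preceding tokens, Eq. (8).
+     i = torch.arange(N)
+     local = (i[None, :] <= i[:, None]) & (i[:, None] - i[None, :] < window)
+     s = s.masked_fill(~local, 0.0)
+     # (3) Selective top-k sparsity within the local neighborhood, Eq. (9).
+     _, cols = torch.topk(s, min(topk, N), dim=-1)
+     keep = torch.zeros_like(s).scatter_(-1, cols, 1.0)
+     s = s * keep
+     # (4) Normalize over surviving neighbors and aggregate values.
+     a = s / s.sum(dim=-1, keepdim=True).clamp_min(1e-12)
+     return a @ v
+
+ z = krause_attention(torch.randn(16, 32), torch.randn(16, 32), torch.randn(16, 32))
+
+ In the multi-head case, per-head kernel scales \sigma_h and the projections of lines 3 and 17 wrap this routine.
+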
+ [p. 5 | section: 4.3. Connection to Transformer Clustering Dynamics | type: Text]
+ This subsection situates Krause Attention within recent theoretical analyses of Transformers' clustering behavior. Within this perspective, Krause Attention can be understood as a modification of standard self-attention flows, converting the fragile, architecture-dependent clustering behavior into a more explicit and controllable mechanism.
+
+ [p. 5 | section: 4.3. Connection to Transformer Clustering Dynamics | type: Text]
+ Background: Clustering in Self-attention. Recent work has established that self-attention layers can be interpreted as dynamical systems acting on token representations. In particular, Geshkovski et al. (2023) model the evolution of token states \{x_i(t)\}_{i=1}^N as a continuous-time flow
+
+ [p. 5 | section: 4.3. Connection to Transformer Clustering Dynamics | type: Equation]
+ \dot{x}_{i}(t) = \sum_{j=1}^{N} a_{i,j}(t) \, V x_{j}(t), \tag{10}
+
+ [p. 5 | section: 4.3. Connection to Transformer Clustering Dynamics | type: Text]
+ where the attention weights a_{i,j}(t) are typically defined through dot-product similarity,
+
+ [p. 5 | section: 4.3. Connection to Transformer Clustering Dynamics | type: Equation]
+ a_{i,j}(t) = \frac{\exp(\langle Qx_i(t), Kx_j(t)\rangle)}{\sum_{\ell=1}^{N} \exp(\langle Qx_i(t), Kx_\ell(t)\rangle)}. \tag{11}
+
+ [p. 5 | section: 4.3. Connection to Transformer Clustering Dynamics | type: Text]
+ Under this formulation, tokens behave as interacting particles whose trajectories are mediated by globally normalized similarity scores. While such systems may exhibit clustering behavior under specific spectral conditions on the value matrix V, such behavior is highly sensitive to initialization and architectural details (Geshkovski et al., 2023).
+
+ [p. 5 | section: 4.3. Connection to Transformer Clustering Dynamics | type: Text]
+ More generally, global normalization induces a strong tendency toward synchronization. In the mean-field limit N \to \infty , Chen et al. (2025) show that dot-product attention corresponds to a Wasserstein gradient flow that contracts exponentially toward a single consensus state. Consequently, the empirical token distribution \mu_t = \frac{1}{N} \sum_{i=1}^{N} \delta_{x_i(t)} tends toward a dominant mode, reflecting global synchronization and potential representational collapse (Xiao et al., 2024). From this dynamical perspective, attention sink behavior can be interpreted as a discrete manifestation of this global synchronization and representation contraction process.
+
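+ As a toy illustration of this contraction (a crude consensus-style Euler discretization of Eq. (10) with V as the identity, not the analysis itself), globally normalized weights drive 1-D token states toward a single mode:
+
+ import numpy as np
+
+ rng = np.random.default_rng(0)
+ x = rng.normal(size=40)                           # 1-D token states
+ for _ in range(200):
+     logits = -np.abs(x[:, None] - x[None, :])     # similarity surrogate
+     a = np.exp(logits)
+     a /= a.sum(axis=1, keepdims=True)             # globally normalized weights, cf. Eq. (11)
+     x += 0.1 * (a @ x - x)                        # move each token toward its weighted average
+ print(np.ptp(x))                                  # spread shrinks toward a single consensus mode
+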
+ [p. 5 | section: Bounded-confidence Dynamics in Krause Attention. | type: Text]
+ Krause Attention fundamentally modifies this dynamical system by replacing global dot-product coupling with distance-based, bounded-confidence interactions. Concretely, the attention weights take the form
+
+ [p. 5 | section: Bounded-confidence Dynamics in Krause Attention. | type: Equation]
+ a_{i,j}(t) = \frac{\exp\left(-\frac{\|Qx_i(t) - Kx_j(t)\|^2}{2\sigma^2}\right)}{\sum_{\ell \in \xi_i^k} \exp\left(-\frac{\|Qx_i(t) - Kx_\ell(t)\|^2}{2\sigma^2}\right)}, \qquad j \in \xi_i^k, \tag{12}
+
+ [p. 5 | section: Bounded-confidence Dynamics in Krause Attention. | type: Text]
+ where \xi_i^k denotes a selected sparse, local neighborhood associated with token i.
+
+ [p. 5 | section: Bounded-confidence Dynamics in Krause Attention. | type: Text]
+ This construction induces a bounded-confidence interaction structure: tokens influence each other only when they are
+
+ [p. 6 | section: Bounded-confidence Dynamics in Krause Attention. | type: Text]
+ sufficiently close in representation space and belong to a shared local support. As a result, the time evolution of token states no longer follows a globally coupled flow, but instead decomposes into locally interacting subsystems.
+
+ [p. 6 | section: Bounded-confidence Dynamics in Krause Attention. | type: Text]
+ Asymptotic Multi-cluster Behavior. This structural modification influences the long-term behavior of the dynamics. By attenuating long-range interactions, Krause Attention reduces the global synchronization effects characteristic of standard self-attention and instead encourages the formation of multiple locally coherent groups. Under suitable choices of the scale parameter \sigma and sparsity support \xi_i^k , the empirical distribution \mu_t can tend toward a multi-atomic structure \mu_t \rightharpoonup \sum_{k=1}^m \pi_k \delta_{\mathcal{L}_k} , where \mathcal{L}_k denote stable cluster representatives and \pi_k their relative masses. This behavior closely parallels the classical Krause consensus model, where agents self-organize into multiple invariant opinion groups instead of converging to a global average. Crucially, in Krause Attention this multi-cluster tendency is induced by the architectural constraints of the attention mechanism itself, rather than any implicit biases arising from depth, initialization, or optimization-specific effects.
+
+ [p. 6 | section: Bounded-confidence Dynamics in Krause Attention. | type: Text]
+ Implications. By encoding locality and selective interactions into the design, Krause Attention turns clustering from a fragile, emergent phenomenon into a more stable architectural inductive bias. This helps preserve token diversity and improve robustness against representation collapse.
+
+ [p. 6 | section: Bounded-confidence Dynamics in Krause Attention. | type: Text]
+ Appendix C provides supporting theoretical analyses that illuminate this multi-cluster behavior from two complementary viewpoints: clustering-emergence dynamics (Geshkovski et al., 2023) and mean-field Wasserstein gradient flows (Chen et al., 2025). These analyses characterize the behavior of Krause Attention under appropriate bounded-confidence and separation assumptions, offering mechanistic insight into why multiple clusters can arise.
+
+ [p. 6 | section: 5.1. Experimental Setup | type: Text]
+ Image Recognition. We adopt the Vision Transformer (Dosovitskiy, 2021; Touvron et al., 2021a; Liu et al., 2021) with Krause Attention replacing the self-attention, and assess performance on CIFAR-10/100 (Krizhevsky et al., 2009), Fashion-MNIST (Xiao et al., 2017), and ImageNet-1K (Deng et al., 2009).
+
+ [p. 6 | section: 5.1. Experimental Setup | type: Text]
+ Image Generation. We integrate Krause Attention into autoregressive Transformers (Parmar et al., 2018) and evaluate on MNIST (LeCun et al., 2002) and CIFAR-10.
+
+ [p. 6 | section: 5.1. Experimental Setup | type: Text]
+ Language Modeling. We incorporate Krause Attention into Qwen (Yang et al., 2024a) and Llama (Grattafiori et al., 2024) as an auxiliary shortcut rather than a replacement
+
+ [p. 6 | section: 5.1. Experimental Setup | type: Text]
+ for self-attention, and train the resulting models via LoRA (Hu et al., 2022).^1 We evaluate these hybrid models on established language understanding benchmarks (Clark et al., 2019; Bisk et al., 2020; Wang et al., 2024; De Marneffe et al., 2019; Williams et al., 2018; Zhou et al., 2023; Nie et al., 2020).
+
+ [p. 6 | section: 5.1. Experimental Setup | type: Text]
+ Additional Details. Across all tasks, standard Transformers serve as primary baselines. Krause Attention is mainly controlled by two task-dependent hyperparameters: the local neighborhood size \mathcal{N}_i and the top-k sparsity level. Additional experimental results, implementation details, and ablation studies are provided in Appendix D.
+
+ [p. 6 | section: 5.2. Vision Transformers for Image Recognition | type: Text]
+ Krause Vision Transformers. We construct Krause Vision Transformers (KViTs) by replacing self-attention in standard ViT architectures with Krause Attention, leaving all other architectural components unchanged. For small- and medium-scale datasets, we evaluate ViT-Tiny/Small/Base backbones. For ImageNet-1K, we consider the commonly used ViT-S/B families. We additionally study Krause Attention within SwinTransformers. Results for KViTs on CIFAR-10/100 and ImageNet-1K are reported below.
+
+ [p. 6 | section: 5.2. Vision Transformers for Image Recognition | type: Text]
+ Results on CIFAR-10/100. On CIFAR-10 and CIFAR-100, we configure Krause Attention with a local spatial neighborhood corresponding to a 4-neighbor grid. The top-k interaction parameter is scheduled linearly from 2 to 4 across layers. Tables 1 and 2 report classification accuracy, parameter counts, and FLOPs. Across all backbones and datasets, KViTs consistently improve accuracy while reducing FLOPs. Parameter counts remain effectively unchanged, confirming that the performance gains stem from the Krause Attention mechanism itself rather than model capacity.
+
+ [p. 6 | section: 5.2. Vision Transformers for Image Recognition | type: TableGroup]
+ Table 1. Image classification results on CIFAR-10.
+ Models | Accuracy (%) | # Parameters | FLOPs
+ ViT-T  | 90.75 | 5,362,762  | 0.37G
+ KViT-T | 93.81 | 5,362,774  | 0.25G
+ ViT-S  | 93.33 | 21,342,346 | 1.43G
+ KViT-S | 95.20 | 21,342,358 | 0.97G
+ ViT-B  | 92.45 | 85,152,010 | 5.61G
+ KViT-B | 95.35 | 85,152,022 | 3.77G
+
+ [p. 6 | section: 5.2. Vision Transformers for Image Recognition | type: Text]
+ Results on ImageNet-1K. We further evaluate KViTs on ImageNet-1K to assess performance at scale. The local interaction neighborhood is expanded to 25 tokens, and the top-k selection parameter increases linearly from 8 to 16 across
+
+ [p. 6 | section: 5.2. Vision Transformers for Image Recognition | type: Footnote]
+ ^1 Since long-range connections are known to be highly valuable in language modeling, this setting is intended to examine the compatibility and complementary benefits of Krause Attention in LLMs rather than to position it as a full architectural alternative.
+
+ [p. 7 | section: 5.2. Vision Transformers for Image Recognition | type: TableGroup]
+ Table 2. Image classification results on CIFAR-100.
+ Models | Accuracy (%) | # Parameters | FLOPs
+ ViT-T  | 66.07 | 5,380,132  | 0.37G
+ KViT-T | 74.34 | 5,380,144  | 0.25G
+ ViT-S  | 71.74 | 21,376,996 | 1.43G
+ KViT-S | 77.05 | 21,377,008 | 0.97G
+ ViT-B  | 72.28 | 85,221,220 | 5.61G
+ KViT-B | 78.03 | 85,221,232 | 3.77G
+
+ [p. 7 | section: 5.2. Vision Transformers for Image Recognition | type: Text]
+ layers to accommodate the larger spatial resolution. Table 3 shows that KViTs consistently outperform ViT counterparts while reducing FLOPs. These trends hold across model sizes and patch resolutions, indicating that Krause Attention scales effectively to large-scale vision tasks. Fig. 3 further illustrates this behavior, showing that Krause Attention preserves diverse, multi-cluster attention patterns across heads, in contrast to the more homogeneous attention in ViTs.
+
+ [p. 7 | section: 5.2. Vision Transformers for Image Recognition | type: TableGroup]
+ Table 3. Image classification results on ImageNet-1K.
+ Models    | Accuracy (%) | # Parameters | FLOPs
+ ViT-S-16  | 74.04 | 22,050,664 | 4.62G
+ KViT-S-16 | 75.69 | 22,050,676 | 3.22G
+ ViT-S-32  | 68.78 | 22,878,952 | 1.15G
+ KViT-S-32 | 71.09 | 22,878,964 | 0.79G
+ ViT-B-16  | 72.19 | 86,567,656 | 17.61G
+ KViT-B-16 | 74.50 | 86,567,668 | 12.03G
+ ViT-B-32  | 67.20 | 88,224,232 | 4.42G
+ KViT-B-32 | 70.44 | 88,224,244 | 3.00G
+
+ [p. 7 | section: 5.2. Vision Transformers for Image Recognition | type: FigureGroup]
+ Figure 3. Krause Attention yields more diverse attention heads.
+
+ [p. 7 | section: 5.3. Autoregressive Models for Image Generation | type: Text]
+ Krause Autoregressive Models. We further evaluate Krause Attention in the context of autoregressive image generation. Specifically, we construct Krause Autoregressive Models (KARMs) by replacing the self-attention layers in standard autoregressive Transformers with Krause Attention. We compare KARMs against two baselines:
+
+ [p. 7 | section: 5.3. Autoregressive Models for Image Generation | type: Text]
+ (i) standard autoregressive models with full self-attention (ARM), and (ii) autoregressive models with linear attention (LARM) (Katharopoulos et al., 2020). Performance is measured using bits-per-dimension (BPD) for likelihood quality and images-per-second for inference throughput.
+
+ [p. 7 | section: 5.3. Autoregressive Models for Image Generation | type: TableGroup]
+ Table 4. Image generation results of KARMs on MNIST. Inference speed is measured on a single NVIDIA H100 GPU.
+ Models | BPD (↓) | Images/sec (↑) | Time Complexity
+ ARM  | 0.5685 | 83.5772  | O(N^2 d)
+ LARM | 0.5855 | 499.3672 | O(N d^2)
+ KARM | 0.5652 | 105.6037 | O(NWd)
+
+ [p. 7 | section: 5.3. Autoregressive Models for Image Generation | type: Text]
+ Results on MNIST. We first conduct experiments on the MNIST dataset, where each image is represented as a sequence of length 784. For KARMs, we employ a causal local window of size 128 and set the top-k interaction parameter to 96 to balance locality and expressiveness. We generate 50K samples to measure inference throughput.
+
+ [p. 7 | section: 5.3. Autoregressive Models for Image Generation | type: Text]
+ As shown in Table 4, KARM achieves the best likelihood (lowest BPD) among all models while also generating images substantially faster than the full-attention ARM baseline. Although KARM is slower than the linear-attention LARM, it offers markedly better likelihood, matching expectations from the intermediate time complexity O(NWd). Fig. 4 presents unconditional samples generated by KARM, illustrating the visual quality of the generated digits.
+
+ [p. 7 | section: 5.3. Autoregressive Models for Image Generation | type: PictureGroup]
+ Figure 4. Unconditional samples generated by KARM on MNIST.
+
+ [p. 7 | section: 5.3. Autoregressive Models for Image Generation | type: Text]
305
+ Results on CIFAR-10. We next evaluate on CIFAR-10, which corresponds to a longer sequence length of 3072. We expand the window size to 256 and set the top-k parameter to 192. Inference speed is evaluated on 10K samples.
306
+
307
+ [p. 7 | section: 5.3. Autoregressive Models for Image Generation | type: TableGroup]
308
+ Table 5. Image generation results of KARMs on CIFAR-10. Inference speed is measured on a single NVIDIA H100 GPU. Models BPD (↓) Images/sec (↑) Time Complexity ARM 3.0224 1.8933 O(N2d) LARM 3.1836 14.4032 O(N d2 ) KARM 3.0032 4.5240 O(NW d)
309
+
310
+ [p. 7 | section: 5.3. Autoregressive Models for Image Generation | type: Text]
311
+ Quantitative results are summarized in Table 5. As in the MNIST experiments, KARMs achieve the lowest BPD
312
+
313
+ [p. 8 | section: 5.3. Autoregressive Models for Image Generation | type: Text]
314
+ among all evaluated methods while significantly outperforming standard ARMs in generation speed. Although LARMs remain the fastest, they exhibit inferior likelihood performance compared to KARMs, highlighting the tradeoff between efficiency and modeling accuracy.
315
+
316
+ [p. 8 | section: 5.3. Autoregressive Models for Image Generation | type: FigureGroup]
317
+ Figure 5. Samples completed by KARMs on CIFAR-10.
318
+
319
+ [p. 8 | section: 5.3. Autoregressive Models for Image Generation | type: Text]
320
+ We also evaluate image completion on CIFAR-10 by masking half of each input image and autoregressively generating the missing pixels. Fig. 5 shows five completion samples generated from masked inputs. These results indicate that our KARMs produce semantically consistent and visually plausible completions despite the increased dataset complexity.
321
+
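+ The completion protocol can be written as a standard autoregressive resampling loop; a sketch, where `model` stands for any trained KARM-style next-pixel predictor returning logits of shape (B, T, V) (the name and interface are assumptions, not the authors' code):

```python
import torch

@torch.no_grad()
def complete(model, tokens: torch.Tensor, keep: int = 1536) -> torch.Tensor:
    """Keep the first half of the flattened image (1536 of 3072 positions)
    and sample the remaining pixels one at a time."""
    seq = tokens[:, :keep]
    for _ in range(tokens.shape[1] - keep):
        logits = model(seq)[:, -1]                              # next-pixel logits
        nxt = torch.multinomial(logits.softmax(dim=-1), num_samples=1)
        seq = torch.cat([seq, nxt], dim=1)
    return seq
```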
322
+ [p. 8 | section: 5.4. LLMs for Language Understanding and Reasoning | type: FigureGroup]
323
+ Figure 6. Our Krause-LLMs add a Krause Attention shortcut at each layer of the original LLM architecture.
324
+
325
+ [p. 8 | section: 5.4. LLMs for Language Understanding and Reasoning | type: Text]
326
+ Krause-LLMs. To explore whether Krause Attention can also improve language modeling, we integrate it into LLMs by adding a Krause-attention shortcut at every layer (Fig. 6). Both the Krause pathway and the original attention modules are trained via LoRA adaptation. We compare this hybrid model against the base LLMs and their LoRA-finetuned variants. All models are trained on a 50K-sample subset of Flan-v2 (Longpre et al., 2023) and evaluated under a zero-shot setting on a suite of language benchmarks.
327
+
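+ One way such a per-layer shortcut could be wired up, shown as a hypothetical wrapper (module and attribute names are ours; the actual integration with LoRA adapters and KV caching is more involved):

```python
import torch.nn as nn

class KrauseShortcutLayer(nn.Module):
    """Wraps an existing decoder layer with a parallel Krause-attention branch."""

    def __init__(self, base_layer: nn.Module, krause_attn: nn.Module):
        super().__init__()
        self.base = base_layer     # original layer, finetuned via LoRA
        self.krause = krause_attn  # e.g. window=32, top_k=16 for Llama3-8B

    def forward(self, hidden, **kwargs):
        out = self.base(hidden, **kwargs)
        main = out[0] if isinstance(out, tuple) else out
        main = main + self.krause(hidden, hidden, hidden)  # additive shortcut
        return (main,) + out[1:] if isinstance(out, tuple) else main
```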
328
+ [p. 8 | section: 5.4. LLMs for Language Understanding and Reasoning | type: Text]
329
+ Results on Krause-Llama. For Llama3-8B, the Krause Attention is configured with a causal window size of 32 and a top-k sparsity value of 16. Table 6 presents the performance of Krause-Llama3-8B. Compared to both the base Llama3-8B and its LoRA-finetuned variant, Krause-Llama3-8B achieves consistent gains on most datasets, including BoolQ, CB, PIQA, MNLI, ANLI (R1–R3), and IFEval. Performance on MMLU-Pro also matches the LoRA baseline. These results suggest that incorporating a Krause-attention
330
+
331
+ [p. 8 | section: 5.4. LLMs for Language Understanding and Reasoning | type: Text]
332
+ shortcut can provide complementary inductive bias for language tasks without degrading overall model quality.
333
+
334
+ [p. 8 | section: 5.4. LLMs for Language Understanding and Reasoning | type: Text]
335
+ Alleviating Attention Sinks. LLMs often suffer from the attention sink effect (Xiao et al., 2024), where the softmax normalization assigns disproportionately high attention scores to early tokens, regardless of their semantic relevance. This behavior introduces positional bias, reduces model expressivity, and weakens representation robustness.
336
+
337
+ [p. 8 | section: 5.4. LLMs for Language Understanding and Reasoning | type: Text]
338
+ Krause Attention provides a complementary, bounded-confidence mechanism for mitigating this issue. By restricting attention to the local neighborhood, distant tokens can no longer allocate weight to the initial positions once they fall outside the receptive field. As shown in Fig. 7, the base Llama model exhibits large oscillations and persistent peaks across layers, whereas Krause-LLMs produce markedly more stable attention curves. This stabilization indicates that Krause Attention reduces reliance on fixed positional anchors and supports more robust representation learning.
339
+
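+ The layer-wise curves in Fig. 7 amount to tracking how much attention mass lands on position 0 at each layer; with a Hugging Face-style model this can be probed roughly as follows (a sketch; the averaging over heads, queries, and batch is our choice):

```python
import torch

@torch.no_grad()
def first_token_mass(model, input_ids):
    """Mean attention weight assigned to the first token, per layer."""
    out = model(input_ids, output_attentions=True)
    # each entry: (batch, heads, query_len, key_len); take the mass on key 0
    return [attn[..., 0].mean().item() for attn in out.attentions]
```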
340
+ [p. 8 | section: 5.4. LLMs for Language Understanding and Reasoning | type: FigureGroup]
341
+ Figure 7. Layer dynamics of first-token attentions on Llama3-8B.
342
+
343
+ [p. 8 | section: 6. Conclusions | type: Text]
344
+ This paper introduces Krause Attention, a distance-based, bounded-confidence attention mechanism that embeds an explicit multi-cluster synchronization prior into Transformer architectures. By enforcing localized and selectively sparse interactions, Krause Attention promotes structured multi-cluster token coordination, contrasting with the global synchronization tendencies observed in standard self-attention. Theoretical results relate Krause Attention to recent analyses of attention dynamics and to classical bounded-confidence models, offering mechanistic insight into why such architectures support multi-cluster behavior. Empirical evaluations across vision recognition, autoregressive image generation, and LLMs demonstrate consistent performance gains together with efficient inference. Overall, Krause Attention provides a principled and scalable inductive bias for structured token coordination, opening new avenues for consensus-driven and dynamics-aware Transformer design.
345
+
346
+ [p. 9 | section: 6. Conclusions | type: Text]
347
+ Models | BoolQ | CB | PIQA | MNLI | ANLI-R1 | ANLI-R2 | ANLI-R3 | MMLU-Pro | IFEval
+ Llama3-8B | 76.13 | 41.07/19.41 | 51.52 | 35.45/18.11 | 33.40/16.69 | 33.40/16.69 | 33.50/17.04 | 37.50 | 22.18
+ Llama3-8B (finetuned w/ LoRA) | 80.41 | 60.71/47.81 | 75.16 | 59.53/55.29 | 38.70/30.62 | 39.90/33.37 | 44.92/39.57 | 41.67 | 32.72
+ Krause-Llama3-8B | 80.59 | 64.29/48.04 | 77.77 | 63.27/53.72 | 40.30/33.01 | 40.50/34.27 | 45.67/39.84 | 41.67 | 34.01
348
+
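+ Read against the LoRA baseline, the Krause shortcut's largest gains in this table are on PIQA (77.77 vs. 75.16, +2.61) and MNLI accuracy (63.27 vs. 59.53, +3.74), with MMLU-Pro unchanged at 41.67.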
349
+ [p. 9 | section: 6. Conclusions | type: Text]
350
+ This paper presents work whose primary goal is to advance the field of Machine Learning by proposing a new attention mechanism grounded in bounded-confidence dynamics. Krause Attention modifies internal model interactions but does not introduce application-specific decision rules or domain-specific interventions. As with most advances in Transformer architectures, the broader societal impacts depend on the tasks and systems into which such models are deployed. While our method may contribute to improved efficiency and stability in future models, we do not identify any unique ethical concerns or societal implications beyond those already well established for research in deep learning.
icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/paper.blocks.json ADDED
The diff for this file is too large to render. See raw diff
 
icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/paper.md ADDED
The diff for this file is too large to render. See raw diff
 
icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/parse_report.json ADDED
@@ -0,0 +1,78 @@
1
+ {
2
+ "paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7",
3
+ "pipeline": "marker_non_llm_v3",
4
+ "parser": "marker_single",
5
+ "formats": [
6
+ "markdown",
7
+ "chunks"
8
+ ],
9
+ "llm_enabled": false,
10
+ "pdf_path": "/network/scratch/j/jianan.zhao/ReviewAgent/data/processed_papers/icml26_20260429_1952_duequeue/raw/4c97921d-90ed-40e8-a5e2-c99a0f2081e7.pdf",
11
+ "pdf_sha256": "10a9827ff9430b2ff2c0883eb09139bb9737223514bf27a94d24be2eacdd379c",
12
+ "bytes": 35316527,
13
+ "source": "https://koala.science/storage/pdfs/4c97921d-90ed-40e8-a5e2-c99a0f2081e7.pdf",
14
+ "page_count": 28,
15
+ "ok": true,
16
+ "elapsed_seconds": 271.24,
17
+ "paper2markdown_v3": {
18
+ "paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7",
19
+ "pipeline": "Paper2Markdown-V3",
20
+ "ok": true,
21
+ "page_count": 28,
22
+ "chunk_count": 286,
23
+ "main_body_chunk_count": 118,
24
+ "appendix_chunk_count": 156,
25
+ "reference_chunk_count": 12,
26
+ "model_text_chars": 44118,
27
+ "raw_markdown_chars": 133833,
28
+ "sanitized_chars": 116859,
29
+ "page_provenance": {
30
+ "min_page": 1,
31
+ "max_page": 28,
32
+ "invalid_count": 0
33
+ },
34
+ "marker_block_type_counts": {
35
+ "Caption": 6,
36
+ "Code": 1,
37
+ "Equation": 40,
38
+ "FigureGroup": 9,
39
+ "Footnote": 2,
40
+ "ListGroup": 10,
41
+ "PageFooter": 29,
42
+ "PageHeader": 53,
43
+ "PictureGroup": 2,
44
+ "SectionHeader": 69,
45
+ "Table": 5,
46
+ "TableGroup": 12,
47
+ "TableOfContents": 1,
48
+ "Text": 643
49
+ },
50
+ "asset_count_raw": 11,
51
+ "asset_count_model_kept": 11,
52
+ "asset_count_rejected": 0,
53
+ "asset_reject_reasons": {
54
+ "kept": 11
55
+ },
56
+ "artifact_leak_audit": {
57
+ "ok": true,
58
+ "hits": {
59
+ "Anonymous Authors": [],
60
+ "ACKNOWLEDGMENT": [],
61
+ "OpenReview": [],
62
+ "\"accept_label\"": [],
63
+ "\"decision\"": [],
64
+ "\"decision_tier\"": [],
65
+ "\"source_status\"": [],
66
+ "Meta-review": [],
67
+ "Official Review": [],
68
+ "official_reviews": [],
69
+ "meta_reviews": [],
70
+ "suggested_verdict_score": []
71
+ },
72
+ "artifact_count": 2
73
+ },
74
+ "default_model_input": "model_text_v3.txt",
75
+ "appendix_input": "appendix_text_v3.txt",
76
+ "reference_input": "reference_text_v3.txt"
77
+ }
78
+ }
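+ For downstream consumers of this folder, the chunk files referenced by this report contain one JSON record per line; a minimal loading sketch (field names taken from the records shown in this commit):

```python
import json
from pathlib import Path

def load_chunks(path: str) -> list[dict]:
    """Read one chunk record per line from a *_chunks.jsonl file."""
    with Path(path).open(encoding="utf-8") as f:
        return [json.loads(line) for line in f if line.strip()]

chunks = load_chunks("icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/reference_chunks.jsonl")
assert len(chunks) == 12  # matches reference_chunk_count in this report
assert all(c["paper_id"] == "4c97921d-90ed-40e8-a5e2-c99a0f2081e7" for c in chunks)
```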
icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/reference_chunks.jsonl ADDED
@@ -0,0 +1,12 @@
1
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0118", "section": "References", "page_start": 9, "page_end": 9, "type": "Text", "text": "Impact Statement", "source": "marker_v2", "marker_block_id": "/page/8/Text/41"}
2
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0119", "section": "References", "page_start": 9, "page_end": 9, "type": "ListGroup", "text": "Abbe, E., Bengio, S., Lotfi, A., Sandon, C., and Saremi, O. How far can transformers reason? the globality barrier and inductive scratchpad. NeurIPS , 2024. URL https: //arxiv.org/abs/2406.06467 . Acebron, J. A., Bonilla, L. L., P ´ erez Vicente, C. J., ´ Ritort, F., and Spigler, R. The kuramoto model: A simple paradigm for synchronization phenomena. Reviews of modern physics , 77(1):137–185, 2005. URL abstract/10.1103/RevModPhys.77.137 . Achiam, J., Adler, S., Agarwal, S., Ahmad, L., Akkaya, I., Aleman, F. L., Almeida, D., Altenschmidt, J., Altman, S., Anadkat, S., et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774 , 2023. URL https: //arxiv.org/abs/2303.08774 . Amari, S.-i. Dynamics of pattern formation in lateralinhibition type neural fields. Biological cybernetics , 1977. URL 10.1007/BF00337259 . Bisk, Y., Zellers, R., Gao, J., Choi, Y., et al. Piqa: Reasoning about physical commonsense in natural language. In AAAI , 2020. URL 1911.11641 . Blondel, V. D., Hendrickx, J. M., and Tsitsiklis, J. N. On krause's multi-agent consensus model with statedependent connectivity. IEEE transactions on Auto-", "source": "marker_v2", "marker_block_id": "/page/8/ListGroup/625"}
3
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0120", "section": "References", "page_start": 9, "page_end": 9, "type": "ListGroup", "text": "matic Control , 54(11):2586–2597, 2009. URL https: //arxiv.org/abs/0807.2028 . Bruno, G., Pasqualotto, F., and Agazzi, A. Emergence of meta-stable clustering in mean-field transformer models. ICLR , 2025a. URL 2410.23228 . Bruno, G., Pasqualotto, F., and Agazzi, A. A multiscale analysis of mean-field transformers in the moderate interaction regime. NeurIPS , 2025b. URL https: //arxiv.org/abs/2509.25040 . Cao, C., Hong, Y., Li, X., Wang, C., Xu, C., Fu, Y., and Xue, X. The image local autoregressive transformer. NeurIPS , 2021. URL 2106.02514 . Chang, H., Zhang, H., Jiang, L., Liu, C., and Freeman, W. T. Maskgit: Masked generative image transformer. In CVPR , 2022. URL 2202.04200 . Chen, M., Radford, A., Child, R., Wu, J., Jun, H., Luan, D., and Sutskever, I. Generative pretraining from pixels. In ICML , 2020. URL press/v119/chen20s.html . Chen, S., Lin, Z., Polyanskiy, Y., and Rigollet, P. Quantitative clustering in mean-field transformer models. arXiv preprint arXiv:2504.14697 , 2025. URL https:// arxiv.org/abs/2504.14697 . Child, R., Gray, S., Radford, A., and Sutskever, I. Generating long sequences with sparse transformers. arXiv preprint arXiv:1904.10509 , 2019. URL https:// arxiv.org/abs/1904.10509 . Choromanski, K., Likhosherstov, V., Dohan, D., Song, X., Gane, A., Sarlos, T., Hawkins, P., Davis, J., Mohiuddin, A., Kaiser, L., et al. Rethinking attention with performers. ICLR , 2021. URL 2009.14794 . Clark, C., Lee, K., Chang, M.-W., Kwiatkowski, T., Collins, M., and Toutanova, K. Boolq: Exploring the surprising difficulty of natural yes/no questions. In NAACL , 2019. URL .", "source": "marker_v2", "marker_block_id": "/page/8/ListGroup/626"}
4
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0121", "section": "References", "page_start": 10, "page_end": 10, "type": "ListGroup", "text": "495 496 497 498 499 500 501 Comanici, G., Bieber, E., Schaekermann, M., Pasupat, I., Sachdeva, N., Dhillon, I., Blistein, M., Ram, O., Zhang, D., Rosen, E., et al. Gemini 2.5: Pushing the frontier with advanced reasoning, multimodality, long context, and next generation agentic capabilities. arXiv preprint arXiv:2507.06261 , 2025. URL https: //arxiv.org/abs/2507.06261 . Cordonnier, J.-B., Loukas, A., and Jaggi, M. On the relationship between self-attention and convolutional layers. ICLR , 2020. URL 1911.03584 .", "source": "marker_v2", "marker_block_id": "/page/9/ListGroup/547"}
5
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0122", "section": "References", "page_start": 10, "page_end": 10, "type": "ListGroup", "text": "Dao, T. and Gu, A. Transformers are ssms: Generalized models and efficient algorithms through structured state space duality. ICML , 2024. URL org/abs/2405.21060 . De, S., Smith, S. L., Fernando, A., Botev, A., Cristian-Muraru, G., Gu, A., Haroun, R., Berrada, L., Chen, Y., Srinivasan, S., et al. Griffin: Mixing gated linear recurrences with local attention for efficient language models. arXiv preprint arXiv:2402.19427 , 2024. URL . De Marneffe, M.-C., Simons, M., and Tonhauser, J. The commitmentbank: Investigating projection in naturally occurring discourse. In pro ceedings of Sinn und Bedeutung , 2019. URL index.php/sub/article/view/601 . Deng, J., Dong, W., Socher, R., Li, L.-J., Li, K., and Fei-Fei, L. Imagenet: A large-scale hierarchical image database. In CVPR , 2009. URL files/papers/imagenet_cvpr09.pdf . Dosovitskiy, A. An image is worth 16x16 words: Transformers for image recognition at scale. ICLR , 2021. URL . Ermentrout, B. Neural networks as spatio-temporal patternforming systems. Reports on progress in physics , 1998. URL article/10.1088/0034-4885/61/4/002 . Geshkovski, B., Letrouit, C., Polyanskiy, Y., and Rigollet, P. The emergence of clusters in self-attention dynamics. NeurIPS , 2023. URL 2305.05465 . Glorioso, P., Anthony, Q., Tokpanov, Y., Whittington, J., Pilault, J., Ibrahim, A., and Millidge, B. Zamba: A compact 7b ssm hybrid model. arXiv preprint arXiv:2405.16712 , 2024. URL org/abs/2405.16712 .", "source": "marker_v2", "marker_block_id": "/page/9/ListGroup/548"}
6
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0123", "section": "References", "page_start": 10, "page_end": 10, "type": "ListGroup", "text": "Grattafiori, A., Dubey, A., Jauhri, A., Pandey, A., Kadian, A., Al-Dahle, A., Letman, A., Mathur, A., Schelten, A., Vaughan, A., et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783 , 2024. URL . Gray, R. Vector quantization. IEEE Assp Magazine , 1984. URL document/1162229 . Grossberg, S. Nonlinear neural networks: Principles, mechanisms, and architectures. Neu ral networks , 1988. URL sciencedirect.com/science/article/ abs/pii/0893608088900214 . Gu, A. and Dao, T. Mamba: Linear-time sequence modeling with selective state spaces. arXiv preprint arXiv:2312.00752 , 2023. URL org/abs/2312.00752 . Gu, A., Goel, K., and Re, C. Efficiently modeling long ´ sequences with structured state spaces. ICLR , 2022. URL . Han, K., Xiao, A., Wu, E., Guo, J., Xu, C., and Wang, Y. Transformer in transformer. NeurIPS , 2021. URL . He, K., Chen, X., Xie, S., Li, Y., Dollar, P., and Girshick, ´ R. Masked autoencoders are scalable vision learners. In CVPR , 2022. URL 2111.06377 . Hegselmann, R. and Krause, U. Opinion dynamics and bounded confidence models, analysis and simulation. Journal of Artificial Societies and Social Simulation , 2002. URL pdf . Heo, B., Park, S., Han, D., and Yun, S. Rotary position embedding for vision transformer. In ECCV , 2024. URL . Hoover, B., Liang, Y., Pham, B., Panda, R., Strobelt, H., Chau, D. H., Zaki, M., and Krotov, D. Energy transformer. NeurIPS , 2023. URL 2302.07253 . Hu, E. J., Shen, Y., Wallis, P., Allen-Zhu, Z., Li, Y., Wang, S., Wang, L., Chen, W., et al. Lora: Low-rank adaptation of large language models. ICLR , 2022. URL https: //arxiv.org/abs/2106.09685v2 . Hu, J. Y.-C., Wu, D., and Liu, H. Provably optimal memory capacity for modern hopfield models: Transformercompatible dense associative memories as spherical codes. NeurIPS , 2024. URL abs/2410.23126 .", "source": "marker_v2", "marker_block_id": "/page/9/ListGroup/549"}
7
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0124", "section": "References", "page_start": 11, "page_end": 11, "type": "Text", "text": "550 Jaegle, A., Gimeno, F., Brock, A., Vinyals, O., Zisserman, A., and Carreira, J. Perceiver: General perception with iterative attention. In ICML , 2021. URL https:// arxiv.org/abs/2103.03206 .", "source": "marker_v2", "marker_block_id": "/page/10/Text/562"}
8
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0125", "section": "References", "page_start": 11, "page_end": 11, "type": "ListGroup", "text": "Katharopoulos, A., Vyas, A., Pappas, N., and Fleuret, F. Transformers are rnns: Fast autoregressive transformers with linear attention. In ICML , 2020. URL https: //arxiv.org/abs/2006.16236 . Kitaev, N., Kaiser, Ł., and Levskaya, A. Reformer: The efficient transformer. ICLR , 2020. URL https:// arxiv.org/abs/2001.04451 . Kohonen, T. The self-organizing map. Proceedings of the IEEE , 2002. URL org/document/58325 . Krause, U. et al. A discrete nonlinear and nonautonomous model of consensus formation. Com munications in difference equations , 2000:227–236, 2000. URL publication/329027305 . Krizhevsky, A., Hinton, G., et al. Learning multiple layers of features from tiny images. 2009. URL learning-features-2009-TR.pdf . Kuramoto, Y. Self-entrainment of a population of coupled non-linear oscillators. In Interna tional symposium on mathematical problems in theoretical physics: January 23–29, 1975, ky oto university, kyoto/Japan , pp. 420–422. Springer, 2005. URL org/CorpusID:123276626 . LeCun, Y., Bottou, L., Bengio, Y., and Haffner, P. Gradient-based learning applied to document recognition. Proceedings of the IEEE , 86(11):2278–2324, 2002. URL pdf/lecun-01a.pdf . Lieber, O., Lenz, B., Bata, H., Cohen, G., Osin, J., Dalmedigos, I., Safahi, E., Meirom, S., Belinkov, Y., Shalev-Shwartz, S., et al. Jamba: A hybrid transformer-mamba language model. arXiv preprint arXiv:2403.19887 , 2024. URL . Liu, A., Mei, A., Lin, B., Xue, B., Wang, B., Xu, B., Wu, B., Zhang, B., Lin, C., Dong, C., et al. Deepseek-v3. 2: Pushing the frontier of open large language models. arXiv preprint arXiv:2512.02556 , 2025. URL https: //arxiv.org/abs/2512.02556 . Liu, Y., Tian, Y., Zhao, Y., Yu, H., Xie, L., Wang, Y., Ye, Q., Jiao, J., and Liu, Y. Vmamba: Visual state space model. NeurIPS , 2024. URL 2401.10166 .", "source": "marker_v2", "marker_block_id": "/page/10/ListGroup/560"}
9
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0126", "section": "References", "page_start": 11, "page_end": 11, "type": "ListGroup", "text": "Liu, Z., Lin, Y., Cao, Y., Hu, H., Wei, Y., Zhang, Z., Lin, S., and Guo, B. Swin transformer: Hierarchical vision transformer using shifted windows. In ICCV , 2021. URL . Longpre, S., Hou, L., Vu, T., Webson, A., Chung, H. W., Tay, Y., Zhou, D., Le, Q. V., Zoph, B., Wei, J., et al. The flan collection: Designing data and methods for effective instruction tuning. In ICML , 2023. URL https:// arxiv.org/abs/2301.13688 . Lorenz, J. Continuous opinion dynamics under bounded confidence: A survey. International Journal of Modern Physics C , 18(12):1819–1838, 2007. URL https:// arxiv.org/abs/0707.1762 . Munir, M., Zhang, A., and Marculescu, R. Vcmamba: Bridging convolutions with multi-directional mamba for efficient visual representation. In ICCV , 2025. URL . Nie, Y., Williams, A., Dinan, E., Bansal, M., Weston, J., and Kiela, D. Adversarial nli: A new benchmark for natural language understanding. In ACL , 2020. URL https: //aclanthology.org/2020.acl-main.441/ . Parmar, N., Vaswani, A., Uszkoreit, J., Kaiser, L., Shazeer, N., Ku, A., and Tran, D. Image transformer. In ICML , 2018. URL v80/parmar18a.html . Peebles, W. and Xie, S. Scalable diffusion models with transformers. In ICCV , 2023. URL org/abs/2212.09748 . Qiu, Z., Wang, Z., Zheng, B., Huang, Z., Wen, K., Yang, S., Men, R., Yu, L., Huang, F., Huang, S., et al. Gated attention for large language models: Non-linearity, sparsity, and attention-sink-free. NeurIPS , 2025. URL . Ramsauer, H., Schafl, B., Lehner, J., Seidl, P., Widrich, ¨ M., Adler, T., Gruber, L., Holzleitner, M., Pavlovic,´ M., Sandve, G. K., et al. Hopfield networks is all you need. ICLR , 2021. URL abs/2008.02217 . Rao, Y., Zhao, W., Liu, B., Lu, J., Zhou, J., and Hsieh, C.-J. Dynamicvit: Efficient vision transformers with dynamic token sparsification. NeurIPS , 2021. URL https:// arxiv.org/abs/2106.02034 . Ren, R., Ouyang, S., Tang, H., and Liu, Y. Transformers as intrinsic optimizers: Forward inference through the energy principle. arXiv preprint arXiv:2511.00907 , 2025. URL .", "source": "marker_v2", "marker_block_id": "/page/10/ListGroup/561"}
10
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0127", "section": "References", "page_start": 12, "page_end": 12, "type": "Text", "text": "605 606 607 Rigollet, P. The mean-field dynamics of transformers. arXiv preprint arXiv:2512.01868 , 2025. URL https: //arxiv.org/abs/2512.01868 .", "source": "marker_v2", "marker_block_id": "/page/11/Text/583"}
11
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0128", "section": "References", "page_start": 12, "page_end": 12, "type": "ListGroup", "text": "609 610 611 612 Rumelhart, D. E. and Zipser, D. Feature discovery by competitive learning. Cognitive science , 1985. URL https: // article/abs/pii/S0364021385800100 . Touvron, H., Cord, M., Douze, M., Massa, F., Sablayrolles, A., and Jegou, H. Training data-efficient image trans- ´ formers & distillation through attention. In ICML , 2021a. URL . Touvron, H., Cord, M., Sablayrolles, A., Synnaeve, G., and Jegou, H. Going deeper with image transformers. ´ In ICCV , 2021b. URL 2103.17239 . Touvron, H., Cord, M., and Jegou, H. Deit iii: Revenge of ´ the vit. In ECCV , 2022. URL abs/2204.07118 . Tsai, Y.-H. H., Bai, S., Yamada, M., Morency, L.-P., and Salakhutdinov, R. Transformer dissection: An unified understanding for transformer's attention via the lens of kernel. In EMNLP , 2019. URL org/abs/1908.11775 . Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., Kaiser, Ł., and Polosukhin, I. Attention is all you need. NeurIPS , 2017. URL org/abs/1706.03762 . Wang, C., Li, Q., E, W., and Chazelle, B. Noisy hegselmannkrause systems: Phase transition and the 2 r-conjecture. Journal of Statistical Physics , 166(5):1209–1225, 2017. URL . Wang, S., Li, B. Z., Khabsa, M., Fang, H., and Ma, H. Linformer: Self-attention with linear complexity. arXiv preprint arXiv:2006.04768 , 2020. URL https: //arxiv.org/abs/2006.04768 . Wang, W., Xie, E., Li, X., Fan, D.-P., Song, K., Liang, D., Lu, T., Luo, P., and Shao, L. Pyramid vision transformer: A versatile backbone for dense prediction without convolutions. In ICCV , 2021. URL https: //arxiv.org/abs/2102.12122 . Wang, Y., Ma, X., Zhang, G., Ni, Y., Chandra, A., Guo, S., Ren, W., Arulraj, A., He, X., Jiang, Z., et al. Mmlupro: A more robust and challenging multi-task language understanding benchmark. Advances in Neural Information Processing Systems , 2024. URL https: //arxiv.org/abs/2406.01574 .", "source": "marker_v2", "marker_block_id": "/page/11/ListGroup/581"}
12
+ {"paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7", "chunk_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7:0129", "section": "References", "page_start": 12, "page_end": 12, "type": "ListGroup", "text": "Wang, Z., Xia, X., Chen, R., Yu, D., Wang, C., Gong, M., and Liu, T. Lavin-dit: Large vision diffusion transformer. In CVPR , 2025. URL 2411.11505 . Williams, A., Nangia, N., and Bowman, S. A broadcoverage challenge corpus for sentence understanding through inference. In NAACL , 2018. URL https: //arxiv.org/abs/1704.05426 . Xiao, G., Tian, Y., Chen, B., Han, S., and Lewis, M. Efficient streaming language models with attention sinks. ICLR , 2024. URL 2309.17453 . Xiao, H., Rasul, K., and Vollgraf, R. Fashion-mnist: a novel image dataset for benchmarking machine learning algorithms. arXiv preprint arXiv:1708.07747 , 2017. URL . Yang, A., Yang, B., Hui, B., et al. Qwen2 technical report. arXiv preprint arXiv:2407.10671 , 2024a. URL https: //arxiv.org/abs/2407.10671 . Yang, C., Chen, Z., Espinosa, M., Ericsson, L., Wang, Z., Liu, J., and Crowley, E. J. Plainmamba: Improving nonhierarchical mamba in visual recognition. CoRR , 2024b. URL . Yang, Y., Wipf, D. P., et al. Transformers from an optimization perspective. NeurIPS , 2022. URL https: //arxiv.org/abs/2205.13891 . Yu, J., Xu, Y., Koh, J. Y., Luong, T., Baid, G., Wang, Z., Vasudevan, V., Ku, A., et al. Scaling autoregressive models for content-rich text-to-image generation. arXiv preprint arXiv:2206.10789 , 2022. URL https: //arxiv.org/abs/2206.10789 . Yuan, L., Chen, Y., Wang, T., Yu, W., Shi, Y., Jiang, Z.-H., Tay, F. E., Feng, J., and Yan, S. Tokens-to-token vit: Training vision transformers from scratch on imagenet. In ICCV , 2021. URL 2101.11986 . Zhou, J., Lu, T., Mishra, S., Brahma, S., Basu, S., Luan, Y., Zhou, D., and Hou, L. Instruction-following evaluation for large language models. CoRR , 2023. URL https: //arxiv.org/abs/2311.07911 . Zhu, L., Liao, B., Zhang, Q., Wang, X., Liu, W., and Wang, X. Vision mamba: Efficient visual representation learning with bidirectional state space model. ICML , 2024. URL .", "source": "marker_v2", "marker_block_id": "/page/11/ListGroup/582"}
icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/reference_text_v3.txt ADDED
@@ -0,0 +1,35 @@
1
+ [p. 9 | section: References | type: Text]
2
+ Impact Statement
3
+
4
+ [p. 9 | section: References | type: ListGroup]
5
+ Abbe, E., Bengio, S., Lotfi, A., Sandon, C., and Saremi, O. How far can transformers reason? the globality barrier and inductive scratchpad. NeurIPS , 2024. URL https: //arxiv.org/abs/2406.06467 . Acebron, J. A., Bonilla, L. L., P ´ erez Vicente, C. J., ´ Ritort, F., and Spigler, R. The kuramoto model: A simple paradigm for synchronization phenomena. Reviews of modern physics , 77(1):137–185, 2005. URL abstract/10.1103/RevModPhys.77.137 . Achiam, J., Adler, S., Agarwal, S., Ahmad, L., Akkaya, I., Aleman, F. L., Almeida, D., Altenschmidt, J., Altman, S., Anadkat, S., et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774 , 2023. URL https: //arxiv.org/abs/2303.08774 . Amari, S.-i. Dynamics of pattern formation in lateralinhibition type neural fields. Biological cybernetics , 1977. URL 10.1007/BF00337259 . Bisk, Y., Zellers, R., Gao, J., Choi, Y., et al. Piqa: Reasoning about physical commonsense in natural language. In AAAI , 2020. URL 1911.11641 . Blondel, V. D., Hendrickx, J. M., and Tsitsiklis, J. N. On krause's multi-agent consensus model with statedependent connectivity. IEEE transactions on Auto-
6
+
7
+ [p. 9 | section: References | type: ListGroup]
8
+ matic Control , 54(11):2586–2597, 2009. URL https: //arxiv.org/abs/0807.2028 . Bruno, G., Pasqualotto, F., and Agazzi, A. Emergence of meta-stable clustering in mean-field transformer models. ICLR , 2025a. URL 2410.23228 . Bruno, G., Pasqualotto, F., and Agazzi, A. A multiscale analysis of mean-field transformers in the moderate interaction regime. NeurIPS , 2025b. URL https: //arxiv.org/abs/2509.25040 . Cao, C., Hong, Y., Li, X., Wang, C., Xu, C., Fu, Y., and Xue, X. The image local autoregressive transformer. NeurIPS , 2021. URL 2106.02514 . Chang, H., Zhang, H., Jiang, L., Liu, C., and Freeman, W. T. Maskgit: Masked generative image transformer. In CVPR , 2022. URL 2202.04200 . Chen, M., Radford, A., Child, R., Wu, J., Jun, H., Luan, D., and Sutskever, I. Generative pretraining from pixels. In ICML , 2020. URL press/v119/chen20s.html . Chen, S., Lin, Z., Polyanskiy, Y., and Rigollet, P. Quantitative clustering in mean-field transformer models. arXiv preprint arXiv:2504.14697 , 2025. URL https:// arxiv.org/abs/2504.14697 . Child, R., Gray, S., Radford, A., and Sutskever, I. Generating long sequences with sparse transformers. arXiv preprint arXiv:1904.10509 , 2019. URL https:// arxiv.org/abs/1904.10509 . Choromanski, K., Likhosherstov, V., Dohan, D., Song, X., Gane, A., Sarlos, T., Hawkins, P., Davis, J., Mohiuddin, A., Kaiser, L., et al. Rethinking attention with performers. ICLR , 2021. URL 2009.14794 . Clark, C., Lee, K., Chang, M.-W., Kwiatkowski, T., Collins, M., and Toutanova, K. Boolq: Exploring the surprising difficulty of natural yes/no questions. In NAACL , 2019. URL .
9
+
10
+ [p. 10 | section: References | type: ListGroup]
11
+ Comanici, G., Bieber, E., Schaekermann, M., Pasupat, I., Sachdeva, N., Dhillon, I., Blistein, M., Ram, O., Zhang, D., Rosen, E., et al. Gemini 2.5: Pushing the frontier with advanced reasoning, multimodality, long context, and next generation agentic capabilities. arXiv preprint arXiv:2507.06261, 2025. URL https://arxiv.org/abs/2507.06261 . Cordonnier, J.-B., Loukas, A., and Jaggi, M. On the relationship between self-attention and convolutional layers. ICLR, 2020. URL 1911.03584 .
12
+
13
+ [p. 10 | section: References | type: ListGroup]
14
+ Dao, T. and Gu, A. Transformers are ssms: Generalized models and efficient algorithms through structured state space duality. ICML , 2024. URL org/abs/2405.21060 . De, S., Smith, S. L., Fernando, A., Botev, A., Cristian-Muraru, G., Gu, A., Haroun, R., Berrada, L., Chen, Y., Srinivasan, S., et al. Griffin: Mixing gated linear recurrences with local attention for efficient language models. arXiv preprint arXiv:2402.19427 , 2024. URL . De Marneffe, M.-C., Simons, M., and Tonhauser, J. The commitmentbank: Investigating projection in naturally occurring discourse. In pro ceedings of Sinn und Bedeutung , 2019. URL index.php/sub/article/view/601 . Deng, J., Dong, W., Socher, R., Li, L.-J., Li, K., and Fei-Fei, L. Imagenet: A large-scale hierarchical image database. In CVPR , 2009. URL files/papers/imagenet_cvpr09.pdf . Dosovitskiy, A. An image is worth 16x16 words: Transformers for image recognition at scale. ICLR , 2021. URL . Ermentrout, B. Neural networks as spatio-temporal patternforming systems. Reports on progress in physics , 1998. URL article/10.1088/0034-4885/61/4/002 . Geshkovski, B., Letrouit, C., Polyanskiy, Y., and Rigollet, P. The emergence of clusters in self-attention dynamics. NeurIPS , 2023. URL 2305.05465 . Glorioso, P., Anthony, Q., Tokpanov, Y., Whittington, J., Pilault, J., Ibrahim, A., and Millidge, B. Zamba: A compact 7b ssm hybrid model. arXiv preprint arXiv:2405.16712 , 2024. URL org/abs/2405.16712 .
15
+
16
+ [p. 10 | section: References | type: ListGroup]
17
+ Grattafiori, A., Dubey, A., Jauhri, A., Pandey, A., Kadian, A., Al-Dahle, A., Letman, A., Mathur, A., Schelten, A., Vaughan, A., et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783 , 2024. URL . Gray, R. Vector quantization. IEEE Assp Magazine , 1984. URL document/1162229 . Grossberg, S. Nonlinear neural networks: Principles, mechanisms, and architectures. Neu ral networks , 1988. URL sciencedirect.com/science/article/ abs/pii/0893608088900214 . Gu, A. and Dao, T. Mamba: Linear-time sequence modeling with selective state spaces. arXiv preprint arXiv:2312.00752 , 2023. URL org/abs/2312.00752 . Gu, A., Goel, K., and Re, C. Efficiently modeling long ´ sequences with structured state spaces. ICLR , 2022. URL . Han, K., Xiao, A., Wu, E., Guo, J., Xu, C., and Wang, Y. Transformer in transformer. NeurIPS , 2021. URL . He, K., Chen, X., Xie, S., Li, Y., Dollar, P., and Girshick, ´ R. Masked autoencoders are scalable vision learners. In CVPR , 2022. URL 2111.06377 . Hegselmann, R. and Krause, U. Opinion dynamics and bounded confidence models, analysis and simulation. Journal of Artificial Societies and Social Simulation , 2002. URL pdf . Heo, B., Park, S., Han, D., and Yun, S. Rotary position embedding for vision transformer. In ECCV , 2024. URL . Hoover, B., Liang, Y., Pham, B., Panda, R., Strobelt, H., Chau, D. H., Zaki, M., and Krotov, D. Energy transformer. NeurIPS , 2023. URL 2302.07253 . Hu, E. J., Shen, Y., Wallis, P., Allen-Zhu, Z., Li, Y., Wang, S., Wang, L., Chen, W., et al. Lora: Low-rank adaptation of large language models. ICLR , 2022. URL https: //arxiv.org/abs/2106.09685v2 . Hu, J. Y.-C., Wu, D., and Liu, H. Provably optimal memory capacity for modern hopfield models: Transformercompatible dense associative memories as spherical codes. NeurIPS , 2024. URL abs/2410.23126 .
18
+
19
+ [p. 11 | section: References | type: Text]
20
+ Jaegle, A., Gimeno, F., Brock, A., Vinyals, O., Zisserman, A., and Carreira, J. Perceiver: General perception with iterative attention. In ICML, 2021. URL https://arxiv.org/abs/2103.03206 .
21
+
22
+ [p. 11 | section: References | type: ListGroup]
23
+ Katharopoulos, A., Vyas, A., Pappas, N., and Fleuret, F. Transformers are rnns: Fast autoregressive transformers with linear attention. In ICML , 2020. URL https: //arxiv.org/abs/2006.16236 . Kitaev, N., Kaiser, Ł., and Levskaya, A. Reformer: The efficient transformer. ICLR , 2020. URL https:// arxiv.org/abs/2001.04451 . Kohonen, T. The self-organizing map. Proceedings of the IEEE , 2002. URL org/document/58325 . Krause, U. et al. A discrete nonlinear and nonautonomous model of consensus formation. Com munications in difference equations , 2000:227–236, 2000. URL publication/329027305 . Krizhevsky, A., Hinton, G., et al. Learning multiple layers of features from tiny images. 2009. URL learning-features-2009-TR.pdf . Kuramoto, Y. Self-entrainment of a population of coupled non-linear oscillators. In Interna tional symposium on mathematical problems in theoretical physics: January 23–29, 1975, ky oto university, kyoto/Japan , pp. 420–422. Springer, 2005. URL org/CorpusID:123276626 . LeCun, Y., Bottou, L., Bengio, Y., and Haffner, P. Gradient-based learning applied to document recognition. Proceedings of the IEEE , 86(11):2278–2324, 2002. URL pdf/lecun-01a.pdf . Lieber, O., Lenz, B., Bata, H., Cohen, G., Osin, J., Dalmedigos, I., Safahi, E., Meirom, S., Belinkov, Y., Shalev-Shwartz, S., et al. Jamba: A hybrid transformer-mamba language model. arXiv preprint arXiv:2403.19887 , 2024. URL . Liu, A., Mei, A., Lin, B., Xue, B., Wang, B., Xu, B., Wu, B., Zhang, B., Lin, C., Dong, C., et al. Deepseek-v3. 2: Pushing the frontier of open large language models. arXiv preprint arXiv:2512.02556 , 2025. URL https: //arxiv.org/abs/2512.02556 . Liu, Y., Tian, Y., Zhao, Y., Yu, H., Xie, L., Wang, Y., Ye, Q., Jiao, J., and Liu, Y. Vmamba: Visual state space model. NeurIPS , 2024. URL 2401.10166 .
24
+
25
+ [p. 11 | section: References | type: ListGroup]
26
+ Liu, Z., Lin, Y., Cao, Y., Hu, H., Wei, Y., Zhang, Z., Lin, S., and Guo, B. Swin transformer: Hierarchical vision transformer using shifted windows. In ICCV , 2021. URL . Longpre, S., Hou, L., Vu, T., Webson, A., Chung, H. W., Tay, Y., Zhou, D., Le, Q. V., Zoph, B., Wei, J., et al. The flan collection: Designing data and methods for effective instruction tuning. In ICML , 2023. URL https:// arxiv.org/abs/2301.13688 . Lorenz, J. Continuous opinion dynamics under bounded confidence: A survey. International Journal of Modern Physics C , 18(12):1819–1838, 2007. URL https:// arxiv.org/abs/0707.1762 . Munir, M., Zhang, A., and Marculescu, R. Vcmamba: Bridging convolutions with multi-directional mamba for efficient visual representation. In ICCV , 2025. URL . Nie, Y., Williams, A., Dinan, E., Bansal, M., Weston, J., and Kiela, D. Adversarial nli: A new benchmark for natural language understanding. In ACL , 2020. URL https: //aclanthology.org/2020.acl-main.441/ . Parmar, N., Vaswani, A., Uszkoreit, J., Kaiser, L., Shazeer, N., Ku, A., and Tran, D. Image transformer. In ICML , 2018. URL v80/parmar18a.html . Peebles, W. and Xie, S. Scalable diffusion models with transformers. In ICCV , 2023. URL org/abs/2212.09748 . Qiu, Z., Wang, Z., Zheng, B., Huang, Z., Wen, K., Yang, S., Men, R., Yu, L., Huang, F., Huang, S., et al. Gated attention for large language models: Non-linearity, sparsity, and attention-sink-free. NeurIPS , 2025. URL . Ramsauer, H., Schafl, B., Lehner, J., Seidl, P., Widrich, ¨ M., Adler, T., Gruber, L., Holzleitner, M., Pavlovic,´ M., Sandve, G. K., et al. Hopfield networks is all you need. ICLR , 2021. URL abs/2008.02217 . Rao, Y., Zhao, W., Liu, B., Lu, J., Zhou, J., and Hsieh, C.-J. Dynamicvit: Efficient vision transformers with dynamic token sparsification. NeurIPS , 2021. URL https:// arxiv.org/abs/2106.02034 . Ren, R., Ouyang, S., Tang, H., and Liu, Y. Transformers as intrinsic optimizers: Forward inference through the energy principle. arXiv preprint arXiv:2511.00907 , 2025. URL .
27
+
28
+ [p. 12 | section: References | type: Text]
29
+ Rigollet, P. The mean-field dynamics of transformers. arXiv preprint arXiv:2512.01868, 2025. URL https://arxiv.org/abs/2512.01868 .
30
+
31
+ [p. 12 | section: References | type: ListGroup]
32
+ Rumelhart, D. E. and Zipser, D. Feature discovery by competitive learning. Cognitive science, 1985. URL https: // article/abs/pii/S0364021385800100 . Touvron, H., Cord, M., Douze, M., Massa, F., Sablayrolles, A., and Jégou, H. Training data-efficient image transformers & distillation through attention. In ICML, 2021a. URL . Touvron, H., Cord, M., Sablayrolles, A., Synnaeve, G., and Jégou, H. Going deeper with image transformers. In ICCV, 2021b. URL 2103.17239 . Touvron, H., Cord, M., and Jégou, H. Deit iii: Revenge of the vit. In ECCV, 2022. URL abs/2204.07118 . Tsai, Y.-H. H., Bai, S., Yamada, M., Morency, L.-P., and Salakhutdinov, R. Transformer dissection: An unified understanding for transformer's attention via the lens of kernel. In EMNLP, 2019. URL org/abs/1908.11775 . Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., Kaiser, Ł., and Polosukhin, I. Attention is all you need. NeurIPS, 2017. URL org/abs/1706.03762 . Wang, C., Li, Q., E, W., and Chazelle, B. Noisy Hegselmann–Krause systems: Phase transition and the 2r-conjecture. Journal of Statistical Physics, 166(5):1209–1225, 2017. URL . Wang, S., Li, B. Z., Khabsa, M., Fang, H., and Ma, H. Linformer: Self-attention with linear complexity. arXiv preprint arXiv:2006.04768, 2020. URL https://arxiv.org/abs/2006.04768 . Wang, W., Xie, E., Li, X., Fan, D.-P., Song, K., Liang, D., Lu, T., Luo, P., and Shao, L. Pyramid vision transformer: A versatile backbone for dense prediction without convolutions. In ICCV, 2021. URL https://arxiv.org/abs/2102.12122 . Wang, Y., Ma, X., Zhang, G., Ni, Y., Chandra, A., Guo, S., Ren, W., Arulraj, A., He, X., Jiang, Z., et al. Mmlupro: A more robust and challenging multi-task language understanding benchmark. Advances in Neural Information Processing Systems, 2024. URL https://arxiv.org/abs/2406.01574 .
33
+
34
+ [p. 12 | section: References | type: ListGroup]
35
+ Wang, Z., Xia, X., Chen, R., Yu, D., Wang, C., Gong, M., and Liu, T. Lavin-dit: Large vision diffusion transformer. In CVPR , 2025. URL 2411.11505 . Williams, A., Nangia, N., and Bowman, S. A broadcoverage challenge corpus for sentence understanding through inference. In NAACL , 2018. URL https: //arxiv.org/abs/1704.05426 . Xiao, G., Tian, Y., Chen, B., Han, S., and Lewis, M. Efficient streaming language models with attention sinks. ICLR , 2024. URL 2309.17453 . Xiao, H., Rasul, K., and Vollgraf, R. Fashion-mnist: a novel image dataset for benchmarking machine learning algorithms. arXiv preprint arXiv:1708.07747 , 2017. URL . Yang, A., Yang, B., Hui, B., et al. Qwen2 technical report. arXiv preprint arXiv:2407.10671 , 2024a. URL https: //arxiv.org/abs/2407.10671 . Yang, C., Chen, Z., Espinosa, M., Ericsson, L., Wang, Z., Liu, J., and Crowley, E. J. Plainmamba: Improving nonhierarchical mamba in visual recognition. CoRR , 2024b. URL . Yang, Y., Wipf, D. P., et al. Transformers from an optimization perspective. NeurIPS , 2022. URL https: //arxiv.org/abs/2205.13891 . Yu, J., Xu, Y., Koh, J. Y., Luong, T., Baid, G., Wang, Z., Vasudevan, V., Ku, A., et al. Scaling autoregressive models for content-rich text-to-image generation. arXiv preprint arXiv:2206.10789 , 2022. URL https: //arxiv.org/abs/2206.10789 . Yuan, L., Chen, Y., Wang, T., Yu, W., Shi, Y., Jiang, Z.-H., Tay, F. E., Feng, J., and Yan, S. Tokens-to-token vit: Training vision transformers from scratch on imagenet. In ICCV , 2021. URL 2101.11986 . Zhou, J., Lu, T., Mishra, S., Brahma, S., Basu, S., Luan, Y., Zhou, D., and Hou, L. Instruction-following evaluation for large language models. CoRR , 2023. URL https: //arxiv.org/abs/2311.07911 . Zhu, L., Liao, B., Zhang, Q., Wang, X., Liu, W., and Wang, X. Vision mamba: Efficient visual representation learning with bidirectional state space model. ICML , 2024. URL .
icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/sanitization_report.json ADDED
@@ -0,0 +1,61 @@
1
+ {
2
+ "paper_id": "4c97921d-90ed-40e8-a5e2-c99a0f2081e7",
3
+ "pipeline": "Paper2Markdown-V3",
4
+ "ok": true,
5
+ "page_count": 28,
6
+ "chunk_count": 286,
7
+ "main_body_chunk_count": 118,
8
+ "appendix_chunk_count": 156,
9
+ "reference_chunk_count": 12,
10
+ "model_text_chars": 44118,
11
+ "raw_markdown_chars": 133833,
12
+ "sanitized_chars": 116859,
13
+ "page_provenance": {
14
+ "min_page": 1,
15
+ "max_page": 28,
16
+ "invalid_count": 0
17
+ },
18
+ "marker_block_type_counts": {
19
+ "Caption": 6,
20
+ "Code": 1,
21
+ "Equation": 40,
22
+ "FigureGroup": 9,
23
+ "Footnote": 2,
24
+ "ListGroup": 10,
25
+ "PageFooter": 29,
26
+ "PageHeader": 53,
27
+ "PictureGroup": 2,
28
+ "SectionHeader": 69,
29
+ "Table": 5,
30
+ "TableGroup": 12,
31
+ "TableOfContents": 1,
32
+ "Text": 643
33
+ },
34
+ "asset_count_raw": 11,
35
+ "asset_count_model_kept": 11,
36
+ "asset_count_rejected": 0,
37
+ "asset_reject_reasons": {
38
+ "kept": 11
39
+ },
40
+ "artifact_leak_audit": {
41
+ "ok": true,
42
+ "hits": {
43
+ "Anonymous Authors": [],
44
+ "ACKNOWLEDGMENT": [],
45
+ "OpenReview": [],
46
+ "\"accept_label\"": [],
47
+ "\"decision\"": [],
48
+ "\"decision_tier\"": [],
49
+ "\"source_status\"": [],
50
+ "Meta-review": [],
51
+ "Official Review": [],
52
+ "official_reviews": [],
53
+ "meta_reviews": [],
54
+ "suggested_verdict_score": []
55
+ },
56
+ "artifact_count": 2
57
+ },
58
+ "default_model_input": "model_text_v3.txt",
59
+ "appendix_input": "appendix_text_v3.txt",
60
+ "reference_input": "reference_text_v3.txt"
61
+ }
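+ This report duplicates the `paper2markdown_v3` object embedded in parse_report.json above; a quick consistency check across the two files (the two records do agree for this paper):

```python
import json

with open("icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/parse_report.json") as f:
    parse_report = json.load(f)
with open("icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/sanitization_report.json") as f:
    sanitization_report = json.load(f)

# The nested summary should match the standalone sanitization report.
assert parse_report["paper2markdown_v3"] == sanitization_report
```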
icml26/4c97921d-90ed-40e8-a5e2-c99a0f2081e7/sanitized_v3.txt ADDED
The diff for this file is too large to render. See raw diff