Add Batch fda7a2d5-4c7b-431e-81da-63a102504a46
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- 112asynergisticsparseandlowrankcompressionmethodforlargelanguagemodels/5bfae17e-3bc5-4648-88a1-4e45450a8139_content_list.json +3 -0
- 112asynergisticsparseandlowrankcompressionmethodforlargelanguagemodels/5bfae17e-3bc5-4648-88a1-4e45450a8139_model.json +3 -0
- 112asynergisticsparseandlowrankcompressionmethodforlargelanguagemodels/5bfae17e-3bc5-4648-88a1-4e45450a8139_origin.pdf +3 -0
- 112asynergisticsparseandlowrankcompressionmethodforlargelanguagemodels/full.md +479 -0
- 112asynergisticsparseandlowrankcompressionmethodforlargelanguagemodels/images.zip +3 -0
- 112asynergisticsparseandlowrankcompressionmethodforlargelanguagemodels/layout.json +3 -0
- 2columns1rowarussianbenchmarkfortextualandmultimodaltableunderstandingandreasoning/19f169d2-5a3f-44da-a763-0066f91f1d99_content_list.json +3 -0
- 2columns1rowarussianbenchmarkfortextualandmultimodaltableunderstandingandreasoning/19f169d2-5a3f-44da-a763-0066f91f1d99_model.json +3 -0
- 2columns1rowarussianbenchmarkfortextualandmultimodaltableunderstandingandreasoning/19f169d2-5a3f-44da-a763-0066f91f1d99_origin.pdf +3 -0
- 2columns1rowarussianbenchmarkfortextualandmultimodaltableunderstandingandreasoning/full.md +0 -0
- 2columns1rowarussianbenchmarkfortextualandmultimodaltableunderstandingandreasoning/images.zip +3 -0
- 2columns1rowarussianbenchmarkfortextualandmultimodaltableunderstandingandreasoning/layout.json +3 -0
- 3dawarevisionlanguagemodelsfinetuningwithgeometricdistillation/80f389b6-aa5b-482f-a032-c21a0b53f78c_content_list.json +3 -0
- 3dawarevisionlanguagemodelsfinetuningwithgeometricdistillation/80f389b6-aa5b-482f-a032-c21a0b53f78c_model.json +3 -0
- 3dawarevisionlanguagemodelsfinetuningwithgeometricdistillation/80f389b6-aa5b-482f-a032-c21a0b53f78c_origin.pdf +3 -0
- 3dawarevisionlanguagemodelsfinetuningwithgeometricdistillation/full.md +611 -0
- 3dawarevisionlanguagemodelsfinetuningwithgeometricdistillation/images.zip +3 -0
- 3dawarevisionlanguagemodelsfinetuningwithgeometricdistillation/layout.json +3 -0
- abenchmarkforhindiverbargumentstructurealternations/2c2d34ad-8bf4-4c30-a41d-2a44809ffb5f_content_list.json +3 -0
- abenchmarkforhindiverbargumentstructurealternations/2c2d34ad-8bf4-4c30-a41d-2a44809ffb5f_model.json +3 -0
- abenchmarkforhindiverbargumentstructurealternations/2c2d34ad-8bf4-4c30-a41d-2a44809ffb5f_origin.pdf +3 -0
- abenchmarkforhindiverbargumentstructurealternations/full.md +235 -0
- abenchmarkforhindiverbargumentstructurealternations/images.zip +3 -0
- abenchmarkforhindiverbargumentstructurealternations/layout.json +3 -0
- abenchmarkfortranslationsacrossstylesandlanguagevariants/c2e36fb1-0ec3-4187-926d-19b581e20525_content_list.json +3 -0
- abenchmarkfortranslationsacrossstylesandlanguagevariants/c2e36fb1-0ec3-4187-926d-19b581e20525_model.json +3 -0
- abenchmarkfortranslationsacrossstylesandlanguagevariants/c2e36fb1-0ec3-4187-926d-19b581e20525_origin.pdf +3 -0
- abenchmarkfortranslationsacrossstylesandlanguagevariants/full.md +401 -0
- abenchmarkfortranslationsacrossstylesandlanguagevariants/images.zip +3 -0
- abenchmarkfortranslationsacrossstylesandlanguagevariants/layout.json +3 -0
- acategorytheoreticapproachtoneuralsymbolictaskplanningwithbidirectionalsearch/be9000e2-57eb-4fbd-ba11-37716b55c35b_content_list.json +3 -0
- acategorytheoreticapproachtoneuralsymbolictaskplanningwithbidirectionalsearch/be9000e2-57eb-4fbd-ba11-37716b55c35b_model.json +3 -0
- acategorytheoreticapproachtoneuralsymbolictaskplanningwithbidirectionalsearch/be9000e2-57eb-4fbd-ba11-37716b55c35b_origin.pdf +3 -0
- acategorytheoreticapproachtoneuralsymbolictaskplanningwithbidirectionalsearch/full.md +950 -0
- acategorytheoreticapproachtoneuralsymbolictaskplanningwithbidirectionalsearch/images.zip +3 -0
- acategorytheoreticapproachtoneuralsymbolictaskplanningwithbidirectionalsearch/layout.json +3 -0
- acceleratingllmreasoningviaearlyrejectionwithpartialrewardmodeling/152df871-1fb9-468c-a897-efb303997b08_content_list.json +3 -0
- acceleratingllmreasoningviaearlyrejectionwithpartialrewardmodeling/152df871-1fb9-468c-a897-efb303997b08_model.json +3 -0
- acceleratingllmreasoningviaearlyrejectionwithpartialrewardmodeling/152df871-1fb9-468c-a897-efb303997b08_origin.pdf +3 -0
- acceleratingllmreasoningviaearlyrejectionwithpartialrewardmodeling/full.md +337 -0
- acceleratingllmreasoningviaearlyrejectionwithpartialrewardmodeling/images.zip +3 -0
- acceleratingllmreasoningviaearlyrejectionwithpartialrewardmodeling/layout.json +3 -0
- acceptordenyevaluatingllmfairnessandperformanceinloanapprovalacrosstabletotextserializationapproaches/b3b5090c-0c68-4816-a5b9-b46a98c166d2_content_list.json +3 -0
- acceptordenyevaluatingllmfairnessandperformanceinloanapprovalacrosstabletotextserializationapproaches/b3b5090c-0c68-4816-a5b9-b46a98c166d2_model.json +3 -0
- acceptordenyevaluatingllmfairnessandperformanceinloanapprovalacrosstabletotextserializationapproaches/b3b5090c-0c68-4816-a5b9-b46a98c166d2_origin.pdf +3 -0
- acceptordenyevaluatingllmfairnessandperformanceinloanapprovalacrosstabletotextserializationapproaches/full.md +622 -0
- acceptordenyevaluatingllmfairnessandperformanceinloanapprovalacrosstabletotextserializationapproaches/images.zip +3 -0
- acceptordenyevaluatingllmfairnessandperformanceinloanapprovalacrosstabletotextserializationapproaches/layout.json +3 -0
- acebenchacomprehensiveevaluationofllmtoolusage/3bea8f8a-7404-4fd2-8b58-05b614620c68_content_list.json +3 -0
- acebenchacomprehensiveevaluationofllmtoolusage/3bea8f8a-7404-4fd2-8b58-05b614620c68_model.json +3 -0
112asynergisticsparseandlowrankcompressionmethodforlargelanguagemodels/5bfae17e-3bc5-4648-88a1-4e45450a8139_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:392be3c1b5e5bd6fd8f099ecedf9f0ee09d5c2c955a067095dd4daa06024e6f0
|
| 3 |
+
size 112757
|
112asynergisticsparseandlowrankcompressionmethodforlargelanguagemodels/5bfae17e-3bc5-4648-88a1-4e45450a8139_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:015e3c00786f9056efdd90c515181e21dd402cf07ad2b5d8e1372f73548f1bfe
|
| 3 |
+
size 135613
|
112asynergisticsparseandlowrankcompressionmethodforlargelanguagemodels/5bfae17e-3bc5-4648-88a1-4e45450a8139_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:680850d20263e81f5a7b019208888a5ab76e07cdc273b28f2959a40536a6cf59
|
| 3 |
+
size 1499266
|
112asynergisticsparseandlowrankcompressionmethodforlargelanguagemodels/full.md
ADDED
|
@@ -0,0 +1,479 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# $1 + 1 > 2$ : A Synergistic Sparse and Low-Rank Compression Method for Large Language Models
|
| 2 |
+
|
| 3 |
+
Zeliang Zong $^{1*}$ , Kai Zhang $^{1*}$ , Zheyang Li $^{1}$ , Wenming Tan $^{1}$ , Ye Ren $^{1\dagger}$ , Yiyan Zhai $^{1}$ , Jilin Hu $^{2}$
|
| 4 |
+
|
| 5 |
+
$^{1}$ Hikvision Research Institute
|
| 6 |
+
|
| 7 |
+
$^{2}$ School of Data Science and Engineering, East China Normal University {zongzeliang, zhangkai, lizheyang, tanwenming, renye}@hikvision.com zhaiyiyan@163.com, jlhu@dase.ecnu.edu.cn
|
| 8 |
+
|
| 9 |
+
# Abstract
|
| 10 |
+
|
| 11 |
+
Large Language Models (LLMs) have demonstrated remarkable proficiency in language comprehension and generation; however, their widespread adoption is constrained by substantial bandwidth and computational demands. While pruning and low-rank approximation have each demonstrated promising performance individually, their synergy for LLMs remains underexplored. We introduce Synergistic Sparse and Low-Rank Compression (SSLC) methods for LLMs, which leverages the strengths of both techniques: low-rank approximation compresses the model by retaining its essential structure with minimal information loss, whereas sparse optimization eliminates non-essential weights, preserving those crucial for generalization. Based on theoretical analysis, we first formulate the low-rank approximation and sparse optimization as a unified problem and solve it by iterative optimization algorithm. Experiments on LLaMA and Qwen2.5 models (7B-70B) show that SSLC, without any additional training steps, consistently surpasses standalone methods, achieving state-of-the-arts results. Notably, SSLC compresses Qwen2.5 by $50\%$ with no performance drop and achieves at least $1.63 \times$ speedup, offering a practical solution for efficient LLM deployment.
|
| 12 |
+
|
| 13 |
+
# 1 Introduction
|
| 14 |
+
|
| 15 |
+
In the research field of natural language processing (NLP), large language models (LLMs) (Zhang et al., 2022; Scao et al., 2022; Touvron et al., 2023a), as an emerging technology, have achieved remarkable success in handling complex linguistic tasks and have significantly influenced the evolutionary direction of NLP (Bubeck et al., 2023; Wei et al., 2022; Achiam et al., 2023). However, their vast parameters require extensive computational resources and substantial memory band
|
| 16 |
+
|
| 17 |
+

|
| 18 |
+
(a) The Salience of the raw weight W.
|
| 19 |
+
(b) The Salience of the residual $\Delta$ after low-rank approximation (where $\Delta = W - L$ ).
|
| 20 |
+
Figure 1: Weight salience (Huang et al., 2024) in LLaMA2-7B before and after synergistic low-rank approximation. Compared to Figure (a), Figure (b) not only shows a substantial reduction in extreme high values, but also reveals a decrease in prunable low values, thus mitigating the performance degradation caused by pruning.
|
| 21 |
+
|
| 22 |
+

|
| 23 |
+
|
| 24 |
+
width, thereby constraining their deployment in practical applications.
|
| 25 |
+
|
| 26 |
+
To address the memory consumption issues of LLMs, various post-training compression (PTC) techniques that do not require retraining have been explored. These include model quantization (Dettmers et al., 2022; Xiao et al., 2023; Frantar et al., 2023; Liu et al., 2025), pruning (Frantar and Alistarh, 2023; Sun et al., 2023; Ma et al., 2023) and low-rank approximation (Hsu et al., 2022; Yuan et al., 2023; Wang et al., 2024). Pruning simplifies the network by removing non-critical weights or structures, while low-rank approximation methods reduces the model's complexity by decomposing the weight matrix into two orthogonal low-dimensional matrices.
|
| 27 |
+
|
| 28 |
+
Recent studies (Frantar and Alistarh, 2023; Sun et al., 2023; Zhang et al., 2024b; Dong et al., 2024; Meng et al., 2024) have formulated LLM pruning as a layer-wise reconstruction problem and pruned redundant neurons using a metric derived from the second Taylor approximation of reconstruction error (Hassibi et al., 1993). This metric, referred to
|
| 29 |
+
|
| 30 |
+
as weight salience (Huang et al., 2024) and detailed in the preliminaries section, evaluates the quadratic error associated with changes in matrix elements, which directly correlates with model performance: higher salience indicate a greater impact on performance. As illustrated in Figure 1(a), the original weight salience, approximated from the calibration dataset that is conventionally employed by prevailing methodologies (Frantar and Alistarh, 2023; Sun et al., 2023), exhibits a discrete distribution of outliers against a consistent pattern of moderate values. Unfortunately, existing pruning approaches retain neurons with high salience from a discrete perspective, failing to maximize the extraction of the coherent part in salience space. In contrast, low-rank approximation (LRA) methods, such as Singular Value Decomposition (SVD) (Hsu et al., 2022; Yuan et al., 2023; Wang et al., 2024), are particularly suitable for compressing the coherent components within the salience and extracting a set of orthogonal bases that form a subspace, maximizing the preservation of the energy of the original space. However, these methods for LLMs still lead to severe performance degradation at a high compression ratio (Yuan et al., 2023; Wang et al., 2024). This degradation arises because low-rank approximation effectively preserves the weight-sharing common basis, but fails to retain the full-rank, noncoherent parts that are crucial for maintaining the model's knowledge and performance.
|
| 31 |
+
|
| 32 |
+
Given these insights, there is an urgent need to combine sparsification and low-rank approximation techniques. This integration can enhance compression efficiency while ensuring that critical information is preserved. Figure 1 demonstrates that the outliers in salience space are effectively extracted after low-rank approximation, and this phenomenon is quantitatively analyzed in Section 5.1. Consequently, with the same compression rate, the synergistic method, by truncating at a smaller salience threshold and increasing the proportion of neurons with less salience, leads to fewer reconstruction errors and thus less performance degradation.
|
| 33 |
+
|
| 34 |
+
Inspired by these experimental observations, we propose the Synergistic Sparse and Low-Rank Compression (SSLC) method. SSLC decouples the coherent and non-coherent parts of the neuron, allowing the model to benefit from both sparse and low-rank approximation. The low-rank approximation uses orthogonal bases to maximize the extraction of energy from the salience space,
|
| 35 |
+
|
| 36 |
+
while the sparse part preserves key incoherent neurons to maintain the network's essential expressive power. By synergizing these two techniques, SSLC ensures a dense, expressive layer with the low-rank part, mitigating the loss of expressive capacity caused by pure pruning/sparsification. Furthermore, we model the joint compression problem as a unified data-aware mathematical optimization objective, considering the effect of low-rank and sparse components on reconstruction loss. Then, a synergistic optimization algorithm has been proposed to solve the problem. Consequently, our method possesses the orthogonality property of low-rank approximation and the full-rank property of sparsification mathematically, ensuring effective preservation of the model's expressive capacity while reducing redundant information. Another advantage, based on the assumption that weight changes during model adaptation exhibit a low "intrinsic rank" (Aghajanyan et al., 2020; Hu et al., 2021), the low-rank component can effectively adapts to downstream tasks. Through comprehensive experiments on the LLaMA (Touvron et al., 2023a,b; Grattafori et al., 2024) and Qwen2.5 (Yang et al., 2025) models with 7B to 70B parameters, the results demonstrate that SSLC achieves state-of-the-art performance.
|
| 37 |
+
|
| 38 |
+
The main contributions are summarized as follows:
|
| 39 |
+
|
| 40 |
+
- We propose SSLC, a novel joint compression algorithm that integrates low-rank approximation with pruning techniques. Mathematically, our method demonstrates the benefits of both orthogonality from low-rank approximation and full-rank preservation via sparse reconstruction.
|
| 41 |
+
- Extensive experiments have shown that SSLC without fine-tuning achieves state-of-the-art performance on various models and datasets. In addition, SSLC provides an optimized initialization for subsequent low-rank part fine-tuning. Specifically, SSLC yields a $1.63 \times$ speedup on Qwen2.5-7B (within about 3 GPU hours of pruning and fine-tuning) without performance drop across various zero-shot tasks.
|
| 42 |
+
|
| 43 |
+
# 2 Related Works
|
| 44 |
+
|
| 45 |
+
# 2.1 Large Language Models Pruning
|
| 46 |
+
|
| 47 |
+
SparseGPT (Frantar and Alistarh, 2023) pioneers LLM pruning using a metric derived from the
|
| 48 |
+
|
| 49 |
+
second-order term in the Taylor expansion of the reconstruction error, employing classical Optimal Brain Surgeon (OBS) techniques (Hassibi and Stork, 1992) to iteratively prune the network and update residual weights. Wanda (Sun et al., 2023) simplifies the Hessian matrix inversion process, focusing on pruning the smallest magnitudes multiplied by the corresponding input activation. RIA (Zhang et al., 2024b) introduces the Relative Importance and Activation metric and channel swapping to maximize the retention of salience under N:M sparsity constraints. DSNoT (Zhang et al., 2024c) iteratively prunes and grows weights to minimize reconstruction loss without the computational expense of back-propagation or weight updates. ALPS (Meng et al., 2024) utilizes an ADMM-based optimization framework to alternately optimize remaining weights through iterative closed-form updates, minimizing layer-wise reconstruction error while satisfying sparsity constraints. Pruner-Zero (Dong et al., 2024), automatically generate symbolic pruning metrics, exploring correlations with post-pruning performance. These methods focus on model compression purely from a pruning perspective. In contrast, our approach emphasizes the synergy between pruning and low-rank approximation, effectively minimizing the impact of pruning on reconstruction loss.
|
| 50 |
+
|
| 51 |
+
# 2.2 Sparse and Low-Rank Integration
|
| 52 |
+
|
| 53 |
+
Early joint decomposition research, including Robust Principal Component Analysis (RPCA) (Wright et al., 2009) and GoDec (Zhou and Tao, 2011), effectively decoupled low-rank structures and sparse noise from data matrices. LoSparse (Li et al., 2023b) decomposes model weights into low-rank and sparse components via iterative pruning, yet remains impractical for LLMs due to full-network training demands. Techniques like LoRAshear (Chen et al., 2023) and LoRAPrune (Zhang et al., 2024a) integrate pruning with LoRA, performing parameter pruning based on gradient information from LoRA, primarily designed for structured pruning, but still face challenges for severe performance degradation at a high compression ratio. Meanwhile, LoSA (Huang et al., 2025) further enhances compressed LLM performance by unifying LoRA with sparsity optimization. Additionally, LoRaP (Li et al., 2024) applies separate low-rank estimation and pruning to MHA and MLP layers independently; however, it lacks joint optimization and requires additional
|
| 54 |
+
|
| 55 |
+
LoRA branch fine-tuning during knowledge recovery, limiting its efficiency. In contrast to these paradigms that conditionally adapt Low-rank either for gradient approximation or fine-tuning, our SSLC framework pioneers a unified matrix-level decomposition where both low-rank and sparse components are jointly optimized via second-order reconstruction loss, enabling data-aware compression and direct mining of latent low-rank representations to drive efficient compression.
|
| 56 |
+
|
| 57 |
+
# 3 Preliminaries
|
| 58 |
+
|
| 59 |
+
Current post-training compression methods focus on compressing pre-trained weights without retraining, ensuring model performance by minimizing the output discrepancy between the compressed and original models. Due to the computational infeasibility of global minimization, this task is typically framed as a layer-wise reconstruction problem for LLMs. Let $W \in \mathbb{R}^{(m,n)}$ and $W' \in \mathbb{R}^{(m,n)}$ denote the original and compressed weights of a given layer, where $m$ and $n$ represent the number of output and input channels, respectively. The input activation is represented as $X \in \mathbb{R}^{(n,N \times L)}$ , where $N$ is the number of calibration samples and $L$ is the sequence length respectively. This problem can be expressed as follows:
|
| 60 |
+
|
| 61 |
+
$$
|
| 62 |
+
\underset {W ^ {\prime}} {\arg \min } \left\| \left(W - W ^ {\prime}\right) X \right\| _ {F} \tag {1}
|
| 63 |
+
$$
|
| 64 |
+
|
| 65 |
+
where $\| \cdot \| _F$ is the Frobenius norm. To prune or quantize weights with minimal impact on the optimization objective, rigorous mathematical derivations from works such as Optimal Brain Surgeon (OBS) (Hassibi and Stork, 1992) and Optimal Brain Quantization (OBQ) (Frantar and Alistarh, 2022), as well as applications like SparseGPT (Frantar and Alistarh, 2023) and GPTQ (Frantar et al., 2023) on LLMs, suggest that the change of the element at $(i,j)$ induces a quadratic error to the cost function Eq. 1. Specifically, the error $\delta_{i,j}$ is approximated by: $\frac{\Delta W_{ij}^2}{[H^{-1}]_{j,j}}$ . The Hessian matrix is approximated as $H\approx X^T X$ for a weight matrix. For instance, in quantization, $\Delta w_{ij} = w_{ij} - \text{quant}(w_{ij})$ ; in pruning, $\Delta w_{ij} = w_{ij} - 0$ . Here, $[H^{-1}]_{j,j}$ denotes the $j$ -th diagonal entry of the inverse Hessian matrix.
|
| 66 |
+
|
| 67 |
+
# 4 Method
|
| 68 |
+
|
| 69 |
+
This section presents our proposed method, Synergistic Sparse and Low-Rank Compression (SSLC)
|
| 70 |
+
|
| 71 |
+

|
| 72 |
+
Figure 2: The pipeline of our proposed SSLC method involves the following steps: Initially, the SVD step performs a low-rank approximation on the scaled matrix. Subsequently, the pruning step converts the dense matrix into a sparse one. In essence, SSLC executes $T$ -step SVD and pruning iterations on the scaled matrix, decomposing the original weight matrix $W$ into a sparse matrix $S_{t}$ and low-dimensional matrices $V_{t}$ and $U_{t}$ . After the final iteration, the method multiplies $V_{t}$ and $S_{t}$ by the scaling matrix $\| X\|_2^{-1}$ , to revert to the original matrix state before scaling.
|
| 73 |
+
|
| 74 |
+
for LLMs, as illustrated in Figure 2. The method comprises three principal sections: the proposed low-rank aware optimization objective, the synergistic optimization algorithm, and the process of low-rank fine-tuning recovery.
|
| 75 |
+
|
| 76 |
+
# 4.1 Joint Low-rank and Sparse Compression
|
| 77 |
+
|
| 78 |
+
Low-rank decomposition and pruning methods based solely on weight magnitudes have been shown empirically ineffective (Frantar and Alistarh, 2023; Yuan et al., 2023). Unlike existing methods (Li et al., 2023a) that directly decompose a matrix $W$ , our method employs a data-aware synergistic optimization strategy. We decompose the original outputs into a low-rank part $L \in \mathbb{R}^{(m,n)}$ with rank $r$ and a sparse part $S \in \mathbb{R}^{(m,n)}$ with sparsity $k\%$ , minimizing the following objective:
|
| 79 |
+
|
| 80 |
+
$$
|
| 81 |
+
\min _ {L, S} \| (W - L - S) X \| _ {F}
|
| 82 |
+
$$
|
| 83 |
+
|
| 84 |
+
s.t. $\operatorname{rank}(L) = r$ , sparsity $(S) = k\%$
|
| 85 |
+
|
| 86 |
+
(2)
|
| 87 |
+
|
| 88 |
+
The functions $\mathrm{rank}(\cdot)$ and sparsity $(\cdot)$ are used to obtain the rank and sparsity of a matrix, respectively. This optimization objective jointly accounts for the contributions of both low-rank and sparse components to output reconstruction loss. In contrast, prior approaches optimize only one aspect—either designing better pruning metrics or singular values mapped to the objective—while ignoring the synergistic benefits of combining both.
|
| 89 |
+
|
| 90 |
+
# 4.2 Synergistic Optimization Algorithm
|
| 91 |
+
|
| 92 |
+
Unlike RPCA (Wright et al., 2009) which decomposes data matrices into low-rank and sparse components based on pure mathematical objectives, SSLC introduces data-awareness through layerwise reconstruction error minimization, explicitly
|
| 93 |
+
|
| 94 |
+
aligning decomposition with LLM performance preservation. Decomposing a low-rank matrix and a sparse matrix simultaneously from Eq. 2 is an NP-hard problem. To facilitate the synergistic optimization, we break down the optimization problem into two manageable sub-problems, enabling efficient alternation between sparsification and singular value decomposition (SVD):
|
| 95 |
+
|
| 96 |
+
$$
|
| 97 |
+
\left\{ \begin{array}{l} S _ {t} = \underset {\text {sparsity} (S) = k \%} {\arg \min } \| \left(W - L _ {t} - S\right) X \| _ {F} \\ L _ {t} = \underset {\operatorname {rank} (L) = r} {\arg \min } \| \left(W - L - S _ {t - 1}\right) X \| _ {F} \end{array} \right. \tag{3}
|
| 98 |
+
$$
|
| 99 |
+
|
| 100 |
+
Here, $L_{t}$ and $S_{t}$ denote the low-rank and sparse matrices at the $t$ -th iteration step, respectively.
|
| 101 |
+
|
| 102 |
+
# 4.2.1 Sparsification
|
| 103 |
+
|
| 104 |
+
When solving for the sparse matrix in Eq. 3 at the $t$ -th iteration, the low-rank matrix $L_{t}$ is computed in advance, allowing us to sparsify the residual of the low-rank approximation $(R_{t}^{L} = W - L_{t})$ . Nevertheless, directly solving for the binary mask corresponding to the weight matrix of LLM using a differentiable approach is impractical due to the immense size of the solution space. Recently, Methods (Frantar and Alistarh, 2023; Sun et al., 2023; Zhang et al., 2024c) following OBD (LeCun et al., 1989) and OBS (Hassibi et al., 1993) has gained traction in the field of LLM pruning, which use calibration data to select the most salient weights and to minimize block reconstruction errors effectively. The salience $(\delta)$ of residual weights for pruning is approximated as follows:
|
| 105 |
+
|
| 106 |
+
$$
|
| 107 |
+
\delta_ {i j} = \left[ \left| R _ {t} ^ {L} \right| ^ {2} / \operatorname {d i a g} \left(\left(X ^ {T} X\right) ^ {- 1}\right) \right] _ {i j} \tag {4}
|
| 108 |
+
$$
|
| 109 |
+
|
| 110 |
+
$$
|
| 111 |
+
\underset {\text {approx.}} {\overset {\text {diagonal}} {=}} \left(\left| R _ {t} ^ {L} \right| \cdot \| X _ {j} \| _ {2}\right) _ {i j} ^ {2}
|
| 112 |
+
$$
|
| 113 |
+
|
| 114 |
+
Then, the residual matrix is pruned according to $\theta$ , which is the $k$ -th percentile of the salience values sorted in descending order.
|
| 115 |
+
|
| 116 |
+
$$
|
| 117 |
+
[ S _ {t} ] _ {i j} = \left\{ \begin{array}{c l} {[ R _ {t} ^ {S} ] _ {i j}} & {\text {if } \delta_ {i j} \geq \theta} \\ 0 & \text {otherwise} \end{array} \right. \tag {5}
|
| 118 |
+
$$
|
| 119 |
+
|
| 120 |
+
# 4.2.2 SVD
|
| 121 |
+
|
| 122 |
+
After obtaining the sparse matrix, the sparse residual $R_{t}^{S} = W - S_{t-1}$ can be calculated, and the SVD sub-problem becomes $L_{t} = \arg \min_{\mathrm{rank}(L) = r} \left\| (R_{t}^{S} - L)X \right\|_{F}$ . Although this sub-problem can be directly solved by means of closed-form solutions as presented in (Xiang et al., 2012; Saha et al., 2024), the computational burden of performing two full SVDs for large-scale matrices, such as those of dimensions $4096 \times 4096$ and $4096 \times 11008$ , during the iterative process is prohibitively high. Accordingly, by referring to Section 3 and Eq. 4, the impact of weight changes on the reconstruction loss following SVD compression can be approximated efficiently. To minimize this impact, we construct a matrix that multiplies $L_{t}'$ with rank $r$ by the inverse of $\|X\|_2$ as part of low-rank approximation. The optimization objective of this sub-problem can be approximated in the following form:
|
| 123 |
+
|
| 124 |
+
$$
|
| 125 |
+
\begin{array}{l} L _ {t} ^ {\prime} = \arg \min _ {L _ {t} ^ {\prime}} \sum \left(\left| R _ {t} ^ {S} - L _ {t} ^ {\prime} \cdot | | X | | _ {2} ^ {- 1} \right| \cdot \| X \| _ {2}\right) ^ {2} \\ = \arg \min _ {L _ {t} ^ {\prime}} \sum \left(\left| R _ {t} ^ {S} \cdot \| X \| _ {2} - L _ {t} ^ {\prime} \right|\right) ^ {2} \tag {6} \\ \end{array}
|
| 126 |
+
$$
|
| 127 |
+
|
| 128 |
+
Hence, to improve efficiency while maintaining performance, a randomized SVD approach is adopted (Zhou and Tao, 2011). After applying randomized SVD for $R_{t}^{S} \cdot \| X\|_{2}$ , we obtain $L_{t}^{\prime}$ . $L_{t}^{\prime}$ is represented as:
|
| 129 |
+
|
| 130 |
+
$$
|
| 131 |
+
\tilde {L} = R _ {t} ^ {S} \cdot \| X \| _ {2};
|
| 132 |
+
$$
|
| 133 |
+
|
| 134 |
+
$$
|
| 135 |
+
Y _ {1} = \tilde {L} A _ {1}, Y _ {2} = \tilde {L} ^ {T} A _ {2}; \tag {7}
|
| 136 |
+
$$
|
| 137 |
+
|
| 138 |
+
$$
|
| 139 |
+
L _ {t} ^ {\prime} = Y _ {1} \left(A _ {2} ^ {T} Y _ {1}\right) ^ {- 1} Y _ {2} ^ {T}
|
| 140 |
+
$$
|
| 141 |
+
|
| 142 |
+
We obtain $Y_{1}$ and $Y_{2}$ as the bilateral random projections (BRP) of matrix $\tilde{L}$ by applying random matrices $A_{1}$ and $A_{2}$ , where $A_{1} \in \mathbb{R}^{(n,r)}$
|
| 143 |
+
|
| 144 |
+
# Algorithm 1 SSLC Algorithm
|
| 145 |
+
|
| 146 |
+
Input: Pre-trained weight matrix $W$ with the top 1% significant values preserved
|
| 147 |
+
|
| 148 |
+
Parameter: Target rank $r$ , target sparsity $(k - 1)\%$ , sparse algorithm $\mathrm{Sparse}(\cdot)$ , alternating step $T$
|
| 149 |
+
|
| 150 |
+
Output: Sparse and low rank matrix $S_{t}, L_{t}$
|
| 151 |
+
|
| 152 |
+
1: Let $S_0 = 0$ .
|
| 153 |
+
2: for $t = 1$ to $T$ do
|
| 154 |
+
3: Obtain $L_{t} \gets \mathrm{SVD}(W - S_{t-1}, r)$ by Eq.7
|
| 155 |
+
4: Obtain $S_{t} \gets \text{Sparse}(W - L_{t}, (k - 1)\%)$ by Eq.4
|
| 156 |
+
|
| 157 |
+
5: $t = t + 1$
|
| 158 |
+
6: end for
|
| 159 |
+
7: return $S_{t}, L_{t}$
|
| 160 |
+
|
| 161 |
+
and $A_{2}\in \mathbb{R}^{(r,m)}$ . Consequently, the two sub-problems within Eq. 3 can be resolved efficiently as delineated below:
|
| 162 |
+
|
| 163 |
+
$$
|
| 164 |
+
\left\{ \begin{array}{r} {[ S _ {t} ] _ {i j} = \left\{ \begin{array}{c c} {[ R _ {t} ^ {S} ] _ {i j}} & {\text {if } \delta_ {i j} \geq \theta} \\ 0 & \text {otherwise} \end{array} \right.} \\ L _ {t} = L _ {t} ^ {\prime} \cdot \| X \| _ {2} ^ {- 1} = Y _ {1} \left(A _ {2} ^ {T} Y _ {1}\right) ^ {- 1} Y _ {2} ^ {T} \cdot \| X \| _ {2} ^ {- 1} \end{array} \right. \tag {8}
|
| 165 |
+
$$
|
| 166 |
+
|
| 167 |
+
# 4.2.3 Preserving Most Important Weights
|
| 168 |
+
|
| 169 |
+
Recognizing the importance of the top significant weights (Dettmers et al., 2023; Yuan et al., 2024; Huang et al., 2024), we preserve the top $1\%$ of weights with highest salience (Eq. 4) and exclude them from the synergistic decomposition process. To achieve an overall compression rate of $p\%$ , we allocate $(k - 1)\%$ to the sparse part and $r \times \frac{m + n}{m \times n}$ to the low-rank part, ensuring that the sum of these proportions and the top $1\%$ preserved parameters equals $p\%$ .
|
| 170 |
+
|
| 171 |
+
Optimizing each matrix independently allows for parallel execution, enhancing computational efficiency. Throughout the iteration process, we maintain the column norm $\|X\|_{2}$ of the input vectors constant, while updating the residual matrices $R_{t}^{S}$ and $R_{t}^{L}$ dynamically. The overall algorithmic flow is depicted in Algorithm 1.
|
| 172 |
+
|
| 173 |
+
# 4.3 Low-rank Fine-tuning Recovery
|
| 174 |
+
|
| 175 |
+
Instead of directly inserting LoRA side, we use the $U_{t}$ and $V_{t}$ matrices decomposed from $L_{t}$ for performance recovery. This approach maintains the sparse matrix $S_{t}$ frozen and updates only the $U_{t}$ and $V_{t}$ matrices during fine-tuning, as shown in
|
| 176 |
+
|
| 177 |
+
<table><tr><td rowspan="2">Task</td><td rowspan="2">Methods</td><td rowspan="2">Type</td><td colspan="5">LLaMA</td><td colspan="3">Qwen2.5</td></tr><tr><td>1-7B</td><td>2-7B</td><td>3-8B</td><td>1-13B</td><td>2-13B</td><td>3-70B</td><td>7B</td><td>14B</td></tr><tr><td rowspan="6">C4</td><td>Dense</td><td>-</td><td>7.34</td><td>7.26</td><td>9.54</td><td>6.70</td><td>6.73</td><td>7.17</td><td>11.86</td><td>10.35</td></tr><tr><td>SparseGPT</td><td>S</td><td>9.31</td><td>9.23</td><td>14.25</td><td>8.12</td><td>8.22</td><td>9.66</td><td>13.89</td><td>12.41</td></tr><tr><td>Wanda</td><td>S</td><td>9.30</td><td>9.24</td><td>14.87</td><td>8.13</td><td>8.30</td><td>9.96</td><td>14.24</td><td>12.40</td></tr><tr><td>DSnoT</td><td>S</td><td>9.13</td><td>9.11</td><td>14.58</td><td>8.06</td><td>8.13</td><td>9.92</td><td>14.19</td><td>12.23</td></tr><tr><td>SVD-LLM</td><td>LRA</td><td>127.25</td><td>161.27</td><td>413.74</td><td>53.41</td><td>87.20</td><td>154.19</td><td>379.64</td><td>307.18</td></tr><tr><td>Ours</td><td>S+LRA</td><td>8.91</td><td>8.87</td><td>13.90</td><td>7.91</td><td>8.02</td><td>9.39</td><td>13.59</td><td>12.02</td></tr><tr><td 
rowspan="6">Wiki2</td><td>Dense</td><td>-</td><td>5.68</td><td>5.47</td><td>6.24</td><td>5.09</td><td>4.88</td><td>2.86</td><td>6.85</td><td>5.29</td></tr><tr><td>SparseGPT</td><td>S</td><td>7.22</td><td>6.99</td><td>9.29</td><td>6.21</td><td>6.02</td><td>5.77</td><td>8.43</td><td>7.28</td></tr><tr><td>Wanda</td><td>S</td><td>7.24</td><td>6.92</td><td>9.65</td><td>6.15</td><td>5.97</td><td>5.82</td><td>8.62</td><td>7.32</td></tr><tr><td>DSnoT</td><td>S</td><td>7.15</td><td>6.84</td><td>9.52</td><td>6.09</td><td>5.87</td><td>5.79</td><td>8.58</td><td>7.23</td></tr><tr><td>SVD-LLM</td><td>LRA</td><td>24.52</td><td>27.82</td><td>42.63</td><td>13.71</td><td>15.76</td><td>12.65</td><td>38.64</td><td>26.13</td></tr><tr><td>Ours</td><td>S+LRA</td><td>6.92</td><td>6.61</td><td>8.95</td><td>5.96</td><td>5.79</td><td>5.36</td><td>8.36</td><td>7.11</td></tr><tr><td rowspan="6">Zero-shot</td><td>Dense</td><td>-</td><td>66.31</td><td>66.96</td><td>71.41</td><td>68.91</td><td>69.95</td><td>76.91</td><td>70.83</td><td>73.93</td></tr><tr><td>SparseGPT</td><td>S</td><td>63.12</td><td>63.71</td><td>65.44</td><td>65.98</td><td>67.22</td><td>74.19</td><td>67.81</td><td>71.19</td></tr><tr><td>Wanda</td><td>S</td><td>62.77</td><td>64.13</td><td>65.51</td><td>66.58</td><td>68.01</td><td>74.39</td><td>66.70</td><td>71.15</td></tr><tr><td>DSnoT</td><td>S</td><td>62.91</td><td>63.22</td><td>64.91</td><td>66.41</td><td>67.78</td><td>74.27</td><td>66.89</td><td>71.23</td></tr><tr><td>SVD-LLM</td><td>LRA</td><td>39.07</td><td>38.13</td><td>36.65</td><td>43.12</td><td>39.32</td><td>44.86</td><td>36.11</td><td>40.77</td></tr><tr><td>Ours</td><td>S+LRA</td><td>63.59</td><td>65.24</td><td>65.97</td><td>66.99</td><td>68.55</td><td>74.79</td><td>68.68</td><td>71.93</td></tr></table>
|
| 178 |
+
|
| 179 |
+
Table 1: Performance comparison of unstructured compression methods on LLaMA & Qwen2.5 (50% parameters remaining) without finetuning across three task categories: (S means Sparsification; C4 & Wiki2 [WikiText-2] evaluated by perplexity $[PPL\downarrow]$ ; Zero-shot tasks reported as accuracy [%] averaged over {HellaSwag, Winogrande, BoolQ, PIQA, ARC-Easy, ARC-Challenge}), with detailed per-dataset results in Appendix D.
|
| 180 |
+
|
| 181 |
+

|
| 182 |
+
Figure 3: Fine-tuning under different types of pruning. (a) introduces an additional LoRA parameter. In contrast, the low-dimensional matrix $(D_{low} \leq 128)$ from SSLC framework can be directly used for fine-tuning.
|
| 183 |
+
|
| 184 |
+

|
| 185 |
+
Figure 3, which can be expressed as:
|
| 186 |
+
|
| 187 |
+
$$
|
| 188 |
+
\begin{array}{l} h = \left(U_{t} V_{t}^{T} + S_{t} + \Delta W\right) X + b \\ \quad = \left(U_{t}^{\prime} {V_{t}^{\prime}}^{T} + S_{t}\right) X + b \tag{9} \end{array}
|
| 189 |
+
$$
|
| 190 |
+
|
| 191 |
+
where $h$ and $b$ represent the output and bias of the layer, respectively. By integrating both low-rank and sparse components, our method outperforms pruning-only approach, enhancing feature extraction and achieving higher accuracy after finetuning.
|
| 192 |
+
|
| 193 |
+
# 5 Evaluation
|
| 194 |
+
|
| 195 |
+
A comprehensive evaluation of the LLaMA and Qwen2.5 model family has been conducted to assess
|
| 196 |
+
|
| 197 |
+
the effectiveness of SSLC. Detailed experimental setups, pre-trained models, datasets, and baselines are provided in Appendix B. Here, we present the performance analysis of the compressed models, focusing on perplexity and zero-shot capability. Additionally, we performed ablation studies to illustrate the impact of key hyperparameters such as rank, iteration count and weight preservation strategy. Finally, we evaluated the acceleration potential of our method using the simulated ViTCoD (You et al., 2023) accelerator, as detailed in Appendix C.
|
| 198 |
+
|
| 199 |
+
# 5.1 Compression Rate Efficiency Comparison
|
| 200 |
+
|
| 201 |
+
As quantified in Figure 4, when retaining $80\%$ of the original weight salience (as measured by Eq. 4), our synergistic method requires only $38.6\%$ parameter retention. This represents a $3.7\%$ absolute reduction compared to the pure pruning baseline $(42.3\%)$ . The efficiency gain originates from decoupling parameters into complementary components: a $32.3\%$ sparse matrix preserves the most crucial full-rank components for knowledge retention, while an additional $6.25\%$ from the low-rank approximation encodes the essential structure.
|
| 202 |
+
|
| 203 |
+
<table><tr><td>Model</td><td>Method</td><td>PIQA</td><td>BoolQ</td><td>HellaS</td><td>Wino</td><td>ARC-e</td><td>ARC-c</td><td>Ave</td><td>Δ</td></tr><tr><td rowspan="4">LLaMA2-7B</td><td>Dense</td><td>78.07</td><td>77.71</td><td>57.14</td><td>68.90</td><td>76.35</td><td>43.60</td><td>66.96</td><td>-</td></tr><tr><td>SparseGPT*</td><td>76.09</td><td>76.94</td><td>55.63</td><td>68.35</td><td>73.32</td><td>41.04</td><td>65.22</td><td>-1.74</td></tr><tr><td>Wanda*</td><td>77.69</td><td>76.82</td><td>54.57</td><td>67.75</td><td>74.28</td><td>41.21</td><td>65.39</td><td>-1.57</td></tr><tr><td>Ours</td><td>78.18</td><td>77.03</td><td>57.09</td><td>67.72</td><td>75.17</td><td>43.26</td><td>66.41</td><td>-0.55</td></tr><tr><td rowspan="4">LLaMA3-8B</td><td>Dense</td><td>80.14</td><td>82.08</td><td>60.02</td><td>73.64</td><td>81.40</td><td>51.19</td><td>71.41</td><td>-</td></tr><tr><td>SparseGPT*</td><td>78.51</td><td>81.91</td><td>57.40</td><td>71.82</td><td>79.22</td><td>48.14</td><td>69.50</td><td>-1.91</td></tr><tr><td>Wanda*</td><td>78.18</td><td>78.75</td><td>56.95</td><td>72.22</td><td>79.01</td><td>48.82</td><td>68.99</td><td>-2.42</td></tr><tr><td>Ours</td><td>79.32</td><td>80.75</td><td>58.67</td><td>72.48</td><td>80.60</td><td>50.68</td><td>70.42</td><td>-0.99</td></tr><tr><td rowspan="4">Qwen2.5-7B</td><td>Dense</td><td>78.51</td><td>84.52</td><td>72.77</td><td>60.01</td><td>80.56</td><td>48.63</td><td>70.83</td><td>-</td></tr><tr><td>SparseGPT*</td><td>79.03</td><td>84.54</td><td>71.69</td><td>57.13</td><td>80.44</td><td>51.21</td><td>70.67</td><td>-0.16</td></tr><tr><td>Wanda*</td><td>79.11</td><td>84.71</td><td>70.17</td><td>56.64</td><td>79.80</td><td>50.09</td><td>70.09</td><td>-0.74</td></tr><tr><td>Ours</td><td>78.84</td><td>85.44</td><td>72.06</td><td>58.20</td><td>81.82</td><td>52.64</td><td>71.50</td><td>+0.67</td></tr><tr><td 
rowspan="4">Qwen2.5-14B</td><td>Dense</td><td>81.12</td><td>85.54</td><td>75.37</td><td>63.39</td><td>82.37</td><td>55.80</td><td>73.93</td><td>-</td></tr><tr><td>SparseGPT*</td><td>80.45</td><td>87.63</td><td>73.52</td><td>60.78</td><td>82.42</td><td>55.03</td><td>73.31</td><td>-0.62</td></tr><tr><td>Wanda*</td><td>79.71</td><td>87.70</td><td>73.48</td><td>60.44</td><td>82.62</td><td>54.78</td><td>73.12</td><td>-0.81</td></tr><tr><td>Ours</td><td>81.39</td><td>87.74</td><td>74.03</td><td>61.58</td><td>84.34</td><td>56.06</td><td>74.19</td><td>+0.26</td></tr></table>
|
| 204 |
+
|
| 205 |
+
Table 2: Zero-shot tasks accuracy (%) of LLaMA and Qwen2.5 models at $50\%$ compression rate after fine-tuning with different pruning methods. * indicates models with LoRA fine-tuning, which introduces an additional parameter.
|
| 206 |
+
|
| 207 |
+

|
| 208 |
+
(a) Pure pruning.
|
| 209 |
+
Figure 4: Retaining $80\%$ of the total salience, the pure pruning method necessitates keeping the top $42.3\%$ of parameters, which compresses $57.7\%$ of parameters. In contrast, the synergistic method requires only the top $32.3\%$ of parameters to form a sparse matrix, along with an additional $6.25\%$ from the low-rank matrix. The overall reserved parameter ratio $(38.6\%)$ remains lower than that of the pure pruning method $(42.3\%)$ , which shows a compression "rate spread" of $3.7\%$ .
|
| 210 |
+
|
| 211 |
+
(b) Pruning $+$ Low-rank.
|
| 212 |
+

|
| 213 |
+
Parameters of sparse part
|
| 214 |
+
Parameters of low-rank part
|
| 215 |
+
Parameters of pruned part
|
| 216 |
+
|
| 217 |
+
# 5.2 Language Modeling and Zero-shot Tasks
|
| 218 |
+
|
| 219 |
+
Table 1 shows the performance of sparse LLM models at a uniform sparsity rate of $50\%$ . Our method, SSLC, achieves state-of-the-art results across both language modeling and zero-shot tasks, significantly outperforming baselines such as Wanda and DSnoT on various datasets, including C4 and WikiText-2. Moreover, our experiments demonstrate that compressed models such as Qwen2.5-14B with SSLC (approximately 7B effective parameters) outperform the native dense Qwen2.5-7B on zero-shot tasks, achieving an average improvement of $1.1\%$ on benchmarks. These results highlight that sparsity-based compression not only reduces parameter counts but also better preserves the original
|
| 220 |
+
|
| 221 |
+
model's capabilities compared to architecturally constrained smaller models.
|
| 222 |
+
|
| 223 |
+
# 5.3 Fine-tuning Sparse LLMs
|
| 224 |
+
|
| 225 |
+
To bridge the remaining performance gap, we further explore parameter-efficient fine-tuning strategies. As shown in Figure 3, unlike other methods such as Wanda and SparseGPT, which introduce additional parameters during adaptation, SSLC leverages its low-rank structure for parameter-efficient fine-tuning. As detailed in Table 2, after fine-tuning on the Alpaca dataset, SSLC not only surpasses Wanda and SparseGPT with LoRA but also nearly recovers the full accuracy of the original dense model, particularly on LLaMA2-7B and Qwen2.5 models. This demonstrates that SSLC enables sparse LLMs to retain high performance under tight parameter budgets, making it especially suitable for practical deployment scenarios where storage and efficiency are critical.
|
| 226 |
+
|
| 227 |
+
# 5.4 Ablation Study
|
| 228 |
+
|
| 229 |
+
We conduct ablation studies to assess the contribution of key hyperparameters in our SSLC method. As shown in Figure 5, the reconstruction error decreases rapidly across network layers when $T$ increases from 0 to 20, and notably stabilizes after 40 iterations, indicating robust convergence behavior of our method. Our experiments on C4 and WikiText-2 datasets (Table 3) further confirm that the model achieves stable performance after 40 iterations,
|
| 230 |
+
|
| 231 |
+

|
| 232 |
+
Figure 5: The current decomposition loss, denoted as $\| (W - L_t - S_t)X\| _F$ , for the down projection matrices of different layers in LLaMA2-7B varies as a percentage of the initial loss with respect to the number of iterations.
|
| 233 |
+
|
| 234 |
+
with optimal results appearing at $T = 60$ . After balancing computational efficiency with performance requirements, we ultimately selected 40 iterations as the experimental setting. This choice maintains model effectiveness while significantly reducing computational overhead (40 iterations consume $33\%$ fewer resources than 60 iterations).
|
| 235 |
+
|
| 236 |
+
<table><tr><td>Iteration</td><td>Wikitext-2</td><td>C4</td><td>Average</td></tr><tr><td>0</td><td>7.35</td><td>9.75</td><td>8.55</td></tr><tr><td>10</td><td>6.84</td><td>9.16</td><td>8.00</td></tr><tr><td>20</td><td>6.74</td><td>8.99</td><td>7.87</td></tr><tr><td>30</td><td>6.67</td><td>8.91</td><td>7.79</td></tr><tr><td>40</td><td>6.61</td><td>8.87</td><td>7.74</td></tr><tr><td>50</td><td>6.59</td><td>8.85</td><td>7.72</td></tr><tr><td>60</td><td>6.58</td><td>8.83</td><td>7.71</td></tr></table>
|
| 237 |
+
|
| 238 |
+
To rigorously validate the effectiveness of our SSLC framework, we performed systematic evaluations across various sparsity configurations. As evidenced by the experimental results presented in Figure 6, our method demonstrates consistent superiority over baseline approaches under varying pruning intensities, ranging from $10\%$ to $50\%$ sparsity levels. The performance gap becomes particularly pronounced at higher sparsity rates, highlighting the efficiency of our approach in preserving model capabilities even under aggressive compression. Furthermore, by integrating our SSLC framework with existing pruning techniques, the enhanced approaches achieve significantly better performance than their vanilla implementations.
|
| 239 |
+
|
| 240 |
+
For detailed ablation studies on the other three key hyperparameters: (1) the number of retained ranks, (2) the salience-based weight preservation
|
| 241 |
+
|
| 242 |
+

|
| 243 |
+
Figure 6: Performance of LLaMA2-7B on the WikiText-2 dataset under varying pruning ratios. Hollow markers denote standalone pruning methods, while solid markers represent our synergistic compression approach.
|
| 244 |
+
|
| 245 |
+
strategy, and (3) random seed initialization, alongside a comparative analysis of pruning methods under the SSLC framework, refer to Appendix E.
|
| 246 |
+
|
| 247 |
+
# 5.5 Acceleration Performance
|
| 248 |
+
|
| 249 |
+
To evaluate the acceleration of unstructured pruning, we employ the ViTCoD accelerator simulator to assess SSLC at a $50\%$ compression ratio. As detailed in Table 4, our method achieves speedups of $1.74 \times$ (MHA) and $1.84 \times$ (FFN) for LLaMA2-7B, and $1.63 \times$ (MHA) and $1.85 \times$ (FFN) for Qwen2.5-7B.
|
| 250 |
+
|
| 251 |
+
Table 3: Perplexity for LLaMA2-7B with $50\%$ parameters remaining at different numbers of iterations.
|
| 252 |
+
|
| 253 |
+
<table><tr><td>Model</td><td colspan="2">LLaMA2-7B</td><td colspan="2">Qwen2.5-7B</td></tr><tr><td>Module</td><td>MHA</td><td>FFN</td><td>MHA</td><td>FFN</td></tr><tr><td>Dense</td><td>16384</td><td>33024</td><td>7168</td><td>49728</td></tr><tr><td>Sparse</td><td>8364.2</td><td>16535.3</td><td>3705.7</td><td>24764.5</td></tr><tr><td>Low-rank</td><td>1024</td><td>1416</td><td>704</td><td>2112</td></tr><tr><td>Sum</td><td>9388.2</td><td>17951.3</td><td>4409.7</td><td>26876.5</td></tr><tr><td>Speedup</td><td>1.74×</td><td>1.84×</td><td>1.63×</td><td>1.85×</td></tr></table>
|
| 254 |
+
|
| 255 |
+
Table 4: Runtime (cycles) and speedup across modules in LLaMA2-7B and Qwen2.5-7B. "Cycles" denotes computational cycles required by the ViTCoD accelerator.
|
| 256 |
+
|
| 257 |
+
<table><tr><td>Model</td><td>Dense</td><td>50%</td><td>60%</td><td>70%</td></tr><tr><td>LLaMA2-7B</td><td>53.79</td><td>72.12</td><td>77.87</td><td>89.87</td></tr><tr><td>LLaMA1-7B</td><td>54.07</td><td>73.02</td><td>79.14</td><td>91.25</td></tr></table>
|
| 258 |
+
|
| 259 |
+
Table 5: Real-world throughput (tokens/sec) at varying sparsity levels
|
| 260 |
+
|
| 261 |
+
For real-world memory-bound inference, we evaluate SSLC across sparsity levels from $50\%$ to $70\%$ using nm-vLLM (NeuralMagic, 2024). With 1024-token generation over 5 prompts, SSLC
|
| 262 |
+
|
| 263 |
+
achieves throughput speedups of $1.34 \times$ – $1.69 \times$ under bandwidth-bound conditions.
|
| 264 |
+
|
| 265 |
+
# 6 Conclusion
|
| 266 |
+
|
| 267 |
+
In this paper, we systematically analyze the strengths and weaknesses of two previously independent compression techniques for LLMs: pruning and low-rank approximation. Based on the theoretical analysis, SSLC (Synergistic Sparse and Low-Rank Compression) is introduced for efficient LLM deployment, which maximizes the energy in the low-rank component using orthogonal bases, while simultaneously achieving discrete full-rank information in the sparse part. By modeling the joint compression for LLMs as a unified optimization problem, we apply an iterative optimization algorithm that offers a novel theoretical perspective and achieves significant performance improvements in practice. Experiments on language modeling and zero-shot tasks show that our method significantly outperforms previous compression approaches. Furthermore, comprehensive fine-tuning experiments demonstrate SSLC's effectiveness in restoring model accuracy, validating its practicality for real-world deployment.
|
| 268 |
+
|
| 269 |
+
# Limitations
|
| 270 |
+
|
| 271 |
+
Our proposed synergistic sparse and low-rank compression method is formulated as an iterative optimization problem. While this approach necessitates additional computation during the pruning phase, we have strategically optimized the algorithm to minimize both time and memory consumption. As a result, the pruning process completes in approximately 30 minutes for 7B models and about 1 hour for 14B models on standard hardware configurations. Despite these efficiency gains, our method currently applies uniform compression ratios across all Transformer layers, which may not fully exploit the varying sensitivities of different layers. Future work will focus on exploring theoretically grounded metrics for assessing layer criticality—potentially through gradient-weighted Hessian analysis—to enable dynamic, layer-wise compression policies that achieve Pareto-efficient trade-offs between accuracy and computational cost.
|
| 272 |
+
|
| 273 |
+
# References
|
| 274 |
+
|
| 275 |
+
Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman,
|
| 276 |
+
|
| 277 |
+
Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. 2023. Gpt-4 technical report. arXiv preprint arXiv:2303.08774.
|
| 278 |
+
Armen Aghajanyan, Luke Zettlemoyer, and Sonal Gupta. 2020. Intrinsic dimensionality explains the effectiveness of language model fine-tuning. arXiv preprint arXiv:2012.13255.
|
| 279 |
+
Yonatan Bisk, Rowan Zellers, Ronan Le Bras, Jianfeng Gao, and Yejin Choi. 2020. Piqa: Reasoning about physical commonsense in natural language. In Thirty-Fourth AAAI Conference on Artificial Intelligence.
|
| 280 |
+
Sebastien Bubeck, Varun Chandrasekaran, Ronen Eldan, Johannes Gehrke, Eric Horvitz, Ece Kamar, Peter Lee, Yin Tat Lee, Yuanzhi Li, Scott Lundberg, Harsha Nori, Hamid Palangi, Marco Tulio Ribeiro, and Yi Zhang. 2023. Sparks of artificial general intelligence: Early experiments with gpt-4.
|
| 281 |
+
Tianyi Chen, Tianyu Ding, Badal Yadav, Ilya Zharkov, and Luming Liang. 2023. Lorashear: Efficient large language model structured pruning and knowledge recovery. arXiv preprint arXiv:2310.18356.
|
| 282 |
+
Christopher Clark, Kenton Lee, Ming-Wei Chang, Tom Kwiatkowski, Michael Collins, and Kristina Toutanova. 2019. BoolQ: Exploring the surprising difficulty of natural yes/no questions. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 2924–2936, Minneapolis, Minnesota. Association for Computational Linguistics.
|
| 283 |
+
Peter Clark, Isaac Cowhey, Oren Etzioni, Tushar Khot, Ashish Sabharwal, Carissa Schoenick, and Oyvind Tafjord. 2018. Think you have solved question answering? try arc, the ai2 reasoning challenge. arXiv:1803.05457v1.
|
| 284 |
+
Tim Dettmers, Mike Lewis, Younes Belkada, and Luke Zettlemoyer. 2022. LLM.int8(): 8-bit matrix multiplication for transformers at scale. In Advances in Neural Information Processing Systems.
|
| 285 |
+
Tim Dettmers, Ruslan Svirschevski, Vage Egiazarian, Denis Kuznedelev, Elias Frantar, Saleh Ashkboos, Alexander Borzunov, Torsten Hoefler, and Dan Alistarh. 2023. Spqr: A sparse-quantized representation for near-lossless llm weight compression.
|
| 286 |
+
Peijie Dong, Lujun Li, Zhenheng Tang, Xiang Liu, Xinglin Pan, Qiang Wang, and Xiaowen Chu. 2024. Pruner-zero: Evolving symbolic pruning metric from scratch for large language models. arXiv preprint arXiv:2406.02924.
|
| 287 |
+
Elias Frantar and Dan Alistarh. 2022. Optimal brain compression: A framework for accurate post-training quantization and pruning. Advances in Neural Information Processing Systems, 35:4475-4488.
|
| 288 |
+
Elias Frantar and Dan Alistarh. 2023. SparseGPT: Massive language models can be accurately pruned in one-shot.
|
| 289 |
+
|
| 290 |
+
Elias Frantar, Saleh Ashkboos, Torsten Hoefler, and Dan Alistarh. 2023. GPTQ: Accurate post-training compression for generative pretrained transformers. In International Conference on Learning Representations.
|
| 291 |
+
Leo Gao, Jonathan Tow, Stella Biderman, Sid Black, Anthony DiPofi, Charles Foster, Laurence Golding, Jeffrey Hsu, Kyle McDonell, Niklas Muennighoff, et al. 2021. A framework for few-shot language model evaluation. Version v0.0.1. Sept.
|
| 292 |
+
Aaron Grattafori, Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Alex Vaughan, et al. 2024. The llama 3 herd of models.
|
| 293 |
+
Babak Hassibi and David Stork. 1992. Second order derivatives for network pruning: Optimal brain surgeon. Advances in neural information processing systems, 5.
|
| 294 |
+
Babak Hassibi, David G Stork, and Gregory J Wolff. 1993. Optimal brain surgeon and general network pruning. In IEEE International Conference on Neural Networks.
|
| 295 |
+
Yen-Chang Hsu, Ting Hua, Sungen Chang, Qian Lou, Yilin Shen, and Hongxia Jin. 2022. Language model compression with weighted low-rank factorization. arXiv preprint arXiv:2207.00112.
|
| 296 |
+
Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. 2021. Lora: Low-rank adaptation of large language models.
|
| 297 |
+
Wei Huang, Haotong Qin, Yangdong Liu, Yawei Li, Xianglong Liu, Luca Benini, Michele Magno, and Xiaojuan Qi. 2024. Slim-llm: Salience-driven mixed-precision quantization for large language models. arXiv preprint arXiv:2405.14917.
|
| 298 |
+
Weizhong Huang, Yuxin Zhang, Xiawu Zheng, Yang Liu, Jing Lin, Yiwu Yao, and Rongrong Ji. 2025. Dynamic low-rank sparse adaptation for large language models.
|
| 299 |
+
Yann LeCun, John S Denker, and Sara A Solla. 1989. Optimal brain damage. In Advances in Neural Information Processing Systems.
|
| 300 |
+
Guangyan Li, Yongqiang Tang, and Wensheng Zhang. 2024. Lorap: Transformer sub-layers deserve differentiated structured compression for large language models. arXiv preprint arXiv:2404.09695.
|
| 301 |
+
Yixiao Li, Yifan Yu, Chen Liang, Pengcheng He, Nikos Karampatziakis, Weizhu Chen, and Tuo Zhao. 2023a. Loftq: Lora-fine-tuning-aware quantization for large language models. arXiv preprint arXiv:2310.08659.
|
| 302 |
+
Yixiao Li, Yifan Yu, Qingru Zhang, Chen Liang, Pengcheng He, Weizhu Chen, and Tuo Zhao. 2023b. Losparse: Structured compression of large language
|
| 303 |
+
|
| 304 |
+
models based on low-rank and sparse approximation. In International Conference on Machine Learning, pages 20336-20350. PMLR.
|
| 305 |
+
Zechun Liu, Changsheng Zhao, Igor Fedorov, Bilge Soran, Dhruv Choudhary, Raghuraman Krishnamoorthi, Vikas Chandra, Yuandong Tian, and Tijmen Blankevoort. 2025. Spinquant: Llm quantization with learned rotations.
|
| 306 |
+
Xinyin Ma, Gongfan Fang, and Xinchao Wang. 2023. Llm-pruner: On the structural pruning of large language models. Version 3.
|
| 307 |
+
Xiang Meng, Kayhan Behdin, Haoyue Wang, and Rahul Mazumder. 2024. Alps: Improved optimization for highly sparse one-shot pruning for large language models.
|
| 308 |
+
Stephen Merity, Caiming Xiong, James Bradbury, and Richard Socher. 2016. Pointer sentinel mixture models. arXiv preprint arXiv:1609.07843.
|
| 309 |
+
NeuralMagic. 2024. nm-vllm: Neuralmagic's inference engine for vLLM. https://github.com/neuralmagic/nm-vllm. Accessed: 2025-09-01.
|
| 310 |
+
Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J Liu. 2020. Exploring the limits of transfer learning with a unified text-to-text transformer. Journal of machine learning research, 21(140):1-67.
|
| 311 |
+
Rajarshi Saha, Naomi Sagan, Varun Srivastava, Andrea J. Goldsmith, and Mert Pilanci. 2024. Compressing large language models using low rank and low precision decomposition.
|
| 312 |
+
Keisuke Sakaguchi, Ronan Le Bras, Chandra Bhagavatula, and Yejin Choi. 2019. Winogrande: An adversarial winograd schema challenge at scale.
|
| 313 |
+
Teven Le Scao, Angela Fan, Christopher Akiki, Ellie Pavlick, Suzana Ilic, Daniel Hesslow, Roman Castagné, Alexandra Sasha Luccioni, François Yvon, Matthias Galle, et al. 2022. Bloom: A 176b-parameter open-access multilingual language model.
|
| 314 |
+
Mingjie Sun, Zhuang Liu, Anna Bair, and Zico Kolter. 2023. A simple and effective pruning approach for large language models.
|
| 315 |
+
Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li, Carlos Guestrin, Percy Liang, and Tatsunori B. Hashimoto. 2023. Stanford alpaca: An instruction-following llama model. https://github.com/tatsu-lab/stanford_alpaca. Accessed: 2023-08-09.
|
| 316 |
+
Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothee Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, Aurelien Rodriguez, Armand Joulin, Edouard Grave, and Guillaume Lample. 2023a. LLaMA: Open and efficient foundation language models.
|
| 317 |
+
|
| 318 |
+
Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. 2023b. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288.
|
| 319 |
+
|
| 320 |
+
Xin Wang, Yu Zheng, Zhongwei Wan, and Mi Zhang. 2024. Svd-llm: Truncation-aware singular value decomposition for large language model compression. arXiv preprint arXiv:2403.07378.
|
| 321 |
+
|
| 322 |
+
Jason Wei, Yi Tay, Rishi Bommasani, Colin Raffel, Barret Zoph, Sebastian Borgeaud, Dani Yogatama, Maarten Bosma, Denny Zhou, Donald Metzler, Ed H. Chi, Tatsunori Hashimoto, Oriol Vinyals, Percy Liang, Jeff Dean, and William Fedus. 2022. Emergent abilities of large language models. In *Transactions on Machine Learning Research*.
|
| 323 |
+
|
| 324 |
+
John Wright, Arvind Ganesh, Shankar Rao, Yigang Peng, and Yi Ma. 2009. Robust principal component analysis: Exact recovery of corrupted low-rank matrices via convex optimization. Advances in neural information processing systems, 22.
|
| 325 |
+
|
| 326 |
+
Shuo Xiang, Yunzhang Zhu, Xiaotong Shen, and Jieping Ye. 2012. Optimal exact least squares rank minimization. In Proceedings of the 18th ACM SIGKDD international conference on Knowledge discovery and data mining, pages 480-488.
|
| 327 |
+
|
| 328 |
+
Guangxuan Xiao, Ji Lin, Mickael Seznec, Hao Wu, Julien Demouth, and Song Han. 2023. Smoothquant: Accurate and efficient post-training quantization for large language models. In International Conference on Machine Learning.
|
| 329 |
+
|
| 330 |
+
An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. 2025. Qwen2.5 technical report.
|
| 331 |
+
|
| 332 |
+
Haoran You, Zhanyi Sun, Huihong Shi, Zhongzhi Yu, Yang Zhao, Yongan Zhang, Chaojian Li, Baopu Li, and Yingyan Lin. 2023. Vitcod: Vision transformer acceleration via dedicated algorithm and accelerator co-design. In 2023 IEEE International Symposium on High-Performance Computer Architecture (HPCA), pages 273-286. IEEE.
|
| 333 |
+
|
| 334 |
+
Zhihang Yuan, Yuzhang Shang, and Zhen Dong. 2024. Pb-llm: Partially binarized large language models. In The Twelfth International Conference on Learning Representations.
|
| 335 |
+
|
| 336 |
+
Zhihang Yuan, Yuzhang Shang, Yue Song, Qiang Wu, Yan Yan, and Guangyu Sun. 2023. Asvd: Activation-aware singular value decomposition for compressing large language models. arXiv preprint arXiv:2312.05821.
|
| 337 |
+
|
| 338 |
+
Rowan Zellers, Ari Holtzman, Yonatan Bisk, Ali Farhadi, and Yejin Choi. 2019. Hellaswag: Can a machine really finish your sentence? In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics.
|
| 339 |
+
|
| 340 |
+
Mingyang Zhang, Hao Chen, Chunhua Shen, Zhen Yang, Linlin Ou, Xinyi Yu, and Bohan Zhuang. 2024a. Loraprune: Structured pruning meets low-rank parameter-efficient fine-tuning.
|
| 341 |
+
|
| 342 |
+
Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen, Christopher Dewan, Mona Diab, Xian Li, Xi Victoria Lin, et al. 2022. OPT: Open pre-trained transformer language models.
|
| 343 |
+
|
| 344 |
+
Yingtao Zhang, Haoli Bai, Haokun Lin, Jialin Zhao, Lu Hou, and Carlo Vittorio Cannistraci. 2024b. Plug-and-play: An efficient post-training pruning method for large language models. In The Twelfth International Conference on Learning Representations.
|
| 345 |
+
|
| 346 |
+
Yuxin Zhang, Lirui Zhao, Mingbao Lin, Yunyun Sun, Yiwu Yao, Xingjia Han, Jared Tanner, Shiwei Liu, and Rongrong Ji. 2024c. Dynamic sparse no training: Training-free fine-tuning for sparse llms.
|
| 347 |
+
|
| 348 |
+
Tianyi Zhou and Dacheng Tao. 2011. Godec: Randomized low-rank & sparse matrix decomposition in noisy case. In Proceedings of the 28th International Conference on Machine Learning, ICML 2011.
|
| 349 |
+
|
| 350 |
+
# A Convergence Analysis
|
| 351 |
+
|
| 352 |
+
Building upon Optimal Brain Surgeon (OBS) (Hassibi et al., 1993), with extensions in SparseGPT (Frantar and Alistarh, 2023) and GPTQ (Frantar et al., 2023), the element-wise perturbation at $(i,j)$ induces quadratic error:
|
| 353 |
+
|
| 354 |
+
$$
|
| 355 |
+
\delta_ {i, j} = \frac {\Delta W _ {i j} ^ {2}}{\left[ H ^ {- 1} \right] _ {j j} ^ {2}} \approx \| \Delta W \| \cdot \| X _ {j} \| _ {2} \tag {10}
|
| 356 |
+
$$
|
| 357 |
+
|
| 358 |
+
To jointly optimize the low-rank $(L)$ and sparse $(S)$ matrices:
|
| 359 |
+
|
| 360 |
+
$$
|
| 361 |
+
\arg \min \| (W - L - S) X \| _ {F} \approx \| W - L - S \| \cdot \| X _ {j} \| _ {2} \tag {11}
|
| 362 |
+
$$
|
| 363 |
+
|
| 364 |
+
We solve $L$ and $S$ iteratively (Eq. 5 and Eq. 7 in main text), defining optimization losses:
|
| 365 |
+
|
| 366 |
+
$$
|
| 367 |
+
E _ {t} ^ {1} \approx \| (W - L _ {t} - S _ {t - 1}) \| \cdot \| X _ {j} \| _ {2}
|
| 368 |
+
$$
|
| 369 |
+
|
| 370 |
+
$$
|
| 371 |
+
E _ {t} ^ {2} \approx \| (W - L _ {t} - S _ {t}) \| \cdot \| X _ {j} \| _ {2}
|
| 372 |
+
$$
|
| 373 |
+
|
| 374 |
+
Global optimality of $S_{t}$ and $L_{t + 1}$ ensures:
|
| 375 |
+
|
| 376 |
+
$$
|
| 377 |
+
E _ {t} ^ {1} \geq E _ {t} ^ {2} \tag {12}
|
| 378 |
+
$$
|
| 379 |
+
|
| 380 |
+
$$
|
| 381 |
+
E _ {t} ^ {2} \geq E _ {t + 1} ^ {1} \tag {13}
|
| 382 |
+
$$
|
| 383 |
+
|
| 384 |
+
Thus the quadratic error $\| (W - L - S)\| \cdot \| X_j\| _2$ decreases monotonically:
|
| 385 |
+
|
| 386 |
+
$$
|
| 387 |
+
E _ {1} ^ {1} \geq E _ {1} ^ {2} \geq E _ {2} ^ {1} \geq \dots \geq E _ {t} ^ {1} \geq E _ {t} ^ {2} \geq E _ {t + 1} ^ {1} \geq \dots \tag {14}
|
| 388 |
+
$$
|
| 389 |
+
|
| 390 |
+
Complementing this theoretical framework, Figure 5 (main text) shows monotonic error reduction across layers, with $>90\%$ convergence within 40 iterations.
|
| 391 |
+
|
| 392 |
+
# B Detailed Experimental Settings
|
| 393 |
+
|
| 394 |
+
# B.1 Setup.
|
| 395 |
+
|
| 396 |
+
It is worth noting that our synergistic optimization method is a simple and efficient way to run on consumer-grade graphics cards, whereas the largest computing resources are required by fine-tuning schemes. The calibration dataset used in the experiments is the same as Wanda, sampled from the first slice of the C4 (Raffel et al., 2020) training dataset, containing 128 sequences with 2048 tokens each, consistent with the baseline approaches. We use the high-quality instruction dataset Stanford Alpaca (Taori et al., 2023) for fine-tuning the compressed models.
|
| 397 |
+
|
| 398 |
+
# B.2 Models.
|
| 399 |
+
|
| 400 |
+
Our evaluation primarily focuses on leading open-source LLM families, including the LLaMA series and Qwen2.5 models. Specifically, we validate our method across multiple architectures and scales: LLaMA-7B/13B, LLaMA2-7B/13B, LLaMA3-8B/70B, and Qwen2.5-7B/14B. The empirical results demonstrate that our approach achieves consistent performance improvements regardless of model size or architecture.
|
| 401 |
+
|
| 402 |
+
# B.3 Evaluation.
|
| 403 |
+
|
| 404 |
+
Experiments are evaluated on the WikiText-2 (Merity et al., 2016) and C4 datasets for perplexity (PPL) validation. To explore the model's capabilities in depth, we follow previous methods and perform zero-shot task classification with the help of the lm-eval (Gao et al., 2021) library on datasets including BoolQ (Clark et al., 2019), PIQA (Bisk et al., 2020), HellaSwag (Zellers et al., 2019), WinoGrande (Sakaguchi et al., 2019), ARC-easy (Clark et al., 2018), and ARC-challenge (Clark et al., 2018). The licenses for the datasets and models used in this paper are as follows:
|
| 405 |
+
|
| 406 |
+
- WikiText-2: Creative Commons Attribution-ShareAlike.
|
| 407 |
+
C4: Apache License 2.0.
|
| 408 |
+
- BoolQ: Creative Commons Attribution-ShareAlike 3.0 (CC BY-SA 3.0).
|
| 409 |
+
PIQA: MIT License.
|
| 410 |
+
- HellaSwag: MIT License.
|
| 411 |
+
- WinoGrande: Creative Commons Attribution 4.0 (CC BY 4.0).
|
| 412 |
+
|
| 413 |
+
- ARC-easy / ARC-challenge: Creative Commons Attribution-ShareAlike 4.0 (CC BY-SA 4.0).
|
| 414 |
+
- LLaMA1: Non-commercial research license;
|
| 415 |
+
- LLaMA2: Meta Llama 2 Community License;
|
| 416 |
+
- LLaMA3: Meta Llama 3 Community License;
|
| 417 |
+
- Qwen2.5: Apache License 2.0;
|
| 418 |
+
|
| 419 |
+
All datasets and models were utilized in accordance with their respective licenses.
|
| 420 |
+
|
| 421 |
+
# B.4 Baselines.
|
| 422 |
+
|
| 423 |
+
We have meticulously reproduced several established methodologies to serve as benchmarks: (1) SparseGPT, which ingeniously reframes the task of model pruning in LLMs as a sequential sparse regression challenge, subsequently updating the unpruned weights. (2) Wanda, a method that approximates the SparseGPT pruning metric using the product of the magnitude of weights and L2 normalization based on input activation, performing only weight pruning. (3) DSNoT, a dynamic pruning technique that expands upon the sparse methodologies like Wanda, engaging in iterative processes of weight pruning and growth, which can be seen as an iterative optimization algorithm of sparse plus sparse. (4) SVD-LLM, a novel SVD-based LLM compression method, addresses the limitations of existing SVD approaches by incorporating a truncation-aware data whitening strategy that directly maps singular values to compression loss, thereby demonstrating superior performance compared to previous SVD compression methods (Yuan et al., 2023; Hsu et al., 2022).
|
| 424 |
+
|
| 425 |
+
# C Detailed Simulated ViTCoD Accelerator
|
| 426 |
+
|
| 427 |
+
ViTCoD (You et al., 2023) is an innovative framework for algorithm and hardware co-design. It effectively reduces the demand for on-chip cache and the frequency of input matrix loading by spatially tiling sparse and dense matrices along specific dimensions and accumulating intermediate results. During the computation, ViTCoD divides the input matrices into smaller blocks and transfers them to memory buffers, then intelligently assigns computation tasks to either the Denser Engine or the
|
| 428 |
+
|
| 429 |
+
Sparser Engine based on the sparsity of the matrix columns. The partial results computed by the Denser Engine are then transferred to the Sparser Engine for accumulation. This strategy not only enhances the reuse rate of input matrices and reduces the need for on-chip buffers but also optimizes the utilization of processing elements by reasonably distributing computation tasks, thereby improving overall computational performance.
|
| 430 |
+
|
| 431 |
+
# D Detailed Zero-shot Task Performance
|
| 432 |
+
|
| 433 |
+
We evaluated a series of zero-shot learning tasks, as shown in Table 1. We present detailed task performance metrics in Table 10, providing a comprehensive understanding of the zero-shot capabilities of the related models.
|
| 434 |
+
|
| 435 |
+
# E Detailed Ablation Study
|
| 436 |
+
|
| 437 |
+
# E.1 Different Ranks.
|
| 438 |
+
|
| 439 |
+
With a fixed compression ratio of $50\%$ , an in-depth analysis of the effects of sparse and low-rank parameter assignments on the LLaMA2-7B model is provided. As demonstrated in Table 6, the model performance improves when the rank is increased from 32 to 128; however, beyond 128, the performance starts to decrease. Therefore, 128 is chosen as the optimal compromise point for parameter allocation to balance model performance, which is significantly better than pure pruning methods (rank=0) or pure low-rank methods (rank=1296). The results of this study not only highlight the need to balance pruning and low-rank approximation in model design, but also provide a valuable reference for the development of algorithms to find the optimal combination.
|
| 440 |
+
|
| 441 |
+
<table><tr><td>Dataset</td><td>r=0</td><td>r=64</td><td>r=128</td><td>r=256</td><td>r=1296</td></tr><tr><td>Wiki2</td><td>6.92</td><td>6.72</td><td>6.61</td><td>6.70</td><td>1.02e4</td></tr><tr><td>C4</td><td>9.24</td><td>8.97</td><td>8.87</td><td>9.03</td><td>1.85e4</td></tr></table>
|
| 442 |
+
|
| 443 |
+
Table 6: Perplexity results for LLaMA2-7B at $50\%$ compression with different number of rank. When $\mathrm{r} = 1296$ , this is a pure low-rank approximation with $0\%$ sparsity; in contrast, when $\mathrm{r} = 0$ , this corresponds to a pure pruning approach with $50\%$ sparsity.
|
| 444 |
+
|
| 445 |
+
# E.2 Preserving Most Important Weights.
|
| 446 |
+
|
| 447 |
+
We explore the effects of preserving the most important weights prior to synergistic optimization. The findings are detailed in the Table 7. The results show that incorporating this retention ratio at
|
| 448 |
+
|
| 449 |
+
a $1\%$ level leads to the best improvement in performance, while at a $10\%$ level, the performance declines sharply. Additionally, it is important to highlight that these $1\%$ weights can be seamlessly integrated into the sparse part, incurring no extra structural cost.
|
| 450 |
+
|
| 451 |
+
<table><tr><td>Models</td><td>Preserved Ratio</td><td>Wiki2</td><td>C4</td></tr><tr><td rowspan="4">LLaMA2-7B</td><td>0%</td><td>6.71</td><td>8.97</td></tr><tr><td>1%</td><td>6.61</td><td>8.87</td></tr><tr><td>3%</td><td>6.63</td><td>8.87</td></tr><tr><td>10%</td><td>6.70</td><td>8.99</td></tr><tr><td rowspan="4">LLaMA2-13B</td><td>0%</td><td>8.10</td><td>5.84</td></tr><tr><td>1%</td><td>8.02</td><td>5.79</td></tr><tr><td>3%</td><td>8.03</td><td>5.80</td></tr><tr><td>10%</td><td>8.06</td><td>5.82</td></tr></table>
|
| 452 |
+
|
| 453 |
+
Table 7: Perplexity results for LLaMA2-7B and LLaMA2-13B at $50\%$ compression when retaining different proportions of the most important weights.
|
| 454 |
+
|
| 455 |
+
# E.3 Random Seeds.
|
| 456 |
+
|
| 457 |
+
To address potential concerns regarding the reproducibility of performance differences, we conducted a comprehensive robustness analysis across five distinct random seeds (0-4) under identical hyperparameter configurations. Our method demonstrates exceptional stability and robustness, maintaining consistent superiority over baseline approaches despite varying initialization conditions. As evidenced in Table 8, SSLC achieves statistically significant improvements across all evaluation tasks, with performance variances remaining below 0.02 standard deviation for both our method and competitors on stable benchmarks like C4 and WikiText-2, while the average accuracy on zero-shot tasks exhibits $\sigma \approx 0.1$ across all compared methods.
|
| 458 |
+
|
| 459 |
+
# E.4 SSLC with Other LLM Pruning Methods.
|
| 460 |
+
|
| 461 |
+
Our framework establishes new capabilities for model compression by simultaneously enhancing both task performance and intrinsic language modeling across diverse pruning methods. The results in Table 9 demonstrate that, as a universal plugin, it consistently improves accuracy on reasoning benchmarks ( $+0.7$ – $1.0\%$ on average) while reducing perplexity across all baselines.
|
| 462 |
+
|
| 463 |
+
# F Potential Risks
|
| 464 |
+
|
| 465 |
+
While our method effectively maintains model performance at moderate sparsity (e.g., $50\%$ ), excess
|
| 466 |
+
|
| 467 |
+
<table><tr><td colspan="2">Method</td><td>PIQA</td><td>Boolq</td><td>HellaS</td><td>Wino</td><td>ARC-e</td><td>ARC-c</td><td>Ave</td><td>Wiki2</td><td>C4</td></tr><tr><td rowspan="6">Wanda</td><td>Overall</td><td>76.24</td><td>76.14</td><td>52.72</td><td>67.97</td><td>72.14</td><td>39.00</td><td>64.04±0.10</td><td>6.92±0.01</td><td>9.23±0.01</td></tr><tr><td>Seed_0</td><td>76.71</td><td>76.60</td><td>52.56</td><td>68.43</td><td>72.18</td><td>38.31</td><td>64.13</td><td>6.92</td><td>9.24</td></tr><tr><td>Seed_1</td><td>76.16</td><td>75.66</td><td>52.62</td><td>68.03</td><td>72.47</td><td>39.51</td><td>64.08</td><td>6.91</td><td>9.25</td></tr><tr><td>Seed_2</td><td>76.06</td><td>76.42</td><td>52.75</td><td>67.88</td><td>71.72</td><td>39.51</td><td>64.06</td><td>6.91</td><td>9.23</td></tr><tr><td>Seed_3</td><td>76.11</td><td>76.02</td><td>52.70</td><td>68.19</td><td>72.26</td><td>38.99</td><td>64.05</td><td>6.93</td><td>9.23</td></tr><tr><td>Seed_4</td><td>76.17</td><td>75.99</td><td>52.99</td><td>67.32</td><td>72.05</td><td>38.66</td><td>63.86</td><td>6.94</td><td>9.22</td></tr><tr><td 
rowspan="6">DSnoT</td><td>Overall</td><td>75.94</td><td>74.04</td><td>54.89</td><td>64.09</td><td>64.91</td><td>44.86</td><td>63.12±0.09</td><td>6.85±0.02</td><td>9.12±0.01</td></tr><tr><td>Seed_0</td><td>76.28</td><td>73.58</td><td>52.01</td><td>66.93</td><td>71.68</td><td>38.82</td><td>63.22</td><td>6.83</td><td>9.13</td></tr><tr><td>Seed_1</td><td>75.95</td><td>74.77</td><td>51.84</td><td>67.32</td><td>71.21</td><td>37.71</td><td>63.13</td><td>6.85</td><td>9.11</td></tr><tr><td>Seed_2</td><td>75.90</td><td>74.46</td><td>51.91</td><td>66.77</td><td>71.25</td><td>38.05</td><td>63.06</td><td>6.86</td><td>9.11</td></tr><tr><td>Seed_3</td><td>75.73</td><td>73.58</td><td>51.84</td><td>67.01</td><td>71.67</td><td>38.22</td><td>63.01</td><td>6.87</td><td>9.12</td></tr><tr><td>Seed_4</td><td>75.84</td><td>73.82</td><td>51.94</td><td>67.32</td><td>71.59</td><td>38.65</td><td>63.19</td><td>6.84</td><td>9.11</td></tr><tr><td rowspan="6">Ours</td><td>Overall</td><td>77.15</td><td>76.93</td><td>53.89</td><td>68.40</td><td>73.94</td><td>41.19</td><td>65.25±0.10</td><td>6.62±0.02</td><td>8.87±0.00</td></tr><tr><td>Seed_0</td><td>76.55</td><td>77.68</td><td>53.81</td><td>67.32</td><td>74.41</td><td>40.96</td><td>65.12</td><td>6.61</td><td>8.87</td></tr><tr><td>Seed_1</td><td>77.47</td><td>76.33</td><td>53.89</td><td>68.82</td><td>73.93</td><td>41.88</td><td>65.39</td><td>6.61</td><td>8.87</td></tr><tr><td>Seed_2</td><td>77.21</td><td>77.73</td><td>53.99</td><td>68.35</td><td>73.19</td><td>40.70</td><td>65.20</td><td>6.64</td><td>8.87</td></tr><tr><td>Seed_3</td><td>77.42</td><td>77.83</td><td>53.87</td><td>69.46</td><td>73.15</td><td>40.10</td><td>65.31</td><td>6.59</td><td>8.87</td></tr><tr><td>Seed_4</td><td>77.09</td><td>75.08</td><td>53.89</td><td>68.03</td><td>75.04</td><td>42.32</td><td>65.24</td><td>6.64</td><td>8.87</td></tr></table>
|
| 468 |
+
|
| 469 |
+
Table 8: Accuracy on zero-shot tasks and language modeling performance $(PPL\downarrow)$ for LLaMA2-7B at $50\%$ compression rate across different pruning methods (mean±std over 5 random seeds).
|
| 470 |
+
|
| 471 |
+
<table><tr><td>Method</td><td>Conference</td><td>PIQA</td><td>BoolQ</td><td>HellaS</td><td>Wino</td><td>ARC-e</td><td>ARC-c</td><td>Ave</td><td>Wiki2</td><td>C4</td></tr><tr><td>RIA</td><td>ICLR2024</td><td>76.11</td><td>75.57</td><td>52.21</td><td>67.48</td><td>71.51</td><td>38.39</td><td>63.55</td><td>6.81</td><td>9.11</td></tr><tr><td>RIA+ours</td><td></td><td>76.93</td><td>76.12</td><td>52.95</td><td>69.61</td><td>72.81</td><td>38.14</td><td>64.42</td><td>6.54</td><td>8.77</td></tr><tr><td>ALPS</td><td>NIPS2024</td><td>76.22</td><td>75.37</td><td>53.12</td><td>68.21</td><td>72.61</td><td>41.21</td><td>64.46</td><td>6.87</td><td>9.01</td></tr><tr><td>ALPS+ours</td><td></td><td>76.44</td><td>76.64</td><td>53.87</td><td>69.22</td><td>73.19</td><td>41.32</td><td>65.11</td><td>6.60</td><td>8.73</td></tr><tr><td>Pruner-Zero</td><td>ICML2024</td><td>75.90</td><td>74.13</td><td>51.16</td><td>67.01</td><td>71.17</td><td>37.28</td><td>62.78</td><td>6.61</td><td>9.23</td></tr><tr><td>Pruner-Zero+ours</td><td></td><td>76.17</td><td>73.88</td><td>51.41</td><td>69.16</td><td>72.73</td><td>39.59</td><td>63.82</td><td>6.45</td><td>8.88</td></tr></table>
|
| 472 |
+
|
| 473 |
+
Table 9: Accuracy on zero-shot tasks and language modeling performance (PPL) for LLaMA2-7B of $50\%$ compression rate across different pruning methods.
|
| 474 |
+
|
| 475 |
+
pruning introduces significant performance degradation risks. This underscores a critical limitation of post-training pruning: aggressive sparsification cannot be fully remedied by fine-tuning alone, potentially compromising model reliability in high-sparsity scenarios.
|
| 476 |
+
|
| 477 |
+
<table><tr><td>Model</td><td>Method</td><td>Type</td><td>PIQA</td><td>BoolQ</td><td>HellaS</td><td>Wino</td><td>ARC-e</td><td>ARC-c</td><td>Ave</td></tr><tr><td rowspan="5">LLaMA-7B</td><td>Dense</td><td>-</td><td>78.67</td><td>75.08</td><td>56.94</td><td>70.01</td><td>75.25</td><td>41.89</td><td>66.31</td></tr><tr><td>SparseGPT</td><td>S</td><td>76.39</td><td>72.97</td><td>51.41</td><td>69.38</td><td>71.30</td><td>37.29</td><td>63.12</td></tr><tr><td>Wanda</td><td>S</td><td>76.04</td><td>71.62</td><td>52.48</td><td>68.74</td><td>70.75</td><td>37.03</td><td>62.77</td></tr><tr><td>DSnoT</td><td>S</td><td>76.01</td><td>73.09</td><td>52.87</td><td>67.40</td><td>70.95</td><td>37.12</td><td>62.91</td></tr><tr><td>Ours</td><td>S+LRA</td><td>76.33</td><td>74.95</td><td>52.97</td><td>68.82</td><td>71.68</td><td>36.77</td><td>63.59</td></tr><tr><td rowspan="5">LLaMA2-7B</td><td>Dense</td><td>-</td><td>78.07</td><td>77.71</td><td>57.14</td><td>68.90</td><td>76.35</td><td>43.60</td><td>66.96</td></tr><tr><td>SparseGPT</td><td>S</td><td>76.17</td><td>76.02</td><td>52.81</td><td>68.67</td><td>71.63</td><td>36.95</td><td>63.71</td></tr><tr><td>Wanda</td><td>S</td><td>76.71</td><td>76.60</td><td>52.56</td><td>68.43</td><td>72.18</td><td>38.31</td><td>64.13</td></tr><tr><td>DSnoT</td><td>S</td><td>76.28</td><td>73.58</td><td>52.01</td><td>66.93</td><td>71.68</td><td>38.82</td><td>63.22</td></tr><tr><td>Ours</td><td>S+LRA</td><td>77.09</td><td>75.08</td><td>53.89</td><td>68.03</td><td>75.04</td><td>42.32</td><td>65.24</td></tr><tr><td 
rowspan="5">LLaMA3-8B</td><td>Dense</td><td>-</td><td>80.14</td><td>82.08</td><td>60.02</td><td>73.64</td><td>81.40</td><td>51.19</td><td>71.41</td></tr><tr><td>SparseGPT</td><td>S</td><td>76.22</td><td>78.13</td><td>53.65</td><td>71.43</td><td>72.43</td><td>41.21</td><td>65.51</td></tr><tr><td>Wanda</td><td>S</td><td>75.90</td><td>79.54</td><td>51.41</td><td>70.96</td><td>73.23</td><td>41.64</td><td>65.44</td></tr><tr><td>DSnoT</td><td>S</td><td>75.52</td><td>79.05</td><td>51.51</td><td>69.38</td><td>73.15</td><td>40.87</td><td>64.91</td></tr><tr><td>Ours</td><td>S+LRA</td><td>76.39</td><td>78.57</td><td>53.18</td><td>70.64</td><td>74.71</td><td>42.32</td><td>65.97</td></tr><tr><td rowspan="5">LLaMA-13B</td><td>Dense</td><td>-</td><td>79.16</td><td>77.89</td><td>59.93</td><td>72.69</td><td>77.36</td><td>46.42</td><td>68.91</td></tr><tr><td>SparseGPT</td><td>S</td><td>78.35</td><td>76.85</td><td>54.88</td><td>71.35</td><td>72.47</td><td>41.98</td><td>65.98</td></tr><tr><td>Wanda</td><td>S</td><td>77.42</td><td>76.67</td><td>55.82</td><td>72.06</td><td>74.07</td><td>43.43</td><td>66.58</td></tr><tr><td>DSnoT</td><td>S</td><td>77.48</td><td>76.45</td><td>55.68</td><td>71.19</td><td>73.78</td><td>43.86</td><td>66.41</td></tr><tr><td>Ours</td><td>S+LRA</td><td>78.29</td><td>75.59</td><td>56.48</td><td>70.96</td><td>75.21</td><td>45.39</td><td>66.99</td></tr><tr><td 
rowspan="5">LLaMA2-13B</td><td>Dense</td><td>-</td><td>79.05</td><td>80.55</td><td>60.06</td><td>72.14</td><td>79.42</td><td>48.46</td><td>69.95</td></tr><tr><td>SparseGPT</td><td>S</td><td>77.69</td><td>81.41</td><td>55.93</td><td>71.59</td><td>74.66</td><td>42.06</td><td>67.22</td></tr><tr><td>Wanda</td><td>S</td><td>78.41</td><td>81.19</td><td>57.09</td><td>71.35</td><td>76.98</td><td>43.00</td><td>68.01</td></tr><tr><td>DSnoT</td><td>S</td><td>77.91</td><td>80.70</td><td>57.02</td><td>71.72</td><td>76.64</td><td>42.58</td><td>67.78</td></tr><tr><td>Ours</td><td>S+LRA</td><td>78.24</td><td>81.22</td><td>57.40</td><td>71.43</td><td>76.94</td><td>46.08</td><td>68.55</td></tr><tr><td rowspan="5">LLaMA3-70B</td><td>Dense</td><td>-</td><td>82.32</td><td>85.26</td><td>66.38</td><td>80.51</td><td>86.86</td><td>60.15</td><td>76.91</td></tr><tr><td>SparseGPT</td><td>S</td><td>81.77</td><td>84.95</td><td>62.81</td><td>76.80</td><td>83.25</td><td>55.55</td><td>74.19</td></tr><tr><td>Wanda</td><td>S</td><td>81.07</td><td>85.32</td><td>62.52</td><td>79.42</td><td>82.95</td><td>55.03</td><td>74.39</td></tr><tr><td>DSnoT</td><td>S</td><td>81.56</td><td>84.74</td><td>63.13</td><td>77.58</td><td>83.25</td><td>55.38</td><td>74.27</td></tr><tr><td>Ours</td><td>S+LRA</td><td>82.26</td><td>85.17</td><td>63.16</td><td>78.37</td><td>83.79</td><td>55.97</td><td>74.79</td></tr><tr><td 
rowspan="5">Qwen2.5-7B</td><td>Dense</td><td>-</td><td>78.51</td><td>84.52</td><td>72.77</td><td>60.01</td><td>80.56</td><td>48.63</td><td>70.83</td></tr><tr><td>SparseGPT</td><td>S</td><td>77.42</td><td>83.09</td><td>71.11</td><td>54.63</td><td>76.60</td><td>44.03</td><td>67.81</td></tr><tr><td>Wanda</td><td>S</td><td>77.15</td><td>83.03</td><td>70.24</td><td>53.07</td><td>75.59</td><td>41.12</td><td>66.70</td></tr><tr><td>DSnoT</td><td>S</td><td>77.04</td><td>83.21</td><td>70.95</td><td>52.96</td><td>75.72</td><td>41.46</td><td>66.89</td></tr><tr><td>Ours</td><td>S+LRA</td><td>77.81</td><td>83.30</td><td>71.35</td><td>54.44</td><td>79.00</td><td>46.16</td><td>68.68</td></tr><tr><td rowspan="5">Qwen2.5-14B</td><td>Dense</td><td>-</td><td>81.12</td><td>85.54</td><td>75.37</td><td>63.39</td><td>82.37</td><td>55.80</td><td>73.93</td></tr><tr><td>SparseGPT</td><td>S</td><td>79.00</td><td>85.69</td><td>73.24</td><td>57.25</td><td>80.85</td><td>51.11</td><td>71.19</td></tr><tr><td>Wanda</td><td>S</td><td>78.78</td><td>85.69</td><td>73.32</td><td>57.25</td><td>80.93</td><td>50.94</td><td>71.15</td></tr><tr><td>DSnoT</td><td>S</td><td>78.82</td><td>85.60</td><td>73.32</td><td>57.70</td><td>80.89</td><td>51.02</td><td>71.23</td></tr><tr><td>Ours</td><td>S+LRA</td><td>79.76</td><td>84.74</td><td>73.72</td><td>58.12</td><td>81.94</td><td>53.32</td><td>71.93</td></tr></table>
|
| 478 |
+
|
| 479 |
+
Table 10: Accuracy for zero-shot tasks on LLaMA and Qwen2.5 models of $50\%$ compression rate with different pruning methods.
|
112asynergisticsparseandlowrankcompressionmethodforlargelanguagemodels/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:51e6a9f792d77acf6313d74b18345032d54b21222008422ea7454e48fb101802
|
| 3 |
+
size 1152358
|
112asynergisticsparseandlowrankcompressionmethodforlargelanguagemodels/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d3531ea7ccb09df811b1140375df5545c0fe0ae963587eb5a525989ec72c3ba5
|
| 3 |
+
size 564209
|
2columns1rowarussianbenchmarkfortextualandmultimodaltableunderstandingandreasoning/19f169d2-5a3f-44da-a763-0066f91f1d99_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:21a7ae8cd184c028a10c1409597fe70f6736731bffb4d8cc346f6737cc0eea15
|
| 3 |
+
size 144074
|
2columns1rowarussianbenchmarkfortextualandmultimodaltableunderstandingandreasoning/19f169d2-5a3f-44da-a763-0066f91f1d99_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:508a2e6f683c3e3ae32571f03136fe24ff724ccf368af99914604db4cedf70ef
|
| 3 |
+
size 163410
|
2columns1rowarussianbenchmarkfortextualandmultimodaltableunderstandingandreasoning/19f169d2-5a3f-44da-a763-0066f91f1d99_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a6d6624d12882aa796748f19f5bad62128fc0276237261bd2b3d176faa252fbb
|
| 3 |
+
size 14959525
|
2columns1rowarussianbenchmarkfortextualandmultimodaltableunderstandingandreasoning/full.md
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2columns1rowarussianbenchmarkfortextualandmultimodaltableunderstandingandreasoning/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:8f3551e0c92c0055b1f1066d58787ed1965fe0ae2d149a91ec8a178e2e6e40b7
|
| 3 |
+
size 2865161
|
2columns1rowarussianbenchmarkfortextualandmultimodaltableunderstandingandreasoning/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:df8ab8b4a09c720b14c376d557d63861d6d5c01e81ff66d3cd4cec5df873021e
|
| 3 |
+
size 486207
|
3dawarevisionlanguagemodelsfinetuningwithgeometricdistillation/80f389b6-aa5b-482f-a032-c21a0b53f78c_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:188ad2c62195bf274f72a5018e218fb274d6d3d3e782725fd6514859215c58e7
|
| 3 |
+
size 138315
|
3dawarevisionlanguagemodelsfinetuningwithgeometricdistillation/80f389b6-aa5b-482f-a032-c21a0b53f78c_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:8132c5e7b07e1bc481d8030d8e9d9a48c6b5e5a748a81f75c419f2356532195a
|
| 3 |
+
size 169777
|
3dawarevisionlanguagemodelsfinetuningwithgeometricdistillation/80f389b6-aa5b-482f-a032-c21a0b53f78c_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1c049222f944e3ab74914f367bc4b2dc7781ef42c8cb3c8f0260cd741390f5c5
|
| 3 |
+
size 39987874
|
3dawarevisionlanguagemodelsfinetuningwithgeometricdistillation/full.md
ADDED
|
@@ -0,0 +1,611 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# 3D-Aware Vision-Language Models Fine-Tuning with Geometric Distillation
|
| 2 |
+
|
| 3 |
+
Seonho Lee*, Jiho Choi*, Inha Kang, Jiwook Kim, Junsung Park, Hyunjung Shim†
|
| 4 |
+
|
| 5 |
+
Graduate School of Artificial Intelligence, KAIST, Republic of Korea
|
| 6 |
+
|
| 7 |
+
{glanceyes, jihochoi, rkswlsj13, tom919, jshackist, kateshim}@kaist.ac.kr
|
| 8 |
+
|
| 9 |
+

|
| 10 |
+
Figure 1: Geometric Distillation enhances 3D spatial reasoning in vision-language models. By distilling geometric cues such as correspondences, relative depth, and cost alignment from 3D foundation models, our method improves 3D visual understanding and enables accurate reasoning in tasks like answering which object is closer.
|
| 11 |
+
|
| 12 |
+
# Abstract
|
| 13 |
+
|
| 14 |
+
Vision-Language Models (VLMs) have shown remarkable performance on diverse visual and linguistic tasks, yet they remain fundamentally limited in their understanding of 3D spatial structures. We propose Geometric Distillation, a lightweight, annotation-free fine-tuning framework that injects human-inspired geometric cues into pretrained VLMs without modifying their architecture. By distilling (1) sparse correspondences, (2) relative depth relations, and (3) dense cost volumes from off-the-shelf 3D foundation models (e.g., MASt3R, VGGT), our method shapes representations to be geometry-aware while remaining compatible with natural image-text inputs. Through extensive evaluations on 3D vision-language reasoning and 3D perception benchmarks, our method consistently outperforms prior approaches, achieving improved 3D spatial reasoning with significantly lower computational cost. Our work demonstrates a scalable and efficient path to bridge 2D-trained VLMs with 3D understanding, opening up wider use in spatially grounded multimodal tasks.
|
| 15 |
+
|
| 16 |
+
# 1 Introduction
|
| 17 |
+
|
| 18 |
+
Vision-Language Models (VLMs) (e.g., CLIP (Radford et al., 2021), ALIGN (Jia et al., 2021), and BLIP (Li et al., 2022, 2023)), trained on large-scale image-text datasets, have demonstrated competitive performance on diverse multimodal tasks (Li et al., 2021; Gao et al., 2024; Lee et al., 2022). Despite their progress, these models struggle with understanding 3D spatial structures (El Banani et al., 2024; Man et al., 2024; Chen et al., 2024; Danier et al., 2024; Li et al., 2024; Kamath et al., 2023; Qiu et al., 2025). Specifically, VLMs remain limited in grounded spatial reasoning tasks such as depth ordering, occlusion, or object layout in a scene (El Banani et al., 2024; Chen et al., 2024; Kamath et al., 2023). This limitation stems from their reliance on 2D projections, which lack depth cues and multiview supervision (Eigen et al., 2014; Tulsiani et al., 2017; Qin et al., 2019). It is illustrated in Figure 1, where features of standard VLMs like CLIP incorrectly predict relative depth due to their limited 3D awareness. These shortcomings greatly hinder applications requiring spatial reasoning, including navigation, scene understanding, and robotic planning (Peng et al., 2023; Shridhar et al., 2022; Hong et al., 2023).
|
| 19 |
+
|
| 20 |
+

|
| 21 |
+
(a) Multi-view
|
| 22 |
+
|
| 23 |
+

|
| 24 |
+
(b) Correspondences
|
| 25 |
+
|
| 26 |
+

|
| 27 |
+
(c) Depth Maps
|
| 28 |
+
Figure 2: Geometric cues and PCA visualization of feature transformation through geometric distillation.
|
| 29 |
+
|
| 30 |
+

|
| 31 |
+
(d) Cost Matching
|
| 32 |
+
|
| 33 |
+

|
| 34 |
+
(e) Before Tuning
|
| 35 |
+
|
| 36 |
+

|
| 37 |
+
(f) After Tuning
|
| 38 |
+
|
| 39 |
+
To address this, recent work has explored injecting 3D priors into VLMs. FiT3D (Yue et al., 2024) reconstructs 3D scenes from multi-view images using Gaussian splatting (Kerbl et al., 2023), and then aligns VLM's features with those rendered 3D views. Multiview Equivariant Finetuning (MEF) (You et al., 2024) improves 3D equivariance by reinforcing feature consistency across rendered views of the same object. SpatialVLM (Chen et al., 2024) improves its spatial reasoning abilities by generating billions of synthetic spatial question-answer pairs to train VLMs.
|
| 40 |
+
|
| 41 |
+
Despite these advancements, existing methods suffer from notable drawbacks. FiT3D incurs a high computational cost and suffers from semantic degradation due to its reliance on explicit 3D reconstruction. MEF depends on 3D object-centric datasets, which restricts its generalizability to real-world scenes. SpatialVLM requires extensive synthetic data generation and task-specific tuning, making it resource-intensive and less flexible. These limitations motivate the need for more efficient and generalizable approaches to endow VLMs with robust 3D awareness.
|
| 42 |
+
|
| 43 |
+
We propose Geometric Distillation, a lightweight and annotation-free fine-tuning framework that enriches 3D spatial understanding in VLMs. Our approach introduces supervision signals aligned with human perceptual strategies, derived from pretrained 3D foundation models such as MASt3R (Leroy et al., 2024) and VGGT (Wang et al., 2025) as in Figure 2 (a) - (d). First, we supervise the VLM to align features at sparse correspondences, which are visually stable and semantically meaningful regions, such as object corners or room boundaries, derived from pretrained 3D foundation models without any explicit 3D annotations. These locations provide strong geometric anchors across views, and feature-level matching at these points encourages the model to learn consistent and viewpoint-invariant representations. Second, we supervise relative
|
| 44 |
+
|
| 45 |
+
depth reasoning through ordinal comparisons both within and across views. This reflects the human tendency to reason in relative terms and aligns with the way spatial relationships are expressed in language (Zhang et al., 2022b; Auty and Mikolajczyk, 2023). Lastly, we incorporate dense cost volume alignment, which captures soft correspondences across views by fully exploiting the geometric priors and warping relationships (Weinzaepfel et al., 2022; An et al., 2024) provided by 3D foundation models, thereby enabling the model to learn fine-grained geometric consistency. These signals collectively reshape the visual representations into a geometry-aware space that better supports grounded spatial reasoning and improves VLM performance on 3D-aware tasks as shown in Figure 2 (e), (f). Additionally, since our approach operates without modifying the VLM's architecture and retains compatibility with natural image-text inputs, it preserves the strong generalization capabilities of the original model.
|
| 46 |
+
|
| 47 |
+
To overcome these limitations, we draw inspiration from human 3D spatial perception. Humans infer depth and structure from sparse relational cues such as occlusions, relative size, and perspective, rather than absolute measurements (Todd and Norman, 2003; Howard and Rogers, 1995; Landy et al., 1995). In addition, spatial relationships are often expressed in language using relative terms (e.g., "next to the table", "behind the sofa") rather than absolute metric units, suggesting that the reasoning is both perceptually and linguistically grounded. These observations suggest that incorporating human-inspired geometric cues into VLMs can enhance their spatial reasoning abilities.
|
| 48 |
+
|
| 49 |
+
Our approach enhances the model's ability to infer spatial relations, such as object proximity, without explicit 3D labels or costly reconstruction. We demonstrate consistent improvements across a range of 3D-aware tasks, including semantic correspondence, depth estimation, and 3D visual question answering. Our method outper
|
| 50 |
+
|
| 51 |
+
forms strong baselines on benchmarks such as PF-PASCAL, TAP-Vid, and ScanQA, illustrating both the effectiveness and scalability of our approach.
|
| 52 |
+
|
| 53 |
+
# 2 Related Work
|
| 54 |
+
|
| 55 |
+
Fine-tuning VFMs and VLMs. Various attempts have been made to integrate 3D information into Visual Foundation Models (VFMs) or Vision-Language Models (VLMs) (Yue et al., 2024; You et al., 2024). FiT3D (Yue et al., 2024) lifts 2D visual features into a 3D Gaussian representation and re-renders them from multiple viewpoints. By fine-tuning, this approach guides the original 2D features to align with re-rendered features, which enhances 3D perception in VFMs. However, its dense L1 optimization introduces noise, which potentially leads to semantic information loss and significant computational overhead. Multiview Equivariance Finetuning (MEF) (You et al., 2024) enhances 3D correspondence understanding by maximizing cross-view feature equivariance within pretrained foundation models. This allows them to improve on tasks such as camera pose estimation, object tracking, and semantic transfer. Nevertheless, MEF requires explicit 3D annotations and does not provide direct supervision for depth understanding. SpatialVLM (Chen et al., 2024) generates extensive 3D spatial QA corpora using pretrained detectors, depth estimators, and segmentation models. Training on this large-scale data strengthens the spatial question-answering capabilities of VLMs. However, the reliance on massive synthetic datasets limits their practicality. In our work, we address these limitations by introducing a lightweight and annotation-free fine-tuning method that efficiently enhances 3D spatial reasoning in VLMs.
|
| 56 |
+
|
| 57 |
+
3D Foundation Models. Recently, geometry-based models have emerged as foundation models for 3D vision. CroCo (Weinzaepfel et al., 2022, 2023) performs self-supervised cross-view completion by reconstructing one view from another, which allows the model to acquire multiview-consistent features. Based on CroCo pretraining, DUSt3R (Wang et al., 2024) introduces a unified approach to directly estimate scene point maps from two or more images taken from different viewpoints. DUSt3R effectively simplifies the Structure-from-Motion (SfM) pipeline. MASt3R (Leroy et al., 2024) further extends these approaches by incorporating a global matching head that aligns partial reconstructions and predicts dense 3D cor
|
| 58 |
+
|
| 59 |
+
respondences. These models inherently provide 3D perceptual priors by learning scene geometry without explicit supervision or accurate dense reconstructions from limited views. Additionally, VGGT (Wang et al., 2025) introduces a large transformer-based model to jointly estimate camera poses, depth maps, and point clouds from a few images. Training VGGT on large-scale 3D datasets enables accurate depth prediction even from a single image, which significantly improves 3D downstream tasks. Consequently, these models embed critical 3D knowledge that is beneficial for robust 3D understanding. In our work, we propose a method to effectively inject these rich 3D priors into VLMs.
|
| 60 |
+
|
| 61 |
+
Bridging VLMs and 3D Understanding. Recent studies have explored analyzing or improving vision-language representations to better understand 3D scenes. Lexicon3D (Man et al., 2024) evaluates various vision foundation encoders across vision-language reasoning tasks and identifies their strengths and limitations. Notably, image-text alignment supervised models (Qiu et al., 2025; Auty and Mikolajczyk, 2023; Radford et al., 2021; Jia et al., 2021) still exhibit substantial weaknesses in complex 3D spatial reasoning and language-driven question answering tasks. This suggests that vision-language pretraining alone may not sufficiently capture comprehensive 3D concepts. These observations underscore the necessity of incorporating explicit 3D signals or specialized training strategies into VLMs. To address these limitations, various approaches have been proposed. Some studies (Hegde et al., 2023) extend CLIP via prompt tuning by prepending learnable tokens to the vision encoder and training it contrastively on rendered 3D object images paired with textual labels. Other notable efforts include PointCLIP (Zhang et al., 2022a; Zhu et al., 2023), which aligns 3D point clouds with CLIP's textual embedding space, and methods designed to enhance text-image alignment in 3D contexts (Kim et al., 2023; Zeng et al., 2021). Collectively, these studies introduce additional representations or strategies to enrich 3D understanding within VLMs. In contrast, our work directly injects robust 3D knowledge into 2D VLMs using multi-view images. This enables leveraging their inherent rich 2D vision-language priors without relying on explicit supervision from other 3D data modalities such as point clouds or 3D Gaussians.
|
| 62 |
+
|
| 63 |
+

|
| 64 |
+
Figure 3: Overview of Geometric Distillation Architecture. A 3D foundation model extracts geometric cues including (1) sparse correspondences, (2) depth maps, and (3) dense cost volumes from multi-view inputs. These cues supervise a frozen CLIP image encoder with a lightweight adapter (LoRA) via three loss branches: $\mathcal{L}_{\mathrm{match}}$ , $\mathcal{L}_{\mathrm{depth}}$ , and $\mathcal{L}_{\mathrm{cost}}$ . The distillation enables the VLM to acquire 3D spatial awareness without explicit 3D annotations.
|
| 65 |
+
|
| 66 |
+
# 3 Proposed Method
|
| 67 |
+
|
| 68 |
+
We propose a geometric knowledge distillation framework that transfers 3D spatial understanding from high-performance 3D foundation models such as MASt3R (Leroy et al., 2024) and VGGT (Wang et al., 2025) into a pretrained vision-language model (VLM) (Radford et al., 2021; Jia et al., 2021) without requiring any ground truth 3D annotations. Inspired by human perception, which infers spatial structure by integrating visual cues from multiple viewpoints, our method uses paired images, $\{I^{v_1}, I^{v_2}\}$ , of the same scene captured from different perspectives $v_1$ and $v_2$ . From these image pairs, we extract geometric signals including sparse correspondences, ordinal depth relations, and viewpoint-induced disparities, which guide the VLM to learn geometry-aware representations. An overview of our framework is illustrated in Figure 3.
|
| 69 |
+
|
| 70 |
+
Our framework obtains these geometric cues using a teacher model that generates pseudo-3D supervision from image pairs. Specifically, we utilize the following information provided by 3D foundation models: (i) sparse correspondences $\mathbb{P}^{v_1,v_2} = \{(p_i^{v_1},p_i^{v_2})\}_{i = 1}^{\lvert \mathbb{P}^{v_1,v_2}\rvert}$ for matching 3D points across views, (ii) estimated depth maps $\tilde{\mathbb{D}}^{v_1},\tilde{\mathbb{D}}^{v_2}$ for each viewpoint, and (iii) a dense cost volume, $\mathbb{C}^{v_1\to v_2}$ , representing patch-level feature similarity between two viewpoints. These heterogeneous signals serve as supervision for three complementary objectives: sparse correspondence matching, relative depth learning using both intra-view and inter-view comparisons, and alignment of dense feature similarity. Combined, they enrich the model's multimodal representations and facilitate 3D-aware reasoning in complex scenes.
|
| 71 |
+
|
| 72 |
+
# 3.1 Sparse Correspondences
|
| 73 |
+
|
| 74 |
+
Background. Humans often rely on sparse but stable visual features, such as corners or edges, to estimate spatial layout. In a similar way, sparse correspondences across views serve as geometric anchors that help enforce cross-view consistency and identify matching 3D points. These signals are essential for enforcing consistency across viewpoints (Leroy et al., 2024; Wang et al., 2025) and have been widely adopted in multi-view geometry (Weinzaepfel et al., 2022, 2023; An et al., 2024) as well as recent representation learning methods such as MEF (You et al., 2024). To exploit these correspondences, we adopt a feature-matching objective that promotes accurate feature-level alignment between image pairs. Given a set of pseudo correspondence pairs $\mathbb{P}^{v_1,v_2} = \{(p_i^{v_1},p_i^{v_2})\}_{i = 1}^{\lvert\mathbb{P}^{v_1,v_2}\rvert}$ generated by a geometric teacher, we extract local image features $\{(f_i^{v_1},f_i^{v_2})\}_{i = 1}^{\lvert\mathbb{P}^{v_1,v_2}\rvert}$ and intermediate patch features $\{h^{v_*}\}$ from each viewpoint. We adopted a matching-based loss (Brown et al., 2020; You et al., 2024) that encourages high retrieval performance by maximizing the Smooth Average Precision (SmoothAP) (Brown et al., 2020), computed within a spatial neighborhood. For a query feature $f_{i}$ , the SmoothAP is calculated using positive matches $\mathbb{P}^{v_1,v_2}$ and negative matches (non-matches), $\mathcal{N}(i)$ , of point $p_i$ as:
|
| 75 |
+
|
| 76 |
+
$$
|
| 77 |
+
\operatorname{SmoothAP}_{v_1 \rightarrow v_2} = \frac{1}{\left|\mathbb{P}^{v_1,v_2}\right|} \sum_{i \in \mathbb{P}^{v_1,v_2}} \frac{1 + \sigma\left(D_{ii}\right)}{1 + \sigma\left(D_{ii}\right) + \sum_{j \in \mathcal{N}(i)} \sigma\left(D_{ij}\right)}, \tag{1}
|
| 78 |
+
$$
|
| 79 |
+
|
| 80 |
+
where $D_{ij} = f_j^{v_2}\cdot f_i^{v_1} - f_i^{v_2}\cdot f_i^{v_1}$ measures the difference in similarity between the candidate match $j$ and the true match $i$ for query feature $f_i^{v_1}$ (so that $D_{ii} = 0$), and $\sigma (x)$ denotes the sigmoid function. This objective promotes higher similarity for true matches than for
|
| 81 |
+
|
| 82 |
+
non-matches, thereby incorporating relative similarity into the training. To ensure symmetry across views, we apply the objective in both matching directions and define the final loss as:
|
| 83 |
+
|
| 84 |
+
$$
|
| 85 |
+
\mathcal{L}_{\text{match}} = 1 - \frac{1}{2}\left\{\operatorname{SmoothAP}_{v_1 \rightarrow v_2} + \operatorname{SmoothAP}_{v_2 \rightarrow v_1}\right\}. \tag{2}
|
| 86 |
+
$$
|
| 87 |
+
|
| 88 |
+
# 3.2 Relative Depth Understanding
|
| 89 |
+
|
| 90 |
+
To complement sparse correspondences, we enhance the VLM's geometric reasoning by supervising its understanding of relative depth. Unlike absolute depth estimation, which is fundamentally ambiguous in monocular settings due to scale uncertainty, relative depth reasoning (i.e., determining which of two points is closer) is intuitive and practically robust across domains (Todd and Norman, 2003; Howard and Rogers, 1995; Landy et al., 1995). Numerous studies (Fu et al., 2018; Chen et al., 2016; Xian et al., 2020; Zoran et al., 2015) show that models trained with ordinal depth constraints generalize better to diverse scenes and produce sharper depth maps with preserved structure.
|
| 91 |
+
|
| 92 |
+
Inspired by this, we leverage the outputs of high-capacity 3D foundation models (e.g., MASt3R (Leroy et al., 2024), VGGT (Wang et al., 2025)) to construct pseudo ground-truth relative depth labels. This approach allows us to inject 3D awareness into VLMs without explicit 3D supervision or reconstruction. The learning proceeds on two levels: intra-view and inter-view, capturing both local monocular cues and multi-view disparities, akin to human depth perception mechanisms.
|
| 93 |
+
|
| 94 |
+
Intra-view Relative Depth. Given an image $I^v$ , we sample point pairs $(x,y)\in \mathcal{P}^v$ and determine their ordinal pseudo ground-truth relation using the depth map $\tilde{\mathbb{D}}^v$ provided by a 3D foundation model (e.g., MASt3R, VGGT). The relative depth ordering is defined as:
|
| 95 |
+
|
| 96 |
+
$$
|
| 97 |
+
\mathrm{s}_{xy} = \operatorname{sign}\left(\tilde{d}_x - \tilde{d}_y\right) \in \{-1, +1\}, \tag{3}
|
| 98 |
+
$$
|
| 99 |
+
|
| 100 |
+
where $\tilde{d}_x$ and $\tilde{d}_y$ denote the estimated depths of points $x$ and $y$ from viewpoint $v$ , respectively. The VLM predicts a scalar depth ranking score $\hat{\mathbf{s}}_{xy}$ for each pair based on its encoded features, and is trained with a logistic ranking loss (Chen et al., 2009; Fu et al., 2018):
|
| 101 |
+
|
| 102 |
+
$$
|
| 103 |
+
\mathcal{L}_{\text{intra\_depth}} = \frac{1}{|\mathcal{P}^v|^2} \sum_{(x, y) \in \mathcal{P}^v} \log\left(1 + \exp\left[-\mathrm{s}_{xy} \cdot \hat{\mathrm{s}}_{xy}\right]\right). \tag{4}
|
| 104 |
+
$$
|
| 105 |
+
|
| 106 |
+
This loss encourages correct ordinal predictions without relying on metric depth values, allowing
|
| 107 |
+
|
| 108 |
+

|
| 109 |
+
(a) Anchor
|
| 110 |
+
|
| 111 |
+

|
| 112 |
+
Figure 4: Visualization of cost volume. (a) Anchor view with query location (yellow box). Cost volume heatmaps from (b) the teacher (MASt3R), (c) the vanilla CLIP, and (d) after geometric distillation. The proposed method better captures localized geometric similarity, closely aligning with the teacher's output.
|
| 113 |
+
|
| 114 |
+

|
| 115 |
+
(b) MASt3R
|
| 116 |
+
(c) Vanilla
|
| 117 |
+
|
| 118 |
+

|
| 119 |
+
(d) Ours
|
| 120 |
+
|
| 121 |
+
the model to learn scale-invariant depth cues from local monocular structure.
|
| 122 |
+
|
| 123 |
+
Inter-view Relative Depth. To further infuse geometric awareness, we supervise relative depth relationships across multiple views, as absolute depth values may differ due to scale variations between viewpoints. Unlike intra-view supervision, which assumes a consistent scale within a single image, inter-view supervision requires the model to reason about depth differences under potential scale shifts.
|
| 124 |
+
|
| 125 |
+
Given a correspondence pair $(p_i^{v_1}, p_i^{v_2}) \in \mathbb{P}^{v_1, v_2}$ that observes the same 3D point from views $v_1$ and $v_2$ , we extract the pseudo ground-truth depths $\tilde{d}_i^{v_1}$ and $\tilde{d}_i^{v_2}$ from the teacher model's depth maps $\tilde{\mathbb{D}}^{v_1}$ and $\tilde{\mathbb{D}}^{v_2}$ , respectively. To mitigate the effect of absolute scale mismatch, we define a bounded signed depth difference using the tanh function as $\delta_i^* = \tanh(\tilde{d}_i^{v_1} - \tilde{d}_i^{v_2})$ . The model is trained to regress this value using a lightweight MLP head, which is applied to the feature representations of each view. The loss is defined as:
|
| 126 |
+
|
| 127 |
+
$$
|
| 128 |
+
\mathcal{L}_{\text{inter\_depth}}^{v_1, v_2} = \frac{1}{\left|\mathbb{P}^{v_1,v_2}\right|} \sum_{i \in \mathbb{P}^{v_1,v_2}} \left|\hat{\delta}_i - \delta_i^*\right|. \tag{5}
|
| 129 |
+
$$
|
| 130 |
+
|
| 131 |
+
This supervision encourages the model to be sensitive to viewpoint-induced disparities and relative geometry, even in the absence of explicit camera calibration or metric consistency. To jointly capture both local (intra-view) and cross-view (inter-view) depth relationships, we define the final relative depth loss as a combination of the two components: $\mathcal{L}_{\mathrm{depth}} = \sum_{p}\{\mathcal{L}_{\mathrm{intra\_depth}}^{v_p} + \sum_q\mathcal{L}_{\mathrm{inter\_depth}}^{v_p,v_q}\}$ . By unifying intra-view ordinal supervision with inter-view relative regression, the model learns to infer consistent and structurally-aware depth relationships. This multi-scale depth reasoning framework fosters a more human-like, scale-invariant understanding of 3D geometry, enhancing the generalization ability of vision-language models across diverse visual domains.
|
| 132 |
+
|
| 133 |
+
Table 1: Comparison of zero-shot semantic correspondence on PF-PASCAL.
|
| 134 |
+
|
| 135 |
+
<table><tr><td rowspan="2">Method</td><td rowspan="2">Dataset</td><td colspan="3">Different Views</td><td colspan="3">Same Views</td></tr><tr><td>PCK@0.05</td><td>PCK@0.10</td><td>PCK@0.15</td><td>PCK@0.05</td><td>PCK@0.10</td><td>PCK@0.15</td></tr><tr><td>(Vanilla) CLIP</td><td>-</td><td>16.61</td><td>26.96</td><td>37.64</td><td>18.23</td><td>32.27</td><td>43.01</td></tr><tr><td>FiT3D (Yue et al., 2024)</td><td>ScanNet++</td><td>15.90</td><td>23.40</td><td>30.34</td><td>14.93</td><td>26.52</td><td>34.56</td></tr><tr><td>MEF (You et al., 2024)</td><td>Objaverse</td><td>21.18</td><td>33.54</td><td>43.58</td><td>25.94</td><td>43.33</td><td>53.87</td></tr><tr><td>Ours</td><td>Objaverse</td><td>25.87</td><td>39.85</td><td>50.21</td><td>36.77</td><td>56.61</td><td>67.93</td></tr><tr><td>Ours</td><td>ScanNet++</td><td>28.48</td><td>43.07</td><td>53.55</td><td>42.16</td><td>61.57</td><td>72.16</td></tr><tr><td></td><td></td><td>(+11.87)</td><td>(+16.11)</td><td>(+15.91)</td><td>(+23.93)</td><td>(+29.30)</td><td>(+29.15)</td></tr></table>
|
| 136 |
+
|
| 137 |
+
1 The best score is bold and the second-best score is underlined. These are the same for all experiments.
|
| 138 |
+
|
| 139 |
+
Table 2: Comparison of video tracking on TAP-Vid and pose estimation on OnePose-LowTexture.
|
| 140 |
+
|
| 141 |
+
<table><tr><td rowspan="2">Method</td><td rowspan="2">Dataset</td><td colspan="2">Video Tracking</td><td colspan="3">Pose Estimation</td></tr><tr><td>Avg. Jaccard Index</td><td>Avg. Position Accuracy</td><td>1cm-1deg</td><td>3cm-3deg</td><td>5cm-5deg</td></tr><tr><td>(Vanilla) CLIP</td><td>-</td><td>27.73</td><td>42.59</td><td>2.50</td><td>19.32</td><td>33.11</td></tr><tr><td>FiT3D (Yue et al., 2024)</td><td>ScanNet++</td><td>28.45</td><td>43.51</td><td>2.86</td><td>20.14</td><td>34.75</td></tr><tr><td>MEF (You et al., 2024)</td><td>Objaverse</td><td>34.61</td><td>50.58</td><td>6.32</td><td>36.00</td><td>52.33</td></tr><tr><td>Ours</td><td>Objaverse</td><td>35.60</td><td>54.65</td><td>8.50</td><td>39.30</td><td>57.68</td></tr><tr><td>Ours</td><td>ScanNet++</td><td>40.09</td><td>57.75</td><td>10.96</td><td>44.93</td><td>63.65</td></tr><tr><td></td><td></td><td>(+12.36)</td><td>(+15.16)</td><td>(+8.46)</td><td>(+25.61)</td><td>(+30.54)</td></tr></table>
|
| 142 |
+
|
| 143 |
+
# 3.3 Dense Cost Volume Alignment
|
| 144 |
+
|
| 145 |
+
Beyond sparse matching and relative depth supervision, we introduce a dense cost volume alignment method to extract richer geometric cues from intermediate features of 3D foundation models. This alignment is further enhanced by leveraging geometric priors from cross-view completion models such as CroCo (Weinzaepfel et al., 2022, 2023), and transformer-based models using cross-attention mechanisms across multiple views like VGGT (Wang et al., 2025). Recent findings from ZeroCo (An et al., 2024) show that cross-attention maps learned through cross-view completion pretext tasks encode high-quality dense correspondences, effectively acting as self-supervised cost volumes. These maps inherently learn to warp source features to reconstruct masked target views by estimating correspondences across views. By treating these attention-derived correspondences as pseudo ground-truth warping functions, we can supervise the VLM's dense feature similarity to better reflect geometric consistency, thereby enhancing its capacity for dense 3D-aware reasoning.
|
| 146 |
+
|
| 147 |
+
To enforce dense geometric consistency across entire feature maps, we align the feature similarities produced by a vision-language model with geometrically grounded predictions from a 3D foundation model as in Figure 4. Given two views $v_{1}$ and $v_{2}$ , we construct a 4D cost volume that encodes normalized feature similarity between all spatial positions (patch index) across the views:
|
| 148 |
+
|
| 149 |
+
$$
|
| 150 |
+
\mathbb{C}_{v_1 \rightarrow v_2}(i, j) = \frac{h_i^{v_1} \cdot h_j^{v_2}}{\|h_i^{v_1}\| \, \|h_j^{v_2}\|}, \tag{6}
|
| 151 |
+
$$
|
| 152 |
+
|
| 153 |
+
where $h_i^{v_1} \in \mathbb{R}^{d}$ denotes the $d$-dimensional intermediate feature vector at patch index $i$ in view $v_1$, and $j$ is a corre
|
| 154 |
+
|
| 155 |
+
sponding patch index in view $v_{2}$ . This similarity matrix captures the VLM's inherent geometric understanding between all patch pairs across views. We convert this cost volume into a probability distribution using temperature-scaled softmax as:
|
| 156 |
+
|
| 157 |
+
$$
|
| 158 |
+
P_{v_1 \rightarrow v_2}(j \mid i) = \operatorname{softmax}_j\left(\mathbb{C}_{v_1 \rightarrow v_2}(i, j) / \tau\right), \tag{7}
|
| 159 |
+
$$
|
| 160 |
+
|
| 161 |
+
where temperature $\tau$ controls the sharpness of the matching distribution. The geometric teacher provides target distributions $\tilde{P}_{v_1\rightarrow v_2}$ derived from its robust 3D understanding. Our alignment loss minimizes the Jensen-Shannon Divergence (Menendez et al., 1997) as:
|
| 162 |
+
|
| 163 |
+
$$
|
| 164 |
+
\mathcal{L}_{\text{cost}} = \frac{1}{2}\left\{D_{\mathrm{KL}}\left(\tilde{P}_{v_1 \rightarrow v_2} \,\|\, P_{v_1 \rightarrow v_2}\right) + D_{\mathrm{KL}}\left(\tilde{P}_{v_2 \rightarrow v_1} \,\|\, P_{v_2 \rightarrow v_1}\right)\right\}. \tag{8}
|
| 165 |
+
$$
|
| 166 |
+
|
| 167 |
+
This dense supervision compels the VLM's feature similarities to mirror the teacher's geometrically grounded predictions, enforcing subpixel-level geometric awareness.
|
| 168 |
+
|
| 169 |
+
# 3.4 Overall Objective
|
| 170 |
+
|
| 171 |
+
To jointly train the vision-language model with rich geometric supervision, we combine the proposed loss components into a single objective function. Given a pair of images $(I^{v_1}, I^{v_2})$ from the same scene, the total loss is defined as:
|
| 172 |
+
|
| 173 |
+
$$
|
| 174 |
+
\mathcal{L}_{\text{total}} = \lambda_{\text{match}} \mathcal{L}_{\text{match}} + \lambda_{\text{depth}} \mathcal{L}_{\text{depth}} + \lambda_{\text{cost}} \mathcal{L}_{\text{cost}}, \tag{9}
|
| 175 |
+
$$
|
| 176 |
+
|
| 177 |
+
where $\lambda_{\mathrm{match}}$ , $\lambda_{\mathrm{depth}}$ , and $\lambda_{\mathrm{cost}}$ are hyperparameters for balancing each loss term.
|
| 178 |
+
|
| 179 |
+
# 4 Experiments
|
| 180 |
+
|
| 181 |
+
# 4.1 Experimental Setups
|
| 182 |
+
|
| 183 |
+
Datasets. We evaluate our method in two main sets of downstream tasks to examine the effectiveness
|
| 184 |
+
|
| 185 |
+

|
| 186 |
+
(a) Source
|
| 187 |
+
|
| 188 |
+

|
| 189 |
+
(b) MEF
|
| 190 |
+
|
| 191 |
+

|
| 192 |
+
(c) Ours
|
| 193 |
+
Figure 5: Semantic Transfer. (a) Source image with annotated keypoints. Transfer results using (b) MEF (You et al., 2024) and (c) our approach. Our method produces more accurate and spatially consistent transfers.
|
| 194 |
+
|
| 195 |
+
of our 3D-aware VLM representations: 3D visual understanding and vision-language understanding tasks. Specifically, to measure the 3D correspondence understanding, we conduct experiments on three downstream benchmarks introduced by (You et al., 2024): (1) semantic correspondence on PF-PASCAL (Ham et al., 2016), (2) video tracking on TAP-Vid (Doersch et al., 2022), and (3) object pose estimation on the OnePose-LowTexture dataset (He et al., 2022). Additionally, we perform experiments on downstream tasks for dense scene understanding via linear probing as in FiT3D (Yue et al., 2024), including semantic segmentation on ADE20K (Zhou et al., 2019) and VOC2012 (Everingham et al., 2015), and monocular depth estimation on ScanNet++ (Yeshwanth et al., 2023) and KITTI (Geiger et al., 2013). Furthermore, we assess improvements in 3D vision-language understanding by evaluating our method on the 3D visual question-answering benchmarks SQA3D (Ma et al., 2022) and ScanQA (Azuma et al., 2022).
|
| 196 |
+
|
| 197 |
+
Implementation Details. We fine-tune the ViT-based CLIP model for up to 500 epochs on either Objaverse (Deitke et al., 2023) or ScanNet++. We perform parameter-efficient fine-tuning through LoRA (Hu et al., 2022), adopting settings similar to those used in MEF (You et al., 2024). Our method primarily leverages MASt3R (Leroy et al., 2024) as a pretrained 3D foundation teacher during geometric distillation. Further implementation details, including experiments with VGGT (Wang et al., 2025), are provided in the appendix.
|
| 198 |
+
|
| 199 |
+
# 4.2 Experimental Results
|
| 200 |
+
|
| 201 |
+
# 4.2.1 3D Visual Understanding
|
| 202 |
+
|
| 203 |
+
3D Correspondence Understanding. We evaluate how effectively our distilled 3D-aware VLM representations capture robust multi-view correspondences, following established protocols from You
|
| 204 |
+
|
| 205 |
+
et al. (2024). As summarized in Tables 1 and 2, the baseline CLIP and FiT3D (Yue et al., 2024) exhibit limited performance. Specifically, FiT3D slightly degrades the semantic matching ability, corroborating findings by (You et al., 2024). MEF (You et al., 2024) significantly improves performance as it leverages explicit 3D annotations. Nevertheless, our approach consistently outperforms MEF even without such annotations. On the Objaverse dataset, our geometric distillation yields notable improvements over the vanilla CLIP. Moreover, training on the real-world ScanNet++ dataset results in further substantial gains of $+11.87\%$ in PCK@0.05, $+12.36\%$ in average Jaccard index, and $+8.46\%$ accuracy at the 1cm-1deg threshold. This demonstrates the practical value and strong generalization power of our method. Unlike MEF, which indiscriminately uses 3D annotations, our distillation naturally selects semantically meaningful key regions, leading to more effective correspondence learning. These observations confirm that our approach effectively transfers strong geometric priors into VLM representations by improving cross-view consistency without explicit ground-truth 3D supervision. Further qualitative comparisons provided in Figure 5 support these quantitative results.
|
| 206 |
+
|
| 207 |
+
Depth Estimation and Semantic Segmentation. We demonstrate the transferability of our distilled VLM features via linear probing on monocular depth estimation and semantic segmentation tasks after fine-tuning on ScanNet++. Although traditionally 2D-oriented, performance on these tasks heavily relies on robust 3D geometric understanding (Yue et al., 2024). We measure depth prediction accuracy with RMSE and absolute relative error (Rel.), and semantic segmentation using mIoU and mAcc. As shown in Table 3, FiT3D significantly improves both tasks but requires approximately three days of training on four NVIDIA A6000 GPUs due to costly 3D Gaussian optimization across training scenes. MEF shows marginal improvements over baseline CLIP, indicating limited effectiveness for dense predictions. Our approach achieves the best depth estimation performance, reducing RMSE from 0.432 to 0.367 on ScanNet++, and obtains competitive semantic segmentation results while requiring up to 54 times less computation than FiT3D on a single GPU. Without explicit dense 3D optimization, our method effectively injects robust depth priors into VLMs, enhancing semantic scene understanding.
|
| 208 |
+
|
| 209 |
+
Table 3: Quantitative comparison with linear probing on depth estimation and semantic segmentation.
|
| 210 |
+
|
| 211 |
+
<table><tr><td rowspan="2">Method</td><td rowspan="2">Train Time (↓)</td><td colspan="2">ScanNet++</td><td colspan="2">KITTI</td><td colspan="2">ADE20K</td><td colspan="2">VOC2012</td></tr><tr><td>RMSE (↓)</td><td>Rel. (↓)</td><td>RMSE (↓)</td><td>Rel. (↓)</td><td>mIoU (↑)</td><td>mAcc (↑)</td><td>mIoU (↑)</td><td>mAcc (↑)</td></tr><tr><td>(Vanilla) CLIP</td><td>-</td><td>0.432</td><td>0.317</td><td>3.946</td><td>0.150</td><td>40.11</td><td>55.75</td><td>76.44</td><td>89.42</td></tr><tr><td>FiT3D (Yue et al., 2024)</td><td>~3 d</td><td>0.394</td><td>0.278</td><td>3.542</td><td>0.125</td><td>42.53</td><td>56.61</td><td>79.21</td><td>90.25</td></tr><tr><td>MEF (You et al., 2024)</td><td>~1 h</td><td>0.429</td><td>0.312</td><td>3.891</td><td>0.145</td><td>40.16</td><td>55.93</td><td>76.47</td><td>89.46</td></tr><tr><td>Ours</td><td>~1 h 20 m</td><td>0.367</td><td>0.260</td><td>3.529</td><td>0.117</td><td>41.86</td><td>57.01</td><td>78.74</td><td>90.41</td></tr><tr><td></td><td></td><td>(-0.065)</td><td>(-0.057)</td><td>(-0.417)</td><td>(-0.033)</td><td>(+1.75)</td><td>(+1.26)</td><td>(+2.30)</td><td>(+0.99)</td></tr></table>
|
| 212 |
+
|
| 213 |
+
Table 4: Comparison of 3D vision-language reasoning on SQA3D and ScanQA.
|
| 214 |
+
|
| 215 |
+
<table><tr><td rowspan="2">Method</td><td colspan="5">SQA3D</td><td colspan="5">ScanQA</td></tr><tr><td>EM-1</td><td>BLEU-1</td><td>METEOR</td><td>ROUGE</td><td>CIDEr</td><td>EM-1</td><td>BLEU-1</td><td>BLEU-4</td><td>METEOR</td><td>ROUGE</td></tr><tr><td>(Vanilla) CLIP</td><td>48.1</td><td>47.3</td><td>34.6</td><td>48.6</td><td>124.5</td><td>19.6</td><td>36.4</td><td>10.7</td><td>14.4</td><td>36.0</td></tr><tr><td>MEF (You et al., 2024)</td><td>48.2</td><td>47.4</td><td>34.6</td><td>48.7</td><td>124.7</td><td>19.0</td><td>36.1</td><td>10.4</td><td>14.3</td><td>35.1</td></tr><tr><td>Ours</td><td>48.6(+0.5)</td><td>47.7(+0.4)</td><td>35.0(+0.4)</td><td>49.0(+0.4)</td><td>125.5(+1.0)</td><td>20.7(+1.1)</td><td>36.6(+0.2)</td><td>11.6(+0.9)</td><td>14.5(+0.1)</td><td>36.3(+0.3)</td></tr></table>
|
| 216 |
+
|
| 217 |
+
Table 5: Ablation study of loss components on 3D correspondence understanding after finetuning on Objaverse.
|
| 218 |
+
|
| 219 |
+
<table><tr><td colspan="3">Loss Components</td><td colspan="6">Semantic Correspondence</td><td colspan="2">Video Tracking</td><td colspan="3">Pose Estimation</td></tr><tr><td rowspan="2">\( \mathcal{L}_{\text{match}} \)</td><td rowspan="2">\( \mathcal{L}_{\text{depth}} \)</td><td rowspan="2">\( \mathcal{L}_{\text{cost}} \)</td><td colspan="3">Different Views</td><td colspan="3">Same Views</td><td rowspan="2">Jaccard</td><td rowspan="2">Avg. Pts</td><td colspan="3">Accuracy within Thresholds</td></tr><tr><td>0.05</td><td>0.10</td><td>0.15</td><td>0.05</td><td>0.10</td><td>0.15</td><td>1cm-1deg</td><td>3cm-3deg</td><td>5cm-5deg</td></tr><tr><td>✓</td><td>✘</td><td>✘</td><td>21.18</td><td>33.54</td><td>43.58</td><td>25.94</td><td>43.33</td><td>53.87</td><td>34.61</td><td>50.58</td><td>6.32</td><td>32.00</td><td>48.33</td></tr><tr><td>✓</td><td>✓</td><td>✘</td><td>24.89</td><td>38.32</td><td>49.00</td><td>31.92</td><td>52.05</td><td>62.88</td><td>35.36</td><td>53.43</td><td>8.38</td><td>42.01</td><td>60.26</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>25.87</td><td>39.85</td><td>50.21</td><td>36.77</td><td>56.61</td><td>67.93</td><td>35.60</td><td>54.65</td><td>8.50</td><td>39.30</td><td>57.68</td></tr></table>
|
| 220 |
+
|
| 221 |
+
# 4.2.2 3D Vision-Language Understanding
|
| 222 |
+
|
| 223 |
+
To evaluate whether our distilled VLM features effectively enhance 3D vision-language understanding, we conduct experiments on two representative 3D VQA benchmarks with fine-tuned CLIP features, following the evaluation protocol from Lexicon3D (Man et al., 2024). We measure performance using EM-1, BLEU, METEOR, ROUGE, and CIDEr. Among these metrics, EM-1 is particularly crucial as it directly measures the model's exact answer prediction accuracy. For fair comparisons, we fine-tune all baselines on the Objaverse dataset. As shown in Table 4, MEF does not show significant improvements over the vanilla CLIP on SQA3D and even shows lower performance on ScanQA. In contrast, our method consistently outperforms both CLIP and MEF across all metrics and datasets. Specifically, our approach increases EM-1 on SQA3D to $48.6\%$ , and notably improves EM-1 on ScanQA from $19.6\%$ to $20.7\%$ . These results demonstrate that our fine-tuning approach provides better 3D visual understanding which effectively leads to improvement of 3D spatial knowledge for vision-language reasoning.
|
| 224 |
+
|
| 225 |
+
# 4.3 Ablation Study
|
| 226 |
+
|
| 227 |
+
We conduct an ablation study to analyze the effectiveness of each loss component for 3D correspondence understanding as in Section 4.2.1 after fine-tuning on Objaverse. Compared to fine-tuning solely with $\mathcal{L}_{\mathrm{match}}$ equivalent to MEF, adding $\mathcal{L}_{\mathrm{depth}}$ consistently improves performance across
|
| 228 |
+
|
| 229 |
+
all metrics. Incorporating $\mathcal{L}_{\mathrm{cost}}$ further boosts PCK@0.05 by $+4.69\%$ and video tracking position accuracy by $+4.07\%$ . Although pose estimation accuracy slightly decreases at some thresholds, it maintains improved performance with a gain of $+2.18\%$ at the challenging 1cm-1deg threshold. These results demonstrate that $\mathcal{L}_{\mathrm{depth}}$ significantly enhances semantic matching and precise localization, while $\mathcal{L}_{\mathrm{cost}}$ further strengthens cross-view feature consistency. Additional ablation analyses are provided in the appendix.
|
| 230 |
+
|
| 231 |
+
# 5 Conclusion
|
| 232 |
+
|
| 233 |
+
We present Geometric Distillation, a lightweight and annotation-free framework that enhances 3D spatial awareness and reasoning in VLMs. By distilling rich geometric signals such as multiview correspondences, relative depth relations, and dense cost volumes from high-capacity 3D foundation models like MASt3R and VGGT, our method equips pretrained 2D VLMs with robust 3D perception. Without requiring architectural modifications or explicit 3D annotations, our approach improves state-of-the-art results across diverse spatial reasoning tasks, including semantic correspondence, depth estimation, and 3D visual question answering. Extensive experiments demonstrate that our method consistently outperforms prior approaches while offering greater scalability and generalization to real-world scenes. Our work highlights an effective pathway to bridge the gap between 2D vision-language understanding and 3D perception.
|
| 234 |
+
|
| 235 |
+
# 6 Limitations & Future Work
|
| 236 |
+
|
| 237 |
+
While our approach achieves notable improvements in 3D spatial reasoning for vision-language models without requiring explicit annotations or architectural changes, several limitations remain. First, the method assumes access to multi-view imagery during training, which may not always be feasible in practical applications. Second, the reliance on 3D foundation models as supervision sources introduces potential biases and limits the controllability over the distilled geometric signals. Additionally, our framework does not directly generalize to other 3D modalities such as point clouds or meshes.
|
| 238 |
+
|
| 239 |
+
Future work will focus on extending geometric distillation to monocular settings and exploring self-supervised alternatives to reduce dependence on external teacher models.
|
| 240 |
+
|
| 241 |
+
# Acknowledgements
|
| 242 |
+
|
| 243 |
+
This research was supported by the Basic Science Research Program through the National Research Foundation of Korea (NRF), funded by the MSIP (RS-2025-00520207, RS-2023-00219019), KEIT grant funded by the Korean government (MOTIE) (No. 2022-0-00680, No. 2022-0-01045), Artificial Intelligence Graduate School Program (KAIST) (RS-2019-II190075), and SAMSUNG Research, Samsung Electronics Co., Ltd.
|
| 244 |
+
|
| 245 |
+
# References
|
| 246 |
+
|
| 247 |
+
Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, and 1 others. 2023. Gpt-4 technical report. arXiv preprint arXiv:2303.08774.
|
| 248 |
+
Honggyu An, Jinhyeon Kim, Seonghoon Park, Jaewoo Jung, Jisang Han, Sunghwan Hong, and Seungryong Kim. 2024. Cross-view completion models are zero-shot correspondence estimators. arXiv preprint arXiv:2412.09072.
|
| 249 |
+
Dylan Auty and Krystian Mikolajczyk. 2023. Learning to prompt clip for monocular depth estimation: Exploring the limits of human language. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2039-2047.
|
| 250 |
+
Daichi Azuma, Taiki Miyanishi, Shuhei Kurita, and Motoaki Kawanabe. 2022. Scanqa: 3d question answering for spatial scene understanding. In proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 19129-19139.
|
| 251 |
+
Andrew Brown, Weidi Xie, Vicky Kalogeiton, and Andrew Zisserman. 2020. Smooth-ap: Smoothing the
|
| 252 |
+
|
| 253 |
+
path towards large-scale image retrieval. In European conference on computer vision, pages 677-694. Springer.
|
| 254 |
+
Boyuan Chen, Zhuo Xu, Sean Kirmani, Brian Ichter, Dorsa Sadigh, Leonidas Guibas, and Fei Xia. 2024. Spatialvlm: Endowing vision-language models with spatial reasoning capabilities. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14455-14465.
|
| 255 |
+
Shoufa Chen, Chongjian Ge, Zhan Tong, Jiangliu Wang, Yibing Song, Jue Wang, and Ping Luo. 2022. Adaptformer: Adapting vision transformers for scalable visual recognition. Advances in Neural Information Processing Systems, 35:16664-16678.
|
| 256 |
+
Wei Chen, Tie-Yan Liu, Yanyan Lan, Zhi-Ming Ma, and Hang Li. 2009. Ranking measures and loss functions in learning to rank. Advances in Neural Information Processing Systems, 22.
|
| 257 |
+
Weifeng Chen, Zhao Fu, Dawei Yang, and Jia Deng. 2016. Single-image depth perception in the wild. Advances in neural information processing systems, 29.
|
| 258 |
+
Duolikun Danier, Mehmet Aygün, Changjian Li, Hakan Bilen, and Oisin Mac Aodha. 2024. Depthcues: Evaluating monocular depth perception in large vision models. arXiv preprint arXiv:2411.17385.
|
| 259 |
+
Matt Deitke, Dustin Schwenk, Jordi Salvador, Luca Weihs, Oscar Michel, Eli VanderBilt, Ludwig Schmidt, Kiana Ehsani, Aniruddha Kembhavi, and Ali Farhadi. 2023. Objaverse: A universe of annotated 3d objects. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 13142-13153.
|
| 260 |
+
Carl Doersch, Ankush Gupta, Larisa Markeeva, Adria Recasens, Lucas Smaira, Yusuf Aytar, Joao Carreira, Andrew Zisserman, and Yi Yang. 2022. Tap-vid: A benchmark for tracking any point in a video. Advances in Neural Information Processing Systems, 35:13610-13626.
|
| 261 |
+
David Eigen, Christian Puhrsch, and Rob Fergus. 2014. Depth map prediction from a single image using a multi-scale deep network. Advances in neural information processing systems, 27.
|
| 262 |
+
Mohamed El Banani, Amit Raj, Kevis-Kokitsi Maninis, Abhishek Kar, Yuanzhen Li, Michael Rubinstein, Deqing Sun, Leonidas Guibas, Justin Johnson, and Varun Jampani. 2024. Probing the 3d awareness of visual foundation models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 21795-21806.
|
| 263 |
+
Mark Everingham, SM Ali Eslami, Luc Van Gool, Christopher KI Williams, John Winn, and Andrew Zisserman. 2015. The pascal visual object classes challenge: A retrospective. International journal of computer vision, 111:98-136.
|
| 264 |
+
|
| 265 |
+
Huan Fu, Mingming Gong, Chaohui Wang, Kayhan Batmanghelich, and Dacheng Tao. 2018. Deep ordinal regression network for monocular depth estimation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2002-2011.
|
| 266 |
+
Peng Gao, Shijie Geng, Renrui Zhang, Teli Ma, Rongyao Fang, Yongfeng Zhang, Hongsheng Li, and Yu Qiao. 2024. Clip-adapter: Better vision-language models with feature adapters. International Journal of Computer Vision, 132(2):581-595.
|
| 267 |
+
Andreas Geiger, Philip Lenz, Christoph Stiller, and Raquel Urtasun. 2013. Vision meets robotics: The kitti dataset. The international journal of robotics research, 32(11):1231-1237.
|
| 268 |
+
Bumsub Ham, Minsu Cho, Cordelia Schmid, and Jean Ponce. 2016. Proposal flow. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3475-3484.
|
| 269 |
+
Xingyi He, Jiaming Sun, Yuang Wang, Di Huang, Hujun Bao, and Xiaowei Zhou. 2022. Onepose++: Keypoint-free one-shot object pose estimation without cad models. Advances in Neural Information Processing Systems, 35:35103-35115.
|
| 270 |
+
Deepti Hegde, Jeya Maria Jose Valanarasu, and Vishal Patel. 2023. Clip goes 3d: Leveraging prompt tuning for language grounded 3d recognition. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2028-2038.
|
| 271 |
+
Yining Hong, Haoyu Zhen, Peihao Chen, Shuhong Zheng, Yilun Du, Zhenfang Chen, and Chuang Gan. 2023. 3d-llm: Injecting the 3d world into large language models. Advances in Neural Information Processing Systems, 36:20482-20494.
|
| 272 |
+
Ian P Howard and Brian J Rogers. 1995. Binocular vision and stereopsis. Oxford University Press, USA.
|
| 273 |
+
Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, Weizhu Chen, and 1 others. 2022. Lora: Low-rank adaptation of large language models. ICLR, 1(2):3.
|
| 274 |
+
Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. 2021. Scaling up visual and vision-language representation learning with noisy text supervision. In International conference on machine learning, pages 4904-4916. PMLR.
|
| 275 |
+
Amita Kamath, Jack Hessel, and Kai-Wei Chang. 2023. What's "up" with vision-language models? investigating their struggle with spatial reasoning. arXiv preprint arXiv:2310.19785.
|
| 276 |
+
Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 2023. 3d gaussian splatting for real-time radiance field rendering. ACM Trans. Graph., 42(4):139-1.
|
| 277 |
+
|
| 278 |
+
Seoyeon Kim, Minguk Kang, Dongwon Kim, Jaesik Park, and Suha Kwak. 2023. Extending clip's image-text alignment to referring image segmentation. arXiv preprint arXiv:2306.08498.
|
| 279 |
+
Michael S Landy, Laurence T Maloney, Elizabeth B Johnston, and Mark Young. 1995. Measurement and modeling of depth cue combination: in defense of weak fusion. Vision research, 35(3):389-412.
|
| 280 |
+
Janghyeon Lee, Jongsuk Kim, Hyounguk Shon, Bumsoo Kim, Seung Hwan Kim, Honglak Lee, and Junmo Kim. 2022. Uniclip: Unified framework for contrastive language-image pre-training. Advances in Neural Information Processing Systems, 35:1008-1019.
|
| 281 |
+
Vincent Leroy, Yohann Cabon, and Jérôme Revaud. 2024. Grounding image matching in 3d with mast3r. In European Conference on Computer Vision, pages 71-91. Springer.
|
| 282 |
+
Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. 2023. Blip-2: Bootstrapping language-image pretraining with frozen image encoders and large language models. In International conference on machine learning, pages 19730–19742. PMLR.
|
| 283 |
+
Junnan Li, Dongxu Li, Caiming Xiong, and Steven Hoi. 2022. Blip: Bootstrapping language-image pretraining for unified vision-language understanding and generation. In International conference on machine learning, pages 12888-12900. PMLR.
|
| 284 |
+
Junnan Li, Ramprasaath Selvaraju, Akhilesh Gotmare, Shafiq Joty, Caiming Xiong, and Steven Chu Hong Hoi. 2021. Align before fuse: Vision and language representation learning with momentum distillation. Advances in neural information processing systems, 34:9694-9705.
|
| 285 |
+
Siting Li, Pang Wei Koh, and Simon Shaolei Du. 2024. On erroneous agreements of clip image embeddings. arXiv preprint arXiv:2411.05195.
|
| 286 |
+
Ilya Loshchilov and Frank Hutter. 2017. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101.
|
| 287 |
+
Xiaojian Ma, Silong Yong, Zilong Zheng, Qing Li, Yitao Liang, Song-Chun Zhu, and Siyuan Huang. 2022. Sqa3d: Situated question answering in 3d scenes. arXiv preprint arXiv:2210.07474.
|
| 288 |
+
Yunze Man, Shuhong Zheng, Zhipeng Bao, Martial Hebert, Liangyan Gui, and Yu-Xiong Wang. 2024. Lexicon3d: Probing visual foundation models for complex 3d scene understanding. Advances in Neural Information Processing Systems, 37:76819-76847.
|
| 289 |
+
María Luisa Menéndez, Julio Angel Pardo, Leandro Pardo, and María del C Pardo. 1997. The jensen-shannon divergence. Journal of the Franklin Institute, 334(2):307-318.
|
| 290 |
+
|
| 291 |
+
Songyou Peng, Kyle Genova, Chiyu Jiang, Andrea Tagliasacchi, Marc Pollefeys, Thomas Funkhouser, and 1 others. 2023. Openscene: 3d scene understanding with open vocabularies. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 815-824.
|
| 292 |
+
Zengyi Qin, Jinglu Wang, and Yan Lu. 2019. Monognet: A geometric reasoning network for monocular 3d object localization. In Proceedings of the AAAI conference on artificial intelligence, volume 33, pages 8851-8858.
|
| 293 |
+
Congpei Qiu, Yanhao Wu, Wei Ke, Xiuxiu Bai, and Tong Zhang. 2025. Refining clip's spatial awareness: A visual-centric perspective. arXiv preprint arXiv:2504.02328.
|
| 294 |
+
Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, and 1 others. 2021. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR.
|
| 295 |
+
Mohit Shridhar, Lucas Manuelli, and Dieter Fox. 2022. Cliport: What and where pathways for robotic manipulation. In Conference on robot learning, pages 894–906. PMLR.
|
| 296 |
+
James T Todd and J Farley Norman. 2003. The visual perception of 3-d shape from multiple cues: Are observers capable of perceiving metric structure? Perception & psychophysics, 65(1):31-47.
|
| 297 |
+
Shubham Tulsiani, Tinghui Zhou, Alexei A Efros, and Jitendra Malik. 2017. Multi-view supervision for single-view reconstruction via differentiable ray consistency. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2626-2634.
|
| 298 |
+
Narek Tumanyan, Assaf Singer, Shai Bagon, and Tali Dekel. 2024. Dino-tracker: Taming dino for self-supervised point tracking in a single video. In European Conference on Computer Vision, pages 367-385. Springer.
|
| 299 |
+
Jianyuan Wang, Minghao Chen, Nikita Karaev, Andrea Vedaldi, Christian Rupprecht, and David Novotny. 2025. Vggt: Visual geometry grounded transformer. arXiv preprint arXiv:2503.11651.
|
| 300 |
+
Shuzhe Wang, Vincent Leroy, Yohann Cabon, Boris Chidlovskii, and Jerome Revaud. 2024. Dust3r: Geometric 3d vision made easy. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 20697-20709.
|
| 301 |
+
Philippe Weinzaepfel, Vincent Leroy, Thomas Lucas, Romain Brégier, Yohann Cabon, Vaibhav Arora, Leonid Antsfeld, Boris Chidlovskii, Gabriela Csurka, and Jérôme Revaud. 2022. Croco: Self-supervised pre-training for 3d vision tasks by cross-view completion. Advances in Neural Information Processing Systems, 35:3502-3516.
|
| 302 |
+
|
| 303 |
+
Philippe Weinzaepfel, Thomas Lucas, Vincent Leroy, Yohann Cabon, Vaibhav Arora, Romain Brégier, Gabriela Csurka, Leonid Antsfeld, Boris Chidlovskii, and Jérôme Revaud. 2023. Croco v2: Improved cross-view completion pre-training for stereo matching and optical flow. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 17969-17980.
|
| 304 |
+
Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumont, Clement Delangue, Anthony Moi, Pierric Cistac, Tim Rault, Remi Louf, Morgan Funtowicz, and 1 others. 2019. Huggingface's transformers: State-of-the-art natural language processing. arXiv preprint arXiv:1910.03771.
|
| 305 |
+
Ke Xian, Jianming Zhang, Oliver Wang, Long Mai, Zhe Lin, and Zhiguo Cao. 2020. Structure-guided ranking loss for single image depth prediction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 611-620.
|
| 306 |
+
Chandan Yeshwanth, Yueh-Cheng Liu, Matthias Nießner, and Angela Dai. 2023. Scannet++: A high-fidelity dataset of 3d indoor scenes. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12-22.
|
| 307 |
+
Yang You, Yixin Li, Congyue Deng, Yue Wang, and Leonidas Guibas. 2024. Multiview equivariance improves 3d correspondence understanding with minimal feature finetuning. arXiv preprint arXiv:2411.19458.
|
| 308 |
+
Yuanwen Yue, Anurag Das, Francis Engelmann, Siyu Tang, and Jan Eric Lenssen. 2024. Improving 2d feature representations by 3d-aware fine-tuning. In European Conference on Computer Vision, pages 57-74. Springer.
|
| 309 |
+
Yan Zeng, Xinsong Zhang, and Hang Li. 2021. Multi-grained vision language pre-training: Aligning texts with visual concepts. arXiv preprint arXiv:2111.08276.
|
| 310 |
+
Renrui Zhang, Ziyu Guo, Wei Zhang, Kunchang Li, Xupeng Miao, Bin Cui, Yu Qiao, Peng Gao, and Hongsheng Li. 2022a. Pointclip: Point cloud understanding by clip. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8552-8562.
|
| 311 |
+
Renrui Zhang, Ziyao Zeng, Ziyu Guo, and Yafeng Li. 2022b. Can language understand depth? In Proceedings of the 30th ACM International Conference on Multimedia, pages 6868-6874.
|
| 312 |
+
Bolei Zhou, Hang Zhao, Xavier Puig, Tete Xiao, Sanja Fidler, Adela Barriuso, and Antonio Torralba. 2019. Semantic understanding of scenes through the ade20k dataset. International Journal of Computer Vision, 127:302-321.
|
| 313 |
+
Xiangyang Zhu, Renrui Zhang, Bowei He, Ziyu Guo, Ziyao Zeng, Zipeng Qin, Shanghang Zhang, and Peng Gao. 2023. Pointclip v2: Prompting clip and
|
| 314 |
+
|
| 315 |
+
gpt for powerful 3d open-world learning. In Proceedings of the IEEE/CVF international conference on computer vision, pages 2639-2650.
|
| 316 |
+
Daniel Zoran, Phillip Isola, Dilip Krishnan, and William T Freeman. 2015. Learning ordinal relationships for mid-level vision. In Proceedings of the IEEE international conference on computer vision, pages 388-396.
|
| 317 |
+
|
| 318 |
+
# Appendix Contents
|
| 319 |
+
|
| 320 |
+
A. Potential Risks
|
| 321 |
+
B. Use or Create Scientific Artifacts
|
| 322 |
+
|
| 323 |
+
- B.1 Discuss The License For Artifacts
|
| 324 |
+
- B.2 Documentation of Artifacts
|
| 325 |
+
- B.3 Statistics for Dataset
|
| 326 |
+
|
| 327 |
+
C. Computational Experiments
|
| 328 |
+
|
| 329 |
+
- C.1 Model Size and Budget
|
| 330 |
+
- C.2 Experimental Setup and Hyperparameters
|
| 331 |
+
- C.3 Descriptive Statistics
|
| 332 |
+
- C.4 Parameters for Packages
|
| 333 |
+
|
| 334 |
+
D. Use of AI Assistants
|
| 335 |
+
|
| 336 |
+
- Information About Use Of AI Assistants
|
| 337 |
+
|
| 338 |
+
E. Additional Quantitative Evaluation
|
| 339 |
+
|
| 340 |
+
- Feature Visualization
|
| 341 |
+
- More Qualitative Results
|
| 342 |
+
- Example Result of 3D VQA
|
| 343 |
+
|
| 344 |
+
F. Additional Ablation Study
|
| 345 |
+
|
| 346 |
+
- Comparison of Absolute and Relative Depth Understanding
|
| 347 |
+
- Ablation on Loss Components with Different Training Dataset
|
| 348 |
+
- Comparison of MASt3R and VGGT as a Teacher Model
|
| 349 |
+
|
| 350 |
+
G. Failure Cases
|
| 351 |
+
|
| 352 |
+
# A Potential Risks
|
| 353 |
+
|
| 354 |
+
Our proposed method, Geometric Distillation, enhances vision-language models (VLMs) with 3D spatial understanding by leveraging supervision signals from pretrained 3D foundation models. While our approach is annotation-free and lightweight, there are potential risks associated with its deployment. First, since the 3D models used as teachers may contain biases learned from their own training data, such biases could be inadvertently transferred to the VLMs. Second, because our method relies on pseudo-supervision (e.g., depth maps and correspondences), inaccuracies in the geometric signals could result in incorrect spatial reasoning or degraded model performance. Finally, although our work is intended for academic and constructive use, enhanced spatial reasoning capabilities could potentially be misused in surveillance, military applications, or other ethically sensitive scenarios.
|
| 355 |
+
|
| 356 |
+
# B Use or Create Scientific Artifacts
|
| 357 |
+
|
| 358 |
+
Our study builds entirely on existing resources, including publicly available pretrained models and benchmark datasets. In the following, we briefly describe the licensing status of the artifacts used and provide key statistics for the datasets involved in our experiments.
|
| 359 |
+
|
| 360 |
+
# B.1 Discuss The License for Artifacts
|
| 361 |
+
|
| 362 |
+
In this work, we do not introduce new datasets, but instead make use of publicly available pretrained models and benchmarks. Specifically, we use MASt3R (Leroy et al., 2024) and VGGT (Wang et al., 2025) as geometric teacher models, which are distributed under research-friendly licenses: VGGT is released under the CC BY-NC 4.0 license, and MASt3R and DUSt3R are licensed under the CC BY-NC-SA 4.0 license. Additionally, we evaluate our method using several publicly available datasets: TAP-Vid-DAVIS (Doersch et al., 2022) (Apache 2.0), OnePose-LowTexture (He et al., 2022) (Apache 2.0), ADE20K (Zhou et al., 2019) (BSD 3-Clause), and Objaverse (Deitke et al., 2023) (Apache 2.0). All datasets are used strictly for non-commercial research purposes in accordance with their respective licenses or terms.
|
| 363 |
+
|
| 364 |
+
# B.2 Documentation of Artifacts
|
| 365 |
+
|
| 366 |
+
All code, pretrained model checkpoints, and evaluation scripts used in this study will be publicly released upon publication. These artifacts will be hosted on a GitHub repository, accompanied by detailed documentation including installation instructions, dataset preparation scripts, and usage examples. A complete README file will be provided to ensure the reproducibility of our results. For datasets that cannot be redistributed due to licensing constraints, we include scripts and links to download them from their original sources. Our release is intended to support both reproduction and future research based on our approach.
|
| 367 |
+
|
| 368 |
+
# B.3 Statistics for Dataset
|
| 369 |
+
|
| 370 |
+
We summarize the dataset statistics used in our experiments across different tasks in Table 6.
|
| 371 |
+
|
| 372 |
+
3D Correspondence Understanding. We evaluate on three benchmarks following the protocols from MEF (You et al., 2024). For semantic correspondence, we use PF-PASCAL that consists of 308 image pairs from 20 object classes, randomly shuffled in different viewpoint settings. For video
|
| 373 |
+
|
| 374 |
+
Table 6: Dataset statistics and split details for each downstream task.
|
| 375 |
+
|
| 376 |
+
<table><tr><td>Task / Dataset</td><td>Split information</td></tr><tr><td colspan="2">3D Correspondence Understanding</td></tr><tr><td>PF-PASCAL</td><td>20 object classes; 308 image pairs; pairs randomly shuffled (in different viewpoint settings)</td></tr><tr><td>TAP-Vid (DAVIS)</td><td>30 object-centric videos; 34–104 frames per video</td></tr><tr><td>OnePose-LowTexture</td><td>40 objects with two videos per object; evaluation every 10th frame</td></tr><tr><td colspan="2">Dense Scene Understanding</td></tr><tr><td>ScanNet++</td><td>Validation split — 50 scenes, 30,638 images</td></tr><tr><td>KITTI</td><td>Test split — 28 scenes, 697 images</td></tr><tr><td>ADE20K</td><td>Validation split — 2,000 images</td></tr><tr><td>VOC2012</td><td>Validation split — 1,449 images</td></tr><tr><td colspan="2">3D Vision-Language Understanding</td></tr><tr><td>SQA3D</td><td>over 33K question-answer pairs</td></tr><tr><td>ScanQA</td><td>over 41K question-answer pairs</td></tr></table>
|
| 377 |
+
|
| 378 |
+
tracking, we follow the protocols of (Doersch et al., 2022; Tumanyan et al., 2024) and use TAP-Vid-DAVIS, which contains 30 object-centric videos with 34-104 frames per video. For object pose estimation, we follow He et al. (2022) and evaluate on the OnePose-LowTexture dataset which comprises 40 objects, each with two videos, performing evaluations on every 10th frame.
|
| 379 |
+
|
| 380 |
+
Dense Scene Understanding. Following FiT3D (Yue et al., 2024), we perform linear probing evaluations to estimate monocular depth and semantic segmentation. For depth estimation, we use ScanNet++ (Yeshwanth et al., 2023), specifically utilizing its validation split of 50 scenes with 30,638 images. We also use KITTI (Geiger et al., 2013) to evaluate generalization performance on KITTI's test split consisting of 28 scenes and 697 images. For semantic segmentation, we follow standard protocols and evaluate on ADE20K (Zhou et al., 2019)'s validation split with 2,000 images and VOC2012 (Everingham et al., 2015)'s validation split with 1,449 images.
|
| 381 |
+
|
| 382 |
+
3D Vision-Language Understanding. We evaluate 3D visual question-answering capabilities on SQA3D (Ma et al., 2022) and ScanQA (Azuma et al., 2022), following Lexicon3D (Man et al., 2024). Both datasets contain diverse QA pairs designed to probe 3D spatial and semantic reasoning. Specifically, SQA3D comprises over 33K synthetic question-answer pairs, while ScanQA contains over 41K real-world question-answer pairs generated from ScanNet scenes.
|
| 383 |
+
|
| 384 |
+
# C Computational Experiments
|
| 385 |
+
|
| 386 |
+
We conduct a series of computational experiments to evaluate the effectiveness and efficiency of our
|
| 387 |
+
|
| 388 |
+
proposed method. This section outlines the scale and computational cost of our models, the training setup and hyperparameter choices, a summary of the reported evaluation metrics, and the software packages used for implementation and evaluation. Through careful design and efficient training strategies, we ensure that our method achieves strong performance while maintaining high computational efficiency.
|
| 389 |
+
|
| 390 |
+
# C.1 Model Size and Budget
|
| 391 |
+
|
| 392 |
+
We utilize the CLIP (Radford et al., 2021) ViT-B/16 model as our vision-language backbone, which contains approximately 93 million parameters, closely comparable to the vanilla CLIP with about 87 million parameters. For parameter-efficient fine-tuning, we employ the Low-Rank Adaptation (LoRA) (Hu et al., 2022) technique, which takes up roughly 6 million parameters (about $6.5\%$ of the total). All experiments are conducted on up to four NVIDIA A6000 GPUs, and our geometric distillation process takes approximately 1 hour and 20 minutes per model on a single NVIDIA A6000 GPU. Compared to prior methods such as FiT3D, which require up to three days of training on four A6000 GPUs due to the costly optimization of 3D feature Gaussians for all training scenes, our method significantly reduces computational cost while achieving superior performance.
|
| 393 |
+
|
| 394 |
+
# C.2 Experimental Setup and Hyperparameters
|
| 395 |
+
|
| 396 |
+
We use the AdamW optimizer (Loshchilov and Hutter, 2017) with a learning rate of $1 \times 10^{-5}$ , and train LoRA for up to 500 epochs with early stopping across all experiments. LoRA adapters
|
| 397 |
+
|
| 398 |
+

|
| 399 |
+
(a) Images
|
| 400 |
+
|
| 401 |
+

|
| 402 |
+
(b) CLIP
|
| 403 |
+
|
| 404 |
+

|
| 405 |
+
(c) FiT3D
|
| 406 |
+
|
| 407 |
+

|
| 408 |
+
(d) MEF
|
| 409 |
+
Figure 6: Feature visualization. PCA visualization of learned features on randomly selected 3D objects from Objaverse (Deitke et al., 2023). Compared to (b) CLIP (Radford et al., 2021), (c) FiT3D (Yue et al., 2024), and (d) MEF (You et al., 2024), our method (e) not only generates consistently smoother and more coherent features with reduced noise but also accurately preserves semantic correspondences across multiple viewpoints.
|
| 410 |
+
|
| 411 |
+

|
| 412 |
+
(e) Ours
|
| 413 |
+
|
| 414 |
+
with rank $r = 4$ are applied to intermediate self attention layers in the CLIP model baseline. For the relative depth supervision, we add four LoRA layers to the 4th-7th attention layers, along with adapters following (Chen et al., 2022). The loss components are equally weighted: $\lambda_{\mathrm{match}} = 1.0$ , $\lambda_{\mathrm{depth}} = 1.0$ , and $\lambda_{\mathrm{cost}} = 1.0$ . Additionally, we apply temperature annealing to the cost volume alignment loss $\mathcal{L}_{\mathrm{cost}}$ as described in Equation (7), linearly decreasing $\tau$ from 1.0 to 0.5 during training. These hyperparameters were selected based on empirical tuning on ScanNet++ validation split and held consistent across all datasets to ensure fair comparison. We did not perform extensive hyperparameter search, and observed no significant sensitivity to small variations.
|
| 415 |
+
|
| 416 |
+
For view sampling during geometric distillation on ScanNet++, we randomly sample 10,000 views across 100 scenes, then subsequently select 100 random pairs of views that share overlapping 3D regions. This sampling results in a dataset size equivalent to the Objaverse view pairs used in MEF (You et al., 2024).
|
| 417 |
+
|
| 418 |
+
# C.3 Descriptive Statistics
|
| 419 |
+
|
| 420 |
+
All results reported in the main paper and appendix represent the mean values over the full test set. For classification and tracking tasks, we use metrics
|
| 421 |
+
|
| 422 |
+
such as PCK, Jaccard index, and positional accuracy at multiple thresholds. For depth estimation and semantic segmentation, we report RMSE, relative error, mIoU, and mAcc. We do not report error bars or variances, but all evaluations are deterministic and based on a single run unless otherwise specified. Our results are comparable to prior works under the same evaluation protocols and dataset splits.
|
| 423 |
+
|
| 424 |
+
# C.4 Parameters for Packages
|
| 425 |
+
|
| 426 |
+
We rely on several well-established libraries and packages throughout our pipeline. For model implementation and training, we use PyTorch along with the HuggingFace Transformers (Wolf et al., 2019) and PEFT (Parameter Efficient Fine Tuning) libraries to incorporate LoRA into the CLIP backbone. For vision tasks such as depth estimation and segmentation, we use torchvision and mmsegmentation-based tools for data preprocessing and evaluation. NLP evaluation metrics including BLEU, ROUGE, METEOR, and CIDEr are computed using standard implementations from the NLTK and COCOEval toolkits. All packages are used with default parameters unless otherwise specified. No additional tuning or modification was made to external evaluation functions.
|
| 427 |
+
|
| 428 |
+
Question: What is the farthest away object on my left?
|
| 429 |
+
|
| 430 |
+
Situation: I just walked into the room through the doors.
|
| 431 |
+
|
| 432 |
+
Answer: window
|
| 433 |
+
|
| 434 |
+

|
| 435 |
+
|
| 436 |
+

|
| 437 |
+
ta
|
| 438 |
+
X
|
| 439 |
+
|
| 440 |
+

|
| 441 |
+
: window
|
| 442 |
+
|
| 443 |
+

|
| 444 |
+
|
| 445 |
+

|
| 446 |
+
Scene
|
| 447 |
+
|
| 448 |
+

|
| 449 |
+
Before
|
| 450 |
+
|
| 451 |
+

|
| 452 |
+
After
|
| 453 |
+
|
| 454 |
+
Question: Which one is further away from the fan, a cabinet or a trash can?
|
| 455 |
+
|
| 456 |
+
Situation: I am facing a backpack on top a couch, while there is a door behind
|
| 457 |
+
|
| 458 |
+
Answer: trash can
|
| 459 |
+
|
| 460 |
+

|
| 461 |
+
: cabinet
|
| 462 |
+
|
| 463 |
+

|
| 464 |
+
|
| 465 |
+

|
| 466 |
+
: trash can
|
| 467 |
+
|
| 468 |
+

|
| 469 |
+
|
| 470 |
+

|
| 471 |
+
Scene
|
| 472 |
+
|
| 473 |
+

|
| 474 |
+
Before
|
| 475 |
+
|
| 476 |
+

|
| 477 |
+
After
|
| 478 |
+
|
| 479 |
+
Question: Which one is closer to me, the bathtub or the bed?
|
| 480 |
+
|
| 481 |
+
Situation: I am facing the door and the bathroom door opening is on my left.
|
| 482 |
+
|
| 483 |
+
Answer: bathtub
|
| 484 |
+
|
| 485 |
+

|
| 486 |
+
bed
|
| 487 |
+
|
| 488 |
+

|
| 489 |
+
|
| 490 |
+

|
| 491 |
+
: bathtub
|
| 492 |
+
|
| 493 |
+

|
| 494 |
+
|
| 495 |
+

|
| 496 |
+
Scene
|
| 497 |
+
Figure 7: Qualitative examples of 3D VQA on SQA3D. Visualization of feature clustering for 3D scenes before and after our geometric distillation, following the protocol of Lexicon3D (Man et al., 2024). The 2D CLIP features and fine-tuned 2D CLIP features are lifted into 3D space and clustered using k-means. Each example presents a challenging VQA scenario, asking about relative object positions (e.g., "farthest," "further," "closer"). Compared to vanilla CLIP ("Before"), our distilled features ("After") offer clearer 3D spatial distinction and improved vision-language understanding for given 3D scenes.
|
| 498 |
+
|
| 499 |
+

|
| 500 |
+
Before
|
| 501 |
+
|
| 502 |
+

|
| 503 |
+
After
|
| 504 |
+
|
| 505 |
+
# D AI Assistants In Research Or Writing
|
| 506 |
+
|
| 507 |
+
# D.1 Information About Use Of AI Assistants
|
| 508 |
+
|
| 509 |
+
We acknowledge the use of ChatGPT-4o (Achiam et al., 2023) for grammatical correction and style improvement during the writing of this paper. However, all technical content, experiment design, and conceptual development were performed solely by the authors. No AI-generated content was used for core research contributions or evaluations.
|
| 510 |
+
|
| 511 |
+
# E Additional Qualitative Evaluation
|
| 512 |
+
|
| 513 |
+
# E.1 Feature Visualization
|
| 514 |
+
|
| 515 |
+
To qualitatively analyze the effectiveness of our geometric distillation, we visualize PCA projections of features extracted from randomly sampled 3D objects in Objaverse. We compute a PCA between the patches of the images from the multi-view images of the same object and visualize their first 3 components. As illustrated in Figure 6, existing methods such as vanilla CLIP, FiT3D, and MEF produce noisy or inconsistent feature distributions across multiple views. In contrast, our method generates significantly smoother and more coherent feature maps that consistently preserve semantic correspondence across various viewpoints. This visualization confirms that our approach successfully injects robust multi-view geometric consistency into VLM features, which enables precise and noise-less representation of object parts and their spatial relationships.
|
| 516 |
+
|
| 517 |
+
# E.2 More Qualitative Results
|
| 518 |
+
|
| 519 |
+
We provide additional qualitative comparisons for video tracking performance on the TAP-Vid DAVIS dataset (described in Section 4.2.1) in Figure 8. Compared to MEF (You et al., 2024), our method produces notably cleaner and more accurate tracking results, which closely align with the ground-truth trajectories. Specifically, in the first row of Figure 8, MEF struggles to accurately track the trajectory of the rear wheel, confusing it with the front wheel of the car. In contrast, our approach clearly distinguishes and consistently tracks object parts. These results show that our method effectively enhances consistency to viewpoint changes and object motion.
|
| 520 |
+
|
| 521 |
+
# E.3 Example Results of 3D VQA
|
| 522 |
+
|
| 523 |
+
As summarized in Figure 7, we provide example results of 3D visual question answering evaluation
|
| 524 |
+
|
| 525 |
+
on the SQA3D dataset following Section 4.2.2. Specifically, we visualize features from vanilla CLIP and our fine-tuned CLIP obtained through geometric distillation. For visualization, we first lift the 2D CLIP features into their corresponding 3D scenes and apply k-means clustering. Our distilled features demonstrate clearer spatial coherence and improved geometric consistency compared to vanilla CLIP features. Consequently, our model exhibits superior spatial reasoning capabilities, which accurately identify relative object distances as required by challenging VQA questions, especially determining which object is farther or closer. For instance, while vanilla CLIP incorrectly identifies spatial relationships due to ambiguous feature representations, our method correctly interprets the precise spatial context, including spatially complex questions.
|
| 526 |
+
|
| 527 |
+
# F Additional Ablation Study
|
| 528 |
+
|
| 529 |
+
# F.1 Comparison of Absolute and Relative Depth Understanding
|
| 530 |
+
|
| 531 |
+
We perform an additional analysis comparing the effects of absolute and relative depth losses on 3D correspondence understanding. Specifically, we fine-tune models on ScanNet++ using either absolute depth loss or our proposed relative depth loss, and evaluate them across the 3D correspondence tasks described in Section 4.2.1. For absolute depth loss, we implement log-scale depth regression, which directly predicts depth values. Given predicted depth $\hat{d}_p$ and ground-truth depth $\tilde{d}_p$ at keypoint $p$ for a single view, the absolute depth loss $\mathcal{L}_{\mathrm{abs\_depth}}$ is computed as:
|
| 532 |
+
|
| 533 |
+
$$
|
| 534 |
+
\mathcal{L}_{\mathrm{abs\_depth}} = \frac{1}{|\mathcal{P}|} \sum_{p \in \mathcal{P}} \left| \hat{d}_{p} - s \cdot \tilde{d}_{p} \right|, \quad s = \frac{D_{\max}^{\mathrm{pred}}}{D_{\max}^{\mathrm{gt}}} \tag{10}
|
| 535 |
+
$$
|
| 536 |
+
|
| 537 |
+
where $D_{\mathrm{max}}^{\mathrm{pred}}$ and $D_{\mathrm{max}}^{\mathrm{gt}}$ denote the maximum depth from predictions and ground-truth, respectively, and $s$ is the scale factor ensuring that predictions match the range of the ground-truth.
|
| 538 |
+
|
| 539 |
+
As shown in Table 7, the relative depth loss consistently outperforms absolute depth across all metrics. For semantic correspondence, it significantly improves PCK@0.05 from $27.04\%$ to $28.48\%$ (different views) and from $37.45\%$ to $42.16\%$ (same views). Similarly, relative depth supervision enhances video tracking, increasing the average Jaccard index from $39.27\%$ to $40.09\%$ , and boosts precise pose estimation accuracy at the 1cm-1deg threshold from $9.46\%$ to $10.96\%$ .
|
| 540 |
+
|
| 541 |
+

|
| 542 |
+
|
| 543 |
+

|
| 544 |
+
|
| 545 |
+

|
| 546 |
+
|
| 547 |
+

|
| 548 |
+
|
| 549 |
+

|
| 550 |
+
|
| 551 |
+

|
| 552 |
+
|
| 553 |
+

|
| 554 |
+
|
| 555 |
+

|
| 556 |
+
|
| 557 |
+

|
| 558 |
+
|
| 559 |
+

|
| 560 |
+
(a) Ground Truth
|
| 561 |
+
|
| 562 |
+

|
| 563 |
+
(b) MEF
|
| 564 |
+
|
| 565 |
+

|
| 566 |
+
(c) Ours
|
| 567 |
+
Figure 8: Additional qualitative results on video tracking. Visualization of predicted trajectories compared to (a) ground truth, (b) MEF (You et al., 2024), and (c) ours. Our method provides more accurate and coherent object tracking, which significantly reduces incorrect correspondences and aligns better with ground-truth trajectories.
|
| 568 |
+
|
| 569 |
+
Table 7: Absolute vs. relative depth loss in 3D correspondence understanding after fine-tuning on ScanNet++.
|
| 570 |
+
|
| 571 |
+
<table><tr><td rowspan="3">Method</td><td colspan="6">Semantic Correspondence</td><td colspan="2">Video Tracking</td><td colspan="3">Pose Estimation</td></tr><tr><td colspan="3">Different Views</td><td colspan="3">Same Views</td><td rowspan="2">Jacc.</td><td rowspan="2">Avg. Pts</td><td colspan="3">Thresholds</td></tr><tr><td>0.05</td><td>0.10</td><td>0.15</td><td>0.05</td><td>0.10</td><td>0.15</td><td>1cm-1deg</td><td>3cm-3deg</td><td>5cm-5deg</td></tr><tr><td>Abs.</td><td>27.04</td><td>41.33</td><td>50.37</td><td>37.45</td><td>57.63</td><td>66.58</td><td>39.27</td><td>57.27</td><td>9.46</td><td>42.04</td><td>60.93</td></tr><tr><td>Rel.</td><td>28.48</td><td>43.07</td><td>53.55</td><td>42.16</td><td>61.57</td><td>72.16</td><td>40.09</td><td>57.75</td><td>10.96</td><td>44.93</td><td>63.65</td></tr></table>
|
| 572 |
+
|
| 573 |
+
These results indicate that explicitly modeling relative depth relationships, rather than absolute depth values, yields more generalizable geometric representations. Additionally, it reduces the risk of overfitting to the depth distribution of the training dataset.
|
| 574 |
+
|
| 575 |
+
# F.2 Ablation on Loss Components with Different Training Dataset
|
| 576 |
+
|
| 577 |
+
To further investigate the generalization of each loss component in our geometric distillation, we conduct an additional ablation study by fine-tuning on the real-world ScanNet++ dataset, complementing our earlier analysis performed on Objaverse (Section 4.3). Specifically, we evaluate the
|
| 578 |
+
|
| 579 |
+
effects of the matching loss $\mathcal{L}_{\mathrm{match}}$ , relative depth loss $\mathcal{L}_{\mathrm{depth}}$ , and cost volume alignment loss $\mathcal{L}_{\mathrm{cost}}$ across the downstream 3D correspondence tasks described in Section 4.2.1.
|
| 580 |
+
|
| 581 |
+
As shown in Table 8, adding the relative depth loss $\mathcal{L}_{\mathrm{depth}}$ significantly enhances semantic correspondence, increasing PCK@0.10 from $41.76\%$ to $43.43\%$ (different views), and improving pose estimation accuracy at the strict 1cm-1deg threshold from $9.61\%$ to $10.80\%$ . Incorporating the cost volume alignment loss $\mathcal{L}_{\mathrm{cost}}$ further strengthens performance, which yields substantial gains across most metrics. Specifically, semantic correspondence at PCK@0.05 notably increases from $26.32\%$ to $28.48\%$ (different views) and from
|
| 582 |
+
|
| 583 |
+
Table 8: Ablation study of loss components on 3D correspondence understanding after finetuning on ScanNet++.
|
| 584 |
+
|
| 585 |
+
<table><tr><td colspan="3">Loss Components</td><td colspan="6">Semantic Correspondence</td><td colspan="2">Video Tracking</td><td colspan="3">Pose Estimation</td></tr><tr><td rowspan="2">\( {\mathcal{L}}_{\text{match }} \)</td><td rowspan="2">\( {\mathcal{L}}_{\text{depth }} \)</td><td rowspan="2">\( {\mathcal{L}}_{\text{cost }} \)</td><td colspan="3">Different Views</td><td colspan="3">Same Views</td><td rowspan="2">Jaccard</td><td rowspan="2">Avg. Pts</td><td colspan="3">Accuracy within Thresholds</td></tr><tr><td>0.05</td><td>0.10</td><td>0.15</td><td>0.05</td><td>0.10</td><td>0.15</td><td>1cm-1deg</td><td>3cm-3deg</td><td>5cm-5deg</td></tr><tr><td>✓</td><td>✘</td><td>✘</td><td>26.32</td><td>41.76</td><td>50.72</td><td>37.45</td><td>58.30</td><td>68.15</td><td>37.78</td><td>57.45</td><td>9.61</td><td>44.77</td><td>63.52</td></tr><tr><td>✓</td><td>✓</td><td>✘</td><td>27.25</td><td>43.43</td><td>52.18</td><td>38.82</td><td>60.20</td><td>69.64</td><td>38.26</td><td>56.43</td><td>10.80</td><td>47.40</td><td>64.93</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>28.48</td><td>43.07</td><td>53.55</td><td>42.16</td><td>61.57</td><td>72.16</td><td>40.09</td><td>57.75</td><td>10.96</td><td>44.93</td><td>63.65</td></tr></table>
|
| 586 |
+
|
| 587 |
+
Table 9: Comparison of our VGGT and MASt3R-based methods on 3D correspondence understanding.
|
| 588 |
+
|
| 589 |
+
<table><tr><td colspan="3">Model</td><td colspan="6">Semantic Correspondence</td><td colspan="2">Video Tracking</td><td colspan="3">Pose Estimation</td></tr><tr><td rowspan="2">Method</td><td rowspan="2">Teacher</td><td rowspan="2">Dataset</td><td colspan="3">Different Views</td><td colspan="3">Same Views</td><td rowspan="2">Jaccard</td><td rowspan="2">Pos. Acc.</td><td rowspan="2">1cm-1deg</td><td rowspan="2">3cm-3deg</td><td rowspan="2">5cm-5deg</td></tr><tr><td>0.05</td><td>0.10</td><td>0.15</td><td>0.05</td><td>0.10</td><td>0.15</td></tr><tr><td>CLIP (Vanilla)</td><td>—</td><td>—</td><td>16.61</td><td>26.96</td><td>37.64</td><td>18.23</td><td>32.27</td><td>43.01</td><td>27.73</td><td>42.59</td><td>2.50</td><td>19.32</td><td>33.11</td></tr><tr><td rowspan="2">Ours (VGGT)</td><td rowspan="2">VGGT</td><td>Objaverse</td><td>19.84</td><td>32.79</td><td>44.24</td><td>25.44</td><td>42.48</td><td>55.18</td><td>36.77</td><td>52.68</td><td>6.94</td><td>34.37</td><td>51.83</td></tr><tr><td>ScanNet++</td><td>24.22</td><td>39.52</td><td>48.34</td><td>30.79</td><td>53.03</td><td>63.26</td><td>37.28</td><td>54.22</td><td>8.15</td><td>38.75</td><td>57.55</td></tr><tr><td rowspan="2">Ours (MASt3R)</td><td rowspan="2">MASt3R</td><td>Objaverse</td><td>25.87</td><td>39.85</td><td>50.21</td><td>36.77</td><td>56.61</td><td>67.93</td><td>35.60</td><td>54.65</td><td>8.50</td><td>39.30</td><td>57.68</td></tr><tr><td>ScanNet++</td><td>28.48</td><td>43.07</td><td>53.55</td><td>42.16</td><td>61.57</td><td>72.16</td><td>40.09</td><td>57.75</td><td>10.96</td><td>44.93</td><td>63.65</td></tr></table>
|
| 590 |
+
|
| 591 |
+
37.45% to 42.16% (same views). Additionally, video tracking accuracy measured by the average Jaccard index improves from 37.78% to 40.09%, and pose estimation achieves the highest accuracy of 10.96% at 1cm-1deg threshold.
|
| 592 |
+
|
| 593 |
+
These results confirm that each loss component meaningfully contributes to enhancing cross-view consistency and spatial understanding. Particularly, the cost volume alignment loss $\mathcal{L}_{\mathrm{cost}}$ improves the precision of representations, which significantly benefits performance on the most stringent evaluation metrics.
|
| 594 |
+
|
| 595 |
+
# F.3 Comparison of MASt3R and VGGT as a Teacher Model
|
| 596 |
+
|
| 597 |
+
We conduct additional experiments to compare the effectiveness of different pretrained 3D foundation models, MASt3R and VGGT, used as teacher models in our geometric distillation method. Specifically, we evaluate their performance across multiple downstream 3D correspondence tasks as summarized in Table 9.
|
| 598 |
+
|
| 599 |
+
Both MASt3R and VGGT-based models substantially outperform the vanilla CLIP baseline, and this demonstrates the effectiveness of our geometric distillation approach. However, we observe consistent differences between the two teachers. Overall, MASt3R consistently generates superior results compared to VGGT, particularly when finetuned on real-world ScanNet++ data. For example, on ScanNet++, MASt3R achieves significantly better semantic correspondence accuracy (PCK@0.05 of $28.48\%$ vs. $24.22\%$ in different-view scenarios and $42.16\%$ vs. $30.79\%$ in same-view scenarios), enhanced video tracking performance (average Jaccard index $40.09\%$ vs. $37.28\%$ ), and improved pose estimation accuracy ( $10.96\%$ vs. $8.15\%$ at
|
| 600 |
+
|
| 601 |
+
1cm-1deg threshold).
|
| 602 |
+
|
| 603 |
+
We attribute this difference in performance partly to the operational characteristics of each teacher model. Specifically, VGGT requires selecting an anchor viewpoint as user input to estimate dense correspondences across other views, so that it potentially introduces noise or inaccuracies. In contrast, MASt3R directly predicts dense and consistent semantic correspondences without requiring explicit selection of anchor points, which results in more reliable geometric guidance. Thus, while both models effectively enhance the geometric understanding of VLMs, MASt3R provides more precise and robust geometric priors in our experiments.
|
| 604 |
+
|
| 605 |
+
# G Failure Cases
|
| 606 |
+
|
| 607 |
+
Although our geometric distillation method significantly enhances the VLM representations, we identify limitations under certain challenging scenarios, also shared by MEF (You et al., 2024). Specifically, our approach heavily relies on accurate geometric priors from pretrained 3D foundation models. Consequently, when input views have minimal or no overlapping 3D regions, these foundation models may fail to accurately infer or reconstruct the underlying geometry. Such failures can propagate erroneous geometric guidance into our distilled VLM features, which may degrade its performance on downstream tasks. This limitation might be alleviated through improved sampling strategies that explicitly consider shared viewing regions, as well as by enhancing the single-image 3D inference capability of the underlying 3D foundation models.
|
| 608 |
+
|
| 609 |
+
We believe that addressing these limitations is an important future direction. Potential improvements may include utilizing more powerful 3D foundation models trained on diverse, large-scale multi-view
|
| 610 |
+
|
| 611 |
+
datasets or integrating explicit uncertainty estimation to mitigate the impact of unreliable geometric guidance.
|
3dawarevisionlanguagemodelsfinetuningwithgeometricdistillation/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a5699a245e0fedfc2ea43f6f975ff2d74cc9b61dacd6a007d322fad23085b254
|
| 3 |
+
size 1244374
|
3dawarevisionlanguagemodelsfinetuningwithgeometricdistillation/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e31466ec29195de8a944081137f3e7693a1045ac81a20394fbd1a0c34f44a440
|
| 3 |
+
size 685737
|
abenchmarkforhindiverbargumentstructurealternations/2c2d34ad-8bf4-4c30-a41d-2a44809ffb5f_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:dc5cc71f6983a4e5e0ca6acb190f5c6112be1db5133d2933e6e0a9892181544d
|
| 3 |
+
size 57895
|
abenchmarkforhindiverbargumentstructurealternations/2c2d34ad-8bf4-4c30-a41d-2a44809ffb5f_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f57b0b3685bfc2a11a63a6500dcd8050b2602692b4bef90e0f34b876ddd166ef
|
| 3 |
+
size 67839
|
abenchmarkforhindiverbargumentstructurealternations/2c2d34ad-8bf4-4c30-a41d-2a44809ffb5f_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0ca910c26cb80774545f49f96cfe98e201e2851a348f7f6bded7dc042088cf9b
|
| 3 |
+
size 227672
|
abenchmarkforhindiverbargumentstructurealternations/full.md
ADDED
|
@@ -0,0 +1,235 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# A Benchmark for Hindi Verb-Argument Structure Alternations
|
| 2 |
+
|
| 3 |
+
Kanishka Jain and Ashwini Vaidya
|
| 4 |
+
|
| 5 |
+
Indian Institute of Technology Delhi {kanishka, avaidya} @hss.iitd.ac.in
|
| 6 |
+
|
| 7 |
+
# Abstract
|
| 8 |
+
|
| 9 |
+
In this paper we introduce a Hindi verb alternations benchmark to investigate whether pretrained large language models (LLMs) can infer the frame-selectional properties of Hindi verbs. Our benchmark consists of minimal pairs such as Tina cut the wood/\*Tina disappeared the wood. We create four variants of these alternations for Hindi to test knowledge of verbal morphology and argument case-marking. Our results show that a masked monolingual model performs the best, while causal models fare poorly. We further test the quality of the predictions using a cloze-style sentence completion task. While the models appear to infer the right mapping between verbal morphology and valency in the acceptability task, they do not generate the right verbal morphology in the cloze task. The model completions also lack pragmatic and world knowledge, crucial for making generalizations about verbal alternations. Our work points towards the need for more cross-linguistic research of verbal alternations.
|
| 10 |
+
|
| 11 |
+
# 1 Introduction
|
| 12 |
+
|
| 13 |
+
A question that has been investigated repeatedly is whether large language models (LLMs) are able to learn the syntactic and semantic generalizations of a natural language given the diverse data they are trained on. A number of studies have created linguistic benchmarks consisting of syntactic phenomena (e.g. active-passives, syntactic agreement) using minimal pairs. LLMs are then tested on acceptability judgement tasks, comparing their performance with human judgements (Warstadt et al., 2020; Xiang et al., 2021; Someya and Oseki, 2023; Song et al., 2022).
|
| 14 |
+
|
| 15 |
+
Recent work evaluated transformer LLMs on Hindi syntactic agreement (Kryvosheieva and Levy, 2025). LLMs' performance was robust despite Hindi's complex split-ergative system. With respect to verb argument structure alternations, cross-linguistic results are mixed. For English as well
|
| 16 |
+
|
| 17 |
+
as Chinese, experiments show that model performance is relatively poor for argument structure (Warstadt et al., 2020; Xiang et al., 2021). For Japanese on the other hand, models seem to match human accuracy (Someya et al., 2024). There is no previous work evaluating LLMs' knowledge of verb argument structure for Hindi.
|
| 18 |
+
|
| 19 |
+
The core meaning of an event is contributed by the verb in a sentence or context. It comes densely packed with information about the number of arguments (or participants), their role, and how they are related to each other. This information comprises syntactic knowledge: mapping the verbal morphology to the correct number of arguments in the sentence. It also contains semantic knowledge where the verb and its arguments contribute to the event meaning.
|
| 20 |
+
|
| 21 |
+
In this paper, we use both acceptability judgements and cloze-style sentence completions following Ettinger (2020). We evaluate both masked and causal models, and also compare multilingual and monolingual models (Martin et al., 2020; Song et al., 2022). Results from our acceptability task indicate knowledge of the mapping between verbs and syntactic frames. At the same time, the best performing models from this task are not able to predict the correct verb forms in a cloze-style sentence completion. We show that verb alternations require LLMs to make generalizations that are different from other syntactic phenomena.
|
| 22 |
+
|
| 23 |
+
# 2 Alternations in Hindi
|
| 24 |
+
|
| 25 |
+
Hindi verbs carry morphosyntactic information that signals the change in arguments. In the following examples, the base form of an intransitive verb /ubəl/ 'boil' changes to transitive in /ubal/ and then to the indirect causative in /ubəlva/. While there is variation in the way each of these alternations are realized (e.g. some verbs have a null transitive alternation), there is a surface form-function map-
|
| 26 |
+
|
| 27 |
+
ping unlike English. For example, John broke the window and The window broke are causative and intransitive, respectively but without any surface differences.
|
| 28 |
+
|
| 29 |
+
(1) pani ubəl rəha t'awater.Mboil PROG.SG.M AUX.PST.SG.M 'The water was boiling.'
|
| 30 |
+
(2) lərka pani ubal rəha boy.3.SG.M water.M boil.DCAUS PROG.SG.M tHa AUX.PST.SG.M 'The boy was boiling the water.'
|
| 31 |
+
(3) lərka baccse-se pani boy.3.SG.M child.3.SG.M-AGT water.M ubal-va rha t'boil-ICAUS PROG.SG.M AUX.PST.SG.M 'The boy made/had the child boil the water.'
|
| 32 |
+
|
| 33 |
+
Begum et al. (2008) groups Hindi verbs together on the basis of this morphological relatedness. In this paper, we aim to investigate whether LLMs learn such a mapping between the morphological form and its corresponding argument frame.
|
| 34 |
+
|
| 35 |
+
One challenge in developing such an evaluation dataset for Hindi is that arguments are regularly dropped (elided), and case markers on the nouns exhibit case syncretism. For example in (5) the case /-se/ describes a source (Mira) and takes a transitive form. In example (4), the same case marker /-se/ is instrumental, occurring with a causative form of the verb /bədəl/ 'change'.
|
| 36 |
+
|
| 37 |
+
(4) amit-ne mira-se
|
| 38 |
+
amit.3.SG.M-ERG mira.3.SG.F-INST
|
| 39 |
+
$\mathsf{g}^{\mathrm{h}}\exists \mathrm{Di}$ bədəl-va-i
|
| 40 |
+
watch.3.SG.F change-ICAUS-PST.PERF.SG.F
|
| 41 |
+
'Amit made/had Mira change the watch.'
|
| 42 |
+
(5) amit-ne mira-se
|
| 43 |
+
amit.3.SG.M-ERG mira.3.SG.F-SOURCE
|
| 44 |
+
$\mathsf{g}^{\mathrm{h}}\mathsf{o}\mathsf{D}\mathsf{i}$ bədəl-i
|
| 45 |
+
watch.3.SG.F change-PST.PERF.SG.F
|
| 46 |
+
'Amit exchanged the watch from Mira.'
|
| 47 |
+
|
| 48 |
+
For our benchmark, we choose sentences where all argument and adjunct slots are filled. In our minimal pairs, the acceptable sentence has the /-va/ causative as in (3), with three arguments (causer, agent, and patient). An additional instrumental argument is also added to restrict the choice to causatives and avoid ambiguity. We then replace the grammatically correct verb with an incorrect form to test for awareness of the correct frame.
|
| 49 |
+
|
| 50 |
+
# 3 Benchmark construction
|
| 51 |
+
|
| 52 |
+
To examine the extent to which pretrained models effectively leverage syntactic and semantic information from the context, we introduce a benchmark of minimal pairs in Hindi. We construct minimal pairs such that both sentences have a common sentential prefix and a grammatical or ungrammatical verb (which occurs in SOV order in Hindi). The last word in each sentence is a past tense auxiliary (the verb occurs at second last position). All examples are shown in Table 1.
|
| 53 |
+
|
| 54 |
+
Our benchmark consists of 56 verbs that have been selected on the basis of different criteria. We first chose verbs on the basis of their frequency using the Shabd database corpus (Verma et al., 2022). We have selected verbs that are high on the Zipf scale to maximize the chance of their occurrence across model training corpora. This ensures that these verbs are well represented and we minimize out-of-vocabulary effects. We then categorized verbs according to their valency. Since the goal of this work is to study how well pretrained models understand the verb argument structure of Hindi verbs, the final verb list maps to all three syntactic frames – intransitive (1 argument), transitive (2 arguments), and ditransitive (3 arguments). We also consider finer classifications, e.g. intransitive verbs which are further categorized into unergative and unaccusative verbs. Transitive verbs contain a sub-category of ingesto-reflexives. The final set has 28 intransitive verbs (13 unergatives and 15 unaccusatives), 23 transitive verbs (with 13 ingesto-reflexives), and 5 ditransitive verbs.
|
| 55 |
+
|
| 56 |
+
For our evaluation, we generate four variants of our benchmark that are described below:
|
| 57 |
+
|
| 58 |
+
Different Verb: the two verbs are morphologically unrelated forms, with different valency.
|
| 59 |
+
|
| 60 |
+
Same Verb: the two verbs are morphologically related, but with a different valency.
|
| 61 |
+
|
| 62 |
+
No Case(E): the two verbs are morphologically related, but the verbal aspect is habitual, which results in the ergative marker on the subject being removed<sup>1</sup>.
|
| 63 |
+
|
| 64 |
+
No Case(I): the two verbs are morphologically related, but we remove the additional adjunct argument from both sentences.
|
| 65 |
+
|
| 66 |
+
We can think of the 'Different Verb' and 'Same Verb' variants of the dataset as being maximally specified in terms of the arguments and adjuncts, al
|
| 67 |
+
|
| 68 |
+
<table><tr><td>Task</td><td>Exp</td><td colspan="4">Sentence Prefix</td><td>Verb</td><td>Acceptability</td></tr><tr><td rowspan="4">Acceptability</td><td>DV</td><td>mã-ne mother-ERG</td><td>arjun-se arjun-AGT</td><td>kulhaDi-se axe-INST</td><td>lãkDi wood</td><td>kãt-vai thi cut-DCAUS.PST be.PST joli thi burn.PST be.PST</td><td>✓ x</td></tr><tr><td>SV</td><td>mã-ne mother-ERG</td><td>arjun-se arjun-AGT</td><td>kulhaDi-se axe-INST</td><td>lãkDi wood</td><td>kãt-vai thi cut-DCAUS.PST be.PST kãTi thi cutPST be.PST</td><td>✓ x</td></tr><tr><td>No Case(E)</td><td>mã mother</td><td>arjun-se arjun-AGT</td><td>kulhaDi-se axe-INST</td><td>lãkDi wood</td><td>kãT-va-ti thi cut-DCAUS-HAB be.PST kãt-ti thi cut-HAB be.PST</td><td>✓ x</td></tr><tr><td>No Case(I)</td><td>mã-ne mother</td><td>arjun-se arjun-AGT</td><td>(...) lãkDi (...)</td><td>wood</td><td>kãT-va-i thi cut-DCAUS be.PST kãt-i thi cutPST be.PST</td><td>✓ x</td></tr><tr><td>Cloze</td><td></td><td>mã-ne mother-ERG</td><td>arjun-se arjun-INST</td><td>kulhaDi-se axe-INST</td><td>lãkDi wood</td><td>_ thi</td><td>NA</td></tr></table>
|
| 69 |
+
|
| 70 |
+
Table 1: Minimal pairs from our Hindi verb alternation benchmark. The example sentence is translated as Mother made Arjun cut the wood with an axe. DV=Different Verb, SV=Same Verb, No Case(E)= no ergative case on subject, and No Case(I)= no instrument case marked adjunct. The cloze task shows the sentential prefix, missing verb and the auxiliary. Argument /arjun-se/ is glossed as AGT 'AGENT' to distinguish it from the Instrumental case for kulhaDi 'axe'.
|
| 71 |
+
|
| 72 |
+
lowing us to test whether the mapping between morphological encoding and valency is learned. The 'No Case' variants compares the morphologically related verbs but the case information is changed. This is done primarily to test whether the models are robust to subtle changes in the surface forms of the arguments. Table 1 shows example for each variant.
|
| 73 |
+
|
| 74 |
+
Each set has 56 pairs for the acceptability task. To collect acceptability judgements, we conducted a forced choice acceptability judgment experiment using PCIBEX (Zehr and Schwarz, 2023). Participants were asked to choose the most acceptable sentence (see Appendix B.1 for all details). We present annotator accuracy along with LLMs' in Table 2. For all the variants of our dataset, human accuracy is quite high. We use the sentential prefix as shown in Table 1 for the cloze task.
|
| 75 |
+
|
| 76 |
+
# 4 Models
|
| 77 |
+
|
| 78 |
+
We test our dataset using six models via the HuggingFace Transformers library (Wolf et al., 2020) – four BERT-based masked language models (XLM-RoBERTa, MuRIL, IndicBERTv2 and HindBERT) and two causal language models (mGPT and BLOOM). All models, except for HindBERT are multilingual models and differ primarily in terms of their size and the language(s) they are trained on. (An overview of models is presented in
|
| 79 |
+
|
| 80 |
+
Appendix A). mGPT has 1.3B and 13B variants and BLOOM has 560M, 1.1B, 1.7B, 3B, 7.1B, 13B, and 176B variants. We found that as the parameters increased beyond 1B for these models, performance worsened. On the 'Different Verb' variant of our benchmark the performance of the 1.7 billion and 1.1 billion variants of the BLOOM model was the same (75% accuracy). However, for BLOOM 3 billion, the performance dropped to 62.5%. These results are similar to Kryvosheieva and Levy (2025)'s results for Hindi where the performance dropped for BLOOM's 3 billion variant. Hence, in this study we present results only from $\mathrm{mGPT}_{1.3\mathrm{b}}$ , $\mathrm{BLOOM}_{560\mathrm{m}}$ and $\mathrm{BLOOM}_{1.1\mathrm{B}}$ .
|
| 81 |
+
|
| 82 |
+
We evaluate models' performance using sentence score. For causal models, the score of a sentence is computed as the sum of the log-probabilities of each token conditioned on the sequence of preceding tokens. Whereas for masked models, we employ the pseudo-log-likelihood (PLL) scoring method introduced by Kauf and Ivanova (2023). The original PLL scoring method estimates sentence probability by masking words iteratively in a sentence, calculating the probability of each mask, and then multiplying the probabilities of each word (Wang and Cho, 2019; Salazar et al., 2020). However, this method does not mask within word tokens of a multi-token word and results in inflated scores (Kauf and Ivanova, 2023). There
|
| 83 |
+
|
| 84 |
+
<table><tr><td rowspan="2">Type</td><td rowspan="2">Models</td><td colspan="4">Accuracy</td></tr><tr><td>DV</td><td>SV</td><td>No Case(E)</td><td>No Case(I)</td></tr><tr><td rowspan="4">masked</td><td>XLM-Rbase</td><td>67.9</td><td>55.4</td><td>35.7</td><td>58.9</td></tr><tr><td>XLM-Rlarge</td><td>89.3</td><td>62.5</td><td>53.6</td><td>69.6</td></tr><tr><td>MuRIL</td><td>85.7</td><td>76.8</td><td>50.0</td><td>67.9</td></tr><tr><td>IndicBERTv2</td><td>92.9</td><td>91.1</td><td>67.9</td><td>83.9</td></tr><tr><td>(monolingual)</td><td>HindBERT</td><td>98.2</td><td>83.9</td><td>83.9</td><td>91.1</td></tr><tr><td rowspan="3">causal</td><td>mGPT1.3b</td><td>53.6</td><td>21.4</td><td>16.1</td><td>30.4</td></tr><tr><td>BLOOM560m</td><td>58.9</td><td>42.9</td><td>8.9</td><td>42.9</td></tr><tr><td>BLOOM1.1b</td><td>75.0</td><td>58.9</td><td>23.2</td><td>62.5</td></tr><tr><td colspan="2">Humans</td><td>99.0</td><td>90.9</td><td>96.4</td><td>99.7</td></tr></table>
|
| 85 |
+
|
| 86 |
+
Table 2: Average percentage accuracy of the LLMs and human performance on each experiment (chance probability is $50\%$ ). Overall, LLMs' performance is comparable to humans and the monolingual model (HindBERT) performs better than the multilingual ones.
|
| 87 |
+
|
| 88 |
+
fore, we calculate the PLL score for each word by masking within word tokens as well.
|
| 89 |
+
|
| 90 |
+
We calculate the PLL score for each sentence individually. The sentence with the greater PLL score is deemed to be more acceptable than the other. We then evaluate these probabilities against the gold data to calculate accuracy.
|
| 91 |
+
|
| 92 |
+
The Syntactic Log-Odds Ratio (SLOR) (Pauls and Klein, 2012; Lau et al., 2017; Lu et al., 2024) is also another method that is used to score sentences, while controlling for sentence length and lexical frequency. We did not calculate this score in our work as the training data for all the models that we tested was not publicly available. We also note that in our dataset all the example sentences were of similar length (between 9-11 words).
|
| 93 |
+
|
| 94 |
+
# 5 Results
|
| 95 |
+
|
| 96 |
+
Acceptability Task: Table 2 shows results for the acceptability task. For the 'Different Verb' variant, all masked models performed above chance with the monolingual model close to the human accuracy. However, all causal models lag far behind humans with only BLOOM<sub>1.1b</sub> achieving $75\%$ accuracy. mGPT and BLOOM have shown good results in Kryvosheieva and Levy (2025)'s experiments on Hindi syntactic agreement but performed poorly for our task. Our results suggest that verbal alternations are more challenging than syntactic agreement for causal models. We additionally tested the Llama 3.2-1B and Llama 3.2-3B models for our acceptability task, but found their performance to be similar to mGPT and BLOOM.
|
| 97 |
+
|
| 98 |
+
For the 'Same Verb' task, there is a drop in performance, which is also reflected in the human accuracy. But the performance drop is more prominent in XLM-R-large and MuRIL. For the 'No
|
| 99 |
+
|
| 100 |
+
Case(I)' variant, both IndicBERT and HindBERT are less accurate. This shows that using an additional instrument argument, and maximally filling all argument and adjunct slots does help LLMs to discriminate, while it makes little difference to humans. The weak performance for the 'No Case(E)' variant is surprising. All models are less accurate, showing that case information like the ergative marker /-ne/ is an important cue for models. Ravfogel et al. (2019) also report that overt morphological case marking makes model prediction easier for syntactic agreement phenomena.
|
| 101 |
+
|
| 102 |
+
As discussed in Section 2, Hindi verbs can be classified into different categories according to their valency and type. In order to understand whether these distinctions impact model performance, we further analyze our results for each of the different categories. For intransitives and transitives, models' performance across each task was uniform, however we do see a decrease in performance for ditransitives in all variants except for the 'Different Verb' task (see Table 5 in Section C in the Appendix).
|
| 103 |
+
|
| 104 |
+
Sentence Completion Task: We also carried out a cloze-style sentence completion task. We took the best performing models- the multilingual IndicBERTv2 and monolingual HindBERT and asked them to complete the sentence as shown in Table 1. Both models were shown 56 sentential prefixes with the missing verb followed by the auxiliary signaling the end of the sentence. All the gold examples contain the morphological /-va/ causative.
|
| 105 |
+
|
| 106 |
+
Models rarely generated verbs with the /-va/ causative. Rather, the completions are usually transitive or ditransitive verbs. Sometimes these completions may be grammatical due to the ambigu
|
| 107 |
+
|
| 108 |
+
<table><tr><td>Sentential Prefix</td><td>Expected</td><td>Predicted</td></tr><tr><td>mohān-ne bōcci-se pōnkhe-se mombòti —— t'hi ‘Mohan made/had the girl —— the candle with the fan.’</td><td>bujhvaei (made to extinguish)</td><td>1. khəridi (bought) 2. nikali (removed)</td></tr></table>
|
| 109 |
+
|
| 110 |
+
Table 3: Example of cloze predictions from (1) HindBERT and (2) IndicBERTv2
|
| 111 |
+
|
| 112 |
+
ity in the case markers on the nouns (see Section 2). Our qualitative analysis suggests that in $28\%$ of the sentences, LLMs produce completions that are ungrammatical. The errors show a lack of commonsense or pragmatic knowledge, in particular the semantic content of the nominal argument and the case marker. Table 3 shows such an example where the most appropriate verb would be extinguish, but the models predict buy or remove. This shows that the models learn about valency and morphological forms (as shown by the acceptability tasks) but not about event semantics.
|
| 113 |
+
|
| 114 |
+
We also collected human judgements to see whether they prefer the gold completions or models' predictions using a forced choice task. Annotators were shown pairs of completions and asked to select the most grammatical option. We then calculated the percentage of times annotators agreed with the gold completions, finding a mean agreement rate of $85.9\%$ , which indicates a strong preference for the gold completions over the models' outputs (see Appendix B.2 for the experiment details).
|
| 115 |
+
|
| 116 |
+
# 6 Discussion
|
| 117 |
+
|
| 118 |
+
In this work, we have created a benchmark of minimal pairs with four variants to test the knowledge of Hindi verbal alternations. Our benchmark has been publicly released. We show that masked models are the closest to human performance for the acceptability task, but when these models are used in a cloze-style completion, their completions lack integration of both syntactic and semantic knowledge. This indicates an incomplete understanding of verb frames.
|
| 119 |
+
|
| 120 |
+
Hindi morphologically encodes its verbal argument structure, and this information seems to give the models a boost in the 'Different Verb' variant (Mueller et al., 2020). At the same time, case syncretism is a disadvantage, which makes the argument and adjunct distinction more challenging
|
| 121 |
+
|
| 122 |
+
for 'No Case'. Both IndicBERT $_{v2}$ and HindBERT are fairly large models, trained on 20 billion and 1.8 billion tokens respectively. It is unlikely that increasing the size of the models will help to improve their event semantics knowledge.
|
| 123 |
+
|
| 124 |
+
We see that current models have close to human performance for acceptability judgements but they are far less robust in a generation task. The ungrammatical completions indicate that the models have a surface understanding of valency but are unable to integrate this knowledge with event meaning. Our research points towards the need to investigate syntactic and semantic integration in LLMs.
|
| 125 |
+
|
| 126 |
+
# Limitations
|
| 127 |
+
|
| 128 |
+
Our study focuses on one syntactic phenomenon, that is knowledge of verb frames in Hindi, unlike benchmarks like BLiMP (Warstadt et al., 2020) that includes many syntactic phenomena. Future research work covering other syntactic phenomena for Hindi and other languages will give a generalized idea of models' linguistic competence. Further, we carried out the cloze task only with top performing models and not others. There is a possibility that causal models may have better performance and we plan to explore this in future work.
|
| 129 |
+
|
| 130 |
+
# Ethical Consideration
|
| 131 |
+
|
| 132 |
+
We collected informed consent from all individuals who volunteered to participate in the data collection, adhering to all relevant norms and regulations of our institution. We also obtained required permissions from our institute's ethics committee. All the participants for all the studies were adequately compensated for their time.
|
| 133 |
+
|
| 134 |
+
# Acknowledgments
|
| 135 |
+
|
| 136 |
+
We gratefully acknowledge the Google Research Scholar Award (2024) to the second author, which helped support this research. We are thankful to the reviewers for their comments and valuable feedback. We also thank the annotators for their participation.
|
| 137 |
+
|
| 138 |
+
# References
|
| 139 |
+
|
| 140 |
+
Rafiya Begum, Samar Husain, Lakshmi Bai, and Dipti Misra Sharma. 2008. Developing verb frames for Hindi. In Proceedings of the Sixth International Conference on Language Resources and Evaluation
|
| 141 |
+
|
| 142 |
+
(LREC'08), Marrakech, Morocco. European Language Resources Association (ELRA).
|
| 143 |
+
Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettle-moyer, and Veselin Stoyanov. 2019. Unsupervised cross-lingual representation learning at scale. CoRR, abs/1911.02116.
|
| 144 |
+
Allyson Ettinger. 2020. What BERT is not: Lessons from a new suite of psycholinguistic diagnostics for language models. Transactions of the Association for Computational Linguistics, 8:34-48.
|
| 145 |
+
Junjie Hu, Sebastian Ruder, Aditya Siddhant, Graham Neubig, Orhan Firat, and Melvin Johnson. 2020. XTREME: A massively multilingual multi-task benchmark for evaluating cross-lingual generalisation. In International conference on machine learning, pages 4411-4421. PMLR.
|
| 146 |
+
Raviraj Joshi. 2022. L3Cube-HindBERT and DevBERT: Pre-trained bert transformer models for devanagari based Hindi and marathi languages. arXiv preprint arXiv:2211.11418.
|
| 147 |
+
Divyanshu Kakwani, Anoop Kunchukuttan, Satish Golla, Gokul N.C., Avik Bhattacharyya, Mitesh M. Khapra, and Pratyush Kumar. 2020. IndicNLPSuite: Monolingual corpora, evaluation benchmarks and pre-trained multilingual language models for Indian languages. In Findings of the Association for Computational Linguistics: EMNLP 2020, pages 4948-4961, Online. Association for Computational Linguistics.
|
| 148 |
+
Carina Kauf and Anna Ivanova. 2023. A better way to do masked language model scoring. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 925-935.
|
| 149 |
+
Simran Khanuja, Diksha Bansal, Sarvesh Mehtani, Savya Khosla, Atreyee Dey, Balaji Gopalan, Dilip Kumar Margam, Pooja Aggarwal, Rajiv Teja Nagipogu, Shachi Dave, and 1 others. 2021. Muril: Multilingual representations for indian languages. arXiv preprint arXiv:2103.10730.
|
| 150 |
+
Daria Kryvosheieva and Roger Levy. 2025. Controlled evaluation of syntactic knowledge in multilingual language models. *LoResLM* 2025, page 402.
|
| 151 |
+
Jey Han Lau, Alexander Clark, and Shalom Lappin. 2017. Grammaticality, acceptability, and probability: A probabilistic view of linguistic knowledge. Cognitive science, 41(5):1202-1241.
|
| 152 |
+
Jiayi Lu, Jonathan Merchan, Lian Wang, and Judith Degen. 2024. Can syntactic log-odds ratio predict acceptability and satiation? In Proceedings of the Society for Computation in Linguistics 2024, pages 10–19, Irvine, CA. Association for Computational Linguistics.
|
| 153 |
+
|
| 154 |
+
Louis Martin, Benjamin Muller, Pedro Javier Ortiz Suárez, Yoann Dupont, Laurent Romary, Éric de la Clergerie, Djamé Seddah, and Benoit Sagot. 2020. CamemBERT: a tasty French language model. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 7203-7219, Online. Association for Computational Linguistics.
|
| 155 |
+
Aaron Mueller, Garrett Nicolai, Panayiotia Petrou-Zeniou, Natalia Talmina, and Tal Linzen. 2020. Cross-linguistic syntactic evaluation of word prediction models. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 5523-5539, Online. Association for Computational Linguistics.
|
| 156 |
+
Adam Pauls and Dan Klein. 2012. Large-scale syntactic language modeling with treelets. In Proceedings of the 50th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 959-968, Jeju Island, Korea. Association for Computational Linguistics.
|
| 157 |
+
Shauli Ravfogel, Yoav Goldberg, and Tal Linzen. 2019. Studying the inductive biases of RNNs with synthetic variations of natural languages. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 3532-3542, Minneapolis, Minnesota. Association for Computational Linguistics.
|
| 158 |
+
Julian Salazar, Davis Liang, Toan Q. Nguyen, and Katrin Kirchhoff. 2020. Masked language model scoring. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 2699-2712, Online. Association for Computational Linguistics.
|
| 159 |
+
Oleh Shliazhko, Alena Fenogenova, Maria Tikhonova, Anastasia Kozlova, Vladislav Mikhailov, and Tatiana Shavrina. 2024. mgpt: Few-shot learners go multilingual. Transactions of the Association for Computational Linguistics, 12:58-79.
|
| 160 |
+
Taiga Someya and Yohei Oseki. 2023. JBLiMP: Japanese benchmark of linguistic minimal pairs. In Findings of the Association for Computational Linguistics: EACL 2023, pages 1581-1594.
|
| 161 |
+
Taiga Someya, Yushi Sugimoto, and Yohei Oseki. 2024. JCoLA: Japanese corpus of linguistic acceptability. In Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024), pages 9477-9488, Torino, Italia. ELRA and ICCL.
|
| 162 |
+
Yixiao Song, Kalpesh Krishna, Rajesh Bhatt, and Mohit Iyyer. 2022. SLING: Sino linguistic evaluation of large language models. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pages 4606-4634, Abu Dhabi, United Arab Emirates. Association for Computational Linguistics.
|
| 163 |
+
|
| 164 |
+
Ark Verma, Vivek Sikarwar, Himanshu Yadav, Ranjith Jaganathan, and Pawan Kumar. 2022. Shabd: A psycholinguistic database for Hindi. Behavior Research Methods, 54(2):830-844.
|
| 165 |
+
|
| 166 |
+
Alex Wang and Kyunghyun Cho. 2019. BERT has a mouth, and it must speak: BERT as a Markov random field language model. In Proceedings of the Workshop on Methods for Optimizing and Evaluating Neural Language Generation, pages 30-36, Minneapolis, Minnesota. Association for Computational Linguistics.
|
| 167 |
+
|
| 168 |
+
Alex Warstadt, Alicia Parrish, Haokun Liu, Anhad Mohananey, Wei Peng, Sheng-Fu Wang, and Samuel R Bowman. 2020. BLiMP: The benchmark of linguistic minimal pairs for english. Transactions of the Association for Computational Linguistics, 8:377-392.
|
| 169 |
+
|
| 170 |
+
Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pierric Cistac, Tim Rault, Remi Louf, Morgan Funtowicz, Joe Davison, Sam Shleifer, Patrick von Platen, Clara Ma, Yacine Jernite, Julien Plu, Canwen Xu, Teven Le Scao, Sylvain Gugger, and 3 others. 2020. Transformers: State-of-the-art natural language processing. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 38-45, Online. Association for Computational Linguistics.
|
| 171 |
+
|
| 172 |
+
BigScience Workshop, Teven Le Scao, Angela Fan, Christopher Akiki, Ellie Pavlick, Suzana Ilic, Daniel Hesslow, Roman Castagné, Alexandra Sasha Luccioni, François Yvon, and 1 others. 2022. Bloom: A 176b-parameter open-access multilingual language model. arXiv preprint arXiv:2211.05100.
|
| 173 |
+
|
| 174 |
+
Beilei Xiang, Changbing Yang, Yu Li, Alex Warstadt, and Katharina Kann. 2021. CLiMP: A benchmark for Chinese language model evaluation. In Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume, pages 2784-2790, Online. Association for Computational Linguistics.
|
| 175 |
+
|
| 176 |
+
Jérémy Zehr and Florian Schwarz. 2023. PennController for internet based experiments (IBEX).
|
| 177 |
+
|
| 178 |
+
# A Models Evaluated
|
| 179 |
+
|
| 180 |
+
# A.1 XLM-R
|
| 181 |
+
|
| 182 |
+
XLM-R (Conneau et al., 2019) is a multilingual masked language model (MLM) developed by Facebook. It is pretrained on 2.5TB of filtered CommonCrawl data in 100 languages including Hindi. In this work, we are evaluating the base and large versions of this model. XLM- $\mathbf{R}_{\mathrm{base}}$ has 12 layers, 768 hidden units, 12 attention heads, and 270M parameters where as XLM-R large has 24 layers, 1024 hidden units, 16 attention heads, and 550M parameters.
|
| 183 |
+
|
| 184 |
+
<table><tr><td>Type</td><td>Model</td><td>Tokens</td><td>Par</td></tr><tr><td rowspan="4">masked</td><td>XLM-Rbase</td><td>2.5TB</td><td>270M</td></tr><tr><td>XLM-Rlarge</td><td>2.5TB</td><td>550M</td></tr><tr><td>MuRIL</td><td>21B</td><td>236M</td></tr><tr><td>IndicBertv2</td><td>20.9B</td><td>278M</td></tr><tr><td>(monolingual)</td><td>HindBert</td><td>1.8B</td><td></td></tr><tr><td rowspan="3">causal</td><td>mGPT</td><td>46B & 442B</td><td>1.3B</td></tr><tr><td>Bloom560m</td><td>341B</td><td>560M</td></tr><tr><td>Bloom1.1b</td><td>341B</td><td>1.1B</td></tr></table>
|
| 185 |
+
|
| 186 |
+
Table 4: Models evaluated by training data size (in tokens) and number of parameters (Par). We couldn't find the exact number of parameters for HindBERT.
|
| 187 |
+
|
| 188 |
+
# A.2 MuRIL
|
| 189 |
+
|
| 190 |
+
MuRIL (Multilingual Representations for Indian Languages) (Khanuja et al., 2021) is a multilingual transformer-based language model developed by Google, specifically for Indian languages. It is based on the BERT architecture, with 12 layers, 12 attention heads, and 236 million parameters. MuRIL is trained on significantly large amounts of Indian text corpora across 16 Indian languages and English. It significantly outperforms mBERT on all tasks in XTREME benchmark (Hu et al., 2020).
|
| 191 |
+
|
| 192 |
+
# A.3 IndicBERT
|
| 193 |
+
|
| 194 |
+
IndicBERT (Kakwani et al., 2020) is a multilingual ALBERT-based language model developed by AI4Bharat, optimized for Indian languages. It has two versions and we are testing the version 2. IndicBERT v2 is trained on IndicCorp v2, an Indic monolingual corpus of 20.9 billion tokens, covering 24 Indian languages. The model has 12 encoder layers, 12 attention heads, and 278 million parameters.
|
| 195 |
+
|
| 196 |
+
# A.4 HindBERT
|
| 197 |
+
|
| 198 |
+
HindBERT (Joshi, 2022) is a monolingual BERT-based transformer model trained exclusively on Hindi by L3Cube. It is trained on around 1.8 billion Hindi tokens. The model has 12 layers and 12 attention heads, and the vocabulary size of 197285.
|
| 199 |
+
|
| 200 |
+
# A.5 mGPT
|
| 201 |
+
|
| 202 |
+
Multilingual GPT (mGPT) (Shliazhko et al., 2024) is a causal language model based on the GPT-3 architecture. It supports 61 languages, including several Indian languages, and the pretraining corpus size is 46B (Wikipedia), and 442B UTF characters (C4). There are two variants available for
|
| 203 |
+
|
| 204 |
+
<table><tr><td rowspan="2">Models</td><td colspan="3">DV</td><td colspan="3">SV</td><td colspan="3">No Case(E)</td><td colspan="3">No Case(I)</td></tr><tr><td>Intran</td><td>Tran</td><td>Ditran</td><td>Intran</td><td>Tran</td><td>Ditran</td><td>Intran</td><td>Tran</td><td>Ditran</td><td>Intran</td><td>Tran</td><td>Ditran</td></tr><tr><td>XLM-Rbase</td><td>64.3</td><td>69.6</td><td>80</td><td>75</td><td>43.5</td><td>0</td><td>57.1</td><td>17.4</td><td>0</td><td>75.0</td><td>52.2</td><td>0</td></tr><tr><td>XLM-Rlarge</td><td>85.7</td><td>91.3</td><td>100</td><td>82.1</td><td>47.8</td><td>20.0</td><td>60.7</td><td>47.8</td><td>40.0</td><td>89.3</td><td>56.5</td><td>20.0</td></tr><tr><td>MuRIL</td><td>78.6</td><td>95.6</td><td>80</td><td>78.6</td><td>78.3</td><td>60.0</td><td>53.6</td><td>47.8</td><td>40.0</td><td>71.4</td><td>69.6</td><td>40.0</td></tr><tr><td>IndicBERT</td><td>92.9</td><td>91.3</td><td>100</td><td>96.4</td><td>86.9</td><td>80.0</td><td>75</td><td>56.52</td><td>80.0</td><td>92.9</td><td>78.3</td><td>60.0</td></tr><tr><td>HindBERT</td><td>96.4</td><td>100</td><td>100</td><td>92.9</td><td>82.6</td><td>40.0</td><td>89.3</td><td>86.9</td><td>40.0</td><td>100</td><td>91.3</td><td>40.0</td></tr><tr><td>mGPT1.3b</td><td>42.9</td><td>65.2</td><td>60.0</td><td>53.6</td><td>8.7</td><td>0</td><td>21.4</td><td>13.0</td><td>0</td><td>53.6</td><td>8.7</td><td>0</td></tr><tr><td>BLOOM560m</td><td>50</td><td>69.6</td><td>60.0</td><td>53.6</td><td>39.1</td><td>0</td><td>14.3</td><td>4.3</td><td>0</td><td>53.6</td><td>39.1</td><td>0</td></tr><tr><td>BLOOM1.1b</td><td>71.4</td><td>78.3</td><td>80.0</td><td>75.0</td><td>60.9</td><td>0</td><td>28.6</td><td>21.7</td><td>0</td><td>75.0</td><td>60.9</td><td>0</td></tr></table>
|
| 205 |
+
|
| 206 |
+
Table 5: Average percentage accuracy of the LLMs on each experiment for different class of verbs
|
| 207 |
+
|
| 208 |
+
this model. In this work, we are evaluating only the small one with 1.3 billion parameters.
|
| 209 |
+
|
| 210 |
+
# A.6 BLOOM
|
| 211 |
+
|
| 212 |
+
BLOOM (BigScience Large Open-science Open-access Multilingual Language Model) (Workshop et al., 2022) is a multilingual autoregressive transformer model developed by the BigScience project. It supports 46 natural languages, including many low-resource ones, and 13 programming languages. BLOOM is trained on the ROOTS corpus. The full model has 176 billion parameters but also has 5 small size variants. For our study, we test the 560 millions variant and the 1.1 billions variant.
|
| 213 |
+
|
| 214 |
+
# B Experiments with Humans
|
| 215 |
+
|
| 216 |
+
# B.1 Acceptability Task
|
| 217 |
+
|
| 218 |
+

|
| 219 |
+
Figure 1: Example of a minimal pair. English translation: Arjun made Mohan catch a fish with a net.
|
| 220 |
+
|
| 221 |
+
All the experiments for the acceptability task were conducted using PCIBEX. Participants were given instructions about the task in both Hindi and English. We explained to each participant that there are no risks involved in the task.
|
| 222 |
+
|
| 223 |
+
In each experiment they saw the minimal pair simultaneously as shown in Fig.1 and they were asked to choose the more grammatically acceptable sentence for each pair. We also included fillers and
|
| 224 |
+
|
| 225 |
+
practice sets. The order of main sentences and fillers was shuffled.
|
| 226 |
+
|
| 227 |
+
Participants for the first experiment, 'Different Verb', were aged 18-40. We collected the data in person using an anonymous id for each one of them. We have 15 judgements for each pair in this experiment. The participants were paid according to our institution's policy. For the remaining variants we collected data on the crowdsourcing platform Prolific. For each of these experiments the dataset consisted of 28 randomly sampled sentences. We collected 20 judgements on each pair. All the participants were self-reported native Hindi speakers and they were paid in accordance with Prolific's fair compensation policies.
|
| 228 |
+
|
| 229 |
+
# B.2 Cloze Task
|
| 230 |
+
|
| 231 |
+
We collected human judgments on the completions produced by the two models. We presented each sentence prefix to 14 native speakers of Hindi on Prolific and provided them three options: the (gold) causative verb and the verbs predicted by IndicBERT and HindBERT. Participants were asked to choose the most appropriate completion for each sentence. The information sheet clearly mentioned that there are no risks involved in the study. All participants were self reported native speakers of Hindi and were paid in accordance with Prolific's fair compensation policies.
|
| 232 |
+
|
| 233 |
+
# C Class wise analysis for Verbs
|
| 234 |
+
|
| 235 |
+
In Table 5, we present evaluation results of verbs categorized as intransitives (Intran), transitives (Tran) and ditransitives (Ditran) for all the models.
|
abenchmarkforhindiverbargumentstructurealternations/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:09ac23f9162d744fe2c95b8ecb7c57853f028d620214fec216b93bdce7e72e9d
|
| 3 |
+
size 234670
|
abenchmarkforhindiverbargumentstructurealternations/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f3dd8b54f073e8e0cd44365f69745f91f1decbe19e14f6d1b1f2f7225b439ae7
|
| 3 |
+
size 214716
|
abenchmarkfortranslationsacrossstylesandlanguagevariants/c2e36fb1-0ec3-4187-926d-19b581e20525_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:047c7878e2c64e73fe92188afac12d8d54fa6b4382c156e79e7c5f14eb8fdf4f
|
| 3 |
+
size 97993
|
abenchmarkfortranslationsacrossstylesandlanguagevariants/c2e36fb1-0ec3-4187-926d-19b581e20525_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:03e5a462c7d284d53d0b03ed5ae39e39c110114e03e2c1c52ea3dc2506c52b01
|
| 3 |
+
size 117307
|
abenchmarkfortranslationsacrossstylesandlanguagevariants/c2e36fb1-0ec3-4187-926d-19b581e20525_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ebacc881893fa6132a887202076c536cede0cda4ff86b4447a47fe15a28536ca
|
| 3 |
+
size 803108
|
abenchmarkfortranslationsacrossstylesandlanguagevariants/full.md
ADDED
|
@@ -0,0 +1,401 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# A Benchmark for Translations Across Styles and Language Variants
|
| 2 |
+
|
| 3 |
+
Xin Tan and Bowei Zou and Ai Ti Aw
|
| 4 |
+
|
| 5 |
+
Institute for Infocomm Research $(\mathrm{I}^2\mathrm{R})$ ,A\*STAR, Singapore
|
| 6 |
+
|
| 7 |
+
{tan_xin,zou_bowei,aaiti}@i2r.a-star.edu.sg
|
| 8 |
+
|
| 9 |
+
# Abstract
|
| 10 |
+
|
| 11 |
+
As machine translation (MT) rapidly advances in bridging global communication gaps, there is growing interest in variety-targeted translation for fine-grained language variants and specific translation styles. This translation variant aims to generate target outputs that are not only contextually accurate but also culturally sensitive. However, the lack of comprehensive evaluation benchmarks has hindered progress in this field. To bridge this gap, this work focuses on the translation across styles and language variants, aiming to establish a robust foundation for the automatic evaluation of fine-grained cultural and stylistic nuances, thereby fostering innovation in culturally sensitive translations. Specifically, we evaluate translations across four key dimensions: semantic preservation, cultural and regional specificity, expression style, and fluency at both the word and sentence levels. Through detailed human evaluations, we validate the high reliability of the proposed evaluation framework. On this basis, we thoroughly assess translations of state-of-the-art large language models (LLMs) for this task, highlighting their strengths and identifying areas for future improvement.
|
| 12 |
+
|
| 13 |
+
# 1 Introduction
|
| 14 |
+
|
| 15 |
+
Machine Translation (MT) has made significant strides in breaking down communication barriers around the world, particularly for widely spoken languages like Chinese and English at a broad level. As MT technologies continue to advance, there is growing interest in variety-targeted translation, targeting fine-grained language variants such as regional dialects (Kumar et al., 2021; Riley et al., 2023), and specialized stylistic adaptations, including formality-aware MT (Niu et al., 2017, 2018; Wang et al., 2019) and personalized MT (Michel and Neubig, 2018; Vincent, 2021). This evolution in MT aims to ensure that translations are not only contextually accurate but also culturally sen
|
| 16 |
+
|
| 17 |
+
sitive, thereby facilitating cross-cultural communication (Yao et al., 2024). The emphasis on integrating translations with different regions, cultural contexts, and specific styles highlights the unique challenges of this task compared to general machine translation. As a result, traditional evaluation metrics such as BLEU are no longer adequate to measure the quality of these fine-grained translations (Riley et al., 2023). Progress in this area has been hampered by the lack of comprehensive, high-quality evaluation benchmarks to assess stylistic and cultural variations in translations.
|
| 18 |
+
|
| 19 |
+
To bridge this gap, this work explores automatic evaluation metrics for translations across styles and language variants. Specifically, we focus on the translation scenario from English to Chinese variants, targeting social media translations in Mainland Mandarin (zh_CN), Taiwanese Mandarin (zh_TW), and the web-minority Singaporean Mandarin (zh_SG). To comprehensively capture cultural and regional nuances as well as the desired expression style in translations, we assess translations at both word and sentence levels across four key dimensions: semantic preservation, cultural and regional specificity, expression style, and fluency. At the word level, we evaluate lexical terms that explicitly reflect regional and cultural nuances, focusing on: 1) models' ability to accurately understand and translate region-specific vocabulary; 2) the alignment of lexical choices in models' translations with local references, showcasing its grasp of domain- or culture-specific expression patterns. At the sentence level, we leverage implicit linguistic expression features to evaluate the model's overall performance in meaning preservation, regional cultural adaptation, and expression style transfer.
|
| 20 |
+
|
| 21 |
+
In summary, the key contributions of this work are three-fold:
|
| 22 |
+
|
| 23 |
+
- We develop and release a benchmark for the translation across styles and language variants,
|
| 24 |
+
|
| 25 |
+
featuring several automatic evaluation metrics from linguistic perspectives, along with test sets that are manually annotated with region- and style-specific words.<sup>1</sup>
|
| 26 |
+
|
| 27 |
+
- We conduct detailed human evaluation across multiple evaluation dimensions, verifying the strong consistency between human judgments and the automatic metrics, thereby ensuring the high reliability of the proposed evaluation framework.
|
| 28 |
+
- Using the proposed evaluation framework, we provide a comprehensive assessment of predictions generated by several state-of-the-art large language models (LLMs), highlighting their strengths in this task and identifying directions for future improvement.
|
| 29 |
+
|
| 30 |
+
# 2 Related Work
|
| 31 |
+
|
| 32 |
+
# 2.1 Variety-Targeted Machine Translation
|
| 33 |
+
|
| 34 |
+
Nowadays, variety-targeted MT work mainly focuses on regions and styles. Among these, region-aware MT targets specific regions or dialects (Zbib et al., 2012; Baniata et al., 2018; Costa-jussa et al., 2018; Honnet et al., 2018; Chakraborty et al., 2018; Lakew et al., 2018; Sajjad et al., 2020; Wan et al., 2020; Kumar et al., 2021). Style-targeted MT has explored several subtypes such as formality-aware MT (Niu et al., 2017, 2018; Wang et al., 2019), which focuses on different levels of formality, and personalized MT (Michel and Neubig, 2018; Vincent, 2021), which aims to match an individual's specific style. These efforts contribute to more contextually appropriate and user-centric translations.
|
| 35 |
+
|
| 36 |
+
# 2.2 Cross-Cultural and Stylistic Evaluation
|
| 37 |
+
|
| 38 |
+
Evaluation on translations across cultural and stylistic boundaries remains underexplored. Yao et al. (2024) address cultural evaluation by focusing on culture-specific items, while Riley et al. (2023) examine regional lexical and terminological variations. However, they focus on vocabulary-level differences and overlook finer-grained cultural, regional, and stylistic nuances embedded in discourse patterns and idiomatic expressions. Besides, research in text style transfer (TST), which aims to modify the stylistic properties (such as formality, politeness, and sentiment) of a sentence while preserving its core meaning, shares important parallels with
|
| 39 |
+
|
| 40 |
+
cross-cultural and -stylistic translation. Despite its contribution in evaluating content preservation, fluency, and style transfer (Li et al., 2018; Mir et al., 2019; Pryzant et al., 2020; Briakou et al., 2021), current TST evaluation remains limited in capturing cultural nuances.
|
| 41 |
+
|
| 42 |
+
To address these limitations, this work uniquely focuses on evaluating sensitivity to cross-cultural expressive styles, moving beyond superficial vocabulary differences. By capturing these nuances, our work introduces a comprehensive evaluation framework that goes beyond traditional MT metrics such as BLEU, providing a deeper assessment of the cultural adaptability and stylistic appropriateness of translations.
|
| 43 |
+
|
| 44 |
+
# 2.3 LLMs on Machine Translation
|
| 45 |
+
|
| 46 |
+
Large language models (LLMs), with billions of parameters and training on massive multilingual datasets, have shown promising results in the domain of MT. In addition to LLMs with strong multilingual translation capabilities, such as GPT-4o² and models designed specifically for translation-related tasks like TowerInstruct³, there is a growing body of work exploring the translation capabilities of LLMs, particularly through techniques like fine-tuning, prompt engineering, and domain adaptation (Zhang et al., 2023; Bawden and Yvon, 2023; Vilar et al., 2023; Hendy et al., 2023; Lu et al., 2024; Zhu et al., 2024a; Zeng et al., 2024; Zhu et al., 2024b). The field of MT has undergone a dramatic transformation, achieving remarkable improvements in both fluency and contextual accuracy, steadily breaking down language barriers.
|
| 47 |
+
|
| 48 |
+
In contrast, traditional NMT systems lag behind LLMs, especially in variety-targeted MT, where the scarcity of large-scale training data limits their performance. Given this gap, this work focuses exclusively on LLMs, analyzing their relative strengths and limitations in facing linguistic diversity.
|
| 49 |
+
|
| 50 |
+
# 3 Variety-Targeted MT across Styles and Language Variants
|
| 51 |
+
|
| 52 |
+
# 3.1 Task Definition
|
| 53 |
+
|
| 54 |
+
General MT translates between coarse-grained language sentences. Given a source sentence $X = (x_{1}, x_{2}, \dots, x_{n})$ , a translation model generates the
|
| 55 |
+
|
| 56 |
+
<table><tr><td></td><td>General MT</td><td>Variety-Targeted MT across Styles and Languages</td></tr><tr><td rowspan="2">Translation Language</td><td>Coarse-grained languages.</td><td>Fine-grained language variants (regional dialects).</td></tr><tr><td>E.g., Chinese, English</td><td>E.g., Singaporean Mandarin, Taiwanese Mandarin</td></tr><tr><td>Translation Style</td><td>Remain source style</td><td>Specific style different from Source</td></tr><tr><td>Translation Focus</td><td>Word by word translation</td><td>Semantic translation</td></tr></table>
|
| 57 |
+
|
| 58 |
+
Table 1: A comparison of general and variety-targeted MT.
|
| 59 |
+
|
| 60 |
+

|
| 61 |
+
Figure 1: Four evaluation dimensions and their manifests at the word and sentence levels.
|
| 62 |
+
|
| 63 |
+
corresponding target sentence $\hat{Y} = (\hat{y}_1, \hat{y}_2, \dots, \hat{y}_m)$ , prioritizing the semantic accuracy of the words.
|
| 64 |
+
|
| 65 |
+
In contrast, Variety-targeted MT goes beyond content preservation, adapting the source sentence $X = (x_{1}, x_{2}, \dots, x_{n})$ into a target sentence $Y_{T}^{ES} = (y_{1}, y_{2}, \dots, y_{k})$ that retains the same semantic meaning while incorporating a distinct style $ES$ suited to regional dialects or fine-grained language variants. Table 1 outlines the core differences. While general MT emphasizes literal or meaning-preserving translation between standard languages, variety-targeted MT demands context-sensitive adaptation at both the lexical and stylistic levels. This distinction makes it more challenging: the model must infer implicit style and variant cues and produce outputs that satisfy both semantic fidelity and stylistic conformity. This paper focuses on Chinese variants in social media scenarios, where style transformation involves: a) using appropriate slang and colloquialisms; b) adopting typical social media discourse patterns; and c) reflecting the cultural norms and sensitivities.
|
| 66 |
+
|
| 67 |
+
# 3.2 Evaluation Criteria
|
| 68 |
+
|
| 69 |
+
To evaluate whether a translation aligns with the intended cultural context, regional variation, and
|
| 70 |
+
|
| 71 |
+
stylistic requirements, we assess outputs across four key dimensions: 1) Semantic Preservation. How well the core meaning of the source sentence is retained in the translation. 2) Cultural and Regional Specificity. Whether the translation reflects the appropriate regional dialect and culturally relevant expressions. 3) Expression Style. The degree to which the translation adopts target style, particularly social media discourse patterns and informal tone. 4) Fluency. The overall naturalness, grammaticality, and readability of the translation. These dimensions are assessed at both the word and sentence levels, as illustrated in Figure 1. Specifically: At the word level, we evaluate:
|
| 72 |
+
|
| 73 |
+
- Region-specific lexical term translation. The ability of a model to correctly translate region-specific vocabulary.
|
| 74 |
+
- Vocabulary similarity. The alignment of lexical choices with culturally preferred or regionally conventional terms.
|
| 75 |
+
|
| 76 |
+
At the sentence level, we assess:
|
| 77 |
+
|
| 78 |
+
- Semantic preservation. The extent to which the sentence meaning is retained.
|
| 79 |
+
- Cultural and style adaptation. The implicit adaptation of tone, idiomatic usage, and cultural references.
|
| 80 |
+
- Fluency. The sentence's coherence and grammatical correctness.
|
| 81 |
+
|
| 82 |
+
The dual-level evaluation provides a holistic view of both explicit lexical choices and implicit contextual appropriateness, to ensure that translations are not only accurate but also stylistically and culturally resonant.
|
| 83 |
+
|
| 84 |
+
# 3.3 Evaluation Metrics
|
| 85 |
+
|
| 86 |
+
To operationalize the four evaluation dimensions introduced above, we propose a set of five automatic metrics.
|
| 87 |
+
|
| 88 |
+
Region-Specific Lexical Term Translation. Certain regions use unique lexical terms influenced by local culture. For example, in Singaporean Mandarin, the term "多多" refers to a lottery gaming activity. To assess whether the model correctly translates culturally or regionally distinctive terms, we annotate region-specific terms in the reference translations (refer to 3.5 for details) and calculate the match ratio between model output and reference. It allows for partial matches in semantically equivalent variants. For example, "多多" (ToTo) and "多多彩票" (ToTo lottery) share the same meaning, we allow partial matches to ensure evaluation flexibility.
|
| 89 |
+
|
| 90 |
+
$$
|
| 91 |
+
\operatorname{score}_{WR} = \frac{N_{L\_match}}{N_{L\_match} + N_{L\_mismatch}}, \tag{1}
|
| 92 |
+
$$
|
| 93 |
+
|
| 94 |
+
where $N_{L\_match}$ and $N_{L\_mismatch}$ are the numbers of correctly and incorrectly translated annotated terms, respectively.
|
| 95 |
+
|
| 96 |
+
Vocabulary Similarity. Beyond marked terms, we assess how well the model aligns with region-preferred vocabulary. For instance, the expressions "一杯烧咖啡" in Singaporean Mandarin and "一杯热咖啡" in Mainland Mandarin both convey "a cup of hot coffee", but the terms "烧" and "热" are contextually fixed to their respective regions, reflecting distinct linguistic conventions. Key content words in the reference $r_i$ and hypothesis $h_i$ are identified using TF-IDF vectors<sup>4</sup>, and a weighted match score is calculated as:
|
| 97 |
+
|
| 98 |
+
$$
|
| 99 |
+
\operatorname{Match}(h_i, r_i) = \frac{N_{V\_match}}{N_{V\_match} + N_{V\_mismatch}}, \tag{2}
|
| 100 |
+
$$
|
| 101 |
+
|
| 102 |
+
where $N_{V\_match}$ and $N_{V\_mismatch}$ denote the number of key content words in the reference that are matched and unmatched in the hypothesis, respectively. While vocabulary similarity (e.g., word overlap) is useful, it may fail to capture semantically equivalent expressions. To mitigate this limitation, we incorporate semantic similarity, measured by TF-IDF vector cosine similarity $(sim)^5$ , as a penalty weight to adjust the lexical match score. After empirical experiments, a threshold of 0.7 (very similar) is used:
|
| 103 |
+
|
| 104 |
+
$$
|
| 105 |
+
\operatorname{sent}_{\text{score}} = \begin{cases} \operatorname{Match}(h_i, r_i), & \text{if } \operatorname{sim} \geq 0.7 \\ \operatorname{sim} \cdot \operatorname{Match}(h_i, r_i), & \text{otherwise} \end{cases} \tag{3}
|
| 106 |
+
$$
|
| 107 |
+
|
| 108 |
+
The final score is averaged at the sentence level across the corpus:
|
| 109 |
+
|
| 110 |
+
$$
|
| 111 |
+
\operatorname{score}_{WV} = \left(\sum \operatorname{sent}_{\text{score}}\right) / N \tag{4}
|
| 112 |
+
$$
|
| 113 |
+
|
| 114 |
+
Semantic Preservation. Semantic preservation measures the similarity in content between reference translations and system-generated outputs. In general MT tasks, where high word-level overlaps are often required, BLEU (Papineni et al., 2002) is commonly employed as it evaluates $n$ -gram overlaps between system outputs and reference translations. However, variety-targeted MT frequently involves variations in word choice and word order while preserving semantic meaning, which limits BLEU's effectiveness due to its inability to account for reordered words. In contrast, $chrF$ (Popovic, 2015), which evaluates character $n$ -gram F-scores, has demonstrated a strong correlation with human judgments in the TST tasks (Briakou et al., 2021). Its ability to capture nuanced linguistic differences makes it well-suited for evaluating semantic preservation.
|
| 115 |
+
|
| 116 |
+
$$
|
| 117 |
+
\operatorname{score}_{SS} = \left(\sum \operatorname{chrF}(r_i, h_i)\right) / N \tag{5}
|
| 118 |
+
$$
|
| 119 |
+
|
| 120 |
+
Cultural and Style Adaptation. Beyond explicit lexical elements, implicit features within contextual sentences play a key role in shaping subtle cultural nuances and stylistic traits. To automatically extract these features for assessing Cultural and Style Adaptation, we leverage a language model (LM) to classify whether translations satisfy the expected cultural and expressive style, inspired by the success of TST (Rao and Tetreault, 2018; Briakou et al., 2021). We fine-tune XLM-R $^6$ (Conneau et al., 2020), a multilingual pre-trained language model, using both human-written news and social media sentences in zh_CN, zh_SG, and zh_TW language variants (see Appendix A.1 for fine-tuning details). The fine-tuned XLM-R serves as a classifier $C$ , which predicts the accuracy of model-generated translations $r_i$ aligning with the desired language variant and expression style $ES$ , as follows:
|
| 121 |
+
|
| 122 |
+
$$
|
| 123 |
+
\operatorname{score}_{SC} = \left(\sum N_{C(r_i) = ES}\right) / N \tag{6}
|
| 124 |
+
$$
|
| 125 |
+
|
| 126 |
+
Fluency. Fluency, also referred to as grammaticality, readability, and naturalness of a sentence (Mir et al., 2019), plays a crucial role in evaluating translation quality. Previous work on
|
| 127 |
+
|
| 128 |
+
TST has validated fluency evaluation by measuring perplexity and likelihood scores (PPL) based on the probability distributions of language models (LMs) applied to model-generated outputs (Pang and Gimpel, 2019). In particular, (Briakou et al., 2021) demonstrated strong correlations with human judgments using pseudo-likelihood scores (PSEUDO-LL) derived from pre-trained masked XLM-R models<sup>7</sup>. Inspired by this, we adopt PSEUDO-LL for fluency evaluation of translations. Given PSEUDO-LL score $P_{i}$ for each translation, we employ min-max normalization to obtain the corpus-level score:
|
| 129 |
+
|
| 130 |
+
$$
|
| 131 |
+
\operatorname{Score}_{SF} = \left(\sum \frac{P_i - \min(P)}{\max(P) - \min(P)}\right) / N \tag{7}
|
| 132 |
+
$$
|
| 133 |
+
|
| 134 |
+
# 3.4 Evaluation Scenarios
|
| 135 |
+
|
| 136 |
+
Overall Assessment. The metrics described above reflect distinct aspects of the translations individually. To comprehensively evaluate the model's performance, it is essential to consider these metrics collectively, integrating their insights to provide a holistic assessment. To achieve this, we propose a combination method that rewards consistency across individual scores while penalizing substantial imbalances among them. Specifically, we first normalize the individual scores using min-max scaling to ensure all metrics are scaled to the same range and thus directly comparable. Additionally, we introduce a penalty term $p_{o}$ for the fusion of metrics from different perspectives. It is calculated as the mean absolute deviation (MAD) of the individual normalized scores $\hat{Score}_i$ ( $i \in \{WR, WV, SS, SC, SF\}$ ) from their mean value $\overline{Score}$ :
|
| 137 |
+
|
| 138 |
+
$$
|
| 139 |
+
p_o = \left(\sum \left| \hat{Score}_i - \overline{Score} \right|\right) / 5 \tag{8}
|
| 140 |
+
$$
|
| 141 |
+
|
| 142 |
+
This penalty term highlights discrepancies between the metrics, ensuring a balanced and fair evaluation across different dimensions of translation quality. With the penalty term, we define the final overall score $F_{o}$ as:
|
| 143 |
+
|
| 144 |
+
$$
|
| 145 |
+
F_o = \left(\sum \hat{Score}_i - \omega \cdot p_o\right) / 5 \tag{9}
|
| 146 |
+
$$
|
| 147 |
+
|
| 148 |
+
where $\omega$ is a penalty weight<sup>9</sup>.
|
| 149 |
+
|
| 150 |
+
While we encourage using the overall score $F_{o}$ for a comprehensive assessment of translation quality, we also recognize that variety-targeted translation tasks may have varying requirements and
|
| 151 |
+
|
| 152 |
+
<table><tr><td>Language</td><td>Sent Num.</td><td>Avg Ref Len.</td><td>Lexical Num.</td></tr><tr><td>zh_CN</td><td>200</td><td>36.83</td><td>240</td></tr><tr><td>zh_TW</td><td>200</td><td>28.93</td><td>209</td></tr><tr><td>zh_SG</td><td>200</td><td>52.42</td><td>254</td></tr></table>
|
| 153 |
+
|
| 154 |
+
Table 2: Statistic on test sets. "Lexical Num." refers to the number of annotated region-specific lexical terms.
|
| 155 |
+
|
| 156 |
+
that test sets in other languages may present unique challenges. Therefore, we provide additional assessments tailored to specific needs as follows.
|
| 157 |
+
|
| 158 |
+
Word-Level Assessment. Evaluation metrics for Region-Specific Lexical Term Translation $(Score_{WR})$ and Vocabulary Similarity $(Score_{WV})$ provide detailed insights into translation quality at the lexical level. Together, these metrics offer complementary perspectives on the lexical fidelity and appropriateness of the translations, enabling a thorough word-level evaluation. Similar to overall assessment, to mitigate large discrepancies among the individual scores, we introduce the penalty term $p_w$ , computed among normalized scores $\hat{Score}_w \in \{\hat{Score}_{WR}, \hat{Score}_{WV}\}$ . And the word-level score is then calculated as:
|
| 159 |
+
|
| 160 |
+
$$
|
| 161 |
+
F_w = \left(\sum \hat{Score}_w - \omega \cdot p_w\right) / 2 \tag{10}
|
| 162 |
+
$$
|
| 163 |
+
|
| 164 |
+
Sentence-Level Assessment. Evaluation metrics for Semantic Preservation $(Score_{SS})$ , Cultural and Style Adaptation $(Score_{SC})$ , and Fluency $(Score_{SF})$ together provide a comprehensive evaluation of sentence-level quality, reflecting both accuracy of the translation and the appropriateness of the cultural and style. Therefore, sentence-level score is computed based on the normalized individual scores $\hat{Score}_s \in \{\hat{Score}_{SS}, \hat{Score}_{SC}, \hat{Score}_{SF}\}$ and the penalty term $p_s$ , calculated to account for discrepancies among these scores:
|
| 165 |
+
|
| 166 |
+
$$
|
| 167 |
+
F_s = \left(\sum \hat{Score}_s - \omega \cdot p_s\right) / 3 \tag{11}
|
| 168 |
+
$$
|
| 169 |
+
|
| 170 |
+
Content Preservation Assessment. Beyond word- and sentence-level assessments, we also evaluate the preservation of overall content. This is achieved by combining the normalized Semantic Preservation score $\hat{Score}_{SS}$ and Region-Specific Lexical Term Translation score $\hat{Score}_{WR}$ , capturing meaning preservation at both the sentence and word levels:
|
| 171 |
+
|
| 172 |
+
$$
|
| 173 |
+
F_c = \operatorname{avg}\left(\hat{Score}_{SS}, \hat{Score}_{WR}\right) \tag{12}
|
| 174 |
+
$$
|
| 175 |
+
|
| 176 |
+
<table><tr><td>Prompt</td><td>{0}</td></tr><tr><td>Please perform region-aware formality-controlled translation on the following input by translating it into the style of {0}. Output translation only.
|
| 177 |
+
Input: en_src
|
| 178 |
+
Output: ref
|
| 179 |
+
>>>»
|
| 180 |
+
Input: en_src
|
| 181 |
+
Output: >>>»</td><td>Informal Mainland Mandarin,
|
| 182 |
+
i.e., speak Chinese on social media like people in Mainland China.
|
| 183 |
+
Informal Taiwan Mandarin,
|
| 184 |
+
i.e., speak Chinese on social media like people in Taiwan area.
|
| 185 |
+
Informal Singaporean Mandarin,
|
| 186 |
+
i.e., speak Chinese on social media like Singaporeans.</td></tr></table>
|
| 187 |
+
|
| 188 |
+
Table 3: Prompt used for translation generation.
|
| 189 |
+
|
| 190 |
+
# 3.5 Evaluation Sets
|
| 191 |
+
|
| 192 |
+
Social media language varies widely in different platforms, showcasing different dialects, slang, and idiomatic expressions that are unique to various cultural groups. To evaluate the sensitivity of translations across language variants and styles, we construct test sets for translation scenarios from English to social media style Mainland Mandarin (zh_CN), Taiwanese Mandarin (zh_TW), and Singaporean Mandarin (zh_SG), mainly involving the gossip and daily life domains. Specifically, we collect locally written sentences from social media platforms: zh_CN samples are sourced from Zhihu<sup>10</sup>, zh_TW samples from PTT<sup>11</sup>, and zh_SG samples from Facebook<sup>12</sup>. Two paid professional translators are hired to translate the social media sentences into English, creating corresponding en-zh_* sentence pairs<sup>13</sup>. To ensure the validity of word-level evaluation, region-specific lexical terms differing across regions are annotated based on online resources<sup>14</sup> and the expertise of the translators.
|
| 193 |
+
|
| 194 |
+
As a result, we construct three test sets, with detailed statistics provided in Table 2.
|
| 195 |
+
|
| 196 |
+
# 3.6 Human Judgments
|
| 197 |
+
|
| 198 |
+
To verify the alignment between human judgments and each of automatic evaluation metrics, we collect human ratings as follows:
|
| 199 |
+
|
| 200 |
+
- For Semantic Preservation, we adopt the Semantic Textual Similarity (STS) annotation scheme (Agirre et al., 2016). Model outputs are rated on a scale from 1 to 6 based on their degree of semantic similarity to the reference.
|
| 201 |
+
|
| 202 |
+
The levels are: Completely dissimilar, Not equivalent but on same topic, Not equivalent but share some details, Roughly equivalent, Mostly equivalent, Completely equivalent.
|
| 203 |
+
|
| 204 |
+
- For Cultural and Style Adaptation, translations are annotated with both the language variant (zh_CN, zh_TW, zh_SG) and the level of style (news or social media).
|
| 205 |
+
- For Fluency, model outputs are rated on a discrete scale from 1 to 5 to indicate fluency degree (Heilman et al., 2014). The levels are: Other, Incomprehensible, Somewhat comprehensible, Comprehensible, Perfect.
|
| 206 |
+
- For Region-Specific Lexical Term Translation, binary labels (0 and 1) are used to indicate whether the marked lexical term in the translation matches the reference.
|
| 207 |
+
- For Vocabulary Similarity, we rate the model outputs on a discrete scale from 1 to 5 based on the degree of lexical similarity with the reference. The levels are: Completely dissimilar, Slightly similar, Moderately similar, Very similar, Identical.
|
| 208 |
+
|
| 209 |
+
The alignment between human judgments and automatic metrics is reported in Section 4.2.
|
| 210 |
+
|
| 211 |
+
# 4 Experimentation
|
| 212 |
+
|
| 213 |
+
# 4.1 Experimental Settings
|
| 214 |
+
|
| 215 |
+
Models. We evaluate several LLMs to verify the consistency between automatic metrics and human judgments. The selected models include the most advanced GPT-4o (2024-05-13) (OpenAI, 2024), open Llama Family (Llama3, 2024): Llama-3-8B-Instruct and Llama-3.2-3B-Instruct, Chinese and MT oriented LLMs: TowerInstruct-7b-v0.2 (Alves et al., 2024), QWen2.5-7B-Instruct (Qwen, 2025),
|
| 216 |
+
|
| 217 |
+
<table><tr><td></td><td>Semantic Preservation</td><td>Vocabulary Similarity</td><td>Fluency</td><td>Region-Specific Lexical Term Translation</td><td>Culture and Style Adaptation</td></tr><tr><td>Spearman's ρ</td><td>0.57</td><td>0.61</td><td>0.60</td><td>-</td><td>-</td></tr><tr><td>Cohen's κ</td><td>-</td><td>-</td><td>-</td><td>0.90</td><td>0.79</td></tr></table>
|
| 218 |
+
|
| 219 |
+
Table 4: Correlation between human judgments and automatic evaluation metrics. Spearman's $\rho$ is used to measure discrete human ratings and continuous metric scores; Cohen's $\kappa$ is used to measure discrete human and metric ratings.
|
| 220 |
+
|
| 221 |
+

|
| 222 |
+
Figure 2: Comparison of individual evaluation metrics across three translation scenarios.
|
| 223 |
+
|
| 224 |
+
gemma-2-9b-it (Gemma, 2024), aya-expanse-8b (Aya, 2024), and Llama3-Chinese-8B-Instruct-v3 (Cui et al., 2024).
|
| 225 |
+
|
| 226 |
+
Parameters. For all the LLMs, cutoff_len=256 and do_sample=False during generation to reduce hallucinations and ensure deterministic outputs.
|
| 227 |
+
|
| 228 |
+
**Prompts.** We generate translations with 1-shot in-context learning. Table 3 lists the prompt used for this task.
|
| 229 |
+
|
| 230 |
+
# 4.2 Correlation Evaluation
|
| 231 |
+
|
| 232 |
+
We recruit three paid annotators, all familiar with both English and the Chinese variants, to evaluate the translation outputs of the aforementioned LLMs. The evaluation is conducted across three scenarios: en-zh_CN, en-zh_TW, and en-zh_SG. Each annotator assesses 50 randomly selected translations for each scenario, as described in Section 3.6. The annotations exhibit moderate interannotator agreement, ensuring the reliability of the human evaluation process. Table 4 reports the average correlation scores across annotators and the automatic metrics for a total of 150 selected translations.
|
| 233 |
+
|
| 234 |
+
For Semantic Preservation, Vocabulary Similarity, and Fluency metrics, we calculate the Spearman's $\rho$ between human-annotated discrete scale labels and metrics-generated continuous scores. The
|
| 235 |
+
|
| 236 |
+
correlation scores for these metrics all exceed 0.55, demonstrating a positive relationship between human and automatic evaluations. Additionally, a heatmap illustrating these correlation scores for each region is provided in Appendix A.2. For Region-Specific Lexical Term Translation and Cultural and Style Adaptation metrics, we compute Cohen's $\kappa$ between human and metric-annotated discrete labels. The results indicate that the Kappa score for Cultural and Style Adaptation falls within substantial agreement (0.61-0.80). Notably, the correlation between human and metric evaluations for Region-Specific Lexical Term Translation achieves near-perfect agreement. Additionally, for Cultural and Style Adaptation indicator, we further assess correlations separately for language variant classification and expression style classification. The model's scores on $F_{1}$ for these classifications reach 93.24 and 91.70, respectively. Moreover, we analyze the translations with GEMBA-MQM (Kocmi and Federmann, 2023) and provide analysis examples in Appendix A.3.
|
| 237 |
+
|
| 238 |
+
All in all, these results highlight a strong alignment between human evaluations and automatic metrics, verifying the reliability of the proposed evaluation framework.
|
| 239 |
+
|
| 240 |
+
Moreover, we examine the independence and complementarity of the proposed metrics through the cross-metric Pearson correlation. The analysis in Appendix A.4 shows that these metrics are distinct yet correlated within a hierarchical assessment framework for translation quality, reflecting their ability to independently assess different aspects of translation while jointly contributing to the overall quality.
|
| 241 |
+
|
| 242 |
+
# 4.3 Analysis of LLM Gap in Cultural Language Understanding and Generation
|
| 243 |
+
|
| 244 |
+
We evaluate several recent LLMs on this task, grouping them into three categories for performance comparison in Table 5.
|
| 245 |
+
|
| 246 |
+
Comparing results across the three translation scenarios, LLMs generally perform better on en-zh_CN translations (average $F_{o} =$
|
| 247 |
+
|
| 248 |
+
<table><tr><td></td><td>Model</td><td>Overall (Fo)</td><td>Sentence-Level (Fs)</td><td>Word-Level (Fw)</td><td>Content Preservation (Fc)</td></tr><tr><td rowspan="8">en-zh_CN</td><td>GPT-4o</td><td>51.66</td><td>60.21</td><td>47.58</td><td>35.27</td></tr><tr><td>Llama3</td><td>33.75</td><td>52.08</td><td>23.29</td><td>16.57</td></tr><tr><td>Llama3.2</td><td>24.87</td><td>42.97</td><td>14.68</td><td>10.23</td></tr><tr><td>TowerInstruct-v0.2</td><td>31.16</td><td>48.68</td><td>20.56</td><td>14.82</td></tr><tr><td>Qwen2.5</td><td>40.05</td><td>53.30</td><td>30.99</td><td>21.07</td></tr><tr><td>Gemma2</td><td>44.58</td><td>55.62</td><td>39.19</td><td>27.40</td></tr><tr><td>Aya</td><td>35.34</td><td>50.59</td><td>25.76</td><td>17.01</td></tr><tr><td>Llama3-Chinese</td><td>36.88</td><td>55.83</td><td>25.79</td><td>18.45</td></tr><tr><td rowspan="8">en-zh_TW</td><td>GPT-4o</td><td>42.07</td><td>48.96</td><td>49.12</td><td>39.62</td></tr><tr><td>Llama3</td><td>21.90</td><td>39.14</td><td>23.04</td><td>15.88</td></tr><tr><td>Llama3.2</td><td>22.50</td><td>45.17</td><td>16.28</td><td>9.61</td></tr><tr><td>TowerInstruct0.2</td><td>19.40</td><td>37.02</td><td>19.61</td><td>12.15</td></tr><tr><td>Qwen2.5</td><td>25.49</td><td>39.69</td><td>28.19</td><td>18.74</td></tr><tr><td>Gemma2</td><td>41.72</td><td>52.68</td><td>42.07</td><td>35.56</td></tr><tr><td>Aya</td><td>21.98</td><td>35.78</td><td>26.52</td><td>17.70</td></tr><tr><td>Llama3-Chinese</td><td>26.56</td><td>40.99</td><td>29.71</td><td>22.10</td></tr><tr><td 
rowspan="8">en-zh_SG</td><td>GPT-4o</td><td>44.47</td><td>50.61</td><td>49.60</td><td>38.97</td></tr><tr><td>Llama3</td><td>27.62</td><td>47.26</td><td>19.50</td><td>14.64</td></tr><tr><td>Llama3.2</td><td>25.25</td><td>56.06</td><td>13.82</td><td>9.75</td></tr><tr><td>TowerInstruct0.2</td><td>28.77</td><td>54.69</td><td>20.93</td><td>14.27</td></tr><tr><td>Qwen2.5</td><td>33.51</td><td>48.45</td><td>29.56</td><td>20.64</td></tr><tr><td>Gemma2</td><td>32.92</td><td>50.67</td><td>24.50</td><td>17.56</td></tr><tr><td>Aya</td><td>27.47</td><td>41.68</td><td>26.46</td><td>17.01</td></tr><tr><td>Llama3-Chinese</td><td>28.20</td><td>44.09</td><td>23.76</td><td>16.29</td></tr></table>
|
| 249 |
+
|
| 250 |
+
Table 5: Results of evaluation metrics on diverse evaluation scenarios. All p-values (paired t-test) $\leq 0.05$
|
| 251 |
+
|
| 252 |
+

|
| 253 |
+
Figure 3: Comparison of individual metrics within each translation scenario.
|
| 254 |
+
|
| 255 |
+
37.29, $F_{s} = 52.41$) than on en-zh_TW (average $F_{o} = 27.20$ , $F_{s} = 42.43$ ) and en-zh_SG (average $F_{o} = 31.03$ , $F_{s} = 49.18$ ). Given GPT-4o's consistently strong performance across scenarios, we visualize its individual metric results in Figure 2 to examine its strengths and limitations. The figure shows that GPT-4o notably excels in sentence-level Cultural and style Adaptation for en-zh_CN translations, explaining its higher overall and sentence-level scores compared to en-zh_SG and en-zh_TW. This advantage likely stems from training data predominantly composed of Mainland Mandarin, with limited exposure to Singaporean and Taiwanese Mandarin varieties. Meanwhile, GPT-4o's performance on other metrics remains relatively modest and consistent across all scenarios, revealing a key limitation in handling evolving slang and localized discourse practices across diverse cultural settings.
|
| 256 |
+
|
| 257 |
+
nario, we find that beyond GPT-4o's strong performance, Chinese and MT-oriented LLMs (third group in each scenario) exhibit a clear advantage over general open models (Llama3 and Llama3.2) in capturing cross-cultural nuances, with Gemma2 being particularly notable. To further reveal the challenges faced by LLMs in this task, we visualize their performance across individual evaluation metrics in Figure $3^{15}$ . While a few models show promise in identifying cross-cultural discourse patterns and idiomatic expressions (Cultural and style Adaptation), most struggle with word-level cultural nuances (Vocabulary Similarity, Region-Specific Vocabulary Term Translation), reflecting insufficient background knowledge of LLMs. More importantly, Figure 3 reveals a fundamental and ongoing challenge: achieving cultural and stylistic adaptation without compromising semantic adequacy in
|
| 258 |
+
|
| 259 |
+
Comparing results within each translation sce
|
| 260 |
+
|
| 261 |
+
cross-cultural and style-sensitive MT. This imbalance underscores the need for future work to effectively balance meaning preservation and culturally-aware adaptation to advance the development of translations across style and culture.
|
| 262 |
+
|
| 263 |
+
# 5 Conclusion
|
| 264 |
+
|
| 265 |
+
To fill the gap in a thorough evaluation of variety-targeted machine translation, this work proposes a benchmark for automatically assessing machine translation across language variants and styles. A detailed human assessment validates the high reliability of the proposed evaluation framework. Leveraging the proposed metrics, we perform a comprehensive evaluation of recent LLMs on this task and highlight key challenges for future research.
|
| 266 |
+
|
| 267 |
+
# 6 Limitations
|
| 268 |
+
|
| 269 |
+
We identify four main limitations of the proposed metrics:
|
| 270 |
+
|
| 271 |
+
Firstly, this study proposes an evaluation framework and test sets covering three Chinese variants: abundant Mainland Mandarin, few-shot Singaporean Mandarin, and Taiwanese Mandarin. These Chinese variants provide a rich testbed due to their distinct lexical, stylistic, and cultural differences. By establishing this comprehensive evaluation framework, we aim to lay the foundation for adapting the metric to other language pairs in the future. In particular, we plan to explore diverse language families, such as European Portuguese vs. Brazilian Portuguese, Canadian French vs. European French, which exhibit structural and cultural distinctions different from Chinese, thereby broadening the applicability of the metric. To achieve that, we plan to implement word-level metrics in a human-in-the-loop workflow: 1) Leveraging large region-specific corpora to automatically identify candidate dialectal terms using statistical methods such as PMI to detect words strongly associated with a specific region and 2) Automatically generating candidate lists for human annotators for efficient validation and refinement to maintain high-quality standards. Additionally, while the current test set is carefully curated with an emphasis on quality and detailed annotations (Section 3.5) to capture subtle phenomena like cultural and stylistic adaptation, we acknowledge the importance of scaling it further. Moving forward, we will continue to expand the test set and advance this line of research.
|
| 272 |
+
|
| 273 |
+
Secondly, despite our careful selection of source texts from local social media content and professional translation efforts to preserve style, cultural context, and dialectal features, translating already translated texts may still pose limitations in fidelity and naturalness. However, this also implies that although LLMs may have seen the original Chinese posts from Zhihu, PTT, or Facebook in their training data, it is highly unlikely that they were exposed to the professionally translated English source sentences we specifically created for the benchmark, which minimizes the risk of data contamination and helps ensure the reliability of the experimental results.
|
| 274 |
+
|
| 275 |
+
Thirdly, while the framework focuses on cultural and expression style transfer, variety-targeted machine translation encompasses a broader spectrum of styles, such as politeness and personalized tones. The current approach does not account for all these styles, limiting its ability to evaluate customized translations comprehensively.
|
| 276 |
+
|
| 277 |
+
Fourthly, we rely on in-context learning to assess large language models (LLMs) rather than finetuned models specifically optimized for this task. As a result, the LLMs' potential performance may not be fully reflected in the evaluation.
|
| 278 |
+
|
| 279 |
+
# Acknowledgments
|
| 280 |
+
|
| 281 |
+
This research is supported by the National Research Foundation, Singapore under its National Large Language Models Funding Initiative. Any opinions, findings, conclusions, or recommendations expressed in this material are those of the author(s) and do not reflect the views of the National Research Foundation, Singapore.
|
| 282 |
+
|
| 283 |
+
# References
|
| 284 |
+
|
| 285 |
+
Eneko Agirre, Carmen Banea, Daniel Cer, Mona Diab, Aitor Gonzalez-Agirre, Rada Mihalcea, German Rigau, and Janyce Wiebe. 2016. SemEval-2016 task 1: Semantic textual similarity, monolingual and cross-lingual evaluation. In Proceedings of the 10th International Workshop on Semantic Evaluation (SemEval-2016), pages 497-511, San Diego, California. Association for Computational Linguistics.
|
| 286 |
+
Duarte M Alves, José Pombal, Nuno M Guerreiro, Pedro H Martins, João Alves, Amin Farajian, Ben Pe- ters, Ricardo Rei, Patrick Fernandes, Sweta Agrawal, et al. 2024. Tower: An open multilingual large language model for translation-related tasks. arXiv preprint arXiv:2402.17733.
|
| 287 |
+
|
| 288 |
+
Aya. 2024. Aya expanse: Combining research breakthroughs for a new multilingual frontier. Preprint, arXiv:2412.04261.
|
| 289 |
+
Laith H. Baniata, Se-Young Park, and Seong-Bae Park. 2018. A neural machine translation model for arabic dialects that utilises multitask learning (mtl). Computational Intelligence and Neuroscience, 2018.
|
| 290 |
+
Rachel Bawden and François Yvon. 2023. Investigating the translation performance of a large multilingual language model: the case of BLOOM. In Proceedings of the 24th Annual Conference of the European Association for Machine Translation, pages 157-170, Tampere, Finland. European Association for Machine Translation.
|
| 291 |
+
Eleftheria Briakou, Sweta Agrawal, Joel Tetreault, and Marine Carpuat. 2021. Evaluating the evaluation metrics for style transfer: A case study in multilingual formality transfer. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 1321-1336, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.
|
| 292 |
+
Saurav Chakraborty, Anup Sinha, and Sanghamitra Nath. 2018. A bengali-sylheti rule-based dialect translation system: Proposal and preliminary system. In Proceedings of the International Conference on Computing and Communication Systems: I3CS 2016, NEHU, Shillong, India, pages 451-460. Springer.
|
| 293 |
+
Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer, and Veselin Stoyanov. 2020. Unsupervised cross-lingual representation learning at scale. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 8440-8451, Online. Association for Computational Linguistics.
|
| 294 |
+
Marta R. Costa-jussà, Marcos Zampieri, and Santanu Pal. 2018. A neural approach to language variety translation. In Proceedings of the Fifth Workshop on NLP for Similar Languages, Varieties and Dialects (VarDial 2018), pages 275-282, Santa Fe, New Mexico, USA. Association for Computational Linguistics.
|
| 295 |
+
Yiming Cui, Ziqing Yang, and Xin Yao. 2024. Efficient and effective text encoding for chinese llama and alpaca. Preprint, arXiv:2304.08177.
|
| 296 |
+
Gemma. 2024. Gemma 2: Improving open language models at a practical size. Preprint, arXiv:2408.00118.
|
| 297 |
+
Michael Heilman, Aoife Cahill, Nitin Madnani, Melissa Lopez, Matthew Mulholland, and Joel Tetreault. 2014. Predicting grammaticality on an ordinal scale. In Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 174-180, Baltimore, Maryland. Association for Computational Linguistics.
|
| 298 |
+
|
| 299 |
+
Amr Hendy, Mohamed Abdelrehim, Amr Sharaf, Vikas Raunak, Mohamed Gabr, Hitokazu Matsushita, Young Jin Kim, Mohamed Afify, and Hany Hassan Awadalla. 2023. How good are gpt models at machine translation? a comprehensive evaluation. arXiv preprint arXiv:2302.09210.
|
| 300 |
+
Pierre-Edouard Honnet, Andrei Popescu-Belis, Claudi Musat, and Michael Baeriswyl. 2018. Machine translation of low-resource spoken dialects: Strategies for normalizing Swiss German. In Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018), Miyazaki, Japan. European Language Resources Association (ELRA).
|
| 301 |
+
Tom Kocmi and Christian Federmann. 2023. GEMBA-MQM: Detecting translation quality error spans with GPT-4. In Proceedings of the Eighth Conference on Machine Translation, pages 768-775, Singapore. Association for Computational Linguistics.
|
| 302 |
+
Sachin Kumar, Antonios Anastasopoulos, Shuly Wintner, and Yulia Tsvetkov. 2021. Machine translation into low-resource language varieties. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 2: Short Papers), pages 110-121, Online. Association for Computational Linguistics.
|
| 303 |
+
Surafel Melaku Lakew, Aliia Erofeeva, and Marcello Federico. 2018. Neural machine translation into language varieties. In Proceedings of the Third Conference on Machine Translation: Research Papers, pages 156-164, Brussels, Belgium. Association for Computational Linguistics.
|
| 304 |
+
Juncen Li, Robin Jia, He He, and Percy Liang. 2018. Delete, retrieve, generate: a simple approach to sentiment and style transfer. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), pages 1865-1874, New Orleans, Louisiana. Association for Computational Linguistics.
|
| 305 |
+
Llama3. 2024. The llama 3 herd of models. Preprint, arXiv:2407.21783.
|
| 306 |
+
Hongyuan Lu, Haoran Yang, Haoyang Huang, Dongdong Zhang, Wai Lam, and Furu Wei. 2024. Chain-of-dictionary prompting elicits translation in large language models. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 958-976, Miami, Florida, USA. Association for Computational Linguistics.
|
| 307 |
+
Paul Michel and Graham Neubig. 2018. Extreme adaptation for personalized neural machine translation. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 312-318, Melbourne, Australia. Association for Computational Linguistics.
|
| 308 |
+
|
| 309 |
+
Remi Mir, Bjarke Felbo, Nick Obradovich, and Iyad Rahwan. 2019. Evaluating style transfer for text. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 495-504, Minneapolis, Minnesota. Association for Computational Linguistics.
|
| 310 |
+
Xing Niu, Marianna Martindale, and Marine Carpuat. 2017. A study of style in machine translation: Controlling the formality of machine translation output. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, pages 2814-2819, Copenhagen, Denmark. Association for Computational Linguistics.
|
| 311 |
+
Xing Niu, Sudha Rao, and Marine Carpuat. 2018. Multi-task neural models for translating between styles within and across languages. In Proceedings of the 27th International Conference on Computational Linguistics, pages 1008-1021, Santa Fe, New Mexico, USA. Association for Computational Linguistics.
|
| 312 |
+
OpenAI. 2024. Gpt-4o system card. Preprint, arXiv:2410.21276.
|
| 313 |
+
Richard Yuanzhe Pang and Kevin Gimpel. 2019. Unsupervised evaluation metrics and learning criteria for non-parallel textual transfer. In Proceedings of the 3rd Workshop on Neural Generation and Translation, pages 138–147, Hong Kong. Association for Computational Linguistics.
|
| 314 |
+
Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2002. Bleu: a method for automatic evaluation of machine translation. In Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics, pages 311-318, Philadelphia, Pennsylvania, USA. Association for Computational Linguistics.
|
| 315 |
+
Maja Popović. 2015. chrF: character n-gram F-score for automatic MT evaluation. In Proceedings of the Tenth Workshop on Statistical Machine Translation, pages 392–395, Lisbon, Portugal. Association for Computational Linguistics.
|
| 316 |
+
Reid Pryzant, Richard Diehl Martinez, Nathan Dass, Sadao Kurohashi, Dan Jurafsky, and Diyi Yang. 2020. Automatically neutralizing subjective bias in text. In Proceedings of the aaai conference on artificial intelligence, volume 34, pages 480-489.
|
| 317 |
+
Qwen. 2025. Qwen2.5 technical report. Preprint, arXiv:2412.15115.
|
| 318 |
+
Sudha Rao and Joel Tetreault. 2018. Dear sir or madam, may I introduce the GYAFC dataset: Corpus, benchmarks and metrics for formality style transfer. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), pages 129-140, New Orleans, Louisiana. Association for Computational Linguistics.
|
| 319 |
+
|
| 320 |
+
Parker Riley, Timothy Dozat, Jan A. Botha, Xavier Garcia, Dan Garrette, Jason Riesa, Orhan Firat, and Noah Constant. 2023. FRMT: A benchmark for few-shot region-aware machine translation. Transactions of the Association for Computational Linguistics, 11:671-685.
|
| 321 |
+
Hassan Sajjad, Ahmed Abdelali, Nadir Durrani, and Fahim Dalvi. 2020. AraBench: Benchmarking dialectal Arabic-English machine translation. In Proceedings of the 28th International Conference on Computational Linguistics, pages 5094-5107, Barcelona, Spain (Online). International Committee on Computational Linguistics.
|
| 322 |
+
David Vilar, Markus Freitag, Colin Cherry, Jiaming Luo, Viresh Ratnakar, and George Foster. 2023. Prompting PaLM for translation: Assessing strategies and performance. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 15406-15427, Toronto, Canada. Association for Computational Linguistics.
|
| 323 |
+
Sebastian Vincent. 2021. Towards personalised and document-level machine translation of dialogue. In Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Student Research Workshop, pages 137-147, Online. Association for Computational Linguistics.
|
| 324 |
+
Yu Wan, Baosong Yang, Derek F Wong, Lidia S Chao, Haihua Du, and Ben CH Ao. 2020. Unsupervised neural dialect translation with commonality and diversity modeling. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 34, pages 9130-9137.
|
| 325 |
+
Yunli Wang, Yu Wu, Lili Mou, Zhoujun Li, and Wenhan Chao. 2019. Harnessing pre-trained neural networks with rules for formality style transfer. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 3573-3578, Hong Kong, China. Association for Computational Linguistics.
|
| 326 |
+
Binwei Yao, Ming Jiang, Tara Bobinac, Diyi Yang, and Junjie Hu. 2024. Benchmarking machine translation with cultural awareness. In *Findings of the Association for Computational Linguistics: EMNLP* 2024, pages 13078-13096, Miami, Florida, USA. Association for Computational Linguistics.
|
| 327 |
+
Rabih Zbib, Erika Malchiodi, Jacob Devlin, David Stallard, Spyros Matsoukas, Richard Schwartz, John Makhoul, Omar F. Zaidan, and Chris Callison-Burch. 2012. Machine translation of Arabic dialects. In Proceedings of the 2012 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 49-59, Montreal, Canada. Association for Computational Linguistics.
|
| 328 |
+
|
| 329 |
+
Jiali Zeng, Fandong Meng, Yongjing Yin, and Jie Zhou. 2024. Improving machine translation with large language models: A preliminary study with cooperative decoding. In Findings of the Association for Computational Linguistics: ACL 2024, pages 13275-13288, Bangkok, Thailand. Association for Computational Linguistics.
|
| 330 |
+
|
| 331 |
+
Biao Zhang, Barry Haddow, and Alexandra Birch. 2023. Prompting large language model for machine translation: A case study. In International Conference on Machine Learning, pages 41092-41110. PMLR.
|
| 332 |
+
|
| 333 |
+
Shaolin Zhu, Leiyu Pan, Bo Li, and Deyi Xiong. 2024a. LANDeRMT: Detecting and routing language-aware neurons for selectively finetuning LLMs to machine translation. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 12135-12148, Bangkok, Thailand. Association for Computational Linguistics.
|
| 334 |
+
|
| 335 |
+
Wenhao Zhu, Hongyi Liu, Qingxiu Dong, Jingjing Xu, Shujian Huang, Lingpeng Kong, Jiajun Chen, and Lei Li. 2024b. Multilingual machine translation with large language models: Empirical results and analysis. In Findings of the Association for Computational Linguistics: NAACL 2024, pages 2765-2781, Mexico City, Mexico. Association for Computational Linguistics.
|
| 336 |
+
|
| 337 |
+
# A Appendix
|
| 338 |
+
|
| 339 |
+
# A.1 Fine-Tune XLM-R for Cultural and Style Adaptation Evaluation
|
| 340 |
+
|
| 341 |
+
To enable XLM-R to identify cultural and stylistic diversities, we employ LoRA fine-tuning on XLM-R for 5 epochs (learning_rate=$5 \times 10^{-5}$, batch_size=32, shuffle seed=42, max_seq_length=128) using a dataset of 10,000 examples in total with the following labels:
|
| 342 |
+
|
| 343 |
+
Label 0: zh_CN social media comments from Zhihu (https://www.zhihu.com/explore);
|
| 344 |
+
|
| 345 |
+
Label 1: zh_SG social media comments from Facebook (https://www.facebook.com/facebook/);
|
| 346 |
+
|
| 347 |
+
Label 2: zh_TW social media comments from PTT (https://www.ptt.cc/index.html);
|
| 348 |
+
|
| 349 |
+
Label 3: zh_CN news sentences from voachinese (https://www.voachinese.com/China);
|
| 350 |
+
|
| 351 |
+
Label 4: zh_SG news sentences from zaobao (https://www.zaobao.com.sg/);
|
| 352 |
+
|
| 353 |
+
Label 5: zh_TW news sentences from twreporter (https://www.twreporter.org/)
|
| 354 |
+
|
| 355 |
+
The fine-tuned XLM-R achieves an accuracy of $97.07\%$ on a dev set consisting of 6,000 sentences (each label 1,000 sentences).
|
| 356 |
+
|
| 357 |
+
# A.2 Spearman's $\rho$ on Each Translation Scenario
|
| 358 |
+
|
| 359 |
+
Detailed Spearman's $\rho$ between human-annotated discrete scale labels and metrics-generated continuous scores for each translation scenario is shown in Figure 4.
|
| 360 |
+
|
| 361 |
+

|
| 362 |
+
Figure 4: Spearman's $\rho$ between human judgments and automatic metrics on three translation scenarios.
|
| 363 |
+
|
| 364 |
+
# A.3 Analysis with GEMBA-MQM
|
| 365 |
+
|
| 366 |
+
We analyze the translations using GEMBA-MQM (Kocmi and Federmann, 2023). To adapt GEMBA-MQM for this task, we modify the prompt as follows:
|
| 367 |
+
|
| 368 |
+
source-lang source: source_seg
|
| 369 |
+
|
| 370 |
+
target-lang translation: target_seg
|
| 371 |
+
|
| 372 |
+
Based on the source segment and machine translation surrounded with triple backticks, identify error types in the translation and classify them. The categories of errors are: accuracy (addition, mistranslation, omission, untranslated text), fluency (character encoding, grammar, inconsistency, punctuation, register, spelling), style (non-informal Mainland Mandarin expressions), terminology (inappropriate for context, inconsistent use), non-translation, other, or no-error.
|
| 373 |
+
|
| 374 |
+
Each error is classified as one of three categories: critical, major, and minor. Critical errors inhibit comprehension of the text. Major errors disrupt the flow, but what the text is trying to say is still understandable. Minor errors are technically errors, but do not disrupt the flow or hinder comprehension.
|
| 375 |
+
|
| 376 |
+
<table><tr><td></td><td>Source</td><td>Translation</td><td>GEMBA-MQM Analysis</td></tr><tr><td>zh_CN</td><td>I was working in Singapore in 1969. Back then, I often saw him in 7th month getai. He is getting old now.</td><td>我1969年在新加坡工作过。那个时候我经常在七月中秋看到他。他现在都老了。</td><td>Critical: no-error
|
| 377 |
+
Major: accuracy/mistranslation- “七月中秋” should be “七月歌台” or “七月戏台” as “getai” refers to a live stage performance held during the Hungry Ghost Festival in the seventh lunar month, not “Mid-Autumn” which is “中秋”. Minor: no-error</td></tr><tr><td>zh_TW</td><td>Your values have changed. Also, all-you-can-eat buffets are not the trend at all, okay?</td><td>妳的設定改變了喔!而且吃到飽根本不是流行趨勢啦,好嗎?</td><td>Critical: no-error Major: accu-racy/mistranslation - “妳的設定改變了喔” should be something like “妳的價值觀改變了” as “values” in this context refers to “values” not “settings”. Minor: no-error The mis-translation of “values” to “settings” (設定) could potentially lead to confusion about the meaning of the sentence, as it changes the subject from personal beliefs or principles to something more akin to prefer-ences or configurations. This is a major error because it could signifi-cantly alter the intended message of the source text.</td></tr><tr><td>zh_SG</td><td>Captured a highly intelligent, hi-larious CJ7 that speaks human languages. Please do not fight with me over it. Thanks!</td><td>抓到了一只超聪明、会说人话的搞笑长江七号。别跟我抢哦,谢 谢!</td><td>Critical: no-error Major: accu-racy/mistranslation - “搞笑” should be “可笑的” or “滑稽的” to better match “hilarious” instead of “funny” which “搞笑” implies. Minor: no-error</td></tr></table>
|
| 378 |
+
|
| 379 |
+
Table 6: Analysis with GEMBA-MQM.
|
| 380 |
+
|
| 381 |
+
As shown in Table 6, we found that GEMBA-MQM has a certain ability to identify translation errors, along with region-specific lexical choices and expressions, although some of them are classified as translation inaccuracies.
|
| 382 |
+
|
| 383 |
+
# A.4 Analysis on Independence and Complementarity of Metrics
|
| 384 |
+
|
| 385 |
+
We conduct a cross-metric Pearson correlation analysis. As shown in Table 7, word-level metrics (Region-Specific Lexical Term Translation and Vocabulary Similarity) are strongly correlated with the sentence-level Semantic Preservation metric ($r = 0.74$ and $r = 0.75$), reflecting the interconnected nature of translation quality. This suggests that while these word-level metrics independently assess explicit lexical choices, they also contribute substantially to the evaluation of overall sentence-level contextual adequacy. Moreover, Culture and Style Adaptation shows moderate correlations with meaning-oriented metrics: Region-Specific Lexical Term Translation, Vocabulary Similarity, and Semantic Preservation ($r = 0.41$ to $0.67$), indicating an added cultural dimension beyond semantics and vocabulary. By contrast, Fluency exhibits negative
|
| 386 |
+
|
| 387 |
+
correlations with the other metrics ( $r = -0.27$ to $-0.59$ ), highlighting it as a distinct and sometimes competing quality dimension.
|
| 388 |
+
|
| 389 |
+
Overall, these metrics are independent yet complementary, collectively providing a comprehensive assessment of translation quality.
|
| 390 |
+
|
| 391 |
+
# A.5 Results on Individual Evaluation Metrics
|
| 392 |
+
|
| 393 |
+
Detailed results of LLMs on individual evaluation metrics are presented in Table 8.
|
| 394 |
+
|
| 395 |
+
<table><tr><td></td><td>Culture and Style Adaptation</td><td>Semantic Preservation</td><td>Region-Specific Lexical Term Translation</td><td>Vocabulary Similarity</td><td>Fluency</td></tr><tr><td>Culture and Style Adaptation</td><td>1.00</td><td>0.67</td><td>0.41</td><td>0.51</td><td>-0.59</td></tr><tr><td>Semantic Preservation</td><td>0.67</td><td>1.00</td><td>0.74</td><td>0.75</td><td>-0.46</td></tr><tr><td>Region-Specific Lexical Term Translation</td><td>0.41</td><td>0.74</td><td>1.00</td><td>0.60</td><td>-0.27</td></tr><tr><td>Vocabulary Similarity</td><td>0.51</td><td>0.75</td><td>0.60</td><td>1.00</td><td>-0.27</td></tr><tr><td>Fluency</td><td>-0.59</td><td>-0.46</td><td>-0.27</td><td>-0.27</td><td>1.00</td></tr></table>
|
| 396 |
+
|
| 397 |
+
Table 7: Cross-Metric Pearson Correlation Results.
|
| 398 |
+
|
| 399 |
+
<table><tr><td rowspan="2">Translation Task</td><td rowspan="2">Model</td><td colspan="2">Word-Level Metric</td><td colspan="3">Sentence-Level Metric</td></tr><tr><td>Region-Specific Lexical Term Translation</td><td>Vocabulary Similarity</td><td>Semantic Preservation</td><td>Culture and Style Adaptation</td><td>Fluency</td></tr><tr><td rowspan="8">en-zh_CN</td><td>GPT-4o</td><td>43.15</td><td>53.00</td><td>27.39</td><td>90.50</td><td>69.77</td></tr><tr><td>Llama3</td><td>14.94</td><td>33.50</td><td>18.19</td><td>76.50</td><td>68.81</td></tr><tr><td>Llama3.2</td><td>6.64</td><td>24.50</td><td>13.82</td><td>61.50</td><td>59.85</td></tr><tr><td>TowerInstrunct-v0.2</td><td>11.20</td><td>32.00</td><td>18.44</td><td>70.50</td><td>63.59</td></tr><tr><td>Qwen2.5</td><td>21.58</td><td>42.50</td><td>20.56</td><td>84.00</td><td>62.35</td></tr><tr><td>Gemma2</td><td>34.44</td><td>45.00</td><td>20.36</td><td>86.50</td><td>67.55</td></tr><tr><td>Aya</td><td>14.11</td><td>40.00</td><td>19.91</td><td>75.50</td><td>62.94</td></tr><tr><td>Llama3-Chinese</td><td>17.43</td><td>36.00</td><td>19.46</td><td>83.50</td><td>72.33</td></tr><tr><td rowspan="8">en-zh_TW</td><td>GPT-4o</td><td>53.55</td><td>45.50</td><td>25.69</td><td>47.00</td><td>80.00</td></tr><tr><td>Llama3</td><td>16.11</td><td>31.50</td><td>15.64</td><td>27.00</td><td>83.01</td></tr><tr><td>Llama3.2</td><td>7.11</td><td>27.50</td><td>12.11</td><td>49.00</td><td>81.49</td></tr><tr><td>TowerInstrunct-v0.2</td><td>9.48</td><td>32.00</td><td>14.82</td><td>24.50</td><td>79.76</td></tr><tr><td>Qwen2.5</td><td>21.80</td><td>36.00</td><td>15.67</td><td>31.50</td><td>79.34</td></tr><tr><td>Gemma2</td><td>50.71</td><td>35.00</td><td>20.40</td><td>67.00</td><td>77.56</td></tr><tr><td>Aya</td><td>17.54</td><td>37.50</td><td>17.86</td><td>18.00</td><td>79.71</td></tr><tr><td>Llama3-Chinese</td><td>27.01</td><td>33.00</td><td>17.19</td><td>31.00</td><td>82.58</td></tr><tr><td 
rowspan="8">en-zh_SG</td><td>GPT-4o</td><td>48.05</td><td>51.50</td><td>29.89</td><td>51.50</td><td>75.00</td></tr><tr><td>Llama3</td><td>11.72</td><td>29.00</td><td>17.56</td><td>58.00</td><td>72.59</td></tr><tr><td>Llama3.2</td><td>5.08</td><td>24.50</td><td>14.42</td><td>64.50</td><td>98.18</td></tr><tr><td>TowerInstrunct-v0.2</td><td>8.59</td><td>36.00</td><td>19.95</td><td>57.00</td><td>94.60</td></tr><tr><td>Qwen2.5</td><td>22.66</td><td>38.00</td><td>18.62</td><td>60.50</td><td>72.62</td></tr><tr><td>Gemma2</td><td>18.36</td><td>32.00</td><td>16.76</td><td>72.00</td><td>70.50</td></tr><tr><td>Aya</td><td>12.11</td><td>44.00</td><td>21.90</td><td>36.50</td><td>72.39</td></tr><tr><td>Llama3-Chinese</td><td>12.11</td><td>38.00</td><td>20.47</td><td>47.50</td><td>69.34</td></tr></table>
|
| 400 |
+
|
| 401 |
+
Table 8: Results of individual evaluation metrics.
|
abenchmarkfortranslationsacrossstylesandlanguagevariants/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:30e9df972b72aed7f0b4af1ef3faa2171a5bba1bd99322cc3494609c69274e72
|
| 3 |
+
size 747026
|
abenchmarkfortranslationsacrossstylesandlanguagevariants/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:bc67508a288ba4d7d5610342f18f72c6f802091b5279c2a400e51eda8b373272
|
| 3 |
+
size 414445
|
acategorytheoreticapproachtoneuralsymbolictaskplanningwithbidirectionalsearch/be9000e2-57eb-4fbd-ba11-37716b55c35b_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9cd44228a83b2b4b8104407a113e078957d8e14872b2148a14c30638dac85f68
|
| 3 |
+
size 164367
|
acategorytheoreticapproachtoneuralsymbolictaskplanningwithbidirectionalsearch/be9000e2-57eb-4fbd-ba11-37716b55c35b_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:32d519cad086e7702e1fe766201884e02e798f2b10610716497e32f1fdd0eb5c
|
| 3 |
+
size 218256
|
acategorytheoreticapproachtoneuralsymbolictaskplanningwithbidirectionalsearch/be9000e2-57eb-4fbd-ba11-37716b55c35b_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:447826f264e7d6b22762dce3210f44117f32358dd39f984376ef3fd7e2d0ee8c
|
| 3 |
+
size 671086
|
acategorytheoreticapproachtoneuralsymbolictaskplanningwithbidirectionalsearch/full.md
ADDED
|
@@ -0,0 +1,950 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# A Category-Theoretic Approach to Neural-Symbolic Task Planning with Bidirectional Search
|
| 2 |
+
|
| 3 |
+
Shuhui Qu
|
| 4 |
+
|
| 5 |
+
Stanford University shuhuiq@stanford.edu
|
| 6 |
+
|
| 7 |
+
Jie Wang
|
| 8 |
+
|
| 9 |
+
Stanford University jiewang@stanford.edu
|
| 10 |
+
|
| 11 |
+
Kincho H. Law
|
| 12 |
+
|
| 13 |
+
Stanford University law@stanford.edu
|
| 14 |
+
|
| 15 |
+
# Abstract
|
| 16 |
+
|
| 17 |
+
We introduce a Neural-Symbolic Task Planning framework integrating Large Language Model (LLM) decomposition with category-theoretic verification for resource-aware, temporally consistent planning. Our approach represents states as objects and valid operations as morphisms in a categorical framework, ensuring constraint satisfaction through mathematical pullbacks. We employ bidirectional search that simultaneously expands from initial and goal states, guided by a learned planning distance function that efficiently prunes infeasible paths. Empirical evaluations across three planning domains demonstrate that our method improves completion rates by up to $6.6\%$ and action accuracy by $9.1\%$ , while eliminating resource violations compared to the existing baselines. These results highlight the synergy between LLM-based operator generation and category-theoretic verification for reliable planning in domains requiring both resource-awareness and temporal consistency.
|
| 18 |
+
|
| 19 |
+
# 1 Introduction
|
| 20 |
+
|
| 21 |
+
Effective task planning remains a critical challenge in artificial intelligence, particularly in domains where resource constraints, temporal consistency, and trustworthiness are paramount (Ghallab et al., 2004; Zhang et al., 2023; Jiang et al., 2024). Large Language Models (LLMs) (Achiam et al., 2023; Grattafori et al., 2024; Touvron et al., 2023) offer powerful generative capabilities for natural language planning, but frequently overlook domain constraints (Wang et al., 2024; Valmeekam et al., 2024), yielding plans that violate resource limitations or temporal dependencies (Valmeekam et al., 2023). In contrast, classical symbolic planners (Pallagani et al., 2022; Illanes et al., 2020; Ghallab et al., 2004) ensure formal correctness but suffer from limited flexibility and require extensive domain engineering.
|
| 22 |
+
|
| 23 |
+
Recent research has attempted to bridge this conceptual gap through methods such as Chain-of-Thought (Wei et al., 2022), Monte Carlo Tree Search (MCTS)-based planning (Zhao et al., 2023), and reinforcement learning methods (Chen et al., 2025; Dalal et al., 2024). However, these approaches encode constraints as heuristic signals or sparse rewards (Havrilla et al., 2024; Huang et al., 2022) without providing structural guarantees. Other reasoning-oriented approaches such as Tree-of-Thoughts (ToT) (Yao et al., 2023a), ReWOO (Xu et al., 2023), and ToS (Katz et al., 2024) improve reasoning depth and search efficiency, but still lack mechanisms for ensuring compositional validity of generated plans. As benchmark evaluations of LLM planning expand (Stein et al., 2023; Wu et al., 2025), the need for principled approaches that unify neural flexibility with formal constraint enforcement becomes urgent.
|
| 24 |
+
|
| 25 |
+
We address these challenges by introducing Neural-Symbolic Task Planning (Figure 1). The framework comprises three key innovations:
|
| 26 |
+
|
| 27 |
+
1. LLM-Driven Operator Decomposition: A formalized technique for transforming natural language tasks into structured categorical specifications through iterative refinement, creating a bridge between unstructured language and mathematical formalism.
|
| 28 |
+
2. Category-Theoretic Verification: A novel framework that leverages category theory to represent planning domains, modeling states as objects and operations as morphisms in a categorical framework. By employing mathematical pullbacks, we provide compositional validity guarantees that ensure resource, temporal, and logical constraint satisfaction throughout the planning process.
|
| 29 |
+
3. Bidirectional Search: A theoretically-grounded algorithm that simultaneously ex
|
| 30 |
+
|
| 31 |
+

|
| 32 |
+
Figure 1: Neural-Symbolic Task Planning framework with three key stages: (1) LLM decomposition of natural language tasks into structured specifications, (2) category-theoretic verification to ensure constraint satisfaction, (3) bidirectional search to efficiently connect initial and goal states.
|
| 33 |
+
|
| 34 |
+
pands from initial and goal states guided by a categorical distance function, reducing computational complexity from $O(b^{L})$ to $O(b^{L / 2})$ while maintaining plan optimality.
|
| 35 |
+
|
| 36 |
+
Our contribution centers on the integration of category-theoretic verification with neural operator generation and search. This enables our framework to act as a constraint-safety layer that can be applied to LLM-driven planners, including CoT(Wei et al., 2022), ReAct(Yao et al., 2023b), ToT(Yao et al., 2023a), ensuring that generated plans remain resource-aware, temporally consistent, and logically valid.
|
| 37 |
+
|
| 38 |
+
We evaluate our framework across three diverse planning domains: cooking recipes (RecipeNLG) (Bien et al., 2020), procedural texts (ProcessBench) (Zheng et al., 2024), and standardized procedures (Proc2PDDL) (Zhang et al., 2024b). Our method consistently achieves $15 - 25\%$ higher completion rates than other baselines, while substantially reducing resource/time violations by up to $77\%$ . These results demonstrate that combining LLM-based operator generation with category-theoretic verification creates a powerful synergy for reliable, flexible planning in constraint-intensive domains.
|
| 39 |
+
|
| 40 |
+
# 2 Related Work
|
| 41 |
+
|
| 42 |
+
Classical Planning. Symbolic planners (Ghallab et al., 2004; Jiang et al., 2019; Holler et al., 2020) guarantee correctness but require extensive domain engineering and struggle with partially specified domains (Smirnov et al., 2024; Zhang et al., 2023). Hybrid approaches such as Fast-Downward (Helmert, 2006) and LAMA (Richter and Westphal, 2010) add heuristics, but they lack
|
| 43 |
+
|
| 44 |
+
mechanisms for handling quantitative resource and temporal constraints.
|
| 45 |
+
|
| 46 |
+
LLM-Based Planning. Recent approaches leverage LLMs (Achiam et al., 2023; Touvron et al., 2023) to generate plans directly from text (Dagan et al., 2023; Song et al., 2023; Zeng et al., 2023), avoiding domain engineering. However, these models often act as black boxes that violate logical, temporal, or resource constraints (Valmeekam et al., 2022; Gestrin et al., 2024). To improve robustness, several works have introduced search-augmented techniques: Monte Carlo Tree Search (MCTS) (Zhao et al., 2023; Zhang et al., 2024a), ReAct (Yao et al., 2023b), Reflexion (Shinn et al., 2023), and LLMFP (Hao et al., 2024) integrate dynamic programming (Dagan et al., 2023) or feedback-driven strategies (Shah et al., 2023; Suri et al., 2024). These methods demonstrate the potential of combining search with neural heuristics and LLM judges (Gu et al., 2024), but still lack structural correctness guarantees (Kambhampati et al., 2024).
|
| 47 |
+
|
| 48 |
+
Reasoning-Oriented LLM Frameworks Parallel to direct plan generation, reasoning-oriented frameworks such as Tree-of-Thoughts(Yao et al., 2023a), ReWOO (Xu et al., 2023), and ToS (Katz et al., 2024) enhance reasoning depth and search efficiency by structuring LLM outputs into tree- or workflow-like processes. While effective for improving exploration, these methods also do not guarantee principled categorical verification when integrating multiple constraints across domains.
|
| 49 |
+
|
| 50 |
+
Neural-Symbolic Methods. Neural-symbolic approaches (DeLong et al., 2024; Mao et al., 2019) aim to combine neural flexibility with symbolic pre
|
| 51 |
+
|
| 52 |
+
cision in domains such as visual reasoning (Hudson and Manning, 2019) and program synthesis (Ellis et al., 2021). Category theory provides powerful mathematical frameworks for compositional reasoning (Rydeheard and Burstall, 1988; Pierce, 1991; Jacob, 1990; Walters and Walters, 1991; Baez and Pollard, 2017), though prior applications have largely focused on symbolic systems without deep integration of neural operator generation.
|
| 53 |
+
|
| 54 |
+
Our framework uniquely combines the generative capabilities of LLMs with category-theoretic verification to structurally enforce resource, temporal, and logical constraints. By embedding pullback-based validation into a bidirectional search framework, we bridge the gap between the flexibility of LLM planners and the formal guarantees of symbolic reasoning.
|
| 55 |
+
|
| 56 |
+
# 3 Problem Statement
|
| 57 |
+
|
| 58 |
+
We formalize task planning as a category-theoretic framework where states are objects and operations are morphisms. Each state $w = (r, s, l, t) \in W$ encapsulates resources $r$ , symbolic progress $s$ , logical constraints $l$ , and temporal allocations $t$ . Morphisms $f: w_1 \to w_2$ represent valid state transitions that preserve resource bounds, state validity, constraint satisfaction, and temporal consistency.
|
| 59 |
+
|
| 60 |
+
Definition 3.1 (Planning Problem). Given an initial state $w_0 = (r_0, s_0, l_0, t_0)$ and goal specification $w^* = (r^*, s^*, l^*, t^*)$ , find a sequence of morphisms in planning category $\mathcal{T}$ :
|
| 61 |
+
|
| 62 |
+
$$
|
| 63 |
+
w _ {0} \xrightarrow {f _ {1}} w _ {1} \xrightarrow {f _ {2}} \dots \xrightarrow {f _ {n - 1}} w _ {n - 1} \xrightarrow {f _ {n}} w _ {n}
|
| 64 |
+
$$
|
| 65 |
+
|
| 66 |
+
such that each intermediate state $w_{i}$ remains valid under categorical constraints, and $w_{n}$ satisfies the criteria in $w^{*}$ .
|
| 67 |
+
|
| 68 |
+
A more formal problem statement can be found in Appendix A.
|
| 69 |
+
|
| 70 |
+
# 4 Theoretical Analysis
|
| 71 |
+
|
| 72 |
+
In this section, we analyze the formal properties of the category-theoretic verification framework. We establish three key guarantees: local reachability, global completeness, and probabilistic completeness. Together, these theorems ensure that our approach preserves the rigor of symbolic planning while leveraging the generative flexibility of LLMs. Crucially, they highlight our main contribution: by embedding category-theoretic constructs (in particular, pullback-based verification) into an
|
| 73 |
+
|
| 74 |
+
LLM-driven planner, we can provide structural guarantees that are missing from existing heuristic or black-box approaches.
|
| 75 |
+
|
| 76 |
+
# 4.1 State Space Properties
|
| 77 |
+
|
| 78 |
+
Let a planning distance function be $D: W \times W \to \mathbb{R}$ that estimates the minimum cost to transform one state into another. It enables theoretical guarantees through three properties:
|
| 79 |
+
|
| 80 |
+
1. Component Integration: $D$ incorporates all four state components (resources, symbolic state, logical constraints, temporal intervals)
|
| 81 |
+
2. Categorical Consistency: It respects the category structure, with $D(w_{1},w_{2}) < \infty$ only when morphisms can connect the states
|
| 82 |
+
3. Continuous Measure: It provides a differentiable measure of "plan difficulty" between states, guiding search toward promising paths
|
| 83 |
+
|
| 84 |
+
# 4.2 Theoretical Guarantees
|
| 85 |
+
|
| 86 |
+
Our first theorem establishes local reachability in the planning space:
|
| 87 |
+
|
| 88 |
+
Theorem 4.1 (ε-Reachability). For any two states $w_{1}, w_{2} \in W$ with $D(w_{1}, w_{2}) < \epsilon$ , there exists a sequence of valid morphisms $f_{1}, \ldots, f_{k}$ such that $f_{k} \circ \ldots \circ f_{1}(w_{1}) = w_{2}$ , where $k \leq \lceil 1 / \epsilon \rceil$ .
|
| 89 |
+
|
| 90 |
+
This theorem guarantees local connectivity of the categorical state space: nearby states can always be connected via a bounded number of morphisms. This ensures that our planner can efficiently explore neighborhoods of valid states without "falling out" of the constraint-respecting space. Proof can be found in Appendix B.
|
| 91 |
+
|
| 92 |
+
Building on local connectivity, we establish global completeness:
|
| 93 |
+
|
| 94 |
+
Theorem 4.2 (Completeness). If a valid plan exists between initial state $w_0$ and goal state $w^*$ , the bidirectional search algorithm will find it.
|
| 95 |
+
|
| 96 |
+
Completeness is the cornerstone of symbolic planning. By proving completeness despite the stochasticity of LLM-generated operators, we show that our neural-symbolic framework provides formal coverage guarantees—the planner will not overlook feasible solutions simply because of neural variability.
|
| 97 |
+
|
| 98 |
+
Theorem 4.3 (Probabilistic Completeness). Under bounded resources and finite constraints, the probability of finding a valid plan in $n$ steps is:
|
| 99 |
+
|
| 100 |
+
$$
|
| 101 |
+
P(\text{find plan in } n \text{ steps}) \geq 1 - e^{-\lambda n} \tag{1}
|
| 102 |
+
$$
|
| 103 |
+
|
| 104 |
+

|
| 105 |
+
Figure 2: Iterative LLM-based planning formulation process with feedback loops that enable progressive refinement from natural language to categorical representations.
|
| 106 |
+
|
| 107 |
+
where $\lambda > 0$ depends on the reliability of LLM-generated morphisms.
|
| 108 |
+
|
| 109 |
+
This result ensures robustness under uncertainty: even though LLM-generated morphisms may be noisy or inconsistent, our framework converges exponentially toward valid plans as the number of steps $n$ increases. This property provides a strong theoretical foundation for the reliability under stochastic language-based operators.
|
| 110 |
+
|
| 111 |
+
The theoretical foundation is central to our contribution: category-theoretic verification not only ensures structural correctness of plans but also enables principled integration of neural generative models into symbolic reasoning.
|
| 112 |
+
|
| 113 |
+
# 5 Methodology
|
| 114 |
+
|
| 115 |
+
We now turn to our Neural-Symbolic Task Planning framework, which combines LLM-based operator generation, pullback-based verification, and bidirectional search to generate valid plans (Figure 1).
|
| 116 |
+
|
| 117 |
+
# 5.1 LLM-Based Task Decomposition
|
| 118 |
+
|
| 119 |
+
We transform high-level user queries into formal specifications through a systematic four-stage process using a pretrained Large Language Model (e.g., GPT-4, Llama) (Figure 2):
|
| 120 |
+
|
| 121 |
+

|
| 122 |
+
Figure 3: Bidirectional search reduces the effective search depth by simultaneously expanding from both the initial state $w_{0}$ and goal state $w^{*}$ . When a pullback exists between states $w_{2}^{F}$ and $w^{*}$ (at meeting point $w_{m,1}$ ), a valid plan can be constructed with fewer expansions.
|
| 123 |
+
|
| 124 |
+
- Initial Decomposition: Extract candidate resources, operators, and constraints from natural language.
|
| 125 |
+
- Constraint Refinement: Identify ambiguities, clarify task specifications, and resolve implicit dependencies through targeted queries.
|
| 126 |
+
- Resource Formalization: Transform resource into typed, quantified specifications.
|
| 127 |
+
- Categorical Encoding: Encode specifications as categorical objects, morphisms, and constraints.
|
| 128 |
+
|
| 129 |
+
This iterative process uses feedback loops to progressively refine representations until they reach the precision required for category-theoretic planning, significantly reducing the manual engineering typically needed for symbolic approaches. To ensure reproducibility across domains, we provide in Appendix D a prompt template and guidelines that generalizes across domains.
|
| 130 |
+
|
| 131 |
+
# 5.2 Bidirectional Search
|
| 132 |
+
|
| 133 |
+
Task planning can be formulated using a variety of search and optimization strategies (e.g., $\mathbf{A}^*$ , MCTS). We focus on bidirectional search, one of the most efficient formulations, as it reduces search depth from $O(b^{L})$ to $O(b^{L / 2})$ while retaining completeness guarantees, as illustrated in Figure 3. Our algorithm draws inspiration from Retro* and DESCP (Xie et al., 2022; Yu et al., 2024) but is generalized to operate with category-theoretic validation. For a valid morphism sequence $\mathcal{P} = \{f_1,f_2,f_3,\ldots \}$ , the total cost of the sequence is $\sum_{1}^{n}c(f)$ , where $c(f)$ is the cost of applying morphism $f$ .
|
| 134 |
+
|
| 135 |
+
# 5.2.1 Planning Distance
|
| 136 |
+
|
| 137 |
+
We now define our planning distance function $D$ that estimates the minimum cost to transform one state into another as:
|
| 138 |
+
|
| 139 |
+
$$
|
| 140 |
+
\begin{array}{l} D \left(w _ {1}, w _ {2}\right) = \alpha_ {s} d _ {s} \left(s _ {1}, s _ {2}\right) + \alpha_ {r} \left\| r _ {1} - r _ {2} \right\| \tag {2} \\ + \alpha_ {l} d _ {l} \left(l _ {1}, l _ {2}\right) + \alpha_ {t} d _ {t} \left(t _ {1}, t _ {2}\right) \\ \end{array}
|
| 141 |
+
$$
|
| 142 |
+
|
| 143 |
+
where $\alpha_{r},\alpha_{s},\alpha_{l},\alpha_{t}$ are weighting factors, and $d_{s},d_{t},d_{l}$ are appropriate metrics for symbolic states, temporal components, and logical constraints, respectively. More details can be found in Appendix C<sup>1</sup>. This function serves as a domain-general heuristic that guides both forward search (from initial state) and backward search (from goal state), enabling efficient identification of promising meeting points. Importantly, the distance formulation is not specific to DESP or Retro* but can be embedded into a wide range of search frameworks (including A* and MCTS), making our approach adaptable across different planning backbones.
|
| 144 |
+
|
| 145 |
+
# 5.2.2 Search Graphs
|
| 146 |
+
|
| 147 |
+
We follow the same configuration as DESP and maintain two search graphs:
|
| 148 |
+
|
| 149 |
+
1. $\mathcal{G}^F$ (forward) initiates from $w_0$ and expands in a "bottom-up" manner by applying forward morphisms $f: w \to w'$ .
|
| 150 |
+
2. $\mathcal{G}^B$ (backward) starts from $w^{*}$ and expands "top-down" by applying backward morphisms that effectively invert feasible transitions.
|
| 151 |
+
|
| 152 |
+
The search uses an AND-OR graph structure (Xie et al., 2022) with objects in category $w \in W$ as OR-nodes and valid morphisms as AND-nodes(all children must be solved).
|
| 153 |
+
|
| 154 |
+
Our implementation supports two search strategies using a target condition function $\gamma : W \to W$ :
|
| 155 |
+
|
| 156 |
+
- Front-to-End (F2E): Target opposing end states: $\gamma(w) = w^{*}$ for $w \in \mathcal{G}^F$ and $\gamma(w) = w_0$ for $w \in \mathcal{G}^B$
|
| 157 |
+
- Front-to-Front (F2F): Target closest states in opposing graph: $\gamma(w) = \arg \min_{w' \in \mathcal{G}^B} D(w, w')$ for $w \in \mathcal{G}^F$ , $\gamma(w) = \arg \min_{w' \in \mathcal{G}^F} D(w', w)$ for $w \in \mathcal{G}^B$
|
| 158 |
+
|
| 159 |
+
# 5.2.3 Search Procedure
|
| 160 |
+
|
| 161 |
+
The search procedure (Figure 4) selects and expands frontier states from both graphs:
|
| 162 |
+
|
| 163 |
+
Following Retro*, We let $V_w$ be the minimum cost to achieve state $w$ from $w_0$ ; $V_t(w|\mathcal{G})$ be the estimated cost of achieving $w^*$ using state $w$ given search graph $\mathcal{G}$ ; $rn(w|\mathcal{G})$ be the minimum cost to reach state $w$ in search graph $\mathcal{G}$ ; $D_w$ be the distance $D(\gamma(w), w)$ between a state and its target; $sn(w|\mathcal{G})$ be the step number represented as $D_w - V_w$ for related frontier nodes; $D_t(w|\mathcal{G})$ be the multiset of $D_w - V_w$ values along the minimum cost route through state $w$ .
|
| 164 |
+
|
| 165 |
+
Frontier State Selection. Let $\mathcal{F}^F$ and $\mathcal{F}^B$ denote the frontier sets of the unsolved states in the forward and backward graphs, respectively.
|
| 166 |
+
|
| 167 |
+
For backward selection in the backward graph, we select a frontier state that minimizes the expected total cost of planning from the initial state $w_0$ to the goal state $w^*$ through that state: $w_{\mathrm{select},B} \gets \arg \min_{w \in \mathcal{F}^B} \left[ V_t(w|\mathcal{G}^B) + \min (D_t(w|\mathcal{G}^B)) \right]$
|
| 168 |
+
|
| 169 |
+
The forward selection in the forward graph is identical to Retro*:
|
| 170 |
+
|
| 171 |
+
$$
|
| 172 |
+
w_{\text{select},F}\leftarrow \arg \min_{w\in \mathcal{F}^{F}}V_{t}(w|\mathcal{G}^{F})
|
| 173 |
+
$$
|
| 174 |
+
|
| 175 |
+
State Expansion Policies. For backward expansion, we follow AND-OR-based algorithms in calling a single-step morphism, applying the top $n$ predicted morphisms to the selected frontier node and adding the resulting morphisms and their states as nodes to the graph.
|
| 176 |
+
|
| 177 |
+
For state $w$ in $\mathcal{G}^F$ (forward direction), we perform the forward expansion procedure:
|
| 178 |
+
|
| 179 |
+
- For state $w$ , we generate successor states $w'$ via morphisms $f: w \to w'$ and initialize $sn(w'|G^F) \gets V_{w'} = D(w', \gamma(w'))$
|
| 180 |
+
|
| 181 |
+
For state $w$ in $\mathcal{G}^B$ (backward direction):
|
| 182 |
+
|
| 183 |
+
- For state $w$ , we generate predecessor states $w'$ via morphisms $f: w' \to w$ and initialize the values as:
|
| 184 |
+
|
| 185 |
+
$$
|
| 186 |
+
\begin{array}{l} rn\left(w^{\prime} \mid \mathcal{G}^{B}\right) \leftarrow V_{w^{\prime}} \\ sn\left(w^{\prime} \mid \mathcal{G}^{B}\right) \leftarrow D\left(\gamma\left(w^{\prime}\right), w^{\prime}\right) - V_{w^{\prime}} \end{array}
|
| 187 |
+
$$
|
| 188 |
+
|
| 189 |
+
Value Propagation. After value initialization, for $\mathcal{G}^F$ , we update values using the propagation from the Retro* algorithm.
|
| 190 |
+
|
| 191 |
+

|
| 192 |
+
Figure 4: (a) Bidirectional Search algorithm. Evaluation of top nodes is based on both cost $V_{w}$ and distance $D$ . (b) Overview of the one-step expansion procedures.
|
| 193 |
+
|
| 194 |
+

|
| 195 |
+
|
| 196 |
+
For $\mathcal{G}^B$ , we update the graphs through uppropagation and downpropagation. Similar to AND-OR algorithms, we first propagate updates to relevant values up the graph, and then down propagate to related nodes.
|
| 197 |
+
|
| 198 |
+
Uppropagation (for morphism nodes $f$ and state nodes $w$ ):
|
| 199 |
+
|
| 200 |
+
$$
|
| 201 |
+
sn(f \mid \mathcal{G}^{B}) \gets \sum_{w \in ch(f)} sn(w \mid \mathcal{G}^{B})
|
| 202 |
+
$$
|
| 203 |
+
|
| 204 |
+
$$
|
| 205 |
+
sn(w \mid \mathcal{G}^{B}) \leftarrow \begin{cases} [\, D_{w} - V_{w} \,], & \text{if } w \in \mathcal{F}^{B} \\ sn\Big(\arg\min_{f \in ch(w)} rn(f) \mid \mathcal{G}^{B}\Big), & \text{otherwise} \end{cases}
|
| 206 |
+
$$
|
| 207 |
+
|
| 208 |
+
# Downpropagation:
|
| 209 |
+
|
| 210 |
+
$$
|
| 211 |
+
\begin{array}{l} D_{t}(f \mid \mathcal{G}^{B}) \gets sn(pr(f) \mid \mathcal{G}^{B}) \\ \quad - sn\left(\arg\min_{f^{\prime} \in ch(pr(f))} rn\left(f^{\prime} \mid \mathcal{G}^{B}\right) \mid \mathcal{G}^{B}\right) \\ \quad + sn(f \mid \mathcal{G}^{B}) \end{array}
|
| 212 |
+
$$
|
| 213 |
+
|
| 214 |
+
$$
|
| 215 |
+
D_{t}(w \mid \mathcal{G}^{B}) \gets D_{t}\Big(\arg\min_{f \in pr(w)} rn(f \mid \mathcal{G}^{B}) \mid \mathcal{G}^{B}\Big)
|
| 216 |
+
$$
|
| 217 |
+
|
| 218 |
+
where the $ch$ and $pr$ functions denote the children and parent nodes; $sn$ tracks the differences for nodes, enabling efficient propagation of cost estimates throughout the search graph. These update rules ensure that cost information flows correctly between states (objects in our category) and the morphisms connecting them.
|
| 219 |
+
|
| 220 |
+
# 5.2.4 Forward expansion policy with single-step morphism
|
| 221 |
+
|
| 222 |
+
LLM-based Morphism Generation. In this work, we use LLMs to generate valid morphisms through two key functions:
|
| 223 |
+
|
| 224 |
+
$$
|
| 225 |
+
\begin{array}{l} \phi_ {f}: W \times W \to f = \operatorname {L L M} \left(w _ {1}, w _ {2}\right) \\ \phi_ {w}: W \times W \times f \rightarrow W = \operatorname {L L M} (w _ {1}, w _ {2}, f) \\ \end{array}
|
| 226 |
+
$$
|
| 227 |
+
|
| 228 |
+
The function $\phi_f$ generates candidate morphisms between states, while $\phi_w$ determines the resulting state after applying a morphism. These functions are implemented as structured prompts to the LLM that request specific outputs conforming to our categorical framework.
|
| 229 |
+
|
| 230 |
+
Merging via Pullbacks. Periodically, we attempt to connect the search graphs by finding states $w^{F} \in \mathcal{G}^{F}$ and $w^{B} \in \mathcal{G}^{B}$ with $D(w^{F}, w^{B}) < \epsilon$ that can be connected through category-theoretic pullback checks, where $\epsilon$ is a small value for threshold. When we find candidate states, we verify their compatibility using pullback checks and compose their respective plan fragments to obtain a complete sequence from $w_{0}$ to $w^{*}$ .
|
| 231 |
+
|
| 232 |
+
# 5.3 Pullback Checks for Plan Validity
|
| 233 |
+
|
| 234 |
+
Pullbacks ensure plan compositions respect all constraints by computing potential pullback states and verifying their validity. When a valid pullback exists, we compose partial plans while guaranteeing constraint satisfaction. The verification process for states $w_{1}$ and $w_{2}$ with morphisms to a common state $w_{c}$ works as follows:
|
| 235 |
+
|
| 236 |
+
1. Compute potential pullback state $w_{p} = (r_{p}, s_{p}, l_{p}, t_{p})$ where:
|
| 237 |
+
|
| 238 |
+
- $r_p$ satisfies resource constraints for both states
|
| 239 |
+
- $l_{p} = l_{1} \wedge l_{2}$ (logical AND of constraints)
|
| 240 |
+
- $t_p = t_1 \cap t_2$ (intersection of temporal intervals)
|
| 241 |
+
- $s_p$ is a valid symbolic state with transitions to both $s_1$ and $s_2$
|
| 242 |
+
|
| 243 |
+
2. Verify that $w_{p}$ is a valid state (satisfies all capacity constraints)
|
| 244 |
+
3. Confirm that morphisms $p_1: w_p \to w_1$ and $p_2: w_p \to w_2$ exist
|
| 245 |
+
|
| 246 |
+
# 5.4 Algorithm Summary
|
| 247 |
+
|
| 248 |
+
Algorithm 1 in Appendix E outlines our bidirectional search procedure. The algorithm initializes search graphs from initial and goal states, then iteratively selects and expands states from both frontiers. After each expansion, it attempts to connect the search graphs via pullback checks. When a valid connection is found, it composes the partial plans to form a complete solution.
|
| 249 |
+
|
| 250 |
+
We establish the computational efficiency of our bidirectional search approach:
|
| 251 |
+
|
| 252 |
+
Theorem 5.1 (Time Complexity). Given maximum path length $L$ , branching factor $b$ , and $n$ states, the bi-directional search algorithm has time complexity $O(b^{L/2})$ .
|
| 253 |
+
|
| 254 |
+
This represents a quadratic improvement in the exponent compared to unidirectional search $(O(b^{L}))$ , making our approach more efficient for practical applications.
|
| 255 |
+
|
| 256 |
+
# 6 Experiments
|
| 257 |
+
|
| 258 |
+
We evaluate our approach on three datasets with diverse planning characteristics: PLANBENCH (goal-oriented planning), RECIPENLG (resource and temporal constraints), and PROC2PDDL (formal planning with precondition/effect validation).
|
| 259 |
+
|
| 260 |
+
# 6.1 Datasets and Planning Scenarios
|
| 261 |
+
|
| 262 |
+
PlanBench. PlanBench² (Valmeekam et al., 2023) consists of 600 Blocksworld problems in PDDL format. Tasks involve transforming block configurations into goal states under logical constraints and cost minimization. We use a 50-50 train-test split.
|
| 263 |
+
|
| 264 |
+
RecipeNLG. RecipeNLG (Bien et al., 2020) contains cooking recipes with ingredient lists and step-by-step directions. We augment recipes with explicit resource limits (e.g., “ $\leq$ 1/2 cup sugar” for health-conscious modifications) and temporal intervals (e.g., “bake 20-25 minutes”) using GPT-4, testing quantitative resource and timing. We use an 80-20 train-test split.
|
| 265 |
+
|
| 266 |
+
Proc2PDDL. Proc2PDDL $^3$ (Zhang et al., 2024b) provides 95 procedural texts with expert-annotated PDDL domain files across 27 domains. We evaluate precondition/effect prediction and executable plan generation using a 50–50 split per domain.
|
| 267 |
+
|
| 268 |
+
# 6.2 Baselines and Comparative Methods
|
| 269 |
+
|
| 270 |
+
We compare against direct prompting, reasoning-augmented prompting, and search-augmented planners, all using GPT-4o unless otherwise noted:
|
| 271 |
+
|
| 272 |
+
GPT-4o (Direct Prompting). Prompted with raw task descriptions and a request for step-by-step plans, without additional reasoning instructions.
|
| 273 |
+
|
| 274 |
+
CoT-GPT4o (Chain-of-Thought). Prompted with chain-of-thought. Explicit reasoning over resources, temporal requirements, and dependencies before producing a plan.
|
| 275 |
+
|
| 276 |
+
Thought-of-Search (Katz et al., 2024). Structures LLM exploration as a guided search tree for improved reasoning depth.
|
| 277 |
+
|
| 278 |
+
ReAct (Yao et al., 2023b). Interleaves reasoning traces with environment interactions to refine planning decisions.
|
| 279 |
+
|
| 280 |
+
$\mathbf{LLM} + \mathbf{P}$ (Liu et al., 2023) Augments LLMs with symbolic planners for constraint-aware reasoning.
|
| 281 |
+
|
| 282 |
+
LLM-MCTS (Zhao et al., 2023). Monte Carlo Tree Search with 50 rollouts per problem, guided by LLM confidence scores.
|
| 283 |
+
|
| 284 |
+
Our approach combines LLM-based operator generation with category-theoretic verification and bidirectional search (details in Appendix C).
|
| 285 |
+
|
| 286 |
+
# 6.3 Evaluation Metrics
|
| 287 |
+
|
| 288 |
+
For PlanBench, we report: (1) Completion rate: percentage of problems solved correctly; and (2) Cost optimality: percentage of solutions with minimal cost. For RecipeNLG: (3) BLEU score; (4) Resource constraint violations: percentage of solutions violating resource limits; and (5) Temporal constraint violations: percentage of solutions violating timing requirements. For Proc2PDDL: (6) Action-wise accuracy: percentage of correctly predicted preconditions/effects; and (7) Problem-file solve rate: percentage of problem files executable in a PDDL solver.
|
| 289 |
+
|
| 290 |
+
# 6.4 Results
|
| 291 |
+
|
| 292 |
+
Table 1 summarizes performance across all datasets. Our approach consistently outperforms all baselines, achieving state-of-the-art results across Plan-Bench, RecipeNLG, and Proc2PDDL.
|
| 293 |
+
|
| 294 |
+
Table 1: Performance comparison across all datasets. Best results in bold, second best underlined.
|
| 295 |
+
|
| 296 |
+
<table><tr><td rowspan="2">Method</td><td colspan="2">PlanBench</td><td colspan="3">RecipeNLG</td><td colspan="2">Proc2PDDL</td></tr><tr><td>Comp%</td><td>Cost Opt%</td><td>BLEU</td><td>Res Viol%</td><td>Temp Viol%</td><td>Action Acc%</td><td>PF Solve%</td></tr><tr><td>GPT-4o</td><td>34.3</td><td>33.0</td><td>0.903</td><td>27.7</td><td>32.4</td><td>15.9</td><td>33.7</td></tr><tr><td>CoT-GPT4o</td><td>47.0</td><td>41.5</td><td>0.902</td><td>21.5</td><td>24.3</td><td>9.3</td><td>21.1</td></tr><tr><td>ToS</td><td>41.5</td><td>36.3</td><td>0.898</td><td>26.6</td><td>30.5</td><td>10.4</td><td>24.7</td></tr><tr><td>ReAct</td><td>63.0</td><td>56.8</td><td>0.915</td><td>19.4</td><td>22.9</td><td>34.6</td><td>43.7</td></tr><tr><td>LLM+P</td><td>90</td><td>83.3</td><td>0.888</td><td>3.4</td><td>5.7</td><td>72.0</td><td>79.2</td></tr><tr><td>LLM-MCTS</td><td>69.0</td><td>63.1</td><td>0.881</td><td>18.8</td><td>19.7</td><td>21.4</td><td>45.3</td></tr><tr><td>Ours</td><td>96.6</td><td>93.5</td><td>0.901</td><td>0</td><td>1.4</td><td>81.1</td><td>87.4</td></tr></table>
|
| 297 |
+
|
| 298 |
+
PlanBench Our method achieves the highest completion rate (96.6%) and cost optimality (93.5%), improving by 6.6% and 10.9% over the strongest LLM+P baseline; +27.6% and +30.4% over the LLM-MCTS. This demonstrates that category-theoretic verification effectively enforces logical dependencies (e.g., supporting block structures), preventing invalid moves that other LLM-based planners frequently make.
|
| 299 |
+
|
| 300 |
+
RecipeNLG All methods achieve comparable BLEU scores (0.881–0.915), suggesting similar textual quality. However, our method achieves near-perfect constraint satisfaction with $0\%$ resource violations and only $1.4\%$ temporal violations, far surpassing both LLM-MCTS $(18.8\%, 19.7\%)$ and LLM+P $(3.4\%, 5.7\%)$ . This improvement is most pronounced in recipes with complex resource tracking requirements, such as recipes using partial ingredients across multiple steps. For example, when handling recipes requiring resource splitting (e.g., using half of an ingredient in one step given the global resource constraint), our pullback-based verification preserved consistency that baselines failed to capture.
|
| 301 |
+
|
| 302 |
+
Proc2PDDL This dataset is the most challenging, requiring formal reasoning over preconditions and effects. Our method achieves the highest action accuracy (81.1%) and solver success rate (87.4%), outperforming LLM+P by +9.1% and +8.2% respectively. The improvement is particularly significant for multi-step procedures with long-range dependencies, where pullback verification successfully preserves logical consistency throughout the planning process, which will be shown in our ablation study.
|
| 303 |
+
|
| 304 |
+
# 6.5 Ablation Studies
|
| 305 |
+
|
| 306 |
+
Reasoning vs. non-reasoning Table 2 shows the influence of LLM backbone type and scale. Reasoning-
|
| 307 |
+
|
| 308 |
+
Table 2: Performance comparison across different LLM backbones.
|
| 309 |
+
|
| 310 |
+
<table><tr><td rowspan="2">Base LLM</td><td colspan="2">PlanBench</td></tr><tr><td>Comp%</td><td>Cost Opt%</td></tr><tr><td>GPT-4o</td><td>96.6</td><td>93.5</td></tr><tr><td>o4-mini</td><td>98.8</td><td>93.7</td></tr><tr><td>Claude-3.5</td><td>94.3</td><td>91.0</td></tr><tr><td>LLaMA-3-70B</td><td>92.4</td><td>85.1</td></tr><tr><td>LLaMA-3-13B</td><td>91.0</td><td>83.3</td></tr><tr><td>LLaMA-3-8B</td><td>72.7</td><td>59.4</td></tr><tr><td>DeepSeek-R1-Distill-Qwen-14B</td><td>94.9</td><td>88.2</td></tr><tr><td>Qwen3-14B</td><td>93.6</td><td>87.1</td></tr></table>
|
| 311 |
+
|
| 312 |
+
augmented models (o4-mini, Claude-3.5, Qwen3-14B, DeepSeek-R1) achieve higher raw performance than non-reasoning models (GPT-4o, LLaMA). Our categorical verification, however, boosts both categories: for reasoning models, it enforces stricter constraint validity (e.g., o4-mini improves to $98.8\%$ completion, $93.7\%$ cost optimality); for non-reasoning models, it compensates for weaker reasoning depth, lifting LLaMA-3-13B to 91.0/83.3, rivaling much larger models.
|
| 313 |
+
|
| 314 |
+
Scaling effect Larger backbones generally yield better results (LLaMA-3-70B at $92.4\%$ vs. LLaMA-3-8B at $72.7\%$ ), but our framework narrows the scale gap: Qwen3-14B $(93.6\%)$ and DeepSeek-R1 $(94.9\%)$ approach or surpass the performance of GPT-4o and LLaMA-3-70B despite being smaller. This shows that verification amplifies the planning ability of mid-scale reasoning models, making them competitive with much larger non-reasoning backbones.
|
| 315 |
+
|
| 316 |
+
Distance functions Table 3 highlights the role of the planning distance $D$ . Bidirectional search with a learned $D$ achieves the best performance across all datasets, reducing constraint violations on RecipeNLG and boosting action accuracy on Proc2PDDL. However, even a raw metric $D$ (cosine or $L_{2}$ ) performs well, showing that training $D$ improves efficiency but is not essential for correct-
|
| 317 |
+
|
| 318 |
+
Table 3: Impact of different distance functions. All using LLaMA-3-13B unless otherwise noted.
|
| 319 |
+
|
| 320 |
+
<table><tr><td rowspan="2">Method</td><td colspan="2">PlanBench</td><td colspan="2">RecipeNLG</td><td colspan="2">Proc2PDDL</td></tr><tr><td>Comp%</td><td>Cost Opt%</td><td>Res Viol%</td><td>Temp Viol%</td><td>Action Acc%</td><td>PF Solve%</td></tr><tr><td>MCTS + raw D</td><td>40.7</td><td>34.7</td><td>1.9</td><td>18.1</td><td>10.4</td><td>20.1</td></tr><tr><td>MCTS + learned D</td><td>61.2</td><td>57.3</td><td>15.6</td><td>16.3</td><td>16.2</td><td>31.7</td></tr><tr><td>Bidirectional + raw D</td><td>78.3</td><td>75.0</td><td>14.5</td><td>7.3</td><td>51.4</td><td>64.6</td></tr><tr><td>Bidirectional + learned D</td><td>91.0</td><td>83.3</td><td>4.2</td><td>3.8</td><td>57.9</td><td>71.6</td></tr></table>
|
| 321 |
+
|
| 322 |
+
Table 4: Impact of verification on PlanBench.
|
| 323 |
+
|
| 324 |
+
<table><tr><td>Variant</td><td>Comp (%)</td><td>Cost Opt (%)</td></tr><tr><td>With verification</td><td>96.6</td><td>93.5</td></tr><tr><td>Without verification</td><td>59.3</td><td>47.4</td></tr><tr><td>Absolute Difference</td><td>37.3</td><td>46.1</td></tr></table>
|
| 325 |
+
|
| 326 |
+
Table 5: Search strategy comparison on PlanBench for different Plan Length. (P.L.)
|
| 327 |
+
|
| 328 |
+
<table><tr><td>Search Strategy</td><td>Simple (<5 P.L.)</td><td>Complex (>5 P.L.)</td></tr><tr><td>Bidirectional</td><td>98.1%</td><td>84.5%</td></tr><tr><td>LLM-MCTS</td><td>88.3%</td><td>42.8%</td></tr><tr><td>GPT-4</td><td>65.2%</td><td>18.7%</td></tr></table>
|
| 329 |
+
|
| 330 |
+
ness. Verification guarantees validity regardless of distance quality.
|
| 331 |
+
|
| 332 |
+
Impact of verification. Table 4 shows that removing categorical verification reduces completion rates by $37.3\%$ and cost optimality by $46.1\%$ on PlanBench. The verification component ensures physical constraints in block stacking are maintained, preventing invalid moves such as removing blocks that support other blocks. Without verification, the planner generates invalid plans.
|
| 333 |
+
|
| 334 |
+
Search strategy comparison. Table 5 demonstrates the advantage of bidirectional search over alternatives, particularly as problem complexity increases. For complex problems with plan lengths exceeding 5 steps, bidirectional search achieves $84.5\%$ completion, substantially outperforming LLM-MCTS $(42.8\%)$ and LLM-only approaches $(18.7\%)$ . This performance gap widens exponentially with plan length. At 8-step plans, the completion rate difference between bidirectional search and LLM-MCTS increases to 38.9 percentage points. The deterioration in performance for non-bidirectional approaches occurs primarily at decision points requiring long-horizon planning. This confirms our theoretical complexity reduction from $O(b^{L})$ to $O(b^{L / 2})$ translates to practical performance gains on complex planning tasks.
|
| 335 |
+
|
| 336 |
+
These results demonstrate that both category-theoretic verification and bidirectional search contribute significantly to performance. Verification ensures plan validity while bidirectional search enables efficient exploration.
|
| 337 |
+
|
| 338 |
+
# 7 Conclusion
|
| 339 |
+
|
| 340 |
+
We introduced a Neural-Symbolic Task Planning framework integrating LLM-based decomposition with category-theoretic verification for resource-aware planning. By modeling states as categorical objects and operations as morphisms, our approach ensures constraint satisfaction through pullbacks while using bidirectional search for computational efficiency. Experiments across three domains demonstrate significant improvements over existing methods for completion rate and violation reduction. Our results establish category-theoretic verification as a promising approach for neural-symbolic planning in resource-constrained tasks.
|
| 341 |
+
|
| 342 |
+
# 7.1 Limitations
|
| 343 |
+
|
| 344 |
+
Our approach faces challenges with complex temporal dependencies, computational overhead for complex tasks with large state spaces despite the $O(b^{L / 2})$ complexity reduction, and degraded performance when domain knowledge is missing from the LLM's pre-training. Nevertheless, our experiments confirm that neural-symbolic integration substantially improves constraint satisfaction while maintaining natural language flexibility.
|
| 345 |
+
|
| 346 |
+
# Acknowledgments
|
| 347 |
+
|
| 348 |
+
This research is partially supported by Stanford's Center for Sustainable Development and Global Competitiveness (SDGC) and the Yonghua Foundation. The authors would like to thank Dr. Spencer Breiner and Dr. Ram Sriram of the US National Institute of Standards and Technology and Dr. Eswaran Subrahmanian of Carnegie Mellon University for their helpful comments and suggestions.
|
| 349 |
+
|
| 350 |
+
# References
|
| 351 |
+
|
| 352 |
+
Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. 2023. Gpt-4 technical report. arXiv preprint arXiv:2303.08774.
|
| 353 |
+
John C Baez and Blake S Pollard. 2017. A compositional framework for reaction networks. *Reviews in Mathematical Physics*, 29(09):1750028.
|
| 354 |
+
Michal Bien, Michal Gilski, Martyna Maciejewska, Wojciech Taisner, Dawid Wisniewski, and Agnieszka Lawrynowicz. 2020. Recipenlg: A cooking recipes dataset for semi-structured text generation. In Proceedings of the 13th International Conference on Natural Language Generation, pages 22-28.
|
| 355 |
+
Kevin Chen, Marco Cusumano-Towner, Brody Huval, Aleksei Petrenko, Jackson Hamburger, Vladlen Koltun, and Philipp Krahenbuhl. 2025. Reinforcement learning for long-horizon interactive llm agents. arXiv preprint arXiv:2502.01600.
|
| 356 |
+
Gautier Dagan, Frank Keller, and Alex Lascarides. 2023. Dynamic planning with a llm. arXiv preprint arXiv:2308.06391.
|
| 357 |
+
Murtaza Dalal, Tarun Chiruvolu, Devendra Chaplot, and Ruslan Salakhutdinov. 2024. Plan-seq-learn: Language model guided rl for solving long horizon robotics tasks. arXiv preprint arXiv:2405.01534.
|
| 358 |
+
Lauren Nicole DeLong, Ramon Fernández Mir, and Jacques D Fleuriot. 2024. Neurosymbolic ai for reasoning over knowledge graphs: A survey. IEEE Transactions on Neural Networks and Learning Systems.
|
| 359 |
+
Kevin Ellis, Catherine Wong, Maxwell Nye, Mathias Sable-Meyer, Lucas Morales, Luke Hewitt, Luc Cary, Armando Solar-Lezama, and Joshua B Tenenbaum. 2021. Dreamcoder: Bootstrapping inductive program synthesis with wake-sleep library learning. In Proceedings of the 42nd acm sigplan international conference on programming language design and implementation, pages 835-850.
|
| 360 |
+
Elliot Gestrin, Marco Kuhlmann, and Jendrik Seipp. 2024. Nl2plan: Robust llm-driven planning from minimal text descriptions. arXiv preprint arXiv:2405.04215.
|
| 361 |
+
Malik Ghallab, Dana S. Nau, and Paolo Traverso. 2004. Automated Planning: Theory and Practice. Elsevier.
|
| 362 |
+
Aaron Grattafori, Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Alex Vaughan, et al. 2024. The llama 3 herd of models. arXiv preprint arXiv:2407.21783.
|
| 363 |
+
Jiawei Gu, Xuhui Jiang, Zhichao Shi, Hexiang Tan, Xuehao Zhai, Chengjin Xu, Wei Li, Yinghan Shen, Shengjie Ma, Honghao Liu, et al. 2024. A survey on llm-as-a-judge. arXiv preprint arXiv:2411.15594.
|
| 364 |
+
|
| 365 |
+
Yilun Hao, Yang Zhang, and Chuchu Fan. 2024. Planning anything with rigor: General-purpose zero-shot planning with llm-based formalized programming. arXiv preprint arXiv:2410.12112.
|
| 366 |
+
Alex Havrilla, Yuqing Du, Sharath Chandra Raparthy, Christoforos Nalmpantis, Jane Dwivedi-Yu, Maksym Zhuravinskyi, Eric Hambro, Sainbayar Sukhbaatar, and Roberta Raileanu. 2024. Teaching large language models to reason with reinforcement learning. arXiv preprint arXiv:2403.04642.
|
| 367 |
+
Malte Helmert. 2006. The fast downward planning system. Journal of Artificial Intelligence Research, 26:191-246.
|
| 368 |
+
Daniel Höller, Gregor Behnke, Pascal Bercher, Susanne Biundo, Humbert Fiorino, Damien Pellier, and Ron Alford. 2020. Hddl: An extension to pddl for expressing hierarchical planning problems. In Proceedings of the AAAI conference on artificial intelligence, volume 34, pages 9883-9891.
|
| 369 |
+
Wenlong Huang, Pieter Abbeel, Deepak Pathak, and Igor Mordatch. 2022. Language models as zero-shot planners: Extracting actionable knowledge for embodied agents. In International conference on machine learning, pages 9118-9147. PMLR.
|
| 370 |
+
Drew Hudson and Christopher D Manning. 2019. Learning by abstraction: The neural state machine. Advances in neural information processing systems, 32.
|
| 371 |
+
León Illanes, Xi Yan, Rodrigo Toro Icarte, and Sheila A McIlraith. 2020. Symbolic plans as high-level instructions for reinforcement learning. In Proceedings of the international conference on automated planning and scheduling, volume 30, pages 540-550.
|
| 372 |
+
Jeremy Jacob. 1990. Categorising non-interference. In [1990] Proceedings. The Computer Security Foundations Workshop III, pages 44-50. IEEE.
|
| 373 |
+
Xue Jiang, Yihong Dong, Lecheng Wang, Zheng Fang, Qiwei Shang, Ge Li, Zhi Jin, and Wenpin Jiao. 2024. Self-planning code generation with large language models. ACM Transactions on Software Engineering and Methodology, 33(7):1-30.
|
| 374 |
+
Yu-qian Jiang, Shi-qi Zhang, Piyush Khandelwal, and Peter Stone. 2019. Task planning in robotics: an empirical comparison of pddl-and asp-based systems. Frontiers of Information Technology & Electronic Engineering, 20:363-373.
|
| 375 |
+
Subbarao Kambhampati, Karthik Valmeekam, Lin Guan, Mudit Verma, Kaya Stechly, Siddhant Bhambri, Lucas Paul Saldyt, and Anil B Murthy. 2024. Position: Llms can't plan, but can help planning in llm-modulo frameworks. In *Forty-first International Conference on Machine Learning*.
|
| 376 |
+
Michael Katz, Harsha Kokel, Kavitha Srinivas, and Shirin Sohrabi Araghi. 2024. Thought of search: Planning with language models through the lens of efficiency. Advances in Neural Information Processing Systems, 37:138491-138568.
|
| 377 |
+
|
| 378 |
+
Bo Liu, Yuqian Jiang, Xiaohan Zhang, Qiang Liu, Shiqi Zhang, Joydeep Biswas, and Peter Stone. 2023. Llm+ p: Empowering large language models with optimal planning proficiency. arXiv preprint arXiv:2304.11477.
|
| 379 |
+
Jiayuan Mao, Chuang Gan, Pushmeet Kohli, Joshua B Tenenbaum, and Jiajun Wu. 2019. The neurosymbolic concept learner: Interpreting scenes, words, and sentences from natural supervision. arXiv preprint arXiv:1904.12584.
|
| 380 |
+
Vishal Pallagani, Bharath Muppasani, Keerthiram Murugesan, Francesca Rossi, Lior Horesh, Biplav Srivastava, Francesco Fabiano, and Andrea Loreggia. 2022. Plansformer: Generating symbolic plans using transformers. arXiv preprint arXiv:2212.08681.
|
| 381 |
+
Benjamin C Pierce. 1991. Basic category theory for computer scientists. MIT press.
|
| 382 |
+
Silvia Richter and Matthias Westphal. 2010. The lama planner: Guiding cost-based anytime planning with landmarks. Journal of Artificial Intelligence Research, 39:127-177.
|
| 383 |
+
David E Rydeheard and Rod M Burstall. 1988. Computational category theory, volume 152. Prentice Hall Englewood Cliffs.
|
| 384 |
+
Dhruv Shah, Michael Robert Equi, Błażej Osiński, Fei Xia, Brian Ichter, and Sergey Levine. 2023. Navigation with large language models: Semantic guesswork as a heuristic for planning. In Conference on Robot Learning, pages 2683-2699. PMLR.
|
| 385 |
+
Noah Shinn, Federico Cassano, Ashwin Gopinath, Karthik Narasimhan, and Shunyu Yao. 2023. Reflexion: Language agents with verbal reinforcement learning. Advances in Neural Information Processing Systems, 36:8634-8652.
|
| 386 |
+
Pavel Smirnov, Frank Joublin, Antonello Ceravola, and Michael Gienger. 2024. Generating consistent pddl domains with large language models. arXiv preprint arXiv:2404.07751.
|
| 387 |
+
Chan Hee Song, Jiaman Wu, Clayton Washington, Brian M Sadler, Wei-Lun Chao, and Yu Su. 2023. Llm-planner: Few-shot grounded planning for embodied agents with large language models. In Proceedings of the IEEE/CVF international conference on computer vision, pages 2998-3009.
|
| 388 |
+
Katharina Stein, Daniel Fiser, Jörg Hoffmann, and Alexander Koller. 2023. Autoplanbench: Automatically generating benchmarks for llm planners from pddl. arXiv preprint arXiv:2311.09830.
|
| 389 |
+
Gaurav Suri, Lily R Slater, Ali Ziaee, and Morgan Nguyen. 2024. Do large language models show decision heuristics similar to humans? a case study using gpt-3.5. Journal of Experimental Psychology: General, 153(4):1066.
|
| 390 |
+
|
| 391 |
+
Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothee Lacroix, Baptiste Roziere, Naman Goyal, Eric Hambro, Faisal Azhar, et al. 2023. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971.
|
| 392 |
+
Karthik Valmeekam, Matthew Marquez, Sarath Sreedharan, and Subbarao Kambhampati. 2023. On the planning abilities of large language models-a critical investigation. Advances in Neural Information Processing Systems, 36:75993-76005.
|
| 393 |
+
Karthik Valmeekam, Alberto Olmo, Sarath Sreedharan, and Subbarao Kambhampati. 2022. Large language models still can't plan (a benchmark for llms on planning and reasoning about change). In NeurIPS 2022 Foundation Models for Decision Making Workshop.
|
| 394 |
+
Karthik Valmeekam, Kaya Stechly, and Subbarao Kambhampati. 2024. Llms still can't plan; can lrms? a preliminary evaluation of openai's o1 on planbench. arXiv preprint arXiv:2409.13373.
|
| 395 |
+
Robert Frank Carslaw Walters and Richard F Walters. 1991. Categories and computer science. Cambridge University Press.
|
| 396 |
+
Kevin Wang, Junbo Li, Neel P Bhatt, Yihan Xi, Qiang Liu, Ufuk Topcu, and Zhangyang Wang. 2024. On the planning abilities of openai's o1 models: Feasibility, optimality, and generalizability. arXiv preprint arXiv:2409.19924.
|
| 397 |
+
Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. 2022. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, 35:24824-24837.
|
| 398 |
+
Zirui Wu, Xiao Liu, Jiayi Li, Lingpeng Kong, and Yansong Feng. 2025. Haste makes waste: Evaluating planning abilities of llms for efficient and feasible multitasking with time constraints between actions. arXiv preprint arXiv:2503.02238.
|
| 399 |
+
Shufang Xie, Rui Yan, Peng Han, Yingce Xia, Lijun Wu, Chenjuan Guo, Bin Yang, and Tao Qin. 2022. Retrograph: Retrosynthetic planning with graph search. In Proceedings of the 28th ACM SIGKDD Conference on Knowledge Discovery and Data Mining, pages 2120-2129.
|
| 400 |
+
Binfeng Xu, Zhiyuan Peng, Bowen Lei, Subhabrata Mukherjee, Yuchen Liu, and Dongkuan Xu. 2023. Rewoo: Decoupling reasoning from observations for efficient augmented language models. arXiv preprint arXiv:2305.18323.
|
| 401 |
+
Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Tom Griffiths, Yuan Cao, and Karthik Narasimhan. 2023a. Tree of thoughts: Deliberate problem solving with large language models. Advances in neural information processing systems, 36:11809-11822.
|
| 402 |
+
|
| 403 |
+
Shunyu Yao, Jeffrey Zhao, Dian Yu, Nan Du, Izhak Shafran, Karthik Narasimhan, and Yuan Cao. 2023b. React: Synergizing reasoning and acting in language models. In International Conference on Learning Representations (ICLR).
|
| 404 |
+
Kevin Yu, Jihye Roh, Ziang Li, Wenhao Gao, Runzhong Wang, and Connor Coley. 2024. Double-ended synthesis planning with goal-constrained bidirectional search. Advances in Neural Information Processing Systems, 37:112919-112949.
|
| 405 |
+
Zhen Zeng, William Watson, Nicole Cho, Saba Rahimi, Shayleen Reynolds, Tucker Balch, and Manuela Veloso. 2023. Flowmind: automatic workflow generation with llms. In Proceedings of the Fourth ACM International Conference on AI in Finance, pages 73-81.
|
| 406 |
+
Dan Zhang, Sining Zhoubian, Ziniu Hu, Yisong Yue, Yuxiao Dong, and Jie Tang. 2024a. ReST-MCTS*: LLM self-training via process reward guided tree search. Advances in Neural Information Processing Systems, 37:64735-64772.
|
| 407 |
+
Shun Zhang, Zhenfang Chen, Yikang Shen, Mingyu Ding, Joshua B Tenenbaum, and Chuang Gan. 2023. Planning with large language models for code generation. arXiv preprint arXiv:2303.05510.
|
| 408 |
+
Tianyi Zhang, Li Zhang, Zhaoyi Hou, Ziyu Wang, Yuling Gu, Peter Clark, Chris Callison-Burch, and Niket Tandon. 2024b. Proc2PDDL: Open-domain planning representations from texts. arXiv preprint arXiv:2403.00092.
|
| 409 |
+
Zirui Zhao, Wee Sun Lee, and David Hsu. 2023. Large language models as commonsense knowledge for large-scale task planning. Advances in Neural Information Processing Systems, 36:31967-31987.
|
| 410 |
+
Chujie Zheng, Zhenru Zhang, Beichen Zhang, Runji Lin, Keming Lu, Bowen Yu, Dayiheng Liu, Jingren Zhou, and Junyang Lin. 2024. Processbench: Identifying process errors in mathematical reasoning. arXiv preprint arXiv:2412.06559.
|
| 411 |
+
|
| 412 |
+
# A Formal Problem Statement
|
| 413 |
+
|
| 414 |
+
Here, we show the formal problem statement.
|
| 415 |
+
|
| 416 |
+
# A.1 Category-Theoretic Planning Framework
|
| 417 |
+
|
| 418 |
+
We formalize planning as a category-theoretic problem where states are objects and operations are morphisms. Each state captures resource usage, active constraints, symbolic progress, and temporal allocations. The morphisms represent valid transitions/operations that preserve these properties through constraint verification.
|
| 419 |
+
|
| 420 |
+
Definition A.1 (Planning Domain). A planning domain consists of:
|
| 421 |
+
|
| 422 |
+
- A set of resource types $I$ , where each type $i \in I$ has an associated ordered monoid $(R_i, +_i, \leq_i, 0_i)$ and capacity bound $r_{i,\max}$
|
| 423 |
+
- A set of symbolic states $S$ with a directed graph $G_{S} = (S, E_{S})$ of valid transitions
|
| 424 |
+
- A set of logical constraints $\mathcal{L}$ expressed as predicates over resources, states, and temporal properties
|
| 425 |
+
- A temporal framework $\mathcal{T}$ for representing time intervals and precedence relations
|
| 426 |
+
|
| 427 |
+
Definition A.2 (Planning Category). Let $\mathcal{T}$ be a category whose objects are hybrid task states $w = (r, s, l, t)$ where:
|
| 428 |
+
|
| 429 |
+
- $r \in R = \prod_{i \in I} R_i$ represents resource configuration, with each component $r[i] \leq r_{i,\max}$ respecting capacity bounds
|
| 430 |
+
- $s \in S$ is a discrete symbolic state from the state transition graph $G_{S}$ .
|
| 431 |
+
- $l \in L = \{0, 1\}^{k}$ is a boolean vector encoding $k$ logical constraints, where $l[j] = 1$ indicates constraint $j$ is satisfied
|
| 432 |
+
- $t \in T \subseteq \mathbb{R}^+ \times \mathbb{R}^+ \times \mathcal{P}(I)$ represents temporal intervals $[t_{start}, t_{end}]$ and scheduling constraints over a set of interval relations $I$
|
| 433 |
+
|
| 434 |
+
Definition A.3 (Morphism). A morphism $f: w_1 \to w_2$ in $\mathcal{T}$ transforms state $w_1 = (r_1, s_1, l_1, t_1)$ to state $w_2 = (r_2, s_2, l_2, t_2)$ while preserving categorical constraints. The transformation is characterized by component functions $(f_r, f_s, f_l, f_t)$ that may depend on all aspects of the input state, ensuring:
|
| 435 |
+
|
| 436 |
+
- Resource validity: $r_2 = f_r(r_1, s_1, l_1, t_1)$ where $r_2[i] \leq r_{i,\max}$ for all resource types $i$ . Resource transformations respect the properties of the underlying ordered monoids.
|
| 437 |
+
- State transitions: $s_2 = f_s(r_1, s_1, l_1, t_1)$ such that $(s_1, s_2) \in E_S$ is an edge in the state transition graph, and all preconditions for the transition are satisfied.
|
| 438 |
+
- Constraint satisfaction: $l_{2} = f_{l}(r_{1}, s_{1}, l_{1}, t_{1})$ where:
|
| 439 |
+
|
| 440 |
+
- Invariant constraints remain satisfied: if $l_{1}[j]$ is an invariant and $l_{1}[j] = 1$ then $l_{2}[j] = 1$
|
| 441 |
+
- Postcondition constraints may be established: $l_{2}$ may have additional satisfied constraints
|
| 442 |
+
- Precondition constraints are checked before applying the morphism
|
| 443 |
+
|
| 444 |
+
- Temporal consistency: $t_2 = f_t(r_1, s_1, l_1, t_1)$ preserves precedence relations and ensures non-overlapping intervals for mutually exclusive operations.
|
| 445 |
+
|
| 446 |
+
Each morphism has an associated probability $p(f) \in [0,1]$ reflecting its empirical success rate. Morphism composition $g \circ f$ is valid if and only if all component functions compose and preserve the above constraints.
|
| 447 |
+
|
| 448 |
+
As shown in the methodology, in our neural-symbolic framework, morphisms are generated by an LLM conditioned on the current state. The details are explained in the next section.
|
| 449 |
+
|
| 450 |
+
The power of our framework comes from compositional verification using categorical pullbacks:
|
| 451 |
+
|
| 452 |
+
Definition A.4 (Pullback). Given morphisms $f: A \to C$ and $g: B \to C$ in category $\mathcal{T}$ , a pullback consists of:
|
| 453 |
+
|
| 454 |
+
- An object $P$ (the pullback object)
|
| 455 |
+
- Morphisms $p_1: P \to A$ and $p_2: P \to B$
|
| 456 |
+
|
| 457 |
+
such that $f \circ p_1 = g \circ p_2$ (i.e., both paths from $P$ to $C$ yield the same result), forming a commutative square. Furthermore, for any other object $Q$ with morphisms $q_1: Q \to A$ and $q_2: Q \to B$ satisfying $f \circ q_1 = g \circ q_2$ , there exists a unique morphism $u: Q \to P$ such that $p_1 \circ u = q_1$ and $p_2 \circ u = q_2$ (i.e., the morphism $u$ preserves all path relationships).
|
| 458 |
+
|
| 459 |
+
Lemma A.5 (Pullback Structure). Given morphisms $f: A \to C$ and $g: B \to C$ in category $\mathcal{T}$ , if a pullback exists, it is an object $P$ with morphisms $p_1: P \to A$ and $p_2: P \to B$ such that:
|
| 460 |
+
|
| 461 |
+
- $P = (r_P, s_P, l_P, t_P)$ where:
|
| 462 |
+
- $r_P$ satisfies $p_{1r}(r_P) = r_A$ and $p_{2r}(r_P) = r_B$
|
| 463 |
+
- $s_P$ is a symbolic state with valid transitions to both $s_A$ and $s_B$
|
| 464 |
+
- $l_{P}^{inv}$ preserves all invariant constraints from both $l_{A}^{inv}$ and $l_{B}^{inv}$
|
| 465 |
+
- $t_P$ is a valid refinement of both $t_A$ and $t_B$
|
| 466 |
+
|
| 467 |
+
- The diagram commutes: $f \circ p_1 = g \circ p_2$
|
| 468 |
+
- For any object $Q$ with morphisms $q_{1}: Q \to A$ and $q_{2}: Q \to B$ satisfying $f \circ q_{1} = g \circ q_{2}$ , there exists a unique morphism $u: Q \to P$ such that $p_{1} \circ u = q_{1}$ and $p_{2} \circ u = q_{2}$
|
| 469 |
+
|
| 470 |
+
Theorem A.6 (Plan Compatibility Characterization). Given morphisms $f: A \to C$ and $g: B \to C$ in category $\mathcal{T}$ :
|
| 471 |
+
|
| 472 |
+
1. If a pullback of $f$ and $g$ exists, then the plans represented by $f$ and $g$ are compatible, meaning:
|
| 473 |
+
|
| 474 |
+
- Resource usage from both plans can be combined without exceeding capacity limits
|
| 475 |
+
- All invariant logical constraints from both plans can be simultaneously satisfied
|
| 476 |
+
- Time intervals from both plans can be merged without violating precedence constraints
|
| 477 |
+
|
| 478 |
+
2. Conversely, if no pullback exists, then the plans are incompatible with respect to at least one of these constraint types.
|
| 479 |
+
|
| 480 |
+
Constructively, given states $w_{A}$ and $w_{B}$ with morphisms to common state $w_{C}$ , the pullback state $w_{P} = (r_{P}, s_{P}, l_{P}, t_{P})$ can be computed as:
|
| 481 |
+
|
| 482 |
+
- Resources: For each resource type $i$ , $r_P[i]$ is a minimum valid configuration that maps to both $r_A[i]$ and $r_B[i]$ through the respective morphisms
|
| 483 |
+
- Logical state: $l_{P}[j] = l_{A}[j] \wedge l_{B}[j]$ for invariant constraints (logical AND)
|
| 484 |
+
- Temporal windows: $t_P = t_A \cap t_B$ (interval intersection) when non-empty
|
| 485 |
+
- Symbolic state: A valid state $s_P$ with transitions to both $s_A$ and $s_B$ in the state graph $G_S$ .
|
| 486 |
+
|
| 487 |
+
Definition A.7 (Planning Problem). Given:
|
| 488 |
+
|
| 489 |
+
- Initial state $w_{0} = (r_{0}, s_{0}, l_{0}, t_{0})$ with available resources and constraints
|
| 490 |
+
- Goal specification $w^{*} = (r^{*}, s^{*}, l^{*}, t^{*})$ defining desired properties
|
| 491 |
+
|
| 492 |
+
Find a sequence of morphisms in $\mathcal{T}$ for a planning category:
|
| 493 |
+
|
| 494 |
+
$$
|
| 495 |
+
w _ {0} \xrightarrow {f _ {1}} w _ {1} \xrightarrow {f _ {2}} \dots \xrightarrow {f _ {n - 1}} w _ {n - 1} \xrightarrow {f _ {n}} w _ {n}
|
| 496 |
+
$$
|
| 497 |
+
|
| 498 |
+
such that each intermediate state $w_{i}$ remains valid under the categorical constraints, and $w_{n}$ satisfies or exceeds the criteria in $w^{*}$ .
|
| 499 |
+
|
| 500 |
+
While LLMs can generate candidate morphisms, they may produce invalid or inconsistent operations. Our framework addresses this by integrating LLM-based generation with category-theoretic verification. For single operations (unary morphisms), we directly verify constraint satisfaction. For combining plan fragments (binary morphisms), we use pullbacks to ensure compositional validity.
|
| 501 |
+
|
| 502 |
+
# B Proof
|
| 503 |
+
|
| 504 |
+
# B.1 Plan Composition
|
| 505 |
+
|
| 506 |
+
Proof. Let $f: A \to C$ and $g: B \to C$ be morphisms in our planning category $\mathcal{T}$ , where states are represented as $w = (r, s, l, t)$ . Let the pullback object be $P$ with projections $p_1: P \to A$ and $p_2: P \to B$ such that $f \circ p_1 = g \circ p_2$ . We prove each guarantee in turn:
|
| 507 |
+
|
| 508 |
+
1. Resource Compatibility: By definition, the resource component of our states is represented as a vector $r \in R \subseteq \mathbb{R}^n$ subject to capacity constraints. For valid morphisms $f$ and $g$ , we have:
|
| 509 |
+
|
| 510 |
+
$$
|
| 511 |
+
f_{r}(r_{A}) = r_{C} \quad \text{and} \quad g_{r}(r_{B}) = r_{C}
|
| 512 |
+
$$
|
| 513 |
+
|
| 514 |
+
Let the resource component at the pullback be $r_P$ . By the universal property of pullbacks, $r_P$ must map to both $r_A$ and $r_B$ through $p_{1r}$ and $p_{2r}$ respectively:
|
| 515 |
+
|
| 516 |
+
$$
|
| 517 |
+
p_{1r}(r_{P}) = r_{A} \quad \text{and} \quad p_{2r}(r_{P}) = r_{B}
|
| 518 |
+
$$
|
| 519 |
+
|
| 520 |
+
For these mappings to exist, $r_P$ must satisfy the resource constraints of both plans. Since resource transformations in our category are monotonic (resources can only be consumed, not created), $r_P$ must contain at least the maximum resource requirements of both plans. Formally:
|
| 521 |
+
|
| 522 |
+
$$
|
| 523 |
+
r_{P}[i] \geq \max\left(r_{A}[i], r_{B}[i]\right) \quad \text{for each resource dimension } i
|
| 524 |
+
$$
|
| 525 |
+
|
| 526 |
+
Given that $f$ and $g$ are valid morphisms respecting capacity constraints, we know:
|
| 527 |
+
|
| 528 |
+
$$
|
| 529 |
+
r_{A}[i] \leq r_{\max}[i] \quad \text{and} \quad r_{B}[i] \leq r_{\max}[i]
|
| 530 |
+
$$
|
| 531 |
+
|
| 532 |
+
Therefore:
|
| 533 |
+
|
| 534 |
+
$$
|
| 535 |
+
r_{P}[i] \leq r_{\max}[i] \quad \text{for all } i
|
| 536 |
+
$$
|
| 537 |
+
|
| 538 |
+
Thus, the combined resource usage at $P$ remains within capacity constraints.
|
| 539 |
+
|
| 540 |
+
2. Logical Consistency: Let the logical constraint vectors be $l_{A}, l_{B}$ , and $l_{P}$ for states $A, B$ , and $P$ respectively. Valid morphisms in our category must preserve satisfied constraints monotonically, meaning:
|
| 541 |
+
|
| 542 |
+
$$
|
| 543 |
+
\text{If } l_{A}[j] = 1, \text{ then } l_{C}[j] = 1
|
| 544 |
+
$$
|
| 545 |
+
|
| 546 |
+
$$
|
| 547 |
+
\text{If } l_{B}[j] = 1, \text{ then } l_{C}[j] = 1
|
| 548 |
+
$$
|
| 549 |
+
|
| 550 |
+
For the pullback object $P$ , the logical constraints must be consistent with both $A$ and $B$ . Since constraint satisfaction is preserved by morphisms, $l_{P}$ must satisfy:
|
| 551 |
+
|
| 552 |
+
$$
|
| 553 |
+
\text{If } l_{P}[j] = 1, \text{ then } l_{A}[j] = 1 \text{ and } l_{B}[j] = 1
|
| 554 |
+
$$
|
| 555 |
+
|
| 556 |
+
Conversely, if a constraint is satisfied in both $A$ and $B$ , it must be satisfied in $P$ :
|
| 557 |
+
|
| 558 |
+
$$
|
| 559 |
+
\text{If } l_{A}[j] = 1 \text{ and } l_{B}[j] = 1, \text{ then } l_{P}[j] = 1
|
| 560 |
+
$$
|
| 561 |
+
|
| 562 |
+
This construction ensures that $l_{P}$ preserves all constraints satisfied in both $A$ and $B$ , while not introducing any new constraints that would create inconsistencies when mapped to either $A$ or $B$ .
|
| 563 |
+
|
| 564 |
+
3. Temporal Coherence: For the temporal component, let $t_A = [t_A.start, t_A.end]$ , $t_B = [t_B.start, t_B.end]$ , and $t_P = [t_P.start, t_P.end]$ represent the time intervals for states $A$ , $B$ , and $P$ respectively. Valid morphisms in our category must preserve temporal ordering and non-overlapping constraints. For the pullback to exist, the time intervals must be compatible, meaning there exists a valid time interval $t_P$ that can be mapped to both $t_A$ and $t_B$ while preserving ordering constraints. The most general such interval is the intersection:
|
| 565 |
+
|
| 566 |
+
$$
|
| 567 |
+
t_{P}.\mathrm{start} = \max\left(t_{A}.\mathrm{start},\, t_{B}.\mathrm{start}\right)
|
| 568 |
+
$$
|
| 569 |
+
|
| 570 |
+
$$
|
| 571 |
+
t_{P}.\mathrm{end} = \min\left(t_{A}.\mathrm{end},\, t_{B}.\mathrm{end}\right)
|
| 572 |
+
$$
|
| 573 |
+
|
| 574 |
+
For this interval to be valid, we must have $t_P.start \leq t_P.end$ , which is guaranteed when $t_A$ and $t_B$ have a non-empty intersection. When no such intersection exists, the pullback does not exist, correctly indicating that the plans cannot be composed with respect to their temporal constraints. For the precedence relations in $\mathcal{P}(\mathcal{I})$ , the pullback preserves all shared precedence constraints between $t_A$ and $t_B$ . Any precedence relation satisfied in both partial plans will be preserved in the pullback. Thus, when a pullback exists, the time intervals from both plans can be merged without temporal conflicts. Therefore, the existence of a pullback $P$ for morphisms $f: A \to C$ and $g: B \to C$ guarantees resource compatibility, logical consistency, and temporal coherence of the composed plan.
|
| 575 |
+
|
| 576 |
+
# B.2 Reachability
|
| 577 |
+
|
| 578 |
+
Proof of Theorem 4.1 ( $\epsilon$ -Reachability). Let $w_{1} = (r_{1}, s_{1}, l_{1}, t_{1})$ and $w_{2} = (r_{2}, s_{2}, l_{2}, t_{2})$ be states in $W$ with $D(w_{1}, w_{2}) < \epsilon$ , where $\epsilon$ is sufficiently small. We show the existence of a sequence of valid morphisms $f_{1}, f_{2}, \ldots, f_{k}$ such that $f_{k} \circ \dots \circ f_{1}(w_{1}) = w_{2}$ where $k \leq \lceil 1 / \epsilon \rceil$ .
|
| 579 |
+
|
| 580 |
+
We construct a sequence of intermediate states $\{w_{1} = \tilde{w}_{0},\tilde{w}_{1},\ldots ,\tilde{w}_{k} = w_{2}\}$ and corresponding morphisms $f_{i}:\tilde{w}_{i - 1}\to \tilde{w}_{i}$ such that each transition is valid according to our category definition.
|
| 581 |
+
|
| 582 |
+
**Construction:** Let $p:[0,1] \to W$ be a continuous path such that $p(0) = w_1$ and $p(1) = w_2$ , where $w_1$ and $w_2$ are states in our state space $W$ . Such a path exists since the state components:
|
| 583 |
+
|
| 584 |
+
- Resources $r$ and temporal components $t$ are continuous
|
| 585 |
+
- Symbolic states $\phi(s)$ are continuous and connected by valid transition functions.
|
| 586 |
+
- Logical constraints $l$ that can be updated monotonically
|
| 587 |
+
|
| 588 |
+
We partition $[0,1]$ into $\lceil 1 / \epsilon \rceil$ equal intervals and define intermediate states:
|
| 589 |
+
|
| 590 |
+
$$
|
| 591 |
+
\tilde{w}_{i} = p\left(\frac{i}{\lceil 1/\epsilon \rceil}\right) \quad \text{for } i = 0, 1, \dots, \lceil 1/\epsilon \rceil
|
| 592 |
+
$$
|
| 593 |
+
|
| 594 |
+
Validity of Transitions: For each pair of consecutive states $\tilde{w}_{i-1}$ and $\tilde{w}_i$ , we have:
|
| 595 |
+
|
| 596 |
+
$$
|
| 597 |
+
D \left(\tilde {w} _ {i - 1}, \tilde {w} _ {i}\right) \leq \frac {D \left(w _ {1} , w _ {2}\right)}{\lceil 1 / \epsilon \rceil} < \frac {\epsilon}{\lceil 1 / \epsilon \rceil} \leq \epsilon^ {\prime}
|
| 598 |
+
$$
|
| 599 |
+
|
| 600 |
+
We now verify that there exists a valid morphism $f_{i}:\tilde{w}_{i - 1}\to \tilde{w}_{i}$ for each pair:
|
| 601 |
+
|
| 602 |
+
1. Resource Component: For resources, let $\tilde{r}_{i-1}$ and $\tilde{r}_i$ be the resource vectors of $\tilde{w}_{i-1}$ and $\tilde{w}_i$ . $||\tilde{r}_{i-1} - \tilde{r}_i|| \leq \frac{||r_1 - r_2||}{\lceil 1/\epsilon \rceil} < \frac{\epsilon / \alpha_r}{\lceil 1/\epsilon \rceil}$ is sufficiently small, given a sufficiently small $\epsilon$ . We can thus define a valid resource transformation $f_{ir}(\tilde{r}_{i-1}) = \tilde{r}_i$ that respects capacity bounds.
|
| 603 |
+
2. **Symbolic State:** For symbolic states, $\tilde{s}_{i-1}$ and $\tilde{s}_i$ , the distance $||\phi_s(\tilde{s}_{i-1}) - \phi_s(\tilde{s}_i)|| < \frac{\epsilon/\alpha_s}{\lceil 1/\epsilon \rceil}$ . Given a sufficiently small $\epsilon$ , either $\tilde{s}_{i-1} = \tilde{s}_i$ or there exists a direct valid transition between them.
|
| 604 |
+
|
| 605 |
+
3. Logical Constraints: For logical constraints $\tilde{l}_{i-1}$ and $\tilde{l}_i$ , we have $||\phi_l(\tilde{l}_{i-1}) - \phi_l(\tilde{l}_i)|| < \frac{\epsilon / \alpha_l}{\lceil 1/\epsilon \rceil}$ . Given the monotonicity requirement (constraints can only be added, not removed), we ensure that each intermediate state only adds constraints that are satisfied in $w_2$ . In other words, for sufficiently small $\epsilon$ , at most one constraint changes per step.
|
| 606 |
+
|
| 607 |
+
4. Temporal Component: For temporal components $\tilde{t}_{i-1}$ and $\tilde{t}_i$ , we have $||\phi_t(\tilde{t}_{i-1}) - \phi_t(\tilde{t}_i)|| < \frac{\epsilon / \alpha_t}{\lceil 1/\epsilon \rceil}$ . Since temporal changes must preserve precedence relations and scheduling constraints, we define the transformation to gradually adjust time intervals while maintaining these properties.
|
| 608 |
+
|
| 609 |
+
Composition of Morphisms: We define each morphism $f_{i}:\tilde{w}_{i - 1}\to \tilde{w}_{i}$ as the tuple:
|
| 610 |
+
|
| 611 |
+
$$
|
| 612 |
+
f _ {i} = \left(f _ {i r}, f _ {i s}, f _ {i l}, f _ {i t}\right)
|
| 613 |
+
$$
|
| 614 |
+
|
| 615 |
+
Each component function is constructed to ensure the validity conditions of our category. By the category axioms, each $f_{i}$ is a valid morphism in $\mathcal{T}$ .
|
| 616 |
+
|
| 617 |
+
Plan Length: The total number of morphisms in our constructed sequence is $k = \lceil 1 / \epsilon \rceil$ , and the composition $f_{k} \circ \dots \circ f_{1}$ transforms $w_{1}$ into $w_{2}$ as required.
|
| 618 |
+
|
| 619 |
+
Therefore, for any two states $w_{1}, w_{2} \in W$ with $D(w_{1}, w_{2}) < \epsilon$ , there exists a sequence of at most $\lceil 1 / \epsilon \rceil$ valid morphisms connecting them.
|
| 620 |
+
|
| 621 |
+
# B.3 Completeness
|
| 622 |
+
|
| 623 |
+
Proof of Theorem 4.2 (Completeness). We need to prove that if a valid plan exists between initial state $w_0$ and goal state $w^*$ , then our bidirectional search algorithm will find it.
|
| 624 |
+
|
| 625 |
+
Step 1: Plan Existence and State Space Coverage. Let $P^{*} = \{f_{1}, f_{2}, \ldots, f_{n}\}$ be a valid plan from $w_{0}$ to $w^{*}$ , where each $f_{i}$ is a morphism in our category $\mathcal{T}$ . This plan induces a sequence of states $w_{0}, w_{1}, w_{2}, \ldots, w_{n} = w^{*}$ where $w_{i} = f_{i}(w_{i-1})$ .
|
| 626 |
+
|
| 627 |
+
Given our distance metric $D$ , we can choose $\epsilon > 0$ such that any state in our search space is within $\epsilon$ -distance of at least one state in the optimal plan $P^{*}$ . This is possible because:
|
| 628 |
+
|
| 629 |
+
1. The resource space $R$ is bounded by capacity constraints
|
| 630 |
+
2. The symbolic state space $S$ is finite
|
| 631 |
+
3. The logical constraint space $L$ is finite (with $2^k$ possible configurations)
|
| 632 |
+
4. The temporal space $T$ has bounded time windows
|
| 633 |
+
|
| 634 |
+
Therefore, we can construct a finite covering of the state space with $\epsilon$ -balls centered on states in the optimal plan.
|
| 635 |
+
|
| 636 |
+
Step 2: Bidirectional Search Properties. Our bidirectional search algorithm maintains two search graphs:
|
| 637 |
+
|
| 638 |
+
1. $\mathcal{G}^F$ expanding forward from $w_0$
|
| 639 |
+
2. $\mathcal{G}^B$ expanding backward from $w^{*}$
|
| 640 |
+
|
| 641 |
+
We use a planning distance function $D$ to guide expansions, where $\mathrm{val}^F(w) = V(w) + \min_{\gamma} D(w, \gamma)$ and $\mathrm{val}^B(w) = V(w) + \min_{\gamma} D(\gamma, w)$ .
|
| 642 |
+
|
| 643 |
+
At each iteration, the algorithm:
|
| 644 |
+
|
| 645 |
+
1. Selects the most promising state to expand from each frontier
|
| 646 |
+
2. Expands valid operators from these states
|
| 647 |
+
3. Attempts to merge partial plans via pullback checks
|
| 648 |
+
|
| 649 |
+
Step 3: Forward Reachability. We first show that all states in the optimal plan $P^{*}$ are eventually reached by the forward search.
|
| 650 |
+
|
| 651 |
+
For each state $w_{i}$ in the optimal plan, let $V^{*}(w_{i})$ be the true optimal cost to reach $w_{i}$ from $w_{0}$ and $V(w_{i})$ be our algorithm's current estimate of this cost.
|
| 652 |
+
|
| 653 |
+
We claim that for each $w_{i}$ , there exists a time when a state $w^{\prime}$ with $D(w^{\prime},w_{i}) < \epsilon$ enters the forward frontier $\mathcal{F}^{F}$ .
|
| 654 |
+
|
| 655 |
+
Proof by induction:
|
| 656 |
+
|
| 657 |
+
1. Base case: $w_0$ is in $\mathcal{F}^F$ initially
|
| 658 |
+
2. Inductive step: Assume $w_{i-1}'$ with $D(w_{i-1}', w_{i-1}) < \epsilon$ is in $\mathcal{F}^F$
|
| 659 |
+
3. By Theorem 4.1, there exists a sequence of valid operators from $w_{i-1}'$ to a state $w_i'$ with $D(w_i', w_i) < \epsilon$ .
|
| 660 |
+
4. Since our algorithm expands all valid operators from frontier states, $w_{i}^{\prime}$ will eventually enter $\mathcal{F}^{F}$
|
| 661 |
+
|
| 662 |
+
Therefore, the forward search eventually reaches a state near each state in the optimal plan.
|
| 663 |
+
|
| 664 |
+
Step 4: Backward Reachability. Similarly, for the backward search, all states in the optimal plan are eventually reached by the backward search. For each state $w_{i}$ in the optimal plan, there exists a time when a state $w''$ with $D(w'', w_{i}) < \epsilon$ enters the backward frontier $\mathcal{F}^{B}$ .
|
| 665 |
+
|
| 666 |
+
Step 5: Meeting of Frontiers. Given Steps 3 and 4, there will eventually be states $w_{i}^{\prime} \in \mathcal{F}^{F}$ and $w_{j}^{\prime \prime} \in \mathcal{F}^{B}$ such that:
|
| 667 |
+
|
| 668 |
+
1. $D(w_{i}^{\prime},w_{i}) < \epsilon$
|
| 669 |
+
2. $D(w_{j}^{\prime \prime},w_{j}) < \epsilon$
|
| 670 |
+
3. $|i - j| \leq 1$ (the states are adjacent in the optimal plan)
|
| 671 |
+
|
| 672 |
+
Step 6: Pullback Existence. Given that our states $w_{i}^{\prime}$ and $w_{j}^{\prime \prime}$ are near adjacent states in the optimal plan, and that the optimal plan respects all constraints, a pullback exists that allows the composition of the forward and backward plans.
|
| 673 |
+
|
| 674 |
+
The existence of this pullback ensures that our algorithm can merge the partial plans to form a complete plan from $w_0$ to $w^*$ .
|
| 675 |
+
|
| 676 |
+
Step 7: Termination. Since our state space is finite under resource bounds and our algorithm systematically explores the space guided by the planning distance $D$ , it will eventually discover the merger point where the pullback exists.
|
| 677 |
+
|
| 678 |
+
Therefore, if a valid plan exists, our bidirectional search algorithm will find it.
|
| 679 |
+
|
| 680 |
+
# B.4 Probabilistic Completeness Theorem
|
| 681 |
+
|
| 682 |
+
Proof of Theorem 4.3 (Probabilistic Completeness). We need to prove that under bounded resources and finite constraints, the probability of finding a valid plan within $n$ steps is at least $1 - e^{-\lambda n}$ for some constant $\lambda > 0$ .
|
| 683 |
+
|
| 684 |
+
This proof addresses the stochastic nature of LLM-generated operators, which introduces uncertainty into the planning process. While our category-theoretic verification ensures that operators are valid when applied, the generation of candidate operators by the LLM involves randomness.
|
| 685 |
+
|
| 686 |
+
Probabilistic Model: Let us define the following:
|
| 687 |
+
|
| 688 |
+
- $P^{*}$ is a valid plan from initial state $w_{0}$ to goal state $w^{*}$ , known to exist by assumption.
|
| 689 |
+
- $p_{\mathrm{min}}$ is the minimum probability that the LLM generates a valid operator at any given step of the plan.
|
| 690 |
+
- At each step, the LLM may generate multiple candidate operators, but our focus is on whether at least one valid operator toward the solution is among them.
|
| 691 |
+
|
| 692 |
+
In practice, our algorithm adaptively refines the operator to further boost $p_{\mathrm{min}}$ .
|
| 693 |
+
|
| 694 |
+
Single-Step Success Probability: At each step of the planning process, the LLM generates candidate operators. Let's define:
|
| 695 |
+
|
| 696 |
+
- $E_{i}$ : the event that the LLM generates at least one operator at step $i$ that advances the plan toward the goal.
|
| 697 |
+
- $p_i = P(E_i)$ : the probability of event $E_i$ occurring.
|
| 698 |
+
|
| 699 |
+
Given our bounded resource assumptions and the categorical structure of our planning domain, the number of possible states is finite. Furthermore, since the LLM's operator generation is based on learned statistical patterns, there exists a minimum probability $p_{\mathrm{min}} > 0$ such that:
|
| 700 |
+
|
| 701 |
+
$$
|
| 702 |
+
p_{i} \geq p_{\min} \quad \text{for all } i \tag{3}
|
| 703 |
+
$$
|
| 704 |
+
|
| 705 |
+
This lower bound $p_{\mathrm{min}}$ represents the LLM's worst-case performance in generating useful operators for our planning domain.
|
| 706 |
+
|
| 707 |
+
Multi-Step Analysis: Finding a valid plan requires successfully generating valid operators for multiple consecutive steps. We model this as a sequence of Bernoulli trials, where each trial corresponds to an attempt to advance the plan by one step.
|
| 708 |
+
|
| 709 |
+
Let $X_{n}$ be the random variable representing the number of successful steps completed after $n$ attempts. We're interested in $P(X_{n} \geq L)$ , where $L$ is the length of the optimal plan.
|
| 710 |
+
|
| 711 |
+
Markov Chain Representation: We can model the planning process as a Markov chain where:
|
| 712 |
+
|
| 713 |
+
States correspond to the progress made (number of steps completed toward the goal).
|
| 714 |
+
- Transitions occur with probability at least $p_{\mathrm{min}}$ for advancement and at most $(1 - p_{\mathrm{min}})$ for staying in the same state.
|
| 715 |
+
|
| 716 |
+
This is a birth process with a minimum birth probability of $p_{\mathrm{min}}$ . The probability of reaching state $L$ (completing the plan) within $n$ steps can be analyzed using standard results from Markov chain theory.
|
| 717 |
+
|
| 718 |
+
Deriving the Bound: For a birth process with minimum birth probability $p_{\mathrm{min}}$ , the probability of not reaching state $L$ within $n$ steps is bounded by:
|
| 719 |
+
|
| 720 |
+
$$
|
| 721 |
+
P \left(X _ {n} < L\right) \leq \left(1 - p _ {\min } ^ {L}\right) ^ {\lfloor n / L \rfloor} \tag {4}
|
| 722 |
+
$$
|
| 723 |
+
|
| 724 |
+
This bound reflects that every sequence of $L$ consecutive steps has a probability of at least $p_{\min}^{L}$ of completing the entire plan.
|
| 725 |
+
|
| 726 |
+
For large $n$ , we can approximate this as:
|
| 727 |
+
|
| 728 |
+
$$
|
| 729 |
+
P \left(X _ {n} < L\right) \leq e ^ {- p _ {\min } ^ {L} \cdot \lfloor n / L \rfloor} \leq e ^ {- \lambda n} \tag {5}
|
| 730 |
+
$$
|
| 731 |
+
|
| 732 |
+
where $\lambda = p_{\mathrm{min}}^L / L$ is a positive constant.
|
| 733 |
+
|
| 734 |
+
Therefore, the probability of finding a valid plan within $n$ steps is:
|
| 735 |
+
|
| 736 |
+
$$
|
| 737 |
+
P \left(X _ {n} \geq L\right) = 1 - P \left(X _ {n} < L\right) \geq 1 - e ^ {- \lambda n} \tag {6}
|
| 738 |
+
$$
|
| 739 |
+
|
| 740 |
+
Connection to LLM Confidence: The parameter $\lambda$ in our bound is directly related to the LLM's operator generation capability:
|
| 741 |
+
|
| 742 |
+
$$
|
| 743 |
+
\lambda = \frac {p _ {\operatorname* {m i n}} ^ {L}}{L} \tag {7}
|
| 744 |
+
$$
|
| 745 |
+
|
| 746 |
+
A more capable LLM with higher confidence in generating valid operators would have a larger $p_{\mathrm{min}}$ , resulting in a larger $\lambda$ and faster convergence.
|
| 747 |
+
|
| 748 |
+
Practical Implications: This bound guarantees exponential convergence: the probability of failure decreases exponentially with the number of steps $n$ . For practical applications, we can calculate how many steps are needed to achieve a desired success probability.
|
| 749 |
+
|
| 750 |
+
For example, to achieve a success probability of at least $1 - \delta$ for some small $\delta > 0$ , we need:
|
| 751 |
+
|
| 752 |
+
$$
|
| 753 |
+
1 - e ^ {- \lambda n} \geq 1 - \delta \tag {8}
|
| 754 |
+
$$
|
| 755 |
+
|
| 756 |
+
which gives us:
|
| 757 |
+
|
| 758 |
+
$$
|
| 759 |
+
n \geq \frac {\ln (1 / \delta)}{\lambda} = \frac {L \cdot \ln (1 / \delta)}{p _ {\operatorname* {m i n}} ^ {L}} \tag {9}
|
| 760 |
+
$$
|
| 761 |
+
|
| 762 |
+
Therefore, under bounded resources and finite constraints, the probability of finding a valid plan in $n$ steps is at least $1 - e^{-\lambda n}$ , providing a formal guarantee of probabilistic completeness for our neural-symbolic planning approach.
|
| 763 |
+
|
| 764 |
+
# B.5 Time complexity
|
| 765 |
+
|
| 766 |
+
Proof of Theorem 5.1 (Time Complexity). We analyze the worst-case time complexity of our bidirectional search algorithm for finding a plan of length $L$ with branching factor $b$ in a state space with $n$ states.
|
| 767 |
+
|
| 768 |
+
Search Space Analysis: In classical forward-only search, the algorithm potentially explores all states reachable within $L$ steps from the initial state $w_{0}$ . With branching factor $b$ , this yields a search space of size:
|
| 769 |
+
|
| 770 |
+
$$
|
| 771 |
+
\left| S _ {\text {f o r w a r d}} \right| = \sum_ {i = 0} ^ {L} b ^ {i} = \frac {b ^ {L + 1} - 1}{b - 1} = O \left(b ^ {L}\right) \tag {10}
|
| 772 |
+
$$
|
| 773 |
+
|
| 774 |
+
Our bidirectional approach simultaneously expands from the initial state $w_0$ and the goal state $w^*$ . Let's analyze the size of both search frontiers:
|
| 775 |
+
|
| 776 |
+
1. Forward Search Frontier: Starting from $w_0$ , after $i$ expansions, we explore $O(b^i)$ states.
|
| 777 |
+
2. Backward Search Frontier: Starting from $w^{*}$ , after $j$ expansions, we explore $O(b^{j})$ states.
|
| 778 |
+
|
| 779 |
+
Meeting Point Analysis: For a plan of length $L$ , the forward and backward frontiers will meet when $i + j \geq L$ . The optimal allocation that minimizes the total number of explored states occurs when $i \approx j \approx L / 2$ .
|
| 780 |
+
|
| 781 |
+
At this balanced meeting point, the number of states explored by each frontier is:
|
| 782 |
+
|
| 783 |
+
$$
|
| 784 |
+
\left| S _ {\text {f o r w a r d}} \right| = O \left(b ^ {L / 2}\right) \quad \text {a n d} \quad \left| S _ {\text {b a c k w a r d}} \right| = O \left(b ^ {L / 2}\right) \tag {11}
|
| 785 |
+
$$
|
| 786 |
+
|
| 787 |
+
Therefore, the total number of states explored is:
|
| 788 |
+
|
| 789 |
+
$$
|
| 790 |
+
\begin{array}{l} \left| S _ {\text {t o t a l}} \right| = \left| S _ {\text {f o r w a r d}} \right| + \left| S _ {\text {b a c k w a r d}} \right| \tag {12} \\ = O \left(b ^ {L / 2}\right) + O \left(b ^ {L / 2}\right) = O \left(b ^ {L / 2}\right) \\ \end{array}
|
| 791 |
+
$$
|
| 792 |
+
|
| 793 |
+
Verification Overhead: At each iteration, our algorithm:
|
| 794 |
+
|
| 795 |
+
1. Selects the most promising state from each frontier using the planning distance function $D$ , which takes $O(\log |F|)$ time with a priority queue, where $|F|$ is the frontier size.
|
| 796 |
+
2. Expands the selected state by applying all possible operators, which takes $O(b)$ time.
|
| 797 |
+
3. Attempts to find meeting points between the frontiers, which requires checking $O(|F_{F}| \cdot |F_{B}|)$ potential state pairs in the worst case, where $|F_{F}|$ and $|F_{B}|$ are the sizes of the forward and backward frontiers.
|
| 798 |
+
4. Performs pullback verification for promising meeting candidates, which takes $O(d)$ time per candidate, where $d$ is the dimensionality of our state representation.
|
| 799 |
+
|
| 800 |
+
In the worst case, the frontier sizes grow to $O(b^{L/2})$ , making the meeting point search potentially expensive. However, our planning distance function $D$ provides an effective heuristic to limit the number of candidate pairs to consider.
|
| 801 |
+
|
| 802 |
+
Let $k$ be the number of most promising pairs we consider at each iteration, where $k$ is a constant that depends on the problem domain. The verification overhead per iteration becomes $O(k \cdot d) = O(1)$ for fixed $k$ and $d$ .
|
| 803 |
+
|
| 804 |
+
Total Complexity: Over the course of the search, we explore $O(b^{L/2})$ states, with each state requiring $O(b)$ time for expansion and $O(1)$ time for verification. Thus, the total time complexity is:
|
| 805 |
+
|
| 806 |
+
$$
|
| 807 |
+
T = O \left(b ^ {L / 2} \cdot b \cdot 1\right) = O \left(b ^ {L / 2 + 1}\right) = O \left(b ^ {L / 2}\right) \tag {13}
|
| 808 |
+
$$
|
| 809 |
+
|
| 810 |
+
where the last simplification assumes $b > 1$ .
|
| 811 |
+
|
| 812 |
+
Comparison with Unidirectional Search: The standard unidirectional forward search has time complexity $O(b^{L})$ . Our bidirectional approach achieves $O(b^{L / 2})$ , which represents a quadratic improvement in the exponent:
|
| 813 |
+
|
| 814 |
+
$$
|
| 815 |
+
\frac {b ^ {L}}{b ^ {L / 2}} = b ^ {L / 2} \tag {14}
|
| 816 |
+
$$
|
| 817 |
+
|
| 818 |
+
This exponential reduction makes problems with large $L$ tractable in practice. For example, with $b = 3$ and $L = 20$ , unidirectional search explores up to $3^{20} \approx 3.5 \times 10^9$ states, while our bidirectional approach explores only up to $3^{10} \approx 59,000$ states—a reduction by a factor of approximately 60,000.
|
| 819 |
+
|
| 820 |
+
Therefore, the bidirectional search algorithm has time complexity $O(b^{L / 2})$ .
|
| 821 |
+
|
| 822 |
+

|
| 823 |
+
|
| 824 |
+
# C Implementation Details
|
| 825 |
+
|
| 826 |
+
Our implementation uses Llama3.1-13B as the backbone LLM model. The model is finetuned on a server with AMD EPYC CPU and a single NVIDIA A100 (80GB) GPU.
|
| 827 |
+
|
| 828 |
+
Dataset Preparation For finetuning the morphism generator $\phi_f$ , we construct training examples through negative sampling of valid planning pathways. For each state node $w_i$ in the pathway rooted at $w^*$ , we create positive examples using the ground truth morphisms, and negative examples using invalid or suboptimal morphisms. We assign preference scores based on $V_t(w_i|G)$ values obtained through the bidirectional search methodology described in Section 4.2.
|
| 829 |
+
|
| 830 |
+
For the planning distance function $D$ , we collect training pairs from both forward and backward search spaces. From each valid pathway to $w^{*}$ , we extract state pairs and their corresponding labels $V_{w^{*}}(w_{i}|G_{R}) - sn(w_{i}|G_{R})$ , generating a dataset that captures both top-down and bottom-up planning distances.
|
| 831 |
+
|
| 832 |
+
For the value estimator $V_{w}$ given $w_{0}$ , which we model as MLP, we extract ground truth minimum cost values from completed search trees, using them as supervision signals.
|
| 833 |
+
|
| 834 |
+
Distance Function Components The symbolic state distance $d_{s}$ is implemented as $\mathrm{MLP}(h_{s_1} - h_{s_2})$ , where $h_{s_i}$ is the embedding of symbolic state $s_i$ generated by the LLM. For logical constraints, we use the Jaccard distance $d_{l}(l_{1}, l_{2}) = 1 - \frac{|l_{1} \cap l_{2}|}{|l_{1} \cup l_{2}|}$ . Temporal distance $d_{t}$ is computed as the summation of active time differences: $d_{t}(t_{1}, t_{2}) = \sum_{i \in \text{active}} |t_{1}(i) - t_{2}(i)|$ . Based on ablation studies, we set component weights to $\alpha_s = 0.85$ , $\alpha_r = 0.05$ , $\alpha_l = 0.05$ , $\alpha_t = 0.05$ .
|
| 835 |
+
|
| 836 |
+
Model Training We train all MLP components using the Adam optimizer with initial learning rate 0.001 and decay factor 0.3. We employ early stopping with patience 3 to prevent overfitting. Through hyperparameter tuning, we selected dropout ratio 0.2 (from [0.1-0.5]), 3 hidden layers (from [2-4]), and hidden dimensions of 1024 for $d_{s}$ and 256 for $V_{w}$ . The morphism generator $\phi_f$ is finetuned using Direct Preference Optimization (DPO) with the TRL library, trained with learning rate 1e-5, batch size 8, gradient accumulation steps of 2, and weight decay 0.1. These settings are used by default for all reported results.
|
| 837 |
+
|
| 838 |
+
# D Decomposition Prompt
|
| 839 |
+
|
| 840 |
+
# LLM-Driven Structured Task Decomposition
|
| 841 |
+
|
| 842 |
+
User Query: Train a language model on Dataset X within 12 hours, ensuring memory usage stays under 16GB.
|
| 843 |
+
|
| 844 |
+
System Prompt: Parse the given task specification into a formal structured representation with the following schema (return as JSON):
|
| 845 |
+
|
| 846 |
+
1. Resources: $\{r_i\}$ - The set of resources with their types, capacities, and initial states (e.g., computational resources, data assets, model artifacts)
|
| 847 |
+
2. Operators: $\{O_j\}$ - The set of valid operations where:
|
| 848 |
+
|
| 849 |
+
- Unary operators: $O_{j}: S_{i} \to S_{i+1}$ (e.g., preprocess, validate)
|
| 850 |
+
- Binary operators: $O_{j}: S_{i} \times R_{k} \to S_{i+1}$ (e.g., train_on, evaluate_with)
|
| 851 |
+
|
| 852 |
+
3. Constraints: $\{C_l\}$ - The set of domain and resource constraints, where:
|
| 853 |
+
|
| 854 |
+
- Temporal constraints: $\{t_{min}, t_{max}\}$ for each operation
|
| 855 |
+
- Resource bounds: $\{r_{min}, r_{max}\}$ for each resource consumption
|
| 856 |
+
- Precedence constraints: $\{(O_j,O_k)|O_j\prec O_k\}$
|
| 857 |
+
|
| 858 |
+
This initial decomposition is then progressively refined through subsequent steps.
|
| 859 |
+
|
| 860 |
+
# Constraint Refinement Step
|
| 861 |
+
|
| 862 |
+
System Prompt: Identify and clarify any ambiguous or missing constraints in the initial specification:
|
| 863 |
+
|
| 864 |
+
- Initialization prerequisites
|
| 865 |
+
Resource contention:
|
| 866 |
+
- Constraint type:
|
| 867 |
+
|
| 868 |
+
# Resource Formalization Step
|
| 869 |
+
|
| 870 |
+
System Prompt: Formalize each resource with explicit typing, quantification and format:
|
| 871 |
+
|
| 872 |
+
- Specific units and measures for each resource
|
| 873 |
+
- Minimum/maximum values for each constraint
|
| 874 |
+
- Formal temporal expressions
|
| 875 |
+
|
| 876 |
+
The final categorical encoding step transforms these specifications into mathematical objects, morphisms, and constraints suitable for our category-theoretic framework. This iterative process significantly reduces manual engineering effort typically required for symbolic planning approaches, while ensuring the resulting formalization maintains the precision needed for categorical verification.
|
| 877 |
+
|
| 878 |
+
Meta-Prompt for Domain Adaptation. To adapt the decomposition framework to a new domain, replace the domain-specific primitives in the Resources, Operators, and Constraints fields with entities relevant to that setting. For example, in cooking, resources become ingredients and appliances, operators are actions such as chop or bake, and constraints encode nutritional or temporal limits; in robotics, resources map to robots and sensors, operators include move or pick, and constraints enforce energy, safety, or timing bounds. The schema and output format remain unchanged—the only modification is substituting examples and constraints that capture the new domain's requirements.
|
| 879 |
+
|
| 880 |
+
# D.1 Worked Example: Task Decomposition
|
| 881 |
+
|
| 882 |
+
We illustrate a full decomposition example with the task:
|
| 883 |
+
|
| 884 |
+
"Bake cookies with limited sugar for diabetes consideration while still tasting good."
|
| 885 |
+
|
| 886 |
+
Step 1: Initial Decomposition (via LLM). Extract candidate resources, operators, and constraints.
|
| 887 |
+
|
| 888 |
+
Resources: flour (2 cups), sugar (0.5 cups), erythritol (1/3 cups), oven, mixing bowl.
|
| 889 |
+
|
| 890 |
+
Operators:
|
| 891 |
+
|
| 892 |
+
- $O_{1}$ : mix(ingredients) $\rightarrow$ dough
|
| 893 |
+
- $O_2$ : bake(dough, oven) $\rightarrow$ cookies
|
| 894 |
+
|
| 895 |
+
Constraints:
|
| 896 |
+
|
| 897 |
+
- Resource: sugar $\leq 0.1$ cups
|
| 898 |
+
- Temporal: bake duration $\in$ [15, 20] minutes
|
| 899 |
+
- Precedence: mix $\prec$ bake
|
| 900 |
+
|
| 901 |
+
Step 2: Constraint Refinement. The system identifies implicit assumptions:
|
| 902 |
+
|
| 903 |
+
- Oven must be preheated before bake.
|
| 904 |
+
- Sugar substitution with erythritol is allowed but capped at 1/3 cup.
|
| 905 |
+
- Mixing requires all dry ingredients to be available simultaneously.
|
| 906 |
+
|
| 907 |
+
Step 3: Resource Formalization. Resources are typed and quantified explicitly:
|
| 908 |
+
|
| 909 |
+
{"flour": {"type": "ingredient", "quantity": "2c"}, "sugar": {"type": "ingredient", "quantity": "0.5c", "max": "0.1c"}, "erythritol": {"type": "ingredient", "quantity": "1/3c", "max": "1/3c"}, "oven": {"type": "appliance", "state": "preheated"}, "bowl": {"type": "container", "capacity": "5c"}}
|
| 910 |
+
|
| 911 |
+
Algorithm 1 Bidirectional Search with Planning Distance
|
| 912 |
+
Require: Initial state $w_0$ , Goal state $w^*$ , Planning distance $D$ , Budget $B$ $\mathcal{G}^F \gets \{w_0\}, \mathcal{G}^B \gets \{w^*\}$ $V(w_0) \gets 0, V(w^*) \gets 0$ $\mathcal{F}^F \gets \{w_0\}, \mathcal{F}^B \gets \{w^*\}$
|
| 913 |
+
steps $\leftarrow 0$
|
| 914 |
+
while steps $< B$ and $(|\mathcal{F}^F| > 0$ and $|\mathcal{F}^B| > 0)$ do
|
| 915 |
+
$w_{select,F} \gets \arg \min_{w \in \mathcal{F}^F} V_t(w|\mathcal{G}^F)$
|
| 916 |
+
for each valid morphism $f: w_{select,F} \to w'$ do
|
| 917 |
+
Add $w'$ to $\mathcal{G}^F$ and $\mathcal{F}^F$ if not already present
|
| 918 |
+
$V(w') \gets \min \{V(w'), V(w_{select,F}) + c(f)\}$ $sn(w'|\mathcal{G}^F) \gets V_{w'} = D(w', \gamma(w'))$
|
| 919 |
+
end for
|
| 920 |
+
Remove $w_{select,F}$ from $\mathcal{F}^F$ $w_{select,B} \gets \arg \min_{w \in \mathcal{F}^B} [V_t(w|\mathcal{G}^B) + \min (D_t(w|\mathcal{G}^B))]$
|
| 921 |
+
for each valid morphism $f: w' \to w_{select,B}$ do
|
| 922 |
+
Add $w'$ to $\mathcal{G}^B$ and $\mathcal{F}^B$ if not already present
|
| 923 |
+
$V(w') \gets \min \{V(w'), V(w_{select,B}) + c(f)\}$ $rn(w'|\mathcal{G}^B) \gets V_{w'}$ $sn(w'|\mathcal{G}^B) \gets \{D_{w'} - V_{w'}\} = \{D(\gamma(w'), w') - V_{w'}\}$
|
| 924 |
+
end for
|
| 925 |
+
Remove $w_{select,B}$ from $\mathcal{F}^B$
|
| 926 |
+
Update $sn$ and $D_t$ values via Uppropagation and Downpropagation for $\mathcal{G}^B$
|
| 927 |
+
Attempt pullback checks between states in $\mathcal{G}^F$ and $\mathcal{G}^B$
|
| 928 |
+
for each $w_F \in \mathcal{G}^F$ and $w_B \in \mathcal{G}^B$ with $D(w_F, w_B) < \epsilon$ do
|
| 929 |
+
if there exist morphisms $f_F: w_F \to w_C$ and $f_B: w_B \to w_C$ then
|
| 930 |
+
Attempt to construct pullback $w_P$ with projections $p_1: w_P \to w_F, p_2: w_P \to w_B$
|
| 931 |
+
if valid pullback $w_P$ exists then
|
| 932 |
+
plan $\leftarrow$ Compose path from $w_0$ to $w_F$ with path from $w_B$ to $w^*$
|
| 933 |
+
return plan
|
| 934 |
+
end if
|
| 935 |
+
end if
|
| 936 |
+
end for
|
| 937 |
+
Prune dominated states from $\mathcal{G}^F$ and $\mathcal{G}^B$
|
| 938 |
+
steps $\leftarrow$ steps + 1
|
| 939 |
+
end while
|
| 940 |
+
return no valid plan found
|
| 941 |
+
|
| 942 |
+
# F AI Assistant Usage
|
| 943 |
+
|
| 944 |
+
This research utilized AI assistants including Claude and GPT-4 for several aspects of the paper and dataset preparation. We employed these tools mainly for:
|
| 945 |
+
|
| 946 |
+
- Dataset enhancement: GPT-4 was used to augment the RecipeNLG dataset with explicit resource constraints (e.g., "2 cups flour maximum") and temporal intervals (e.g., "bake for 20 minutes") to create a more challenging testing environment for constraint satisfaction. This augmentation process was carefully designed and supervised by the authors to ensure consistency and validity of the constraints.
|
| 947 |
+
- Implementation support: AI assistants provided code debugging assistance for the implementation of our validation check and bidirectional search algorithm.
|
| 948 |
+
- Manuscript preparation: We used AI assistants for literature review to identify relevant papers, proofreading, language refinement, and formatting assistance.
|
| 949 |
+
- Proof check: We used AI assistants to refine and check the draft proofs.
|
| 950 |
+
- Benchmark: We used the AI assistant GPT-4 as one of our benchmarks on the dataset.
|
acategorytheoreticapproachtoneuralsymbolictaskplanningwithbidirectionalsearch/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:dda8c624e2d1021a12e6e11b6411d9f812990f863da03f82e4a069105796fcde
|
| 3 |
+
size 481698
|
acategorytheoreticapproachtoneuralsymbolictaskplanningwithbidirectionalsearch/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:10f3b88a8e4a7c36af0908603a535e616aded8e4c5996f6ffa134b59840cdd69
|
| 3 |
+
size 1315203
|
acceleratingllmreasoningviaearlyrejectionwithpartialrewardmodeling/152df871-1fb9-468c-a897-efb303997b08_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ce4775e6b53395f0003709108d1a14cff0c81c939db1e72cf753c26d59082174
|
| 3 |
+
size 81619
|
acceleratingllmreasoningviaearlyrejectionwithpartialrewardmodeling/152df871-1fb9-468c-a897-efb303997b08_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:677af3feff07036dbd35ce526528959069f38f53e27335364994998a4618ebd9
|
| 3 |
+
size 102211
|
acceleratingllmreasoningviaearlyrejectionwithpartialrewardmodeling/152df871-1fb9-468c-a897-efb303997b08_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:92c0fa39ed660436c8a2e23faa7aba99e5220983ef2aaf26f8e5e87af3d25c6f
|
| 3 |
+
size 648577
|
acceleratingllmreasoningviaearlyrejectionwithpartialrewardmodeling/full.md
ADDED
|
@@ -0,0 +1,337 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Accelerating LLM Reasoning via Early Rejection with Partial Reward Modeling
|
| 2 |
+
|
| 3 |
+
Seyyed Saeid Cheshmi $^{1*}$ Azal Ahmad Khan $^{1*}$
|
| 4 |
+
|
| 5 |
+
Xinran Wang $^{1}$ Zirui Liu $^{1}$ Ali Anwar $^{1}$
|
| 6 |
+
|
| 7 |
+
<sup>1</sup>University of Minnesota
|
| 8 |
+
|
| 9 |
+
{chesh014, khan1069, wang8740, zrliu, aanwar}@umn.edu
|
| 10 |
+
|
| 11 |
+
# Abstract
|
| 12 |
+
|
| 13 |
+
Large Language Models (LLMs) are increasingly relied upon for solving complex reasoning tasks in domains such as mathematics, logic, and multi-step question answering. A growing line of work seeks to improve reasoning quality by scaling inference-time compute, particularly through Process Reward Models (PRMs), used to reward the reasoning at intermediate steps. While effective, these methods introduce substantial computational overhead, especially when generating large numbers of solutions in parallel. In this paper, we investigate whether PRMs can be used mid-generation to provide early signals that enable the rejection of suboptimal candidates before the full generation of a step is complete. We introduce the hypothesis that PRMs are also Partial Reward Models, meaning that the scores they assign to partially completed reasoning steps are predictive of final output quality. This allows for principled early rejection based on intermediate token-level signals. We support this hypothesis both theoretically, by proving that the risk of discarding optimal beams decreases exponentially with generation length, and empirically, by demonstrating a strong correlation between partial and final rewards across multiple reward models. On math reasoning benchmarks, our method achieves up to a $1.4\times$–$9\times$ reduction in inference FLOPs without degrading final performance. These results suggest that early rejection is a powerful mechanism for improving the compute-efficiency of reasoning in LLMs. The code and implementation are available at https://github.com/scheshmi/accelerated-reasoning-ER-PRM.
|
| 14 |
+
|
| 15 |
+
# 1 Introduction
|
| 16 |
+
|
| 17 |
+
Large Language Models (LLMs) are at the forefront of AI capabilities due to their emerging ability to perform complex reasoning tasks (Kojima et al.,
|
| 18 |
+
|
| 19 |
+
2022; Chan, 2024; Cheng et al., 2025; Hazra et al., 2025; Xu et al., 2025). They have demonstrated significant success in domains such as mathematical problem solving, multi-hop question answering, and logical inference (Creswell et al., 2022; Ahn et al., 2024; Akella, 2024). These advancements are critical because they signal a shift from surface-level pattern recognition to deeper, multistep deductive reasoning (Wei et al., 2022; Zhou et al., 2022). Enhancing these reasoning abilities is paramount for developing more capable, reliable models that can operate across various domains.
|
| 20 |
+
|
| 21 |
+
Prior Works. As the scaling of model parameters and pretraining data has started to become a bottleneck, recent efforts have shifted toward increasing compute at inference time to improve the reasoning capabilities of LLMs (Snell et al., 2024). Improving the reasoning capabilities of LLMs by scaling compute at inference time has been pursued through multiple strategies. A prominent approach leverages Outcome Reward Models, which train a separate evaluator to score the final output of the LLM based on correctness or quality (Cobbe et al., 2021; Hosseini et al., 2024; Mahan et al., 2024; Zhang et al., 2024b). Another approach uses, Process Reward Models (PRMs) to evaluate intermediate steps or reasoning trajectories generated during inference (Wang et al., 2023; Snell et al., 2024; Zhang et al., 2024a; Luo et al., 2024). Under this paradigm, the model generates multiple candidate reasoning paths, which are then evaluated by the PRM that assigns rewards at the end of each step. This step-wise evaluation enables the selection of promising trajectories for further expansion while allowing the rejection of less promising ones, thereby guiding the reasoning process more efficiently. Techniques such as beam search, Monte Carlo Tree Search (MCTS) guided by value models, and PRM-guided methods, exemplify this strategy (Feng et al., 2023; Yao et al., 2023). In this
|
| 22 |
+
|
| 23 |
+

|
| 24 |
+
Figure 1: Process-Reward Model (PRM) at full length vs. PRM reused for early rejection. (Left) Every beam is expanded to full depth before the PRM scores its complete solution, so all intermediate branches incur compute even if they were to fail. (Right) The same PRM is invoked mid-generation after each block of a few tokens, producing a partial reward that serves as an early-quality signal.
|
| 25 |
+
|
| 26 |
+

|
| 27 |
+
Figure 2: Linear relationship between partial rewards (reward calculated at half step completion) and full rewards (rewards calculated at full step completion) with (left) Llemma-MetaMath-7b and (right) MathShepherd-Mistral-7b as reward models.
|
| 28 |
+
|
| 29 |
+

|
| 30 |
+
|
| 31 |
+
paper, we focus specifically on the PRM paradigm and explore how to improve its efficiency.
|
| 32 |
+
|
| 33 |
+
Challenges. While scaling test-time compute using methods like PRMs can significantly enhance performance, it also introduces substantial computational overhead, especially for long sequences where many generated beams contribute little value (Chen et al., 2024; Hu et al., 2025; Wang et al., 2025). To be competitive with state-of-the-art post-training approaches, the number of beams often needs to be scaled up to 1000-60000, resulting in a large number of output tokens (Sun et al., 2024). These output tokens are not only computationally expensive to generate, but are also produced sequentially, leading to considerable latency (Yang et al., 2024). A natural solution is to reject unpromising candidates early in the decoding process, after only a few tokens, before committing to full step generation, a strategy we refer to as Early Rejection. However, a major challenge in
|
| 34 |
+
|
| 35 |
+
early rejection is making sure that decisions based on only part of the output don't accidentally discard high-quality completions. This is difficult because the overall quality of a reasoning trace often depends on its full structure, which might not be obvious from the first few tokens. As a result, developing a reliable method that can make early yet accurate decisions about which traces to keep, based on partial generations, remains an important and open research problem.
|
| 36 |
+
|
| 37 |
+
Hypothesis. To address this challenge, we present the hypothesis that Process Reward Models are also Partial Reward Models. That is, for structured reasoning tasks, the partial scores assigned by a PRM, when evaluated after a small but meaningful fraction of the generation, are sufficiently correlated with the final scores. This insight suggests that PRMs, which are conventionally applied at the end of a complete reasoning trace, can also be used mid-generation to provide partial rewards that act as early indicators of output quality. Figure 1 illustrates this distinction: while traditional PRM usage scores complete reasoning paths only at the end, our approach invokes the same PRM mid-generation to score partial traces, enabling early rejection of unpromising candidates. In doing so, partial rewards enable principled early rejection based on intermediate token-level signals. Preliminary results, as shown in Figure 2, reveal a consistent relationship between partial and final rewards, modeled as a monotonic mapping with added noise.
|
| 38 |
+
|
| 39 |
+
Contributions. This paper makes the following key contributions: (C1) We introduce the hypothesis
|
| 40 |
+
|
| 41 |
+
that Process Reward Models (PRMs) can be used as Partial Reward Models to enable early rejection of suboptimal beams. We support this hypothesis by showing that partial rewards computed after only a fraction of the generation are strongly correlated with final rewards, allowing for reliable early decisions in the reasoning process. (C2) We provide theoretical guarantees that justify the use of partial scores for early rejection. Specifically, we prove that under mild assumptions, the probability of prematurely rejecting the optimal trajectory decreases exponentially with the partial generation length. (C3) We empirically demonstrate that early rejection guided by PRMs is both effective and compute-efficient. On reasoning tasks such as AIME, Math-500 and AGI Eval, our approach reduces inference-time FLOPs by $1.4\times$–$9\times$ when using a mid-sized PRM (7B parameters) without any loss in task performance. Furthermore, when using a smaller PRM (1.5B parameters), we achieve up to a $1.5\times$–$4\times$ reduction in FLOPs, demonstrating that even lightweight evaluators can enable highly efficient reasoning through early rejection.
|
| 42 |
+
|
| 43 |
+
# 2 Related Works
|
| 44 |
+
|
| 45 |
+
Generative Reward Models. Early approaches to guiding machine learning models relied on handcrafted heuristics, but as models have grown more complex, generative models have increasingly been used for supervision and alignment (Mahan et al., 2024). Generative models now serve as critics, verifiers, and, most notably, as reward models in RLHF (Mahan et al., 2024). Critics evaluate model outputs by providing detailed feedback (Luo et al., 2023; Lan et al., 2024; Lin et al., 2024; Du et al., 2024), while verifiers check the factual correctness or consistency of responses (Kouemo Ngassom et al., 2024; Qi et al., 2024; Kirchner et al., 2024). As reward models, they can be used to score either the final outcome (outcome reward models, ORMs) or provide feedback at intermediate steps (process reward models, PRMs) (Lightman et al., 2023a). ORMs deliver a single reward signal at the end of generation, while PRMs offer denser, stepwise supervision, which has been shown to improve reasoning and generalization (Cobbe et al., 2021; Wang et al., 2023; Hosseini et al., 2024; Zhang et al., 2024b; Snell et al., 2024; Luo et al., 2024). PRMs have also been shown to facilitate more interpretable learning dynamics by providing actionable feedback at each reasoning step, enabling finer
|
| 46 |
+
|
| 47 |
+
grained control over model behavior and accelerating convergence during training (Lightman et al., 2023a; Snell et al., 2024; Hosseini et al., 2024).
|
| 48 |
+
|
| 49 |
+
Early Rejection. In classification, confidence-based rejection and selective prediction methods (Geifman and El-Yaniv, 2019) allow models to withhold outputs for ambiguous or out-of-distribution inputs, while similar abstention strategies are used in regression (Mozannar and Sontag, 2020). In LLMs, early rejection began with Best-of-N (BoN) decoding, where all candidates are fully generated and only the best is selected (Cobbe et al., 2021; Zhou et al., 2022). Recent advances show that integrating PRMs as step-level re-rankers within beam search significantly boosts both accuracy and compute efficiency, as dense rewards allow for rejection of suboptimal reasoning paths and more effective exploration of diverse solutions (Wang et al., 2023; Snell et al., 2024; Zhang et al., 2024a; Luo et al., 2024). Speculative Rejection proposed using ORMs for early rejection in BoN by discarding weak candidates mid-generation (Sun et al., 2024). In this work, we study the principle of early rejection for PRMs and demonstrate how it can be effectively integrated into beam search methods.
|
| 50 |
+
|
| 51 |
+
# 3 Method
|
| 52 |
+
|
| 53 |
+
Beam Search for Reasoning. Beam search is a widely used decoding strategy in LLMs for structured generation tasks such as mathematical problem solving and multi-step reasoning (Yao et al., 2023; Feng et al., 2023; Snell et al., 2024). At each decoding step, the model expands a fixed-width set of $N$ candidate beams by sampling multiple possible continuations and retaining only the top-scoring ones based on a predefined heuristic (e.g., log-probability or reward score). This iterative expansion and rejecting process allows the model to explore a larger space of possible outputs than greedy decoding, while remaining tractable compared to exhaustive search. In PRM-guided reasoning, each beam is scored at the end of every reasoning step by a PRM, which evaluates the coherence or correctness of the generated step. The highest scoring beams are then selected for further expansion, enabling the model to gradually construct a valid multi-step reasoning trace.
|
| 54 |
+
|
| 55 |
+
# 3.1 Partial Scoring for Early Rejection
|
| 56 |
+
|
| 57 |
+
Standard inference-time reasoning with LLMs and PRMs involves generating multiple candidate reasoning trajectories, typically using beam search or tree-based strategies, and scoring each trajectory after every step generation. Based on these step-wise scores, a subset of beams is selected and expanded further. While this strategy has been instrumental in advancing long-horizon reasoning, it incurs substantial computational overhead, as all candidate steps must be fully generated before evaluation, regardless of their quality.
|
| 58 |
+
|
| 59 |
+
We introduce a modification to this pipeline by reusing the same PRM mid-step generation. A compact overview is shown in Algorithm 3, where instead of waiting for a full step to complete, we compute partial rewards after first block of $\tau$ tokens at each step. These intermediate scores serve as early indicators of downstream quality. Beams with low partial scores are rejected before completing the full step. The surviving beams are then completed to the end of the current step, after which expansion proceeds as in the standard pipeline. Early rejection is applied again at the next step. This process ensures that computation is focused on the most promising candidates, reducing the number of unnecessary tokens generated and minimizing redundant PRM evaluations. A full version of the algorithm, along with the standard PRM-guided baseline, is provided in Appendix A for reproducibility and implementation details.
|
| 60 |
+
|
| 61 |
+
# Algorithm 1 Beam Search with Early Rejection
|
| 62 |
+
|
| 63 |
+
1: Initialize $N$ beams
|
| 64 |
+
2: for each beam do
|
| 65 |
+
3: Generate up to $\tau$ tokens and compute partial reward using PRM
|
| 66 |
+
4: end for
|
| 67 |
+
5: Select top $N / M$ beams by partial reward and complete remaining beams to full step
|
| 68 |
+
6: Expand each remaining beam with $M$ new beams
|
| 69 |
+
7: Repeat scoring, early rejection, and expansion until stopping condition is met
|
| 70 |
+
8: return Best final sequence
|
| 71 |
+
|
| 72 |
+
Figure 3: Overview of beam search with early rejection.
|
| 73 |
+
|
| 74 |
+
# 3.2 Efficiency Gains from Early Rejection
|
| 75 |
+
|
| 76 |
+
This early rejection strategy is focused on reduction in the number of tokens generated. By rejecting weaker candidates after a partial generation, we avoid expending compute on beams unlikely to contribute to the final output. The impact of this optimization on both generation cost and reward model evaluation is summarized below:
|
| 77 |
+
|
| 78 |
+
# Early rejection reduces compute
|
| 79 |
+
|
| 80 |
+
Rejecting beams after generating the first $\tau$ tokens leads to a FLOPs reduction for each step generation and during PRM evaluation.
|
| 81 |
+
|
| 82 |
+
Beyond reducing total compute, early rejection also improves throughput through a two-tiered batching strategy. Since rejected beams only require $\tau$ tokens to be generated, they occupy significantly less memory. This enables us to increase the batch size during the initial generation phase without encountering out-of-memory (OOM) errors. We then switch to a smaller batch size for completing the remaining beams, balancing exploration with memory efficiency. This batching decoupling is summarized below:
|
| 83 |
+
|
| 84 |
+
# Two-tiered batching improves throughput
|
| 85 |
+
|
| 86 |
+
We use a larger batch size for generating the first $\tau$ tokens, taking advantage of their lower memory cost, and a smaller batch size for completing the step to avoid OOM error.
|
| 87 |
+
|
| 88 |
+
# 4 Theoretical Guarantees
|
| 89 |
+
|
| 90 |
+
Background and Notation. At each decoding step, which we define as a block of $\tau$ tokens, a width of $N$ beams is maintained. For beam $i$ , let $P_{i}$ denote its partial reward after the first $\tau$ tokens and $F_{i}$ its final reward after completing the step. Our preliminary results in Figure 2 indicate that the final reward is related to the partial reward via a monotonic mapping with added noise:
|
| 91 |
+
|
| 92 |
+
$$
|
| 93 |
+
F _ {i} = g (P _ {i}) + \eta_ {i}
|
| 94 |
+
$$
|
| 95 |
+
|
| 96 |
+
where $g\colon [0,1]\to [0,1]$ is a monotonic increasing function; which need not be linear and $\eta_{i}$ is a noise term with zero mean and variance $\sigma^2$ that can cause deviations from a perfect linear relationship. After the PRM assigns partial rewards, we keep only the top $\frac{N}{M}$ beams and expand each of them into $M$ new beams, restoring the total width $N$ . Let $p = \frac{N}{M}$ , the selection threshold $T$ is the $(1 - 1 / M)$ quantile of the partial-reward distribution (i.e., we keep the
|
| 97 |
+
|
| 98 |
+
top $N / M$ beams). Therefore, a beam survives only if $P_{i}\geq T$.
|
| 99 |
+
|
| 100 |
+
Let the beam that would eventually yield the highest final score be
|
| 101 |
+
|
| 102 |
+
$$
|
| 103 |
+
i ^ {*} = \arg \max _ {i \in N} F _ {i}
|
| 104 |
+
$$
|
| 105 |
+
|
| 106 |
+
Guarantee under noisy, nonlinear conditions. Although the mapping between $P_{i}$ and $F_{i}$ need not be linear, we assume (i) the noise terms $\eta_{i}$ are independent and $\sigma$ -sub-Gaussian, and (ii) the expected partial scores preserve the ordering of the expected final scores. Let
|
| 107 |
+
|
| 108 |
+
$$
|
| 109 |
+
\Delta = \min _ {j \neq i ^ {*}} \left(\mathbb {E} \left[ P _ {i ^ {*}} \right] - \mathbb {E} \left[ P _ {j} \right]\right) > 0
|
| 110 |
+
$$
|
| 111 |
+
|
| 112 |
+
denote the smallest expected gap between the best beam $i^{*}$ and any other beam. Thus
|
| 113 |
+
|
| 114 |
+
$$
|
| 115 |
+
\begin{array}{l} \Pr \left(P _ {i ^ {*}} < T\right) \leq \Pr \left(\exists j \neq i ^ {*}: P _ {j} > P _ {i ^ {*}}\right) \\ \leq (N - 1) \exp \biggl (- \frac {\Delta^ {2}}{4 \sigma^ {2}} \biggr), \\ \end{array}
|
| 116 |
+
$$
|
| 117 |
+
|
| 118 |
+
where the last step applies a sub-Gaussian tail bound to each pairwise difference $P_{i^*} - P_j$ and then takes a union bound over the $N - 1$ non-optimal beams. The bound decays exponentially in $\Delta^2 /\sigma^2$ ; thus, when the expected gap is appreciable and the noise is modest, the risk of pruning the optimal beam is negligible even for large beam widths.
|
| 119 |
+
|
| 120 |
+

|
| 121 |
+
|
| 122 |
+

|
| 123 |
+
Figure 4: (Top) Kendall's Tau and (Bottom) Pearson's correlation coefficient for the partial and final rewards.
|
| 124 |
+
|
| 125 |
+
Best $\tau$ for Early Rejection. A common toy model is to treat each token's (log-)score as an i.i.d. random variable. For beam $i$ , let $X_{i,1},\ldots ,X_{i,L}$ be i.i.d. with mean $\mu_{i}$ and variance $\sigma_i^2$ , where $L$ denotes the final sequence length (number of tokens at completion) and $1\leq \tau \leq L$ . The partial reward after $\tau$ tokens is $P_{i} = \sum_{t = 1}^{\tau}X_{i,t}$ , while the final reward is $F_{i} = \sum_{t = 1}^{L}X_{i,t}$ . Under this model the Pearson correlation reads
|
| 126 |
+
|
| 127 |
+
$$
|
| 128 |
+
\rho \left(P _ {i}, F _ {i}\right) = \frac {\operatorname {C o v} \left(P _ {i} , F _ {i}\right)}{\sqrt {\operatorname {V a r} \left(P _ {i}\right)} \sqrt {\operatorname {V a r} \left(F _ {i}\right)}} = \sqrt {\frac {\tau}{L}}.
|
| 129 |
+
$$
|
| 130 |
+
|
| 131 |
+
The shared first $\tau$ tokens drive the entire covariance: as $\tau \to L$ the correlation approaches 1 meaning the partial score is an almost perfect proxy, whereas as $\tau \to 0$ it vanishes. Figure 4 shows that this $\sqrt{\tau / L}$ trend, tightening toward 1 as $\tau$ increases, is also true empirically.
|
| 132 |
+
|
| 133 |
+
If we require the correlation to exceed a target level $\rho^{*}$ , then
|
| 134 |
+
|
| 135 |
+
$$
|
| 136 |
+
\rho \left(P _ {i}, F _ {i}\right) = \sqrt {\frac {\tau}{L}} \geq \rho^ {*} \quad \Longrightarrow \quad \tau \geq \left(\rho^ {*}\right) ^ {2} L.
|
| 137 |
+
$$
|
| 138 |
+
|
| 139 |
+
For example, attaining $\rho^{*} = 0.8$ demands $\tau \geq 0.64L$.
|
| 140 |
+
|
| 141 |
+
Connection to the Sub-Gaussian Bound. Our rejection guarantee hinges on
|
| 142 |
+
|
| 143 |
+
$$
|
| 144 |
+
\Pr \left(P _ {i ^ {*}} < T\right) \leq (N - 1) \exp \left(- \frac {\Delta^ {2}}{4 \sigma^ {2}}\right),
|
| 145 |
+
$$
|
| 146 |
+
|
| 147 |
+
where $\Delta = \min_{j\neq i^{*}}\left(\mathbb{E}[P_{i^{*}}] - \mathbb{E}[P_{j}]\right)$ is the expected partial-score gap and $\sigma$ is the sub-Gaussian parameter of the per-token noise.
|
| 148 |
+
|
| 149 |
+
A high correlation $\rho (P_i,F_i)$ does not automatically imply a large gap $\Delta$ , but it does indicate that beams ranking highly under the partial reward tend to rank highly under the final reward. In practice, choosing $\tau$ so that
|
| 150 |
+
|
| 151 |
+
$$
|
| 152 |
+
\rho \left(P _ {i}, F _ {i}\right) = \sqrt {\frac {\tau}{L}} \geq \rho^ {*}
|
| 153 |
+
$$
|
| 154 |
+
|
| 155 |
+
ensures the partial scores are sufficiently predictive; once this condition is met, the tail bound above tells us the probability of mistakenly pruning the optimal beam is exponentially small in $\Delta^2 /\sigma^2$ . In practice, after fixing $\tau$ we measure the empirical gap $\Delta$ on a held-out set and confirm it comfortably exceeds the estimated noise scale $\sigma$ .
|
| 156 |
+
|
| 157 |
+

|
| 158 |
+
|
| 159 |
+

|
| 160 |
+
|
| 161 |
+

|
| 162 |
+
Figure 5: We evaluate our implementation of Early Rejection (ER) on the SAT-MATH dataset from the AGIEval benchmark using two different LLMs and PRMs. The numbers indicate that ER achieves performance similar to Vanilla Beam Search while consuming far less compute.
|
| 163 |
+
|
| 164 |
+

|
| 165 |
+
|
| 166 |
+
# 5 Experiments
|
| 167 |
+
|
| 168 |
+
We evaluate our method on three challenging math-reasoning benchmarks, MATH-500 (Lightman et al., 2023b), SAT-MATH from AGIEval (Zhong et al., 2023), and AIME 2024. For generation we use the instruct variants of two open-source LLMs, Llama-3.2-3B (Meta, 2024) and Qwen-2.5-3B (Qwen et al., 2024), selected for their strong reasoning ability at modest scale. Process evaluation is performed with two PRMs of different capacities, MathShepherd-Mistral-7B (Wang et al., 2023) and Skywork-PRM-1.5B, allowing us to study the impact of early rejection for PRMs of different sizes. Early rejection is triggered after a prefix of $\tau \in \{32, 64, 128\}$ tokens. These thresholds are motivated by preliminary analysis (Figure 4), which shows that partial-reward scores at these lengths are already highly correlated with final rewards. At each decoding step we sample $N \in \{4, 8, 16, 32, 64\}$ candidate beams and retain the top $M = 4$, mirroring prior PRM-guided search settings (Snell et al., 2024). We compare our early-rejection decoder with the conventional pipeline that scores only fully completed beams, reporting average answer accuracy and total inference FLOPs for each run. All experiments are conducted on an HPC cluster, with each run executed using four NVIDIA A100 GPUs (40 GB memory each).
|
| 169 |
+
|
| 170 |
+
# 5.1 Experimental Results
|
| 171 |
+
|
| 172 |
+
Experimental results in Figure 5 on the SAT-MATH dataset and Figure 6 on the Math-500 and AIME 2024 datasets highlight the effectiveness of Early Rejection (ER) in reducing compute while preserving end-task accuracy across different PRMs, LLMs, and $\tau$ values. Across all configurations, we observe that early rejection acts as a safe and compute-efficient strategy that adapts well to LLM characteristics and PRM granularity. Appendix A provides a comprehensive breakdown of accuracy and compute trade-offs across all datasets, $\tau$ values, beam sizes, and LLM-PRM configurations. Building on these results, we articulate five key observations that our subsequent experiments directly address.
|
| 173 |
+
|
| 174 |
+
Observation ①: Partial PRM scores at very short prefixes reliably predict final scores. Our empirical analysis confirms that partial rewards become highly predictive of final rewards after surprisingly short prefixes. Figure 4 shows the trend as we sweep the decision prefix $\tau$ from 8 to 512 tokens. The two correlations rise monotonically, following the $\sqrt{\tau / L}$ scaling; at $\tau = 32$ tokens $\rho$ already exceeds 0.78, and $\tau = 64$ pushes both metrics above 0.9, after which they plateau. A complementary view is given in Figure 2, where a linear fit between half-step partial rewards and full-step rewards achieves $R^2 = 0.63$ with the MetaMath-7B PRM and $R^2 =$
|
| 175 |
+
|
| 176 |
+

|
| 177 |
+
|
| 178 |
+

|
| 179 |
+
|
| 180 |
+

|
| 181 |
+
Figure 6: We evaluate our implementation of Early Rejection (ER) on the Math-500 and AIME datasets using two different LLMs with MathShepherd-7B as the reward model. The numbers indicate that ER achieves performance similar to Vanilla Beam Search while consuming far less compute.
|
| 182 |
+
|
| 183 |
+

|
| 184 |
+
|
| 185 |
+
0.72 with MathShepherd-7B, demonstrating that the effect generalizes across reward models. These findings validate our Partial Reward Model hypothesis that even a one-third length prefix offers a stable ranking signal, and the probability of incorrectly rejecting the optimal beam decays exponentially once the expected partial-score gap $\Delta$ dominates the sub-Gaussian noise $\sigma$ , as formalized in Section 4. Practically, this means we can invoke early rejection after the first 32-64 tokens with negligible risk while removing $60 - 85\%$ of downstream PRM calls and generation FLOPs.
|
| 186 |
+
|
| 187 |
+
Observation ②: Smaller PRMs can match or exceed larger PRMs in accuracy while saving more compute, especially on well-structured outputs. The smaller Skywork-PRM-1.5B achieves equal or higher end-task accuracy than the MathShepherd-Mistral-7B baseline, while also enabling a higher number of FLOP reductions. Across both Llama-3.2-3B and Qwen2.5-3B, Skywork yields a $0.7 - 2.1\%$ accuracy gain for smaller beam sizes and stays within $0.3\%$ elsewhere, contradicting the common intuition that "bigger judge = better answers" (Leike et al., 2018). We also observe a greater number of FLOP reductions with Skywork-PRM-1.5B, primarily because the 3B-sized LLM becomes the computational bottleneck, and early rejection allows us to skip costly comple
|
| 188 |
+
|
| 189 |
+
tions, thereby saving compute more frequently.
|
| 190 |
+
|
| 191 |
+
Another key observation is that smaller PRMs benefit from more well-structured answers. Skywork-PRM-1.5B generally performs better with Llama-3.2-3B than with Qwen2.5-3B, as Llama tends to produce more structured and instruction-following responses compared to Qwen. Although both LLMs are instruction-tuned, Llama adheres to instructions more faithfully, making it easier for the smaller PRM (Skywork) to evaluate intermediate steps accurately. In contrast, larger PRMs like MathShepherd-Mistral-7B are more robust to such variations in LLM behavior.
|
| 192 |
+
|
| 193 |
+
Observation ③: Early rejection yields large accuracy gains for exploratory LLMs at small beam widths but offers diminishing accuracy returns for deterministic LLMs and wider beams. Qwen2.5-3B often generates long, exploratory reasoning traces, so many beams appear weak after the first $\tau = 32 - 64$ tokens, even though some of them would eventually reach correct solutions. In such cases, the partial reward filter discards the clearly unpromising beams early. Here early rejection frees up beam slots for new candidates. This allows the search to explore a broader set of reasoning paths, effectively expanding the search space without increasing the beam width $N$ .
|
| 194 |
+
|
| 195 |
+
In contrast, Llama-3.2-3B tends to produce
|
| 196 |
+
|
| 197 |
+

|
| 198 |
+
Figure 7: Total FLOPs consumed across different LLM-PRM combinations with and without Early Rejection. We observe consistent and substantial reductions in compute, with $\tau = 64$ yielding up to $9\times$ savings. Larger prefix lengths enable more reliable pruning, significantly lowering overall inference cost without compromising accuracy.
|
| 199 |
+
|
| 200 |
+
shorter, more deterministic traces where the top- $p$ beams already rank highly from the start. As a result, early rejection removes fewer low-quality candidates and provides limited additional exploration. Empirically, early rejection improves Qwen's accuracy by up to $3.5\%$ at $N = 4$ and $1.6\%$ at $N = 8$ , whereas Llama sees at most a $0.3\%$ gain. Once the beam width is sufficiently large ( $N \geq 32$ ), the baseline search already explores the space well, so the benefits of early rejection shift from accuracy gains to compute savings.
|
| 201 |
+
|
| 202 |
+
Observation ④: At $\tau = 64$ tokens, early rejection achieves higher accuracy while using less compute than at $\tau = 32$ tokens. Although we always retain the same number of beams per step (the top $N / M$ ), their quality improves significantly as we increase the prefix length $\tau$ . At $\tau = 32$ , the correlation between partial and full rewards is about 0.78. This means around $20\%$ of the beams are incorrectly ranked, so some low-quality beams make it through and have to be fully generated and evaluated, wasting compute. At $\tau = 64$ , the correlation exceeds 0.90 and flattens out, meaning nearly all retained beams are genuinely promising. Very few low-quality beams slip through. As a result, even though we keep the same number of survivors, the number of bad survivors — and the FLOPs spent on them — drops when increasing $\tau$ from 32 to 64.
|
| 203 |
+
|
| 204 |
+
Observation ⑤: Language model behavior (not size) drives compute, and early rejection is most effective when it blocks exploratory failures early. Figure 7 shows that Qwen2.5-3B incurs significantly higher total FLOPs than Llama-3.2-3B under identical early rejection settings. While both models are similar in size, their generation behaviors differ: Qwen tends to produce longer, exploratory chains of thought, whereas Llama generates more concise, deterministic outputs. As a
|
| 205 |
+
|
| 206 |
+
result, when early rejection fails to prune a weak Qwen beam, it often leads to a long and costly completion, inflating total compute. Early rejection is most effective in these exploratory settings, where catching bad completions early prevents large downstream FLOPs. This explains why Qwen exhibits larger absolute FLOP reductions, especially when paired with a lightweight PRM like Skywork-1.5B. In contrast, Llama's beams tend to converge quickly, offering fewer opportunities for savings. These results highlight that the structure of the generation process, not just model size, governs the impact of early rejection on efficiency.
|
| 207 |
+
|
| 208 |
+
# 6 Conclusion
|
| 209 |
+
|
| 210 |
+
We demonstrate that PRMs can be effectively repurposed as Partial Reward Models, enabling a single mid-generation evaluation to provide a reliable accept or reject signal. This allows weak beams to be pruned early, well before full reasoning steps are completed, thereby reducing unnecessary computation without sacrificing final accuracy. Under mild noise assumptions, we provide theoretical guarantees showing that the probability of mistakenly discarding the optimal beam decays exponentially with prefix length, offering formal safety for early rejection. Extensive experiments across SAT-MATH, Math-500, and AIME confirm the practical benefits: early rejection reduces inference-time FLOPs by $1.4 \times -9 \times$ when using a mid-sized PRM (7B parameters), with no degradation in task performance. Even with a smaller PRM (1.5B), we observe $1.5 \times -4 \times$ compute savings, highlighting that lightweight evaluators are sufficient for effective and efficient reasoning. Together, these findings establish early rejection as a simple, model-agnostic plug-in that narrows the gap between compute
|
| 211 |
+
|
| 212 |
+
heavy tree search and fast single-pass decoding, offering state-of-the-art compute efficiency without compromising solution quality.
|
| 213 |
+
|
| 214 |
+
# Limitations
|
| 215 |
+
|
| 216 |
+
Our approach relies on the monotonicity and calibration of PRM scores; if partial rewards correlate weakly with final quality, as might occur in tasks with delayed or non-monotonic utilities (e.g., code synthesis with backtracking or creative writing), early rejection can mis-reject the eventual best beam. The study is confined to text-only, math-centric benchmarks. Larger models, especially for multimodal tasks, or domains with sparse positive signals may exhibit different trade-offs. While we report FLOP reductions, we do not quantify the memory overhead of storing intermediate PRM states after $\tau$ tokens are generated. Finally, the theoretical guarantees assume independent step-wise noise and fixed $\tau$ , leaving open questions about adaptive $\tau$ schedules and integration with policy-learning frameworks such as RLHF or DPO.
|
| 217 |
+
|
| 218 |
+
# Ethical Considerations
|
| 219 |
+
|
| 220 |
+
While Early Rejection reduces inference compute by up to $9 \times$ , this efficiency could also facilitate misuse, such as the automated generation of spam or disinformation. The method's safety relies on the assumption that PRM scores are monotonic with respect to final output quality. However, this assumption may not hold beyond the math-focused benchmarks evaluated in this work, particularly for tasks involving delayed or non-monotonic rewards. As a result, the algorithm risks discarding high-quality candidates prematurely or reinforcing hidden biases.
|
| 221 |
+
|
| 222 |
+
# Acknowledgments
|
| 223 |
+
|
| 224 |
+
The work of Azal Ahmad Khan was supported in part by the Amazon Machine Learning Systems Fellowship and the UMN GAGE Fellowship. Xinran Wang and Ali Anwar were supported by the 3M Science and Technology Graduate Fellowship and the Samsung Global Research Outreach Award.
|
| 225 |
+
|
| 226 |
+
# References
|
| 227 |
+
|
| 228 |
+
Janice Ahn, Rishu Verma, Renze Lou, Di Liu, Rui Zhang, and Wenpeng Yin. 2024. Large language models for mathematical reasoning: Progresses and challenges. arXiv preprint arXiv:2402.00157.
|
| 229 |
+
|
| 230 |
+
Amogh Akella. 2024. Improving math problem solving in large language models through categorization and strategy tailoring. arXiv preprint arXiv:2411.00042.
|
| 231 |
+
Emunah Chan. 2024. Understanding logical reasoning ability of large language models. Available at SSRN 4943448.
|
| 232 |
+
Zhaorun Chen, Zhuokai Zhao, Zhihong Zhu, Ruiqi Zhang, Xiang Li, Bhiksha Raj, and Huaxiu Yao. 2024. Autoprm: Automating procedural supervision for multi-step reasoning via controllable question decomposition. arXiv preprint arXiv:2402.11452.
|
| 233 |
+
Fengxiang Cheng, Haoxuan Li, Fenrong Liu, Robert van Rooij, Kun Zhang, and Zhouchen Lin. 2025. Empowering llms with logical reasoning: A comprehensive survey. arXiv preprint arXiv:2502.15652.
|
| 234 |
+
Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, and 1 others. 2021. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168.
|
| 235 |
+
Antonia Creswell, Murray Shanahan, and Irina Higgins. 2022. Selection-inference: Exploiting large language models for interpretable logical reasoning. arXiv preprint arXiv:2205.09712.
|
| 236 |
+
Jiangshu Du, Yibo Wang, Wenting Zhao, Zhongfen Deng, Shuaiqi Liu, Renze Lou, Henry Peng Zou, Pranav Narayanan Venkit, Nan Zhang, Mukund Srinath, and 1 others. 2024. Llms assist nlp researchers: Critique paper (meta-) reviewing. arXiv preprint arXiv:2406.16253.
|
| 237 |
+
Xidong Feng, Ziyu Wan, Muning Wen, Stephen Marcus McAleer, Ying Wen, Weinan Zhang, and Jun Wang. 2023. Alphazero-like tree-search can guide large language model decoding and training. arXiv preprint arXiv:2309.17179.
|
| 238 |
+
Yonatan Geifman and Ran El-Yaniv. 2019. SelectiveNet: A deep neural network with an integrated reject option. In Proceedings of the 36th International Conference on Machine Learning, volume 97 of Proceedings of Machine Learning Research, pages 2151-2159. PMLR.
|
| 239 |
+
Rishi Hazra, Gabriele Venturato, Pedro Zuidberg Dos Martires, and Luc De Raedt. 2025. Have large language models learned to reason? a characterization via 3-sat phase transition. arXiv preprint arXiv:2504.03930.
|
| 240 |
+
Arian Hosseini, Xingdi Yuan, Nikolay Malkin, Aaron Courville, Alessandro Sordoni, and Rishabh Agarwal. 2024. V-star: Training verifiers for self-taught reasoners. arXiv preprint arXiv:2402.06457.
|
| 241 |
+
Pengfei Hu, Zhenrong Zhang, Qikai Chang, Shuhang Liu, Jiefeng Ma, Jun Du, Jianshu Zhang, Quan Liu, Jianqing Gao, Feng Ma, and 1 others. 2025. Prm-bas: Enhancing multimodal reasoning through
|
| 242 |
+
|
| 243 |
+
prm-guided beam annealing search. arXiv preprint arXiv:2504.10222.
|
| 244 |
+
Jan Hendrik Kirchner, Yining Chen, Harri Edwards, Jan Leike, Nat McAleese, and Yuri Burda. 2024. Prover-verifier games improve legibility of llm outputs. arXiv preprint arXiv:2407.13692.
|
| 245 |
+
Takeshi Kojima, Shixiang Shane Gu, Machel Reid, Yutaka Matsuo, and Yusuke Iwasawa. 2022. Large language models are zero-shot reasoners. Advances in neural information processing systems, 35:22199-22213.
|
| 246 |
+
Sylvain Kouemo Ngassom, Arghavan Moradi Dakhel, Florian Tambon, and Foutse Khomh. 2024. Chain of targeted verification questions to improve the reliability of code generated by llms. In Proceedings of the 1st ACM International Conference on AI-Powered Software, pages 122-130.
|
| 247 |
+
Tian Lan, Wenwei Zhang, Chen Xu, Heyan Huang, Dahua Lin, Kai Chen, and Xian-Ling Mao. 2024. Criticeval: Evaluating large-scale language model as critic. Advances in Neural Information Processing Systems, 37:66907-66960.
|
| 248 |
+
Jan Leike, David Krueger, Tom Everitt, Miljan Martic, Vishal Maini, and Shane Legg. 2018. Scalable agent alignment via reward modeling: a research direction. arXiv preprint arXiv:1811.07871.
|
| 249 |
+
Hunter Lightman, Vineet Kosaraju, Yura Burda, Harri Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. 2023a. Let's verify step by step. arXiv preprint arXiv:2305.20050.
|
| 250 |
+
Hunter Lightman, Vineet Kosaraju, Yuri Burda, Harrison Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. 2023b. Let's verify step by step. In *The Twelfth International Conference on Learning Representations*.
|
| 251 |
+
Zicheng Lin, Zhibin Gou, Tian Liang, Ruilin Luo, Haowei Liu, and Yujiu Yang. 2024. Criticbench: Benchmarking llms for critique-correct reasoning. arXiv preprint arXiv:2402.14809.
|
| 252 |
+
Liangchen Luo, Zi Lin, Yinxiao Liu, Lei Shu, Yun Zhu, Jingbo Shang, and Lei Meng. 2023. Critique ability of large language models. arXiv preprint arXiv:2310.04815.
|
| 253 |
+
Liangchen Luo, Yinxiao Liu, Rosanne Liu, Samrat Phatale, Meiqi Guo, Harsh Lara, Yunxuan Li, Lei Shu, Yun Zhu, Lei Meng, and 1 others. 2024. Improve mathematical reasoning in language models by automated process supervision. arXiv preprint arXiv:2406.06592.
|
| 254 |
+
Dakota Mahan, Duy Van Phung, Rafael Rafailov, Chase Blagden, Nathan Lile, Louis Castricato, Jan-Philipp Franken, Chelsea Finn, and Alon Albalak. 2024. Generative reward models. arXiv preprint arXiv:2410.12832.
|
| 255 |
+
|
| 256 |
+
Meta. 2024. Llama 3.2: Revolutionizing edge ai and vision with open, customizable models.
|
| 257 |
+
Hussein Mozannar and David Sontag. 2020. Consistent estimators for learning to defer to an expert. In Proceedings of the 37th International Conference on Machine Learning, volume 119 of Proceedings of Machine Learning Research, pages 7076-7087. PMLR.
|
| 258 |
+
Jianing Qi, Hao Tang, and Zhigang Zhu. 2024. Verifierq: Enhancing llm test time compute with q-learning-based verifiers. arXiv preprint arXiv:2410.08048.
|
| 259 |
+
Team Qwen, Baosong Yang, B Zhang, B Hui, B Zheng, B Yu, Chengpeng Li, D Liu, F Huang, H Wei, and 1 others. 2024. Qwen2 technical report. arXiv preprint.
|
| 260 |
+
Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. 2024. Scaling llm test-time compute optimally can be more effective than scaling model parameters. arXiv preprint arXiv:2408.03314.
|
| 261 |
+
Hanshi Sun, Momin Haider, Ruiqi Zhang, Huitao Yang, Jiahao Qiu, Ming Yin, Mengdi Wang, Peter Bartlett, and Andrea Zanette. 2024. Fast best-of-n decoding via speculative rejection. arXiv preprint arXiv:2410.20290.
|
| 262 |
+
Peiyi Wang, Lei Li, Zhihong Shao, RX Xu, Damai Dai, Yifei Li, Deli Chen, Yu Wu, and Zhifang Sui. 2023. Math-shepherd: Verify and reinforce lms step-by-step without human annotations. arXiv preprint arXiv:2312.08935.
|
| 263 |
+
Teng Wang, Zhangyi Jiang, Zhenqi He, Wenhan Yang, Yanan Zheng, Zeyu Li, Zifan He, Shenyang Tong, and Hailei Gong. 2025. Towards hierarchical multi-step reward models for enhanced reasoning in large language models. arXiv preprint arXiv:2503.13551.
|
| 264 |
+
Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, and 1 others. 2022. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, 35:24824-24837.
|
| 265 |
+
Fengli Xu, Qianyue Hao, Zefang Zong, Jingwei Wang, Yunke Zhang, Jingyi Wang, Xiaochong Lan, Jiahui Gong, Tianjian Ouyang, Fanjin Meng, and 1 others. 2025. Towards large reasoning models: A survey of reinforced reasoning with large language models. arXiv preprint arXiv:2501.09686.
|
| 266 |
+
Yuqing Yang, Yuedong Xu, and Lei Jiao. 2024. A queueing theoretic perspective on low-latency llm inference with variable token length. arXiv preprint arXiv:2407.05347.
|
| 267 |
+
Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Tom Griffiths, Yuan Cao, and Karthik Narasimhan. 2023. Tree of thoughts: Deliberate problem solving with large language models. Advances in neural information processing systems, 36:11809-11822.
|
| 268 |
+
|
| 269 |
+
Dan Zhang, Sining Zhoubian, Ziniu Hu, Yisong Yue, Yuxiao Dong, and Jie Tang. 2024a. Rest-mcts*: Llm self-training via process reward guided tree search. Advances in Neural Information Processing Systems, 37:64735-64772.
|
| 270 |
+
Lunjun Zhang, Arian Hosseini, Hritik Bansal, Mehran Kazemi, Aviral Kumar, and Rishabh Agarwal. 2024b. Generative verifiers: Reward modeling as next-token prediction. arXiv preprint arXiv:2408.15240.
|
| 271 |
+
Wanjun Zhong, Ruixiang Cui, Yiduo Guo, Yaobo Liang, Shuai Lu, Yanlin Wang, Amin Saied, Weizhu Chen, and Nan Duan. 2023. Agieval: A human-centric benchmark for evaluating foundation models. arXiv preprint arXiv:2304.06364.
|
| 272 |
+
Denny Zhou, Nathanael Scharli, Le Hou, Jason Wei, Nathan Scales, Xuezhi Wang, Dale Schuurmans, Claire Cui, Olivier Bousquet, Quoc Le, and 1 others. 2022. Least-to-most prompting enables complex reasoning in large language models. arXiv preprint arXiv:2205.10625.
|
| 273 |
+
|
| 274 |
+
# Algorithm 2 Beam Search with Process Reward Models
|
| 275 |
+
1: Input: LLM Model, PRM Model, Beam count $N$ , Beam width $M$ , Temperature $T$ , Stopping criterion, EOS token or max search depth, Batch Size $b$
|
| 276 |
+
2: Initialize a set of $N$ candidate beams
|
| 277 |
+
3: for each beam do
|
| 278 |
+
4: Sample $N$ independent steps using the LLM with temperature $T$
|
| 279 |
+
5: Apply the stopping criterion (e.g., new line or double new line)
|
| 280 |
+
6: end for
|
| 281 |
+
7: Score each sampled step using the PRM
|
| 282 |
+
8: Select the top $N / M$ steps with the highest scores
|
| 283 |
+
9: Expand the selected steps:
|
| 284 |
+
10: for each selected step do
|
| 285 |
+
11: Sample $M$ next steps
|
| 286 |
+
12: end for
|
| 287 |
+
13: while EOS token not reached and max search depth not exceeded do
|
| 288 |
+
14: Repeat steps 7 - 12
|
| 289 |
+
15: end while
|
| 290 |
+
16: return The best sequence found
|
| 291 |
+
|
| 292 |
+
Algorithm. Algorithm 2 shows the conventional PRM-guided beam search and Algorithm 3 shows our early-rejection variant. Both algorithms maintain the same top-level structure of iterative beam expansion, but differ critically in how and when PRM scores are computed. The standard method evaluates only fully completed beams, resulting in redundant computation on unpromising candidates. In contrast, our early-rejection variant computes partial rewards after just $\tau$ tokens using the same PRM, enabling efficient early rejection. This architectural shift introduces a two-tiered batching scheme, with a larger batch size for partial generations and a smaller batch size for step completion, yielding significant compute savings without degrading performance, as shown in our experimental results.
|
| 293 |
+
|
| 294 |
+
Results. To supplement the main results presented in Section 5, we provide detailed tables reporting the accuracy and compute trade-offs for every combination of language model (LLM), process reward model (PRM), beam size, and early rejection threshold $\tau$ . These results span three math reasoning benchmarks: SAT-MATH, Math-500, and AIME.
|
| 295 |
+
|
| 296 |
+
Table 1 reports the results on the SAT-MATH dataset from AGIEval. For each LLM-PRM pair, we compare standard decoding ("Vanilla") with our early rejection method across multiple $\tau$ values. Each cell reports both the accuracy and the total FLOPs used for inference. We observe that early rejection achieves similar or higher accuracy at significantly reduced compute, especially with exploratory LLMs like Qwen-2.5-3B.
|
| 297 |
+
|
| 298 |
+
Table 2 extends the analysis to the Math-500 and AIME 2024 benchmarks, using MathShepherd-Mistral-7B as the PRM. Again, we observe consistent trends across datasets: as $\tau$ increases, early rejection becomes more selective and cost-efficient, with only minor losses (if any) in final accuracy.
|
| 299 |
+
|
| 300 |
+
Table 3 aggregates FLOP consumption across all LLM-PRM combinations under three decoding regimes: Vanilla, $\mathrm{ER}(\tau = 32)$ , and $\mathrm{ER}(\tau = 64)$ . The results reveal that early rejection with $\tau = 64$ consistently achieves the lowest compute cost without compromising output quality, yielding up to $9\times$ reduction in total inference FLOPs.
|
| 301 |
+
|
| 302 |
+
Together, these tables validate the scalability and robustness of our early rejection method across models, evaluators, datasets, and rejection thresholds.
|
| 303 |
+
|
| 304 |
+
Table 1: SAT-MATH results comparing vanilla decoding and Early Rejection (ER) across multiple beam sizes and $\tau$ values. Each cell reports (top) accuracy and (bottom) total inference FLOPs $\left( {\times {10}^{18}}\right)$ .
|
| 305 |
+
|
| 306 |
+
<table><tr><td rowspan="2">Model</td><td rowspan="2">PRM</td><td rowspan="2">Setting</td><td colspan="5">Number of Samples (τ)</td></tr><tr><td>4</td><td>8</td><td>16</td><td>32</td><td>64</td></tr><tr><td rowspan="16">Llama-3.2 -3b</td><td rowspan="8">MathSheperd -7b</td><td rowspan="2">Vanilla</td><td>37.14</td><td>38.76</td><td>39.55</td><td>41.16</td><td>43.12</td></tr><tr><td>1.32</td><td>7.48</td><td>15.47</td><td>31.21</td><td>80.34</td></tr><tr><td rowspan="2">ER (τ = 32)</td><td>30.84</td><td>33.94</td><td>35.14</td><td>40.36</td><td>42.13</td></tr><tr><td>0.24</td><td>2.73</td><td>9.40</td><td>21.99</td><td>55.94</td></tr><tr><td rowspan="2">ER (τ = 64)</td><td>32.57</td><td>35.82</td><td>38.81</td><td>40.76</td><td>42.87</td></tr><tr><td>0.24</td><td>1.08</td><td>4.34</td><td>14.85</td><td>49.55</td></tr><tr><td rowspan="2">ER (τ = 128)</td><td>34.55</td><td>38.25</td><td>39.07</td><td>38.31</td><td>40.65</td></tr><tr><td>0.21</td><td>0.85</td><td>3.86</td><td>13.11</td><td>45.90</td></tr><tr><td rowspan="8">Skywork -1.5b</td><td rowspan="2">Vanilla</td><td>40.38</td><td>41.28</td><td>42.57</td><td>43.87</td><td>45.64</td></tr><tr><td>1.25</td><td>3.49</td><td>10.83</td><td>25.85</td><td>39.60</td></tr><tr><td rowspan="2">ER (τ = 32)</td><td>32.77</td><td>35.30</td><td>39.67</td><td>38.54</td><td>44.14</td></tr><tr><td>0.21</td><td>1.29</td><td>4.54</td><td>9.38</td><td>22.13</td></tr><tr><td rowspan="2">ER (τ = 64)</td><td>38.54</td><td>39.17</td><td>41.93</td><td>42.97</td><td>44.61</td></tr><tr><td>0.13</td><td>0.83</td><td>4.85</td><td>7.63</td><td>19.92</td></tr><tr><td rowspan="2">ER (τ = 128)</td><td>32.24</td><td>33.33</td><td>37.21</td><td>39.09</td><td>39.55</td></tr><tr><td>0.11</td><td>0.57</td><td>4.23</td><td>6.75</td><td>16.31</td></tr><tr><td rowspan="16">Qwen2.5 -3b</td><td rowspan="8">MathSheperd -7b</td><td 
rowspan="2">Vanilla</td><td>37.93</td><td>40.59</td><td>46.31</td><td>47.20</td><td>51.47</td></tr><tr><td>2.42</td><td>15.70</td><td>37.35</td><td>80.41</td><td>190.35</td></tr><tr><td rowspan="2">ER (τ = 32)</td><td>41.46</td><td>42.14</td><td>45.62</td><td>47.95</td><td>50.18</td></tr><tr><td>0.86</td><td>1.96</td><td>8.85</td><td>25.73</td><td>106.77</td></tr><tr><td rowspan="2">ER (τ = 64)</td><td>45.66</td><td>46.36</td><td>48.50</td><td>51.04</td><td>53.51</td></tr><tr><td>0.53</td><td>1.37</td><td>7.91</td><td>24.81</td><td>100.61</td></tr><tr><td rowspan="2">ER (τ = 128)</td><td>47.13</td><td>48.54</td><td>50.91</td><td>53.11</td><td>56.84</td></tr><tr><td>0.49</td><td>1.12</td><td>5.76</td><td>17.33</td><td>79.98</td></tr><tr><td rowspan="8">Skywork -1.5b</td><td rowspan="2">Vanilla</td><td>31.63</td><td>40.49</td><td>44.51</td><td>47.29</td><td>50.98</td></tr><tr><td>1.37</td><td>4.77</td><td>10.37</td><td>27.31</td><td>88.77</td></tr><tr><td rowspan="2">ER (τ = 32)</td><td>37.13</td><td>43.13</td><td>45.19</td><td>49.59</td><td>51.33</td></tr><tr><td>0.33</td><td>1.36</td><td>6.67</td><td>17.29</td><td>47.43</td></tr><tr><td rowspan="2">ER (τ = 64)</td><td>40.67</td><td>43.26</td><td>47.88</td><td>51.41</td><td>53.88</td></tr><tr><td>0.31</td><td>1.28</td><td>6.40</td><td>15.95</td><td>42.45</td></tr><tr><td rowspan="2">ER (τ = 128)</td><td>42.26</td><td>46.55</td><td>51.82</td><td>52.61</td><td>55.09</td></tr><tr><td>0.25</td><td>0.60</td><td>2.40</td><td>7.50</td><td>25.33</td></tr></table>
|
| 307 |
+
|
| 308 |
+
Table 2: Results on Math-500 and AIME datasets with MathShepherd-Mistral-7B as the PRM. Each configuration shows accuracy (top) and total FLOPs (bottom) for different beam sizes and $\tau$ thresholds.
|
| 309 |
+
|
| 310 |
+
<table><tr><td rowspan="2">Dataset</td><td rowspan="2">Model</td><td rowspan="2">Setting</td><td colspan="5">Number of Samples (τ)</td></tr><tr><td>4</td><td>8</td><td>16</td><td>32</td><td>64</td></tr><tr><td rowspan="16">Math-500</td><td rowspan="8">Llama-3.2 -3b</td><td rowspan="2">Vanilla</td><td>46.20</td><td>48.00</td><td>49.06</td><td>50.81</td><td>51.44</td></tr><tr><td>5.04</td><td>27.51</td><td>33.22</td><td>137.54</td><td>202.27</td></tr><tr><td rowspan="2">ER (τ = 32)</td><td>39.63</td><td>40.30</td><td>44.60</td><td>46.60</td><td>47.21</td></tr><tr><td>1.68</td><td>10.15</td><td>27.42</td><td>92.15</td><td>189.23</td></tr><tr><td rowspan="2">ER (τ = 64)</td><td>42.00</td><td>43.20</td><td>48.67</td><td>50.43</td><td>51.19</td></tr><tr><td>1.50</td><td>8.67</td><td>23.45</td><td>101.17</td><td>184.71</td></tr><tr><td rowspan="2">ER (τ = 128)</td><td>45.46</td><td>46.80</td><td>48.74</td><td>50.29</td><td>51.34</td></tr><tr><td>0.60</td><td>3.21</td><td>18.91</td><td>77.46</td><td>138.63</td></tr><tr><td rowspan="8">Qwen2.5 -3b</td><td rowspan="2">Vanilla</td><td>51.67</td><td>53.25</td><td>54.08</td><td>56.73</td><td>58.80</td></tr><tr><td>14.02</td><td>47.48</td><td>65.32</td><td>250.03</td><td>536.10</td></tr><tr><td rowspan="2">ER (τ = 32)</td><td>45.87</td><td>49.59</td><td>51.41</td><td>52.80</td><td>55.60</td></tr><tr><td>2.41</td><td>10.58</td><td>56.49</td><td>134.12</td><td>354.91</td></tr><tr><td rowspan="2">ER (τ = 64)</td><td>53.88</td><td>54.19</td><td>55.60</td><td>57.11</td><td>59.34</td></tr><tr><td>2.10</td><td>9.28</td><td>42.33</td><td>112.46</td><td>263.08</td></tr><tr><td rowspan="2">ER (τ = 128)</td><td>55.21</td><td>59.43</td><td>60.40</td><td>62.61</td><td>66.13</td></tr><tr><td>1.61</td><td>7.45</td><td>32.54</td><td>94.52</td><td>195.23</td></tr><tr><td rowspan="16">AIME</td><td rowspan="8">Llama-3.2 -3b</td><td 
rowspan="2">Vanilla</td><td>3.33</td><td>6.67</td><td>6.67</td><td>10.00</td><td>13.33</td></tr><tr><td>0.10</td><td>0.25</td><td>0.72</td><td>1.56</td><td>2.61</td></tr><tr><td rowspan="2">ER (τ = 32)</td><td>0.00</td><td>3.33</td><td>3.33</td><td>6.67</td><td>10.00</td></tr><tr><td>0.05</td><td>0.16</td><td>0.46</td><td>1.14</td><td>2.13</td></tr><tr><td rowspan="2">ER (τ = 64)</td><td>3.33</td><td>3.33</td><td>10.00</td><td>10.00</td><td>13.33</td></tr><tr><td>0.02</td><td>0.09</td><td>0.41</td><td>0.72</td><td>1.89</td></tr><tr><td rowspan="2">ER (τ = 128)</td><td>3.33</td><td>6.67</td><td>10.00</td><td>13.33</td><td>13.33</td></tr><tr><td>0.02</td><td>0.04</td><td>0.38</td><td>0.61</td><td>2.01</td></tr><tr><td rowspan="8">Qwen2.5 -3b</td><td rowspan="2">Vanilla</td><td>6.67</td><td>10.00</td><td>10.00</td><td>13.33</td><td>16.67</td></tr><tr><td>0.13</td><td>0.31</td><td>1.19</td><td>2.68</td><td>5.51</td></tr><tr><td rowspan="2">ER (τ = 32)</td><td>3.33</td><td>6.67</td><td>6.67</td><td>10.00</td><td>10.00</td></tr><tr><td>0.05</td><td>0.21</td><td>0.63</td><td>1.34</td><td>3.35</td></tr><tr><td rowspan="2">ER (τ = 64)</td><td>6.67</td><td>6.67</td><td>10.00</td><td>13.33</td><td>13.33</td></tr><tr><td>0.04</td><td>0.12</td><td>0.47</td><td>0.93</td><td>2.36</td></tr><tr><td rowspan="2">ER (τ = 128)</td><td>6.67</td><td>6.67</td><td>10.00</td><td>13.33</td><td>16.67</td></tr><tr><td>0.02</td><td>0.09</td><td>0.39</td><td>0.77</td><td>2.12</td></tr></table>
|
| 311 |
+
|
| 312 |
+
# Algorithm 3 Beam Search with Early Rejection
|
| 313 |
+
|
| 314 |
+
1: Input: LLM Model, PRM Model, Beam count $N$ , Beam width $M$ , Temperature $T$ , Stopping criterion, EOS token or max search depth, $b_{1} > b_{2}$
|
| 315 |
+
2: Initialize a set of $N$ candidate beams
|
| 316 |
+
3: for each beam do
|
| 317 |
+
4: Sample $N$ independent steps using the LLM with temperature $T$ and batch size $b_{1}$
|
| 318 |
+
5: Apply the stopping criterion ( $\tau$ tokens generated or EOS token.)
|
| 319 |
+
6: end for
|
| 320 |
+
7: Score each sampled step using the PRM
|
| 321 |
+
8: Select the top $N / M$ steps with the highest scores
|
| 322 |
+
9: Complete the selected steps:
|
| 323 |
+
10: for each selected step do
|
| 324 |
+
11: Complete the step until EOS token with batch size $b_{2}$ .
|
| 325 |
+
12: end for
|
| 326 |
+
13: Expand the selected steps:
|
| 327 |
+
14: for each selected step do
|
| 328 |
+
15: Sample $M$ next steps
|
| 329 |
+
16: end for
|
| 330 |
+
17: while EOS token not reached and max search depth not exceeded do
|
| 331 |
+
18: Repeat steps 7 - 16
|
| 332 |
+
19: end while
|
| 333 |
+
20: return The best sequence found
|
| 334 |
+
|
| 335 |
+
Table 3: Total FLOPs $(\times 10^{18})$ for each LLM-PRM combination under vanilla decoding and early rejection at $\tau = 32$ and $\tau = 64$ . Early rejection consistently reduces compute, with Qwen-based configurations showing the largest savings.
|
| 336 |
+
|
| 337 |
+
<table><tr><td rowspan="2">Model Combination</td><td colspan="2">Vanilla</td><td colspan="2">Early Rejection (τ=32)</td><td colspan="2">Early Rejection (τ=64)</td></tr><tr><td>LLM</td><td>PRM</td><td>LLM</td><td>PRM</td><td>LLM</td><td>PRM</td></tr><tr><td>Llama+Math</td><td>3.70</td><td>27.51</td><td>5.73</td><td>16.27</td><td>4.62</td><td>10.23</td></tr><tr><td>Llama+Skywork</td><td>19.79</td><td>6.06</td><td>7.54</td><td>2.29</td><td>5.67</td><td>1.96</td></tr><tr><td>Qwen+Math</td><td>13.22</td><td>67.19</td><td>7.46</td><td>18.27</td><td>7.54</td><td>17.27</td></tr><tr><td>Qwen+Skywork</td><td>19.22</td><td>8.08</td><td>12.17</td><td>5.12</td><td>10.89</td><td>5.06</td></tr></table>
|
acceleratingllmreasoningviaearlyrejectionwithpartialrewardmodeling/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ba9ce1a387eecf06e885a2fcddcbb905e41882cdc2d5c6d4fad637cad38b4eb4
|
| 3 |
+
size 679058
|
acceleratingllmreasoningviaearlyrejectionwithpartialrewardmodeling/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0f702eaed71e93ab68bd9575dc0d2364546cffd39f6d938de4ba9e904a29b3ec
|
| 3 |
+
size 455878
|
acceptordenyevaluatingllmfairnessandperformanceinloanapprovalacrosstabletotextserializationapproaches/b3b5090c-0c68-4816-a5b9-b46a98c166d2_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b6cf331f9174626bcbc0a815a4c82f94aced64cd48e9d9097055b7436b8215bb
|
| 3 |
+
size 132807
|
acceptordenyevaluatingllmfairnessandperformanceinloanapprovalacrosstabletotextserializationapproaches/b3b5090c-0c68-4816-a5b9-b46a98c166d2_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:431b9d92651d375a91e8e80eebd4aeb2dccb705853c50287a273f530a91e77a4
|
| 3 |
+
size 151842
|
acceptordenyevaluatingllmfairnessandperformanceinloanapprovalacrosstabletotextserializationapproaches/b3b5090c-0c68-4816-a5b9-b46a98c166d2_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0fff5c861875d00cbb37f6d616b73b468e71d68f5259a0bc170c1d332ac74195
|
| 3 |
+
size 4510948
|
acceptordenyevaluatingllmfairnessandperformanceinloanapprovalacrosstabletotextserializationapproaches/full.md
ADDED
|
@@ -0,0 +1,622 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Accept or Deny? Evaluating LLM Fairness and Performance in Loan Approval across Table-to-Text Serialization Approaches
|
| 2 |
+
|
| 3 |
+
Israel Abebe Azime * 1, Deborah D. Kanubala * 1, Tejumade Afonja * 1,2, Mario Fritz1,2, Isabel Valera1,3, Dietrich Klakow1, Philipp Slusallek1
|
| 4 |
+
|
| 5 |
+
$^{1}$ Saarland University, $^{2}$ CISPA Helmholtz Center for Information Security, $^{3}$ Max Planck Institute for Software Systems
|
| 6 |
+
|
| 7 |
+
# Abstract
|
| 8 |
+
|
| 9 |
+
Large Language Models (LLMs) are increasingly employed in high-stakes decision-making tasks, such as loan approvals. While their applications expand across domains, LLMs struggle to process tabular data, ensuring fairness and delivering reliable predictions. In this work, we assess the performance and fairness of LLMs on serialized loan approval datasets from three geographically distinct regions: Ghana, Germany, and the United States. Our evaluation focuses on the model's zero-shot and in-context learning (ICL) capabilities. Our results reveal that the choice of serialization<sup>1</sup> format significantly affects both performance and fairness in LLMs, with certain formats such as GReaT and LIFT yielding higher F1 scores but exacerbating fairness disparities. Notably, while ICL improved model performance by $4.9 - 59.6\%$ relative to zero-shot baselines, its effect on fairness varied considerably across datasets. Our work underscores the importance of effective tabular data representation methods and fairness-aware models to improve the reliability of LLMs in financial decision-making.
|
| 10 |
+
|
| 11 |
+
# 1 Introduction
|
| 12 |
+
|
| 13 |
+
Large Language Models (LLMs), trained on vast amounts of textual data, have demonstrated remarkable potential to generalize across tasks and provide accurate predictions (Naveed et al., 2023; AI4Science and Quantum, 2023). Given their growing presence in critical domains like financial decision-making, it is crucial to understand the behaviour and ethical implications of these systems due to their direct and severe impact on individuals (Aguirre et al., 2024). Financial decision-making is the systematic process of analyzing information to make informed choices in finan
|
| 14 |
+
|
| 15 |
+
cial tasks such as investment, loan approval, and more (Kazemian et al., 2022).
|
| 16 |
+
|
| 17 |
+
In this work, we focus on loan approval, where a bank must decide whether or not to grant a loan based on the applicant's creditworthiness. This task is typically performed by loan officers who consider various input factors to make informed decisions. Loan approval is a critical task to explore as it directly impacts financial inclusion, borrower outcomes, and institutional risk management, making it an ideal domain for assessing the effectiveness and fairness of LLM-driven decision-making systems. Moreover, given the diversity in financial practices and socioeconomic contexts, evaluating loan approval across datasets from three distinct geographical regions (Ghana, Germany, and the United States) provides valuable insights into how LLMs manage data diversity and fairness within varying economic environments. Additionally, the tabular nature of the datasets in this study underscores the importance of selecting an appropriate serialization method before feeding data into LLMs, as it can significantly influence model performance and fairness (Singha et al., 2023; Sui et al., 2024).
|
| 18 |
+
|
| 19 |
+
Building upon these observations, we frame our study around the following research questions: i) How do different serialization formats (e.g., JSON, Text, GReaT, LIFT) impact the fairness and performance of LLMs in loan approval tasks across diverse geographical datasets? ii) What effect does in-context learning (ICL) have on the fairness and predictive performance of LLMs in loan approval scenarios, particularly when applied to datasets from Ghana, Germany, and the United States? iii) How do financial domain-specific LLMs compare to general-purpose LLMs in their ability to accurately and fairly assess loan applications, especially under zero-shot and few-shot learning settings? iv) What key factors contribute to fairness disparities in LLM-generated loan approval predictions, and
|
| 20 |
+
|
| 21 |
+

|
| 22 |
+
Figure 1: Overview of our approach. We first utilize different serialization approaches to acquire our serialized data, and we investigate the LLMs' performance and fairness by applying zero- and few-shot learning to the datasets.
|
| 23 |
+
|
| 24 |
+
how do these factors vary across different serialization methods and geographical regions?
|
| 25 |
+
|
| 26 |
+
To address the research questions outlined above, this work makes the following contributions:
|
| 27 |
+
|
| 28 |
+
1. Investigate the capability of LLMs in financial decision-making, focusing on loan approval tasks. This includes a comprehensive zero-shot benchmark evaluation of various LLMs and an analysis of the features they prioritize in their decision-making process.
|
| 29 |
+
2. Analyze the impact of different tabular serialization formats on the decision-making process of LLMs.
|
| 30 |
+
3. Evaluate the effectiveness of techniques, such as in-context learning, that aim to improve LLM performance in financial decision-making, with particular attention to their impact on accuracy and fairness.
|
| 31 |
+
4. Examine the presence of gender-related biases in LLM-generated financial decisions, assessing their implications and associated risks.
|
| 32 |
+
|
| 33 |
+
<table><tr><td>Data Name</td><td>Size</td><td>#Features</td><td>Output</td></tr><tr><td>Ghana</td><td>614</td><td>13</td><td>Yes/No</td></tr><tr><td>Germany</td><td>1000</td><td>21</td><td>Good/Bad</td></tr><tr><td>United States</td><td>1451</td><td>18</td><td>Yes/No</td></tr></table>
|
| 34 |
+
|
| 35 |
+
Table 1: Summary of the datasets used in the study. Ghana (Sackey and Amponsah, 2018), Germany (Statlog) and United States (Kaggle). See Appendix C for details of the feature description of each dataset.
|
| 36 |
+
|
| 37 |
+
# 2 Related Work
|
| 38 |
+
|
| 39 |
+
LLMs in financial decision-making. Large Language Models (LLMs) have been employed to
|
| 40 |
+
|
| 41 |
+
support various financial decision-making tasks, encompassing diverse applications such as stock trading (Ding et al., 2024), investment management (Kong et al., 2024), and credit scoring (Feng et al., 2023). These models either provide recommendations on optimal investment strategies to maximize returns or assess an individual's financial reliability and creditworthiness (Haque and Hassan, 2024). Loan approval tasks, in particular, carry significant risk due to their direct impact on financial inclusion and access to capital, making the evaluation of fairness and predictive accuracy in such models critically important (Kanubala et al., 2024).
|
| 42 |
+
|
| 43 |
+
**Serialization in LLMs.** LLMs require tabular data to be serialized into natural text, a process known as serialization (Jaitly et al., 2023). However, serialization methods, which convert tabular data into a format that LLMs can process, can introduce their own biases and limitations. For instance, Hegselmann et al. (2023) discusses how different serialization formats can lead to variations in LLMs' performance. Their study highlights that the choice of serialization method can influence how effectively an LLM understands and processes the data. A number of studies have proposed different serialization methods, including Hegselmann et al. (2023) Text and List formats, the GReaT format (Borisov et al., 2022), natural-like serialization as used in LIFT (Dinh et al., 2022), and HTML-like formatting (Sui et al., 2024). Additionally, works like Hollmann et al. (2022) introduce TabPFN, a tabular foundation model specifically designed for tabular datasets. However, in this work, we focus on the capabilities of general-purpose LLMs and their financial domain variants. We do not cover tabular foundation models due to the broad range of serialization formats considered in our study,
|
| 44 |
+
|
| 45 |
+
<table><tr><td>Serializable</td><td>Example Template</td></tr><tr><td>JSON (default)</td><td>{age: 32, sex: female, loan duration: 48 months, purpose: education}</td></tr><tr><td>GReaT (Borisov et al., 2022)</td><td>age is 32, sex is female, loan duration is 48 months, loan purpose is education</td></tr><tr><td>LIFT (Dinh et al., 2022)</td><td>A 32-year-old female is applying for a loan for 48 months for education purposes.</td></tr></table>
|
| 46 |
+
|
| 47 |
+
Table 2: Comparison of serialization formats for loan applicant information. This table presents example templates for representing loan applicant data with four features (age and sex, loan duration and purpose). JSON is assumed as the default format. Table 8 in Appendix D shows examples for the List, Text, HTML and Latex format.
|
| 48 |
+
|
| 49 |
+
which may not align well with such models.
|
| 50 |
+
|
| 51 |
+
Bias and unfairness of LLMs. LLMs are trained on large corpora of human-generated text, which often contain inherent societal biases (Garg et al., 2018; Navigli et al., 2023; Sun et al., 2019; Kotek et al., 2023). As a result, these biases can be encoded into the models and perpetuated in their decisions, leading to discriminatory outcomes. For instance, gender or racial biases present in the training data can result in unfair treatment of certain groups (Bolukbasi et al., 2016; Abid et al., 2021). Additionally, Aguirre et al. (2024) highlights that the choice of in-context examples significantly influences model fairness, particularly when these examples are not demographically representative. Addressing these biases is crucial to ensuring fair and ethical use of LLMs in decision-making processes.
|
| 52 |
+
|
| 53 |
+
Our study examines the use of LLMs for loan approval decisions across datasets from three geographical regions. We explore two key dimensions: the impact of serialization methods and the effect of zero-shot and few-shot prompting on decision accuracy and fairness.
|
| 54 |
+
|
| 55 |
+
# 3 Methodology
|
| 56 |
+
|
| 57 |
+
# 3.1 Problem Formalization
|
| 58 |
+
|
| 59 |
+
Given the tabular dataset $D = \{(x_{i},y_{i})\}_{i = 1}^{n}$ , where $x_{i}$ is a $d$ -dimensional feature vector and $y_{i}$ belongs to a set of classes $C$ , the columns or features are named $F = \{f_1,\dots ,f_d\}$ . Each feature $f_{i}$ is a natural-language string representing the name of the feature, such as "age" or "sex". For zero-shot learning, we provide the LLMs with features $F$ and task it to predict the class $C$ . For our k-shot classification experiments, we use a subset $D_{k}$ of size $k$ -sampled from the training set. Few-shot examples are top-n examples balanced by gender to align with fairness metrics.
|
| 60 |
+
|
| 61 |
+
# 3.2 Datasets
|
| 62 |
+
|
| 63 |
+
Dataset choice. Guided by data availability and relevance, we selected three distinct datasets, each representing its region's socioeconomic context. We posit that geographical, political, and ideological differences across regions directly influence financial practices, such as loan acquisition. The regions examined were arbitrarily chosen for this study; while expanding to more diverse regions is feasible, we have limited our scope to maintain a focused analysis. The distinct differences in data properties highlight the geographical variations central to this study. Although the task remains the same, subtle disparities within datasets from specific groups may introduce biases that can impact decision-making.
|
| 64 |
+
|
| 65 |
+
A comparison of dataset characteristics reveals distinct patterns across the German, Ghanaian, and U.S. datasets, as further detailed in the Appendix C. Only the Germany and Ghana datasets include age as a feature, with German applicants predominantly in their 20s and Ghanaian applicants in their 40s. The U.S. dataset primarily emphasizes employment status, whereas the other datasets provide additional information on the number of years employed. Across all datasets, male applicants consistently outnumber female applicants. Notable variations are also observed in loan amount distributions: the Germany dataset presents a broader and more evenly distributed range of loan amounts, while the U.S. and Ghana datasets are concentrated on smaller loan amounts with higher frequency.
|
| 66 |
+
|
| 67 |
+
Data processing. We provide a summary of the dataset we used in the study in Table 1 with a detailed description in Appendix C. For each dataset, we split the dataset into $80\%$ train and $20\%$ test using stratified sampling based on gender feature. To convert each dataset to the formats shown in Table 2 we created custom functions and also used
|
| 68 |
+
|
| 69 |
+
pandas $^{3}$ functions that convert dataframes to HTML and Latex. See Table 8 in Appendix D for examples of Latex, Text, HTML and List formats.
|
| 70 |
+
|
| 71 |
+
# 3.2.1 Table-to-Text serialization
|
| 72 |
+
|
| 73 |
+
Converting tabular data to text (serialization) is essential, as the format can significantly influence LLM decision-making (Hegselmann et al., 2023). To investigate how this behaviour transfers to our loan approval task, we explored six serialization formats as shown in Table 2 and Table 8 in Appendix D. These formats ranged from straightforward default values, such as JSON and List, to more structured and natural language text-like formats, such as HTML, Latex, Text (Hegselmann et al., 2023), GReaT (Borisov et al., 2022) and LIFT (Dinh et al., 2022).
|
| 74 |
+
|
| 75 |
+
# 3.3 Models
|
| 76 |
+
|
| 77 |
+
# 3.3.1 Baseline and Benchmark Models
|
| 78 |
+
|
| 79 |
+
To comprehensively understand and accurately evaluate the investigated LLMs, we incorporated simple baseline models and a benchmark model.
|
| 80 |
+
|
| 81 |
+
Baseline models. The zero model, one model and Random model serve as our simple baselines, as shown in Figure 2. The zero model assumes that no one will repay the loan (i.e. zero output for all predictions), while the one model assumes that everyone will repay the loan (one output for all predictions). These models provide initial reference points for our experiment, illustrating the performance metrics under these extreme assumptions. Finally, the Random model serves as a baseline by comparing the model's performance against randomly generated predictions<sup>4</sup>.
|
| 82 |
+
|
| 83 |
+
Benchmark model. We trained a Logistic Regression model on the training set to serve as our benchmark model. This model allows us to compare the performance of the LLMs against traditional and well-understood machine learning models. In training the Logistic Regression model, we preprocessed the dataset by dropping missing values, applying label encoder to the categorical features, and scaling all numerical features using a standardScaler. Additionally, we used default parameters of scikit-learn<sup>5</sup> implementation for logistic regression to be used as basic comparison baseline. We acknowledge that other classical models,
|
| 84 |
+
|
| 85 |
+
such as decision trees or support vector machines, might be optimized for this task and potentially yield better performance. However, our primary objective was to establish a straightforward benchmark for comparison.
|
| 86 |
+
|
| 87 |
+
# 3.3.2 Large Language Models (LLMs)
|
| 88 |
+
|
| 89 |
+
We evaluated a total of ten (10) LLMs selected based on their open-source availability, instruction tuning, parameter size, and domain relevance (Table 3). To assess the effect of domain relevance, we included models specifically fine-tuned for financial tasks: FinMA-7B-NLP and FinMA-7B-full, introduced by Xie et al. (2023). To examine the effect of instruction tuning, we incorporated Meta's LLaMA-3-70B-Instruct and LLaMA-3-8B-Instruct, as well as Google's Gemma-2-27b-it and Gemma-2-9b-it. Each of these instruction-tuned variants was paired with its corresponding base model (LLaMA-3-70B, LLaMA-3-8B, Gemma-2-27b, and Gemma-2-9b) sourced from Touvron et al. (2023); Meta (2024); Team et al. (2024). This selection allows us to examine both the impact of instruction tuning and the role of model size, while also testing whether financial fine-tuning improves decision-making in domain-specific tasks such as loan approval. See Appendix B for model evaluation setup.
|
| 90 |
+
|
| 91 |
+
<table><tr><td>Model</td><td>Training</td><td>Params</td><td>Financial Dataset Only</td></tr><tr><td>LLaMA-3</td><td>Pretrained & Instruction-tuned</td><td>8B & 70B</td><td>X</td></tr><tr><td>Gemma-2</td><td>Pretrained & Instruction-tuned</td><td>9B & 27B</td><td>X</td></tr><tr><td>FinMA-full</td><td>Fine-tuned</td><td>7B</td><td>✓</td></tr><tr><td>FinMA-NLP</td><td>Fine-tuned</td><td>7B</td><td>✓</td></tr></table>
|
| 92 |
+
|
| 93 |
+
Table 3: Overview of the LLMs evaluated, including models fine-tuned and whether they were specifically trained on financial datasets or not.
|
| 94 |
+
|
| 95 |
+
# 3.4 Approaches to LLMs Improvement
|
| 96 |
+
|
| 97 |
+
# 3.4.1 In-Context Learning (ICL)
|
| 98 |
+
|
| 99 |
+
In-context learning involves providing examples that enhance the capabilities of LLMs (Zhang et al., 2024; Agarwal et al., 2024). This approach is widely used because it eliminates the need for parameter updates, reducing computational costs associated with training. Following a similar approach utilized by the work of Zhang et al. (2024) we experimented with different numbers of examples, specifically $n = 2, 4, 6, 8$ . Our few-shot ex
|
| 100 |
+
|
| 101 |
+

|
| 102 |
+
Figure 2: Zero-shot weighted average F1 score performance of LLMs on loan approval tasks. Evaluated across three prompts (variation shown by error bars) and multiple table-to-text serialization methods. The Logistic Regression model baseline (green dashed line) uses default JSON serialization with variables as individual features. Most LLMs underperform relative to this baseline, with only GReaT on Ghana, List/Text on Germany, and Gemma-2-27b-it on the U.S. showing modest improvements.
|
| 103 |
+
|
| 104 |
+
amples are strategically selected to ensure representational equity. For instance, when using two examples, one will correspond to a male and the other to a female, aligning with our fairness score metrics, which are based on gender representation.
|
| 105 |
+
|
| 106 |
+
# 3.5 Model and Fairness Evaluation
|
| 107 |
+
|
| 108 |
+
We use the weighted-average F1 score to evaluate model performance on the loan prediction task (see Appendix A for definitions). To assess fairness, we employ two standard metrics: equality of opportunity (EO) and statistical parity (SP). EO aligns with the goals of loan approval by ensuring that qualified applicants, regardless of group membership, have an equal chance of approval (Hardt et al., 2016; Kozodoi et al., 2022). In contrast, SP measures whether approval rates are independent of sensitive attributes (Dwork et al., 2012). Formal definitions of these metrics are provided below:
|
| 109 |
+
|
| 110 |
+
Definition 1 (Statistical Parity (SP)) A trained classifier's predictions $\hat{Y}$ satisfy Statistical Parity if the probability of a positive outcome is independent of the sensitive attribute (Dwork et al., 2012). Formally:
|
| 111 |
+
|
| 112 |
+
$$
|
| 113 |
+
P (\hat {Y} = 1 \mid A = 1) = P (\hat {Y} = 1 \mid A = 0)
|
| 114 |
+
$$
|
| 115 |
+
|
| 116 |
+
where $A$ denotes the sensitive attribute, which we consider to represent gender. For simplicity, we
|
| 117 |
+
|
| 118 |
+
assume $A$ is binary: $A \in \{\text{male}, \text{female}\}$ . Here, $\hat{Y}$ is the predicted label of the classifier, and $Y$ denotes the true target label.
|
| 119 |
+
|
| 120 |
+
# Definition 2 (Equality of Opportunity (EO))
|
| 121 |
+
|
| 122 |
+
Equality of Opportunity ensures that the classifier's true positive rate is the same across different demographic groups (Hardt et al., 2016). Formally, a classifier $\hat{Y}$ satisfies Equality of Opportunity if:
|
| 123 |
+
|
| 124 |
+
$$
|
| 125 |
+
\begin{array}{l} P (\hat {Y} = 1 \mid Y = 1, A = 1) \\ = P (\hat {Y} = 1 \mid Y = 1, A = 0) \\ \end{array}
|
| 126 |
+
$$
|
| 127 |
+
|
| 128 |
+
where $A$ is the sensitive attribute. For our experiments, we consider females as the protected group and males as the non-protected group.
|
| 129 |
+
|
| 130 |
+
# 4 Results and Analysis
|
| 131 |
+
|
| 132 |
+
In this section, we present our results and analysis, structured around a set of research questions that guide the discussion. We begin by comparing the performance of different serialization methods across models for each dataset, as shown in Figure 2. We observe that the zero model outperforms the one model on the Ghana and United States (US) datasets, while the reverse is true for the Germany dataset. This suggests that the Germany dataset has a higher proportion of non-defaulters compared to
|
| 133 |
+
|
| 134 |
+

|
| 135 |
+
|
| 136 |
+

|
| 137 |
+
|
| 138 |
+

|
| 139 |
+
|
| 140 |
+
Figure 3: Average weighted F1 score trends across serialization formats for few-shot examples, showing higher gains in U.S. data across formats, while Germany lags consistently despite increasing shot numbers.
|
| 141 |
+

|
| 142 |
+
Data Ghana Germany U.S.
|
| 143 |
+
|
| 144 |
+

|
| 145 |
+
|
| 146 |
+

|
| 147 |
+
|
| 148 |
+
the other two datasets. We also conducted experiments on model token attribution, which is detailed in the Appendix H.
|
| 149 |
+
|
| 150 |
+
# 4.1 Do LLMs Perform Better Than Baseline or Benchmark Models on the Default Compilation Format (JSON)?
|
| 151 |
+
|
| 152 |
+
In Figure 2, we compare the zero-shot performance of LLMs against baseline models and analyse the results by country. The general trend indicates that most models do not outperform either the zero model or the one model. Some models achieved marginally higher F1 scores, including Gemma-2-9b-it for Ghana and seven of the models for the US, while none did so for Germany. Importantly, none of the selected LLMs were able to outperform the simple Logistic Regression model, which serves as the benchmark.
|
| 153 |
+
|
| 154 |
+
For JSON serialization method, financial domain-specific models (FinMA-7B-full, FinMA-7B-NLP) do not demonstrate significantly better performance under zero-shot decision-making compared to models trained for general applications. Also, none of the models outperform the Logistic Regression model.
|
| 155 |
+
|
| 156 |
+
# 4.2 How Does the Zero-Shot Performance of LLMs Vary Across Different Compilation Methods Compared to Baseline Models?
|
| 157 |
+
|
| 158 |
+
Examining region-specific results, we observe the following from Figure 2: For the Ghana
|
| 159 |
+
|
| 160 |
+
dataset, the best performances are achieved using the GReaT (Gemma-2-9b-it) and LIFT (LLaMA-3-70B-Instruct) serialization methods. In the Germany dataset, Gemma-2-9b-it shows the poorest performance, with three out of four models performing as poorly as the zero model. Financial domain-trained models (FinMA-7B-full and FinMA-7B-NLP) deliver the best results with List and Text serialization methods. For the U.S. dataset, results are generally more promising across all models, with Gemma-2-27b-it consistently achieving the best performance across all serialization methods tested except LIFT.
|
| 161 |
+
|
| 162 |
+
- Serialization methods can significantly influence loan approval or denial, which, in turn, may have long-term consequences for individuals wrongly denied loans.
|
| 163 |
+
|
| 164 |
+
# 4.3 Does serialization Using Natural Language Texts Improve Performance?
|
| 165 |
+
|
| 166 |
+
We hypothesized that using more natural input text would improve model performance, which motivated our inclusion of the LIFT and GReaT serialization method (see Table 2). LIFT produced the best results for LLaMA-3-70B-Instruct on the Ghana dataset. However, this improvement did not hold consistently across all models and datasets, indicating that while natural language formats can be beneficial, their effectiveness is context-dependent.
|
| 167 |
+
|
| 168 |
+

|
| 169 |
+
Figure 4: Few-shot weighted F1 trends. Adding a small number of in-context examples improves performance, while differences among serialization formats remain modest across datasets.
|
| 170 |
+
|
| 171 |
+

|
| 172 |
+
|
| 173 |
+

|
| 174 |
+
|
| 175 |
+
Our results show that increasing the naturalness of input formatting does not consistently enhance model performance.
|
| 176 |
+
|
| 177 |
+
# 4.4 Does Using Few-Shot Examples Improve the Decision-Making Abilities of LLMs?
|
| 178 |
+
|
| 179 |
+
Given LLMs' subpar performance in the zero-shot experiments, we explored various methods to improve their decision-making capabilities through in-context learning (ICL). Figure 3 presents the results from our ICL experiment, where we provide the model with varying numbers of n-shot examples, ranging from zero-shot ( $n = 0$ ) to 8-shot across datasets and serialization formats. From Figure 3, providing more examples improves performance on the loan approval task. Similarly, in Figure 4, we see average improvement with more examples. This is shown across all the serialization methods.
|
| 180 |
+
|
| 181 |
+
Model performance improves with more example shots, improving LLM decision-making for loan approval.
|
| 182 |
+
|
| 183 |
+
# 4.5 How Does Model Fairness Vary Across Datasets?
|
| 184 |
+
|
| 185 |
+
Baseline models from Table 4 all show no discrimination in terms of equality of opportunity (EO) and statistical parity (SP) except the Random model. However, we see high discrimination in terms of both EO and SP with the FinMA-7B-full for the Germany dataset. Similarly, we see this model also returns the highest disparity in terms of EO in the Ghana dataset. It is interesting to note that this model, among the other models selected in this
|
| 186 |
+
|
| 187 |
+
<table><tr><td>Datasets:</td><td colspan="2">Germany</td><td colspan="2">Ghana</td><td colspan="2">U.S.</td></tr><tr><td>Fairness Metrics:</td><td>SP</td><td>EO</td><td>SP</td><td>EO</td><td>SP</td><td>EO</td></tr><tr><td colspan="7">Baseline models</td></tr><tr><td>Zero</td><td>0.00</td><td>0.00</td><td>0.00</td><td>0.00</td><td>0.00</td><td>0.00</td></tr><tr><td>One</td><td>0.00</td><td>0.00</td><td>0.00</td><td>0.00</td><td>0.00</td><td>0.00</td></tr><tr><td>Random</td><td>0.15</td><td>0.38</td><td>0.02</td><td>-0.03</td><td>0.04</td><td>0.35</td></tr><tr><td colspan="7">Benchmark model</td></tr><tr><td>Logistic Regression</td><td>-0.03</td><td>-0.08</td><td>-0.04</td><td>0.05</td><td>-0.02</td><td>-0.01</td></tr><tr><td colspan="7">Models Fine-tuned for Finance</td></tr><tr><td>FinMA-7B-full</td><td>0.13</td><td>0.16</td><td>0.03</td><td>0.06</td><td>0.00</td><td>0.00</td></tr><tr><td>FinMA-7B-NLP</td><td>0.07</td><td>0.07</td><td>0.00</td><td>0.01</td><td>0.00</td><td>0.00</td></tr><tr><td colspan="7">Mid range open-source base models</td></tr><tr><td>LLaMA-3-8B</td><td>0.00</td><td>0.00</td><td>0.00</td><td>0.00</td><td>0.00</td><td>0.00</td></tr><tr><td>Gemma-2-9b</td><td>0.05</td><td>0.05</td><td>-0.03</td><td>-0.04</td><td>-0.06</td><td>-0.11</td></tr><tr><td colspan="7">Mid range open-source instruction tuned models</td></tr><tr><td>LLaMA-3-8B-Instruct</td><td>0.03</td><td>0.06</td><td>0.00</td><td>0.00</td><td>0.01</td><td>0.02</td></tr><tr><td>Gemma-2-9b-it</td><td>0.01</td><td>0.01</td><td>0.03</td><td>0.04</td><td>-0.04</td><td>0.13</td></tr><tr><td colspan="7">Large range open-source instruction tuned models</td></tr><tr><td>LLaMA-3-70B-Instruct</td><td>-0.03</td><td>0.01</td><td>0.00</td><td>0.00</td><td>-0.01</td><td>0.03</td></tr><tr><td>Gemma-2-27b-it</td><td>-0.01</td><td>-0.02</td><td>0.00</td><td>0.02</td><td>0.04</td><td>0.17</td></tr><tr><td colspan="7">Large range open-source base 
models</td></tr><tr><td>LLaMA-3-70B</td><td>-0.05</td><td>-0.05</td><td>0.00</td><td>0.00</td><td>0.00</td><td>0.00</td></tr><tr><td>Gemma-2-27b</td><td>0.00</td><td>0.03</td><td>0.00</td><td>-0.02</td><td>0.01</td><td>0.07</td></tr></table>
|
| 188 |
+
|
| 189 |
+
Table 4: Zero-shot fairness metrics across regions for JSON serialization. The red colour highlights high bias among the compared models, excluding baselines.
|
| 190 |
+
|
| 191 |
+
study, is the only one fine-tuned for finance. This, therefore, opens up interesting research directions for further investigating the fairness of downstream tasks that have been trained with this model. In a similar light, Gemma-2-27b-it returns the highest disparity in terms of EO for the U.S. dataset. On the contrary, LLaMA-3-8B has no disparity in terms of both fairness metrics on the Germany data, further highlighting that different models penalize sensitive groups differently. Additionally, examining fairness by conducting few-shot experiments showed that few-shot examples (e.g., $n = 8$ ) can introduce significant fairness disparities in Equality of Opportunity (EO), with differences exceeding
|
| 192 |
+
|
| 193 |
+

|
| 194 |
+
|
| 195 |
+

|
| 196 |
+
|
| 197 |
+

|
| 198 |
+
Figure 5: Mean difference in EO for different serialization methods and models. Finance-based models show higher gender-based disparity for certain serializations, while the results are highly region and format-dependent.
|
| 199 |
+
|
| 200 |
+
0.10 for certain serialization methods in the Ghana dataset (see Figure 5).
|
| 201 |
+
|
| 202 |
+
LLMs fine-tuned on financial datasets have the potential to amplify existing historical gender bias.
|
| 203 |
+
|
| 204 |
+
# 4.6 What Is the Fairness F1 Score Tradeoffs?
|
| 205 |
+
|
| 206 |
+
Following the best-performing models, as shown in Figure 4, we assess the fairness of these models in Figure 5. The Gemma-2-27b-it model shows a degree of disparity for the U.S. data. In the case of the best-performing model for Germany, LLaMA-3-70B-Instruct does not show a higher level of unfairness compared to the FinMA-7B-full model. The Gemma-2-9b-it model shows a higher disparity in the EO difference. The FinMA-7B-full model shows a higher disparity in terms of EO in both the Ghana and Germany datasets. The negative EO difference highlights that the model discriminates against the non-protected group, which in this case is males.
|
| 207 |
+
|
| 208 |
+
Financial-based models exhibit greater disparities in EO mean difference and high performance does not equate to fairness.
|
| 209 |
+
|
| 210 |
+
# 4.7 How Does Prompt Sensitivity Vary Across Different Regions and Models?
|
| 211 |
+
|
| 212 |
+
The results in Figure 2 represent the average performance across three different prompts, with error bars indicating the sensitivity to prompt variations. We observe relatively low prompt sensitivity in the U.S. and Ghana datasets, whereas the German dataset exhibits significantly higher sensitivity to prompt differences.
|
| 213 |
+
|
| 214 |
+
LLM performance sensitivity to prompts varies across data sources—some datasets exhibit stable results across prompts, while others show significant variability.
|
| 215 |
+
|
| 216 |
+
# 4.8 How Does Model Size Relate to Performance and Fairness?
|
| 217 |
+
|
| 218 |
+
We assess the effect of model scale by evaluating multiple sizes of both the LLaMA and Gemma families in Figure 2. Across LLaMA variants, expanding parameter counts yields only marginal performance changes. In contrast, Gemma exhibits pronounced performance gains as size increases, a pattern that re-emerges in the fairness analysis (Figure 5), where the Gemma models' equality-of-opportunity scores are highly sensitive to model scale.
|
| 219 |
+
|
| 220 |
+
# 4.9 Does Instruction Tuning Affect Model Performance and Fairness Scores?
|
| 221 |
+
|
| 222 |
+
We further investigated the impact of instruction tuning on accuracy and fairness by comparing the base and instruction-tuned variants of the LLaMA and Gemma families (Figure 2). Instruction tuning has little effect on LLaMA, but its influence on Gemma depends on model size: the 9B version loses accuracy. These shifts are also shaped by the choice of serialization. For example, the instruction-tuned Gemma improves fairness on the United States dataset in some formats, yet becomes more biased in others.
|
| 223 |
+
|
| 224 |
+
# 4.10 Do Few-Shot Examples Improve Fairness?
|
| 225 |
+
|
| 226 |
+
In the German dataset, with reference to Figure 7, few-shot examples (e.g., $n = 8$ ) can lead to significant fairness disparities in equality of opportunity (EO), reaching differences of over 0.10 for some serialization methods in the Ghana dataset. The U.S. dataset shows greater sensitivity to few-shot examples, with models exhibiting a decline in fairness scores.
|
| 227 |
+
|
| 228 |
+
Fairness in few-shot learning is highly context-dependent. While more examples can sometimes reduce disparities, the impact is not universal, underscoring the importance of carefully selecting and evaluating serialization methods to ensure fairness.
|
| 229 |
+
|
| 230 |
+
# 5 Discussion and Conclusion
|
| 231 |
+
|
| 232 |
+
Summary. The ability of LLMs to handle structured tabular data for high-stakes tasks like loan approvals remains under-explored. This work evaluates how different serialization methods (JSON, LIFT, Text) and in-context learning (ICL) impact the fairness and accuracy of LLMs across diverse regional datasets (Ghana, Germany, United States). We find that, in zero-shot scenarios, all LLMs perform worse than a Logistic Regression model baseline, frequently defaulting to uniform approval or denial. Modest improvements only emerge with a few in-context examples, largely influenced by serialization format and dataset rather than model.
|
| 233 |
+
|
| 234 |
+
Fairness implications of LLMs in finance. The results indicate that LLMs fine-tuned on financial datasets cannot yet be fully trusted for high-stakes financial decisions. Therefore, careful attention
|
| 235 |
+
|
| 236 |
+
to data representation is at least as critical as the choice of model. To further address fairness concerns, employing more balanced datasets and ensuring a transparent decision-making process could be beneficial. This transparency is particularly important, as prior decisions made by banks can significantly impact the long-term creditworthiness of applicants (Majumdar et al., 2025).
|
| 237 |
+
|
| 238 |
+
Recommendation for practitioners. We recommend that practitioners retain thoroughly validated tabular models as a baseline and treat LLM outputs only as decision support until they demonstrably exceed that baseline in both accuracy and fairness. During and after deployment, models should be stress-tested on multiple serialization approaches and on regionally diverse datasets to ensure robustness. Benchmarking must extend beyond raw performance scores to include a suite of fairness and accuracy metrics so that improvements in prediction quality do not mask emerging biases.
|
| 239 |
+
|
| 240 |
+
Future work. Explore serialization-robust training, fairness-aware optimization, interpretability methods that expose feature reliance, and broader multilingual datasets that capture diverse regions.
|
| 241 |
+
|
| 242 |
+
# Limitations
|
| 243 |
+
|
| 244 |
+
Dataset Differences. In our work, we examined data sources from different regions, but a detailed study and analysis of the differences between these datasets is crucial. We used the default column names and values for all datasets. However, some of our serialization methods, such as LIFT, aimed to improve column names by correcting spelling errors and related mistakes inherent in the datasets. We acknowledge that there may still be variances that have not been captured and need further investigation.
|
| 245 |
+
|
| 246 |
+
More Datasets. This study focused on three datasets from distinct geographical regions. While incorporating additional datasets with greater variability could improve the research, we maintained this scope to align with the study's objectives and constraints.
|
| 247 |
+
|
| 248 |
+
LLMs Covered in the Work. This work covers a limited number of LLMs, and we mostly focused on models that we believed, to the best of our knowledge, would be adapted to several use cases because of popularity, open source and continued support by organizations that release them.
|
| 249 |
+
|
| 250 |
+
We purposefully left out closed-source models due to resource constraints and limited flexibility for experimentation, particularly around fine-grained control of inputs and internal mechanisms.
|
| 251 |
+
|
| 252 |
+
Prompt Design. In this study, we generated prompts by referencing similar research works. While certain prompt structures may outperform others, a comprehensive exploration of prompt engineering techniques is beyond this work's scope due to the extensive number of experiments conducted. We acknowledge the importance of this aspect and propose it as a direction for future research.
|
| 253 |
+
|
| 254 |
+
Explaining Model Behaviour. We conducted token attribution experiments to better understand the reasoning behind model behaviour. However, as the results were inconclusive, we have not included a detailed discussion in the main text. Instead, a comprehensive account of the findings can be found in Appendix H.
|
| 255 |
+
|
| 256 |
+
# Acknowledgment
|
| 257 |
+
|
| 258 |
+
Israel Abebe Azime would like to thank the German Federal Ministry of Education and Research and the German federal states (http://www.nhrverein.de/en/our-partners) for supporting this work/project as part of the National High-Performance Computing (NHR) joint funding program. Deborah D. Kanubala and Isabel Valera are supported by the European Union (ERC-2021-STG, SAML, 101040177). Tejumade Afonja is partially supported by ELSA - European Lighthouse on Secure and Safe AI funded by the European Union under grant agreement No. 101070617. Views and opinions expressed are, however, those of the author(s) only and do not necessarily reflect those of the funding organizations and neither can they be held responsible for them.
|
| 259 |
+
|
| 260 |
+
# References
|
| 261 |
+
|
| 262 |
+
Abubakar Abid, Maheen Farooqi, and James Zou. 2021. Persistent anti-muslim bias in large language models. In Proceedings of the 2021 AAAI/ACM Conference on AI, Ethics, and Society, pages 298-306.
|
| 263 |
+
Rishabh Agarwal, Avi Singh, Lei Zhang, Bernd Bohnet, Luis Rosias, Stephanie Chan, Biao Zhang, Ankesh Anand, Zaheer Abbas, Azade Nova, et al. 2025. Many-shot in-context learning. Advances in Neural Information Processing Systems, 37:76930-76966.
|
| 264 |
+
|
| 265 |
+
Rishabh Agarwal, Avi Singh, Lei M. Zhang, Bernd Bohnet, Luis Rosias, Stephanie Chan, Biao Zhang, Ankesh Anand, Zaheer Abbas, Azade Nova, John D. Co-Reyes, Eric Chu, Feryal Behbahani, Aleksandra Faust, and Hugo Larochelle. 2024. Many-shot in-context learning. arXiv preprint arXiv:2404.11018.
|
| 266 |
+
Carlos Alejandro Aguirre, Kuleen Sasse, Isabel Alyssa Cachola, and Mark Dredze. 2024. Selecting shots for demographic fairness in few-shot learning with large language models. In Proceedings of the Third Workshop on NLP for Positive Impact, Miami, Florida, USA. Association for Computational Linguistics.
|
| 267 |
+
Microsoft Research AI4Science and Microsoft Azure Quantum. 2023. The impact of large language models on scientific discovery: a preliminary study using gpt-4. arXiv preprint arXiv:2311.07361.
|
| 268 |
+
Tolga Bolukbasi, Kai-Wei Chang, James Y Zou, Venkatesh Saligrama, and Adam T Kalai. 2016. Man is to computer programmer as woman is to homemaker? debiasing word embeddings. Advances in neural information processing systems, 29.
|
| 269 |
+
Vadim Borisov, Kathrin Seßler, Tobias Leemann, Martin Pawelczyk, and Gjergji Kasneci. 2022. Language models are realistic tabular data generators. arXiv preprint arXiv:2210.06280.
|
| 270 |
+
Han Ding, Yinheng Li, Junhao Wang, and Hang Chen. 2024. Large language model agent in financial trading: A survey. arXiv preprint arXiv:2408.06361.
|
| 271 |
+
Tuan Dinh, Yuchen Zeng, Ruisu Zhang, Ziqian Lin, Michael Gira, Shashank Rajput, Jy-yong Sohn, Dimitris Papailiopoulos, and Kangwook Lee. 2022. Lift: Language-interfaced fine-tuning for non-language machine learning tasks. Advances in Neural Information Processing Systems, 35:11763-11784.
|
| 272 |
+
Cynthia Dwork, Moritz Hardt, Toniann Pitassi, Omer Reingold, and Richard Zemel. 2012. Fairness through awareness. In Proceedings of the 3rd innovations in theoretical computer science conference, pages 214-226.
|
| 273 |
+
Duanyu Feng, Yongfu Dai, Jimin Huang, Yifang Zhang, Qianqian Xie, Weiguang Han, Zhengyu Chen, Alejandro Lopez-Lira, and Hao Wang. 2023. Empowering many, biasing a few: Generalist credit scoring through large language models. arXiv preprint arXiv:2310.00566.
|
| 274 |
+
Leo Gao, Jonathan Tow, Baber Abbasi, Stella Biderman, Sid Black, Anthony DiPofi, Charles Foster, Laurence Golding, Jeffrey Hsu, Alain Le Noac'h, Haonan Li, Kyle McDonell, Niklas Muennighoff, Chris Ociepa, Jason Phang, Laria Reynolds, Hailey Schoelkopf, Aviya Skowron, Lintang Sutawika, Eric Tang, Anish Thite, Ben Wang, Kevin Wang, and Andy Zou. 2024. A framework for few-shot language model evaluation.
|
| 275 |
+
Nikhil Garg, Londa Schiebinger, Dan Jurafsky, and James Zou. 2018. Word embeddings quantify 100
|
| 276 |
+
|
| 277 |
+
years of gender and ethnic stereotypes. Proceedings of the National Academy of Sciences, 115(16):E3635-E3644.
|
| 278 |
+
FM Haque and Md Mahedi Hassan. 2024. Bank loan prediction using machine learning techniques. arXiv preprint arXiv:2410.08886.
|
| 279 |
+
Moritz Hardt, Eric Price, and Nati Srebro. 2016. Equality of opportunity in supervised learning. Advances in neural information processing systems, 29.
|
| 280 |
+
Stefan Hegselmann, Alejandro Buendia, Hunter Lang, Monica Agrawal, Xiaoyi Jiang, and David Sontag. 2023. Tabllm: Few-shot classification of tabular data with large language models. In International Conference on Artificial Intelligence and Statistics, pages 5549-5581. PMLR.
|
| 281 |
+
Noah Hollmann, Samuel Müller, Katharina Eggensperger, and Frank Hutter. 2022. Tabpfn: A transformer that solves small tabular classification problems in a second. arXiv preprint arXiv:2207.01848.
|
| 282 |
+
Sukriti Jaitly, Tanay Shah, Ashish Shugani, and Razik Singh Grewal. 2023. Towards better serialization of tabular data for few-shot classification. arXiv preprint arXiv:2312.12464.
|
| 283 |
+
Kaggle. Loan approval prediction dataset. https://www.kaggle.com/altruistdelhite04/loan-prediction-problem-dataset. Accessed: 2024-07-19.
|
| 284 |
+
Deborah D Kanubala, Isabel Valera, and Kavya Gupta. 2024. Fairness beyond binary decisions: A case study on german credit. European Workshop on Algorithmic Fairness.
|
| 285 |
+
Siavash Kazemian, Cosmin Munteanu, and Gerald Penn. 2022. A taxonomical NLP blueprint to support financial decision making through information-centred interactions. In Proceedings of the Fourth Workshop on Financial Technology and Natural Language Processing (FinNLP), pages 89-98, Abu Dhabi, United Arab Emirates (Hybrid). Association for Computational Linguistics.
|
| 286 |
+
Narine Kokhlikyan, Vivek Miglani, Miguel Martin, Edward Wang, Bilal Alsallakh, Jonathan Reynolds, Alexander Melnikov, Natalia Kliushkina, Carlos Araya, Siqi Yan, et al. 2020. Captum: A unified and generic model interpretability library for pytorch. arXiv preprint arXiv:2009.07896.
|
| 287 |
+
Yaxuan Kong, Yuqi Nie, Xiaowen Dong, John M Mulvey, H Vincent Poor, Qingsong Wen, and Stefan Zohren. 2024. Large language models for financial and investment management: Applications and benchmarks. Journal of Portfolio Management, 51(2).
|
| 288 |
+
Hadas Kotek, Rikker Dockum, and David Sun. 2023. Gender bias and stereotypes in large language models. In Proceedings of the ACM collective intelligence conference, pages 12-24.
|
| 289 |
+
|
| 290 |
+
Nikita Kozodoi, Johannes Jacob, and Stefan Lessmann. 2022. Fairness in credit scoring: Assessment, implementation and profit implications. European Journal of Operational Research, 297(3):1083-1094.
|
| 291 |
+
Ayan Majumdar, Deborah D Kanubala, Kavya Gupta, and Isabel Valera. 2025. A causal framework to measure and mitigate non-binary treatment discrimination. arXiv preprint arXiv:2503.22454.
|
| 292 |
+
Meta. 2024. Introducing Meta Llama 3: The most capable openly available LLM to date — ai.meta.com. https://ai.meta.com/blog/meta-llama-3/. [Accessed 01-06-2024].
|
| 293 |
+
Humza Naveed, Asad Ullah Khan, Shi Qiu, Muhammad Saqib, Saeed Anwar, Muhammad Usman, Nick Barnes, and Ajmal Mian. 2023. A comprehensive overview of large language models. arXiv preprint arXiv:2307.06435.
|
| 294 |
+
Roberto Navigli, Simone Conia, and Björn Ross. 2023. Biases in large language models: origins, inventory, and discussion. ACM Journal of Data and Information Quality, 15(2):1-21.
|
| 295 |
+
Frank Gyimah Sackey and Peter Nkrumah Amponsah. 2018. Gender discrimination in commercial banks' credit markets in ghana: a decomposition and counterfactual analysis. African Journal of Business and Economic Research, 13(2):121-140.
|
| 296 |
+
Ananya Singha, José Cambronero, Sumit Gulwani, Vu Le, and Chris Parnin. 2023. Tabular representation, noisy operators, and impacts on table structure understanding tasks in llms. arXiv preprint arXiv:2310.10358.
|
| 297 |
+
Statlog. Statlog (german credit data). https://archive.ics.uci.edu/ml/datasets/Statlog+%28German+Credit+Data%29. Accessed: 2024-07-19.
|
| 298 |
+
Yuan Sui, Mengyu Zhou, Mingjie Zhou, Shi Han, and Dongmei Zhang. 2024. Table meets llm: Can large language models understand structured table data? a benchmark and empirical study. In Proceedings of the 17th ACM International Conference on Web Search and Data Mining, pages 645-654.
|
| 299 |
+
Tony Sun, Andrew Gaut, Shirlyn Tang, Yuxin Huang, Mai ElSherief, Jieyu Zhao, Diba Mirza, Elizabeth Belding, Kai-Wei Chang, and William Yang Wang. 2019. Mitigating gender bias in natural language processing: Literature review. arXiv preprint arXiv:1906.08976.
|
| 300 |
+
Gemma Team, Thomas Mesnard, Cassidy Hardin, Robert Dadashi, Surya Bhupatiraju, Shreya Pathak, Laurent Sifre, Morgane Riviere, Mihir Sanjay Kale, Juliette Love, Pouya Tafti, Leonard Hussenot, Pier Giuseppe Sessa, Aakanksha Chowdhery, Adam Roberts, Aditya Barua, Alex Botev, Alex CastroRos, Ambrose Slone, Amelie Heliou, Andrea Tacchetti, Anna Bulanova, Antonia Paterson, Beth
|
| 301 |
+
|
| 302 |
+
Tsai, Bobak Shahriari, Charline Le Lan, Christopher A. Choquette-Choo, Clément Crepy, Daniel Cer, Daphne Ippolito, David Reid, Elena Buchatskaya, Eric Ni, Eric Noland, Geng Yan, George Tucker, George-Christian Muraru, Grigory Rozhdestvenskiy, Henryk Michalewski, Ian Tenney, Ivan Grishchenko, Jacob Austin, James Keeling, Jane Labanowski, Jean-Baptiste Lespiau, Jeff Stanway, Jenny Brennan, Jeremy Chen, Johan Ferret, Justin Chiu, Justin Mao-Jones, Katherine Lee, Kathy Yu, Katie Millican, Lars Lowe Sjoesund, Lisa Lee, Lucas Dixon, Machel Reid, Maciej Mikula, Mateo Wirth, Michael Sharman, Nikolai Chinaev, Nithum Thain, Olivier Bachem, Oscar Chang, Oscar Wahltinez, Paige Bailey, Paul Michel, Petko Yotov, Rahma Chaabouni, Ramona Comanescu, Reena Jana, Rohan Anil, Ross McIlroy, Ruibo Liu, Ryan Mullins, Samuel L Smith, Sebastian Borgeaud, Sertan Girgin, Sholto Douglas, Shree Pandya, Siamak Shakeri, Soham De, Ted Klimenko, Tom Hennigan, Vlad Feinberg, Wojciech Stokowiec, Yu hui Chen, Zafarali Ahmed, Zhitao Gong, Tris Warkentin, Ludovic Peran, Minh Giang, Clément Farabet, Oriol Vinyals, Jeff Dean, Koray Kavukcuoglu, Demis Hassabis, Zoubin Ghahramani, Douglas Eck, Joelle Barral, Fernando Pereira, Eli Collins, Armand Joulin, Noah Fiedel, Evan Senter, Alek Andreev, and Kathleen Kenealy. 2024. Gemma: Open models based on gemini research and technology. Preprint, arXiv:2403.08295.
|
| 303 |
+
|
| 304 |
+
Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. 2023. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288.
|
| 305 |
+
|
| 306 |
+
Qianqian Xie, Weiguang Han, Xiao Zhang, Yanzhao Lai, Min Peng, Alejandro Lopez-Lira, and Jimin Huang. 2023. Pixiu: A large language model, instruction data and evaluation benchmark for finance. Preprint, arXiv:2306.05443.
|
| 307 |
+
|
| 308 |
+
Miaoran Zhang, Vagrant Gautam, Mingyang Wang, Jesujoba O Alabi, Xiaoyu Shen, Dietrich Klakow, and Marius Mosbach. 2024. The impact of demonstrations on multilingual in-context learning: A multidimensional analysis. arXiv preprint arXiv:2402.12976.
|
| 309 |
+
|
| 310 |
+
# Appendix
|
| 311 |
+
|
| 312 |
+
# A Metrics
|
| 313 |
+
|
| 314 |
+
In evaluating the performance of Large Language Models (LLMs), we employ several key metrics to assess their predictive accuracy. These metrics provide a comprehensive view of how well the models align with ground truth labels.
|
| 315 |
+
|
| 316 |
+
Definition 3 (Weighted-Average F1 Score) The weighted average $F1$ score calculates the $F1$ score for each class independently and then combines them using weights that are proportional to the number of true labels in each class.
|
| 317 |
+
|
| 318 |
+
$$
|
| 319 |
+
\text {Weighted-Average F1 Score} = \sum_ {i = 1} ^ {C} w _ {i} \times \mathrm {F1\ Score} _ {i}
|
| 320 |
+
$$
|
| 321 |
+
|
| 322 |
+
where
|
| 323 |
+
|
| 324 |
+
$$
|
| 325 |
+
w _ {i} = \frac {\text {No. of samples in class } i}{\text {Total number of samples}}
|
| 326 |
+
$$
|
| 327 |
+
|
| 328 |
+
and $C$ is the number of classes in the dataset.
|
| 329 |
+
|
| 330 |
+
# B Model Evaluation Setup
|
| 331 |
+
|
| 332 |
+
For this task, we utilized EleutherAI's open-source Language Model Evaluation Harness (lm-eval) framework (Gao et al., 2024). We created custom configurations for each task, examined the log-likelihood prediction for each possible token, and selected the generation from the possible class outputs. We created 3 different prompts for each data source and evaluated them under the same generation settings.
|
| 333 |
+
|
| 334 |
+
# C Dataset Description and Analysis
|
| 335 |
+
|
| 336 |
+
Table 5, 6, and 7 present the features included in the datasets. We use the target features as output classes, and for serializations that convert feature names to text, we correct spelling to improve clarity and expressiveness.
|
| 337 |
+
|
| 338 |
+
<table><tr><td>Feature Name</td><td>Description</td></tr><tr><td>Loan_ID</td><td>Unique identifier for the loan</td></tr><tr><td>Gender</td><td>Gender of the applicant</td></tr><tr><td>Married</td><td>Marital status of the applicant</td></tr><tr><td>Dependents</td><td>Number of dependents of the applicant</td></tr><tr><td>Education</td><td>Education level of the applicant</td></tr><tr><td>Self_Employed</td><td>Whether the applicant is self-employed</td></tr><tr><td>ApplicantIncome</td><td>Income of the applicant</td></tr><tr><td>CoapplicantIncome</td><td>Income of the co-applicant</td></tr><tr><td>LoanAmount</td><td>Loan amount requested</td></tr><tr><td>Loan_Amount_Term</td><td>Term of the loan in months</td></tr><tr><td>Credit_History</td><td>Credit history of the applicant</td></tr><tr><td>Property_Area</td><td>Area type of the property</td></tr><tr><td>Loan_Status</td><td>Status of the loan (e.g., Loan paid or not)</td></tr></table>
|
| 339 |
+
|
| 340 |
+
Table 5: Description of Features for US Loan Predictions Dataset
|
| 341 |
+
|
| 342 |
+
<table><tr><td>Feature Name</td><td>Description</td></tr><tr><td>sex</td><td>Gender of the applicant</td></tr><tr><td>amnt req</td><td>Amount requested for the loan</td></tr><tr><td>ration</td><td>Ratio of the amount granted to the amount requested</td></tr><tr><td>maturity</td><td>Maturity period of the loan</td></tr><tr><td>assets val</td><td>Value of the applicant's assets</td></tr><tr><td>dec profit</td><td>Decision on the profit potential</td></tr><tr><td>xperience</td><td>Experience of the applicant</td></tr><tr><td>educatn</td><td>Education level of the applicant</td></tr><tr><td>age</td><td>Age of the applicant</td></tr><tr><td>collateral</td><td>Collateral provided for the loan</td></tr><tr><td>locatn</td><td>Location of the applicant</td></tr><tr><td>guarantor</td><td>Guarantor for the loan</td></tr><tr><td>relationship</td><td>Relationship with the financial institution</td></tr><tr><td>purpose</td><td>Purpose of the loan</td></tr><tr><td>sector</td><td>Economic sector of the applicant</td></tr><tr><td>savings</td><td>Savings of the applicant</td></tr><tr><td>target</td><td>Loan amount requested granted or not</td></tr></table>
|
| 343 |
+
|
| 344 |
+
Table 6: Description of Features for Ghana Credit Rationing Dataset
|
| 345 |
+
|
| 346 |
+
<table><tr><td>Feature Name</td><td>Description</td></tr><tr><td>gender</td><td>The gender of the individual</td></tr><tr><td>checking_status</td><td>The status of the individual's checking account</td></tr><tr><td>duration</td><td>Duration of the credit in months</td></tr><tr><td>credit_history</td><td>Credit history of the individual</td></tr><tr><td>purpose</td><td>Purpose of the credit</td></tr><tr><td>credit_amount</td><td>Amount of credit requested</td></tr><tr><td>savings_status</td><td>Status of the individual's savings account</td></tr><tr><td>employment</td><td>Employment status of the individual</td></tr><tr><td>installment_commitment</td><td>Installment commitment as a percentage of disposable income</td></tr><tr><td>other_parties</td><td>Other parties related to the credit</td></tr><tr><td>residence_since</td><td>Number of years the individual has lived in their current residence</td></tr><tr><td>property_magnitude</td><td>Value or magnitude of property</td></tr><tr><td>age</td><td>Age of the individual</td></tr><tr><td>other-paymentplans</td><td>Other payment plans that the individual has</td></tr><tr><td>housing</td><td>Housing status of the individual</td></tr><tr><td>existing Credits</td><td>Number of existing credits at this bank</td></tr><tr><td>job</td><td>Job status of the individual</td></tr><tr><td>num_depends</td><td>Number of dependents</td></tr><tr><td>own_telephone</td><td>Whether the individual owns a telephone</td></tr><tr><td>foreign-worker</td><td>Whether the individual is a foreign worker</td></tr><tr><td>class</td><td>Classification of the credit (e.g., good or bad)</td></tr></table>
|
| 347 |
+
|
| 348 |
+
Table 7: Description of Features in German Credit Dataset
|
| 349 |
+
|
| 350 |
+

|
| 351 |
+
|
| 352 |
+

|
| 353 |
+
Figure 6: KDE plot comparing age and loan amount distributions across datasets, highlighting inherent socioeconomic and cultural disparities. The age distribution reveals that the Ghana dataset skews older, with a concentration in the 30-50 age range, while the German dataset shows a relatively younger distribution peaking around the 20-30 age range. Loan amounts are predominantly smaller in both Ghana and U.S. datasets, with the German dataset exhibiting a broader distribution range, indicating socio-economic and lending disparities across regions.
|
| 354 |
+
|
| 355 |
+
# D Serialization
|
| 356 |
+
|
| 357 |
+
Table 8 shows examples of the six (6) different serialization methods employed in this work. We considered straightforward default values, such as JSON and List, to more structured and natural language text-like formats, such as HTML, Latex, Text (Hegselmann et al., 2023), GReaT (Borisov et al., 2022) and LIFT (Dinh et al., 2022).
|
| 358 |
+
|
| 359 |
+
<table><tr><td>Localization</td><td>Example Template</td></tr><tr><td>JSON (default)</td><td>{age: 32, sex: female, loan duration: 48 months, purpose: education}</td></tr><tr><td>List</td><td>- age: 32
|
| 360 |
+
- sex: female
|
| 361 |
+
- loan duration: 48 months
|
| 362 |
+
- purpose: education</td></tr><tr><td>GReaT (Borisov et al., 2022)</td><td>age is 32, sex is female, loan duration is 48 months, loan purpose is education</td></tr><tr><td>Text</td><td>The age is 32. The sex is female. The loan duration is 48 months. The purpose is education.</td></tr><tr><td>LIFT (Dinh et al., 2022)</td><td>A 32-year-old female is applying for a loan for 48 months for education purposes.</td></tr><tr><td>HTML</td><td>< table><thead>
|
| 363 |
+
< tr><th>age</th>
|
| 364 |
+
</ tr>
|
| 365 |
+
< tr><td>32</ td>
|
| 366 |
+
< td>female</ td>
|
| 367 |
+
</ tr>
|
| 368 |
+
</tbody></table></td></tr><tr><td>Latex</td><td>\begin{tabular}{lrrr}
|
| 369 |
+
\toprule
|
| 370 |
+
age & sex & loan duration & purpose \
|
| 371 |
+
\midrule
|
| 372 |
+
32 & female & 48 month & education \
|
| 373 |
+
\end{tabular}</td></tr></table>
|
| 374 |
+
|
| 375 |
+
Table 8: Comparison of serialization formats for loan applicant information. This table presents example templates for representing loan applicant data with four features (age and sex, loan duration and purpose). JSON is assumed as the default format. The selected serialization formats ensure diverse data representation, balancing availability across different formats, naturalness, and alignment with prior work.
|
| 376 |
+
|
| 377 |
+

|
| 378 |
+
Figure 7: Equality of Opportunity Difference for Few-Shot Learning Across Serialization Methods and Datasets. In-context learning (ICL) does not consistently reduce bias; in some cases, models exhibit significantly unfair behavior, particularly in certain shot configurations.
|
| 379 |
+
|
| 380 |
+

|
| 381 |
+
|
| 382 |
+

|
| 383 |
+
|
| 384 |
+
# E More Fairness Scores
|
| 385 |
+
|
| 386 |
+
We investigate additional questions, particularly the relationship between fairness scores and In-Context Learning (ICL) performance. Specifically, we analyze how variations in fairness scores impact ICL results, as illustrated in Figure 7. In Figure 8, we present the statistical parity difference across various serialization methods and models. This analysis aims to examine how different serialization techniques impact fairness, providing insights into potential biases introduced by these encoding strategies. This exploration aims to provide deeper insights into potential biases and the extent to which fairness considerations influence model performance in different settings.
|
| 387 |
+
|
| 388 |
+

|
| 389 |
+
|
| 390 |
+

|
| 391 |
+
|
| 392 |
+

|
| 393 |
+
Figure 8: Statistical Parity Difference Across Serialization Methods and Models This figure illustrates the statistical parity difference for various serialization methods and models. We observe that financial models exhibit notably high bias, particularly for the Ghana and Germany datasets.
|
| 394 |
+
|
| 395 |
+
# F Prompt Examples
|
| 396 |
+
|
| 397 |
+
In Figure 9, we employed straightforward and minimally complex prompts for the task to maintain simplicity and consistency. For each task, we carefully adapted the prompt while ensuring alignment with the specific requirements of the evaluation. However, we intentionally chose not to modify the output classes or introduce entirely new prompts across different tasks, as doing so could have introduced unintended variables that might influence the evaluation outcomes.
|
| 398 |
+
|
| 399 |
+
# Example of Loanpred Prompt
|
| 400 |
+
|
| 401 |
+
You are an expert LLM with extensive knowledge in financial analysis. Your task is to evaluate a person's profile and determine their likelihood of repaying a loan. Respond with 'Y' if the person is likely to repay the loan, and 'N' if they are not likely to repay.
|
| 402 |
+
|
| 403 |
+
Profile: {profile}
|
| 404 |
+
|
| 405 |
+
Answer:
|
| 406 |
+
|
| 407 |
+
You are a financial analysis expert with extensive knowledge in credit assessment. Review the individual's profile to evaluate their likelihood of repaying the loan. Respond with 'Y' if repayment is likely, or 'N' if it is not.
|
| 408 |
+
|
| 409 |
+
Profile: {profile}
|
| 410 |
+
|
| 411 |
+
Answer:
|
| 412 |
+
|
| 413 |
+
As a financial expert skilled in assessing creditworthiness, analyze the following profile and indicate the likelihood of loan repayment. Use 'Y' for likely repayment and 'N' for unlikely.
|
| 414 |
+
|
| 415 |
+
Profile: {profile}
|
| 416 |
+
|
| 417 |
+
Answer:
|
| 418 |
+
|
| 419 |
+
# Example Of Germany Prompt
|
| 420 |
+
|
| 421 |
+
You are an expert LLM with extensive knowledge in financial analysis. Your task is to evaluate a person's profile and determine their likelihood of repaying a loan. Respond with 'good' if the person is likely to repay the loan, and 'bad' if they are not likely to repay.
|
| 422 |
+
|
| 423 |
+
Profile: {profile}
|
| 424 |
+
|
| 425 |
+
Answer:
|
| 426 |
+
|
| 427 |
+
You are a financial assessment specialist with deep insights into creditworthiness. Review the profile below and indicate the repayment likelihood with 'good' if the individual is likely to repay the loan, or 'bad' if they are not.
|
| 428 |
+
|
| 429 |
+
Profile: {profile}
|
| 430 |
+
|
| 431 |
+
Answer:
|
| 432 |
+
|
| 433 |
+
Imagine you are a loan assessment expert with extensive experience in evaluating repayment potential. Analyze the details provided to judge whether repayment is probable. Use 'good' for likely repayment and 'bad' for unlikely.
|
| 434 |
+
|
| 435 |
+
Profile: {profile}
|
| 436 |
+
|
| 437 |
+
Answer:
|
| 438 |
+
|
| 439 |
+
# Example Of Ghana Prompt
|
| 440 |
+
|
| 441 |
+
You are an expert LLM with extensive knowledge in financial analysis. Your task is to evaluate a person's profile and determine their likelihood of repaying a loan. Respond with 'Yes' if the person is likely to repay the loan, and 'No' if they are not likely to repay.
|
| 442 |
+
|
| 443 |
+
Profile: {profile}
|
| 444 |
+
|
| 445 |
+
Answer:
|
| 446 |
+
|
| 447 |
+
You are a financial risk evaluator with expertise in creditworthiness. Review the individual's profile and indicate their repayment likelihood. Use 'Yes' for likely repayment, or 'No' if repayment is unlikely.
|
| 448 |
+
|
| 449 |
+
Profile: {profile}
|
| 450 |
+
|
| 451 |
+
Answer:
|
| 452 |
+
|
| 453 |
+
As an expert in financial analysis, assess the following profile to determine the likelihood of loan repayment. Respond with 'Yes' if repayment is probable, and 'No' if it is not.
|
| 454 |
+
|
| 455 |
+
Profile: {profile}
|
| 456 |
+
|
| 457 |
+
Answer:
|
| 458 |
+
|
| 459 |
+
Table 9: Example Prompts Used for the Task. For each task, we created three distinct prompts, and the reported results represent the average performance across all three.
|
| 460 |
+
|
| 461 |
+
# G In-Context Learning (ICL)
|
| 462 |
+
|
| 463 |
+
In the In-Context Learning (ICL) experiment shown in Figure 9, we selected balanced few-shot examples from the training set, ensuring that each set of $n$ examples was predetermined and included a balanced representation of the gender feature. Our findings indicate that ICL yields the most significant improvement when increasing from zero to two examples; however, subsequent increments in the number of examples do not result in similar returns. This observation aligns with existing research, which suggests that while ICL can be effective with a limited number of examples, its performance gains tend to plateau as more examples are added (Agarwal et al., 2025).
|
| 464 |
+
|
| 465 |
+
Looking at Figure 9, we observe that decisions are more dependent on datasets than models. In particular, finance-based models tend to show low performance on the U.S. and Ghana data, while Gemma-2-9b-it shows lower performance on the German data. Looking at the average across the formats, Gemma-2-27b-it performs best for the U.S., while LLaMA-3-8B performs well for Germany.
|
| 466 |
+
|
| 467 |
+
# H Token Attribution explainability experiments
|
| 468 |
+
|
| 469 |
+
In understanding the decision processes made by LLMs we used captum (Kokhlikyan et al., 2020), an open-source model explainability library that provides a variety of generic interpretability methods. Our main question of interest in this work was to understand the interesting features that are used by LLMs in decision-making. In addition, we seek to understand the different decision-making characteristics observed between each LLM.
|
| 470 |
+
|
| 471 |
+
In this work, the main questions we address are whether LLMs look at relevant attributes to make decisions and what different decision-making characteristics are observed for each LLM.
|
| 472 |
+
|
| 473 |
+
We calculated token attribution for examples by replacing them with every possible item in the test set and assuming specific generation output. The results reported show representative values for the whole test set since we built our baseline tokens to be representative of the whole test set. Detailed visualization of the attribution is shown in Figures below.
|
| 474 |
+
|
| 475 |
+
The models explored in this study are medium-sized open-source models, chosen to balance computational efficiency and feasibility. The inclusion of larger models was limited due to computational overhead, while architectural complexities in Captum prevented the integration of financial models.
|
| 476 |
+
|
| 477 |
+
For the Ghana dataset, as shown in Figure 10 and Figure 11, we observed that Gemma-2-9b-it models primarily exhibit negative or neutral attributions from surrounding features for both positive and negative predictions. This behavior results in a slight performance gain, as presented in Table 2. Additionally, we found no single feature that LLMs consistently focus on, making the decision-making process highly model-dependent.
|
| 478 |
+
|
| 479 |
+
For the US data, as shown in Figure 14 and Figure 15, we observed that most decisions are influenced by the Loan_ID column, which contradicts the patterns observed by manual decision-makers. Unlike other datasets, the US data exhibits more consistent feature selection by LLMs, indicating a stronger alignment in the features they prioritize.
|
| 480 |
+
|
| 481 |
+

|
| 482 |
+
|
| 483 |
+

|
| 484 |
+
|
| 485 |
+

|
| 486 |
+
|
| 487 |
+

|
| 488 |
+
|
| 489 |
+

|
| 490 |
+
|
| 491 |
+

|
| 492 |
+
|
| 493 |
+

|
| 494 |
+
|
| 495 |
+

|
| 496 |
+
|
| 497 |
+

|
| 498 |
+
|
| 499 |
+

|
| 500 |
+
|
| 501 |
+

|
| 502 |
+
|
| 503 |
+

|
| 504 |
+
|
| 505 |
+

|
| 506 |
+
Figure 9: Average F1 Score for Few-Shot Learning Across Different Serialization Methods This figure presents the average F1 scores across various serialization methods for each dataset. We observe that the same models exhibit similar performance trends within each dataset, regardless of format. While the text format of the Ghana dataset may not share characteristics with the text format of the Germany dataset, Ghana's text and JSON formats display notable similarities.
|
| 507 |
+
|
| 508 |
+

|
| 509 |
+
|
| 510 |
+

|
| 511 |
+
|
| 512 |
+

|
| 513 |
+
Meta-Llama-3-70B
|
| 514 |
+
|
| 515 |
+

|
| 516 |
+
Meta-Llama-3-8B
|
| 517 |
+
finma-7b-full
|
| 518 |
+
- - finma-7b-nlp
|
| 519 |
+
|
| 520 |
+

|
| 521 |
+
Meta-Llama-3-8B-Instruct
|
| 522 |
+
gemma-2-27b
|
| 523 |
+
gemma-2-27b-it
|
| 524 |
+
- gemma-2-9b
|
| 525 |
+
--- gemma-2-9b-it
|
| 526 |
+
|
| 527 |
+
\{'sex': 1, 'amnt req': 1500, 'ration': 1, 'maturity': 30.0, 'assets val': 2000, 'dec profit': 300.0, 'xperience': 1.0, 'educatn': 1, 'age': 53, 'collateral': 1500, 'locatn': 0, 'guarantor': 0, 'relationship': 1, 'purpose': 1, 'sector': 4, 'savings': 0\}
|
| 528 |
+
|
| 529 |
+

|
| 530 |
+
Figure 10: Attribution scores of Ghana data for example 1. Positive attribution scores are indicated in green, while negative scores are shown in red. We can see Gemma-2-9b-it models have more negative and neutral attribution scores, completely different from their original model Gemma-2-9b.
|
| 531 |
+
|
| 532 |
+
\{'sex': 0, 'amnt req': 9000, 'ration': 0, 'maturity': 30.0, 'assets val': 10000, 'dec profit': 900.0, 'xperience': 3.0, 'educatn': 3, 'age': 35, 'collateral': 9000, 'locatn': 1, 'guarantor': 0, 'relationship': 0, 'purpose': 1, 'sector': 4, 'savings': 1\}
|
| 533 |
+
|
| 534 |
+

|
| 535 |
+
|
| 536 |
+

|
| 537 |
+
|
| 538 |
+

|
| 539 |
+
|
| 540 |
+

|
| 541 |
+
|
| 542 |
+

|
| 543 |
+
|
| 544 |
+

|
| 545 |
+
|
| 546 |
+

|
| 547 |
+
|
| 548 |
+

|
| 549 |
+
Figure 11: Attribution scores of Ghana data for example 2. Positive attribution scores are indicated in green, while negative scores are shown in red. Gemma-2-9b-it models show more negative and neutral token attribution.
|
| 550 |
+
|
| 551 |
+
\{'gender': 'male', 'checking_status': "'no checking'", 'duration': 54, 'credit_history':
|
| 552 |
+
|
| 553 |
+
"'no credits/all paid'", 'purpose': "'used car'", 'credit_amount': 9436, 'savings_status':
|
| 554 |
+
|
| 555 |
+
"'no known savings'", 'employment': "'1<=X<4'", 'installment_commitment': 2, 'other_parties':
|
| 556 |
+
|
| 557 |
+
'none', 'residence_since': 2, 'propertyMagnitude': "'life insurance'", 'age': 39,
|
| 558 |
+
|
| 559 |
+
'other Payment Plans': 'none', 'housing': 'own', 'existing Credits': 1, 'job': "'unskilled resident'",
|
| 560 |
+
|
| 561 |
+
'numdependents': 2, 'own_telephone': 'none', 'foreign-worker': 'yes'\}
|
| 562 |
+
|
| 563 |
+

|
| 564 |
+
Figure 12: This figure displays the attribution scores for Example 1 of the Germany dataset. Positive attribution scores are indicated in green, while negative scores are shown in red. Gemma-2-9b-it models show high negative attribution from most features, and we don't see a focus on specific features throughout the models.
|
| 565 |
+
|
| 566 |
+
\{'gender': 'female', 'checking_status': "'<0'", 'duration': 18, 'credit_history': 'existing paid', 'purpose': 'radio/tv', 'credit_amount': 3190, 'savings_status': "'<100'", 'employment': "'1<=X<4'", 'installment_commitment': 2, 'other_parties': 'none', 'residence_since': 2, 'propertyMagnitude': 'real estate', 'age': 24, 'other_payment_plans': 'none', 'housing': 'own', 'existing Credits': 1, 'job': 'skilled', 'num_depends': 1, 'own_telephone': 'none', 'foreign-worker': 'yes'\}
|
| 567 |
+
|
| 568 |
+
gemma-2-9b-it
|
| 569 |
+
|
| 570 |
+
good -1.0469 -2.3125 -2.3594 -0.6562 -1.2188 -3.0938 -1.2188 -0.5156 -2.3438 -2.0625 -2.3125 -2.3438 -2.2031 -0.6875 -0.9375 -2.0312 -2.7344 -2.5156 -1.2812 -2.9844
|
| 571 |
+
|
| 572 |
+
feml 0.2812 0.2266 0.5703 0.0234 -0.1406 -0.4609 0.0469 -0.0312 -0.1328 0.2109 0.0547 0.1562 -0.0469 -0.0156 -0.1328 -0.0234 -0.0859 -0.1719 -0.0547 0.0469
|
| 573 |
+
|
| 574 |
+
good - 0.6172 0.0859 0.2969 0.2109 0.1562 -0.2500 -0.0469 0.2344 0.0391 0.2891 0.2031 0.2266 0.8203 0.1406 0.1406 0.4531 0.2578 0.1875 0.0938 0.0391
|
| 575 |
+
|
| 576 |
+
f 0.0625 0.0703 0.8516 -0.9375 -0.2422 -0.1875 0.1484 -0.4375 -0.0703 0.0938 -0.0156 -0.2109 0.1016 0.3594 0.3359 0.6016 -0.1172 0.2344 0.0469 1.0078
|
| 577 |
+
|
| 578 |
+
good -0.1953 -0.2070 -0.1094 0.3594 -0.0938 -0.5000 -0.2227 0.1406 0.2422 0.0195 0.1641 -0.0859 0.0000 -0.0195 0.0078 0.0352 -0.1328 0.0859 0.1562 -0.2109
|
| 579 |
+
|
| 580 |
+
0.5 Token Attribution
|
| 581 |
+
|
| 582 |
+
g 0.0938 -0.1328 -0.0078 -0.0234 -0.0391 0.1016 0.0391 -0.0078 -0.0625 0.0469 -0.0547 -0.0859 0.0859 -0.0469 -0.0391 -0.0547 -0.0547 -0.0156 0.0000 0.0078
|
| 583 |
+
|
| 584 |
+
Figure 13: This figure displays the attribution scores for Example 2 of the Germany dataset. Positive attribution scores are indicated in green, while negative scores are shown in red. Gemma-2-9b-it models show high negative attribution from most features, and we don't see a focus on specific features throughout the models.
|
| 585 |
+
|
| 586 |
+
\{'Gender': 'Male', 'Loan_ID': 'LP002101', 'Married': 'Yes', 'Dependents': '0', 'Education': 'Graduate', 'Self_Employed': None, 'ApplicantIncome': 63337, 'CoapplicantIncome': 0.0, 'LoanAmount': 490.0, 'Loan_Amount_Term': 180.0, 'Credit_History': 1.0, 'Property_Area': 'Urban'\}
|
| 587 |
+
|
| 588 |
+

|
| 589 |
+
|
| 590 |
+

|
| 591 |
+
|
| 592 |
+

|
| 593 |
+
|
| 594 |
+

|
| 595 |
+
|
| 596 |
+

|
| 597 |
+
|
| 598 |
+

|
| 599 |
+
|
| 600 |
+

|
| 601 |
+
|
| 602 |
+

|
| 603 |
+
Figure 14: This figure displays the attribution scores for Example 1 of the US dataset. Positive attribution scores are indicated in green, while negative scores are shown in red. We can see the "Loan_ID" feature significantly influences the model's output.
|
| 604 |
+
|
| 605 |
+
\{'Gender': 'Female', 'Loan_ID': 'LP002978', 'Married': 'No', 'Dependents': '0', 'Education': 'Graduate', 'Self_Employed': 'No', 'ApplicantIncome': 2900, 'CoapplicantIncome': 0.0, 'LoanAmount': 71.0, 'Loan_Amount_Term': 360.0, 'Credit_History': 1.0, 'Property_Area': 'Rural'\}
|
| 606 |
+
|
| 607 |
+

|
| 608 |
+
|
| 609 |
+

|
| 610 |
+
|
| 611 |
+

|
| 612 |
+
|
| 613 |
+

|
| 614 |
+
|
| 615 |
+

|
| 616 |
+
|
| 617 |
+

|
| 618 |
+
|
| 619 |
+

|
| 620 |
+
|
| 621 |
+

|
| 622 |
+
Figure 15: This figure displays the attribution scores for Example 2 of the US dataset. Positive attribution scores are indicated in green, while negative scores are shown in red. We can see the "Loan_ID" feature significantly influences the model's output.
|
acceptordenyevaluatingllmfairnessandperformanceinloanapprovalacrosstabletotextserializationapproaches/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e12a330555c02d57f4843e3d4b3fb253b82062301505ec3ecf3695dc5a99297f
|
| 3 |
+
size 1979080
|
acceptordenyevaluatingllmfairnessandperformanceinloanapprovalacrosstabletotextserializationapproaches/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:69ec93d27c24cc0bd4b1c7ff86d84389b9c691bd444cd151a3d9875d8dc3ba51
|
| 3 |
+
size 580987
|
acebenchacomprehensiveevaluationofllmtoolusage/3bea8f8a-7404-4fd2-8b58-05b614620c68_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:58a838fc71296e0c19f59f3bbc83ee8a34a88c3ef193210408a9ab62b101a26a
|
| 3 |
+
size 172896
|
acebenchacomprehensiveevaluationofllmtoolusage/3bea8f8a-7404-4fd2-8b58-05b614620c68_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:2564c05fc633150a801a9fab03e4b59371b236a54b4dd3e9ea8ac1b8c4499c76
|
| 3 |
+
size 208942
|