Chelsea707 commited on
Commit
885af38
·
verified ·
1 Parent(s): 98bd7e3

MinerU Batch bf050d1b-7cc0-4553-9038-095f9c6a4d04 (Part 3/8)

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +8 -0
  2. data/2025/2504_10xxx/2504.10478/c3757ad9-b534-4066-bb62-c73ee2cc8c04_content_list.json +0 -0
  3. data/2025/2504_10xxx/2504.10478/c3757ad9-b534-4066-bb62-c73ee2cc8c04_model.json +0 -0
  4. data/2025/2504_10xxx/2504.10478/c3757ad9-b534-4066-bb62-c73ee2cc8c04_origin.pdf +3 -0
  5. data/2025/2504_10xxx/2504.10478/full.md +960 -0
  6. data/2025/2504_10xxx/2504.10478/images/010ca52fb4d6d2f4a74f158e3d4b742d0d60ff44e3344acc665ea390dcd0d87d.jpg +3 -0
  7. data/2025/2504_10xxx/2504.10478/images/010df19603f6a8b8aed4c150ff7104474ebf1313d06df35b56b879fe8d0142f9.jpg +3 -0
  8. data/2025/2504_10xxx/2504.10478/images/025400c251313c19885eeda9c411127b9f8264a71c6d8cea574e0029094f187b.jpg +3 -0
  9. data/2025/2504_10xxx/2504.10478/images/02544de3a14cfeaa6aea139b5b0ab1cbd6ec4f541559f7e7213fbf4c3e2553a9.jpg +3 -0
  10. data/2025/2504_10xxx/2504.10478/images/04a8123274c3fe4a7a0f718441ab029fcec04c65de463ce4d5f4f8ddb1113e96.jpg +3 -0
  11. data/2025/2504_10xxx/2504.10478/images/04d3dcc21b958936bdc9a1f91c07188c307c9efefc4ccbe3518792514baf7514.jpg +3 -0
  12. data/2025/2504_10xxx/2504.10478/images/056234a694a700f1c7f01a6e9ed20094d7cd0fdb69eb5de2b5a653d849348ef9.jpg +3 -0
  13. data/2025/2504_10xxx/2504.10478/images/07caf13e3b113102c5e105ffade1b5592d6b69e20a12f6d7be497607fbc435bf.jpg +3 -0
  14. data/2025/2504_10xxx/2504.10478/images/083eced91fb46849603d1ad72fa239d4acbfb899ad82936da65f408e5a04fcc8.jpg +3 -0
  15. data/2025/2504_10xxx/2504.10478/images/08617bf65e4ebad736f26039f5ce3bc40693f92d1f1c5988d045480873e04806.jpg +3 -0
  16. data/2025/2504_10xxx/2504.10478/images/09a06b40fa4ea2f11c53a1fb0b61297195528e6be636a1b05e4aa3162e56bcea.jpg +3 -0
  17. data/2025/2504_10xxx/2504.10478/images/0c4250ef61690cb6f1d8c4915cae968117a343314b9954e0c0942cc5689c68ec.jpg +3 -0
  18. data/2025/2504_10xxx/2504.10478/images/0c94c758263527048a63f3968a11597da849450b1792cda84ac408bad5f98b58.jpg +3 -0
  19. data/2025/2504_10xxx/2504.10478/images/0ca043e2ceef566089e4e9de31db5711daf3366c9e45fb0dd7cea3659355b3d9.jpg +3 -0
  20. data/2025/2504_10xxx/2504.10478/images/0d716e431c635ba9b4ddf9553c1ba062384da0054901a2617578c183216748d1.jpg +3 -0
  21. data/2025/2504_10xxx/2504.10478/images/0f296249734c6e78fe73f1f7ad3efd2ffa14c033f2625ceb5f3ec2ec4cadfdf1.jpg +3 -0
  22. data/2025/2504_10xxx/2504.10478/images/105ec9b2f770f3fd0623182efd6b919cba9dbb10c81c24b7a52fedf2e526985a.jpg +3 -0
  23. data/2025/2504_10xxx/2504.10478/images/106be94ca90f21b22c1046ac94af48a64aa8412f6ffde8449c502aa85c296ee4.jpg +3 -0
  24. data/2025/2504_10xxx/2504.10478/images/110fab68254d7edd76626c12dee15bae4c5510f3d1620d88c62cdb6cd3e849b2.jpg +3 -0
  25. data/2025/2504_10xxx/2504.10478/images/136c97bd309561e7ca54a0ca2069ad3f0b521147a93b614b1fb712de45f0c740.jpg +3 -0
  26. data/2025/2504_10xxx/2504.10478/images/19a01a99597df80ac8df614f4c1787a0a5b99ab4663cc34196f872471af91463.jpg +3 -0
  27. data/2025/2504_10xxx/2504.10478/images/1c07139c31aa052e9802591db451bb95cd466333559e1496c77fb8de1be1c789.jpg +3 -0
  28. data/2025/2504_10xxx/2504.10478/images/1e7d7a28f9f5517750d4bcda26579c66a321c8f7a9d9072a3c097f819faa0084.jpg +3 -0
  29. data/2025/2504_10xxx/2504.10478/images/1eb6b1fd37997ea936e23fe598e2d89e0cb3ea24361199d8c518915d5a76ffb0.jpg +3 -0
  30. data/2025/2504_10xxx/2504.10478/images/205af94455736f91afbef56faed86d6536d4884d2490f957f228fe6f9d772c60.jpg +3 -0
  31. data/2025/2504_10xxx/2504.10478/images/210f985ab8344ab9024b0e8866757143d08c6a402d3b38801bfe06bad03d0471.jpg +3 -0
  32. data/2025/2504_10xxx/2504.10478/images/2129eafde804b7c6253031f96635d28714155c8f61913d221bec62adc0d05819.jpg +3 -0
  33. data/2025/2504_10xxx/2504.10478/images/219b5d70f1361d0524e9e43e4ca877a094875ed4025d0d18eae4a0143396e6c9.jpg +3 -0
  34. data/2025/2504_10xxx/2504.10478/images/235ace59d9f3a9fd4138d33eda84ec30aa842d50403a6325e09b14c22038f792.jpg +3 -0
  35. data/2025/2504_10xxx/2504.10478/images/25ebeb290d7259127374c9c05e2e0ce5075d7b65dfef8be8699ed49268e3b01a.jpg +3 -0
  36. data/2025/2504_10xxx/2504.10478/images/268a4214b18b790ae18f4c66932eb03706a3662c1ff2bbd2235424d0bcd92783.jpg +3 -0
  37. data/2025/2504_10xxx/2504.10478/images/281b7e3b04727e31eb48c9f9eb0dac923c0ed6c74f21659a0a4d939eab7dbcdc.jpg +3 -0
  38. data/2025/2504_10xxx/2504.10478/images/293bfc0fc27150b8eec52e2f179c7a8bee6d05de89eb6f954bb89407cf535e84.jpg +3 -0
  39. data/2025/2504_10xxx/2504.10478/images/2afd9d9121757c862096b1a3d7ace4ff98bfb6719ea911ea906426aa924ec8b7.jpg +3 -0
  40. data/2025/2504_10xxx/2504.10478/images/2b5bc1934d7c465d670fdb3f31d7c83014925d80d56f23229733f5fb3a5e4176.jpg +3 -0
  41. data/2025/2504_10xxx/2504.10478/images/2ba83d43ee06d46a52d1c0a7af9c34e2f33d4173d7a4793a62c64f013efa1d66.jpg +3 -0
  42. data/2025/2504_10xxx/2504.10478/images/2f678190c34bc263f209d564e733b949a49f0c5b80c07037e411d4cdccc2776b.jpg +3 -0
  43. data/2025/2504_10xxx/2504.10478/images/335ad53fb8c267ab9f4d31675ae0ac9c056c01642d53698f0a201571736eb81f.jpg +3 -0
  44. data/2025/2504_10xxx/2504.10478/images/33b82e197b5263aaec61d52cf001bea59054b3e26d2244a7f955f99bbd538652.jpg +3 -0
  45. data/2025/2504_10xxx/2504.10478/images/34da879b88c056d54890866c39e610bb90307c00176ff655d5e91b80505a2801.jpg +3 -0
  46. data/2025/2504_10xxx/2504.10478/images/34dd8473c7bea803c37a13a33b349b2cd610686e846013957bdec7e504f82175.jpg +3 -0
  47. data/2025/2504_10xxx/2504.10478/images/3536f4b1df50cd66ddd3bfbccb80c35fc10834c00925c31728553b31b2fbfd2a.jpg +3 -0
  48. data/2025/2504_10xxx/2504.10478/images/38159ef7ae6f51b78b1f51b27ac07d7019b9006e97ddbd5054b02d32b076acac.jpg +3 -0
  49. data/2025/2504_10xxx/2504.10478/images/393c4e789004c9b1d25cabc33e6caa251b9a30ac8408881cef3e9408d407b11f.jpg +3 -0
  50. data/2025/2504_10xxx/2504.10478/images/39f305f282f3aa2b8d837c2869699fbd2bb84a64fa117b53ed57a515d2e954a8.jpg +3 -0
.gitattributes CHANGED
@@ -1156,3 +1156,11 @@ data/2025/2504_10xxx/2504.10957/aee32c72-0906-4851-a50f-6b02b7f21eea_origin.pdf
1156
  data/2025/2504_11xxx/2504.11054/4145d5b1-8b48-4617-bddf-807b21a8d9a6_origin.pdf filter=lfs diff=lfs merge=lfs -text
1157
  data/2025/2504_11xxx/2504.11171/b768317e-61d3-4f19-a242-b9cdc2cab557_origin.pdf filter=lfs diff=lfs merge=lfs -text
1158
  data/2025/2504_11xxx/2504.11346/58cb6b1b-7ad5-4619-9d3e-81f1c5a39bc2_origin.pdf filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
1156
  data/2025/2504_11xxx/2504.11054/4145d5b1-8b48-4617-bddf-807b21a8d9a6_origin.pdf filter=lfs diff=lfs merge=lfs -text
1157
  data/2025/2504_11xxx/2504.11171/b768317e-61d3-4f19-a242-b9cdc2cab557_origin.pdf filter=lfs diff=lfs merge=lfs -text
1158
  data/2025/2504_11xxx/2504.11346/58cb6b1b-7ad5-4619-9d3e-81f1c5a39bc2_origin.pdf filter=lfs diff=lfs merge=lfs -text
1159
+ data/2025/2504_10xxx/2504.10478/c3757ad9-b534-4066-bb62-c73ee2cc8c04_origin.pdf filter=lfs diff=lfs merge=lfs -text
1160
+ data/2025/2504_10xxx/2504.10479/71273ce6-5170-4939-8354-af535b974810_origin.pdf filter=lfs diff=lfs merge=lfs -text
1161
+ data/2025/2504_10xxx/2504.10481/29785fca-1f46-4ab1-92e1-b0b4c9aee15b_origin.pdf filter=lfs diff=lfs merge=lfs -text
1162
+ data/2025/2504_10xxx/2504.10483/9b7bb575-f36f-48cf-a562-8fb8239c8a45_origin.pdf filter=lfs diff=lfs merge=lfs -text
1163
+ data/2025/2504_10xxx/2504.10685/2b7c0cf2-f712-45f3-86c5-afe1fcf3d48b_origin.pdf filter=lfs diff=lfs merge=lfs -text
1164
+ data/2025/2504_10xxx/2504.10686/02e14e26-d981-43b7-bd68-0bb6d5c44d72_origin.pdf filter=lfs diff=lfs merge=lfs -text
1165
+ data/2025/2504_10xxx/2504.10861/a4e0028e-483e-4013-a80e-4d616bb12d80_origin.pdf filter=lfs diff=lfs merge=lfs -text
1166
+ data/2025/2504_11xxx/2504.11491/0400dc9e-bb51-4dac-9ac6-e38f3b9731ae_origin.pdf filter=lfs diff=lfs merge=lfs -text
data/2025/2504_10xxx/2504.10478/c3757ad9-b534-4066-bb62-c73ee2cc8c04_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2504_10xxx/2504.10478/c3757ad9-b534-4066-bb62-c73ee2cc8c04_model.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2504_10xxx/2504.10478/c3757ad9-b534-4066-bb62-c73ee2cc8c04_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b472ea59c5763afc03c43e3f3dd60e3fdf06da4497077fceea24202074486ce2
3
+ size 5283981
data/2025/2504_10xxx/2504.10478/full.md ADDED
@@ -0,0 +1,960 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Weight Ensembling Improves Reasoning in Language Models
2
+
3
+ Xingyu Dang\*,1 Christina Baek\*,2 Kaiyue Wen3 Zico Kolter2 Aditi Raghunathan2
4
+
5
+ $^{1}$ Tsinghua University $^{2}$ Carnegie Mellon University $^{3}$ Stanford University
6
+
7
+ ✉ dangxy20@mails.tsinghua.edu.cn, kbaek@andrew.cmu.edu
8
+
9
+ # Abstract
10
+
11
+ We investigate a failure mode that arises during the training of reasoning models, where the diversity of generations begins to collapse, leading to suboptimal test-time scaling. Notably, the Pass@1 rate reliably improves during supervised finetuning (SFT), but Pass@k rapidly deteriorates. Surprisingly, a simple intervention of interpolating the weights of the latest SFT checkpoint with an early checkpoint, otherwise known as WiSE-FT, almost completely recovers Pass@k while also improving Pass@1. The WiSE-FT variant achieves better test-time scaling (Best@k, majority vote) and achieves superior results with less data when tuned further by reinforcement learning. Finally, we find that WiSE-FT provides complementary performance gains that cannot be achieved only through diversity-inducing decoding strategies, like temperature scaling. We formalize a bias-variance tradeoff of Pass@k with respect to the expectation and variance of Pass@1 over the test distribution. We find that WiSE-FT can reduce bias and variance simultaneously, while temperature scaling inherently trades off between bias and variance.
12
+
13
+ # 1 Introduction
14
+
15
+ Recent advances in large language models (LLMs) have showcased their remarkable ability to perform complex reasoning, yet these successes often hinge on test-time scaling strategies (Lightman et al., 2023; Snell et al., 2024; Wu et al., 2024). In many applications, such as math problems, puzzles, and logical reasoning, LLMs employ a verification framework where it is significantly easier for the model to verify a candidate solution than to generate one from scratch. This distinction has given rise to strategies that sample multiple "reasoning traces" or sequences of reasoning steps during inference, selecting the best final guess through an outcome reward model (ORM) or majority vote. In this setting, an upper bound on the performance a model could achieve is measured by Pass@K, or the probability that at least one out of $K$ independently sampled reasoning traces is correct.
16
+
17
+ Unfortunately, while the standard training pipeline of supervised finetuning (SFT) followed by reinforcement learning (RL) dependably improves Pass@1 for reasoning, Pass@K tends to drop early into finetuning (Cobbe et al., 2021; Chow et al., 2024a; Chen et al., 2025). This mismatch arises from a symptom of finetuning called diversity collapse, where overtuned models yield less diverse generations. This is detrimental to Pass@K since the model wastes $K$ attempts on only a handful of guesses. In fact, by analyzing the model's error rate i.e., 1 - Pass@1, across the test distribution, we derive a Pass@K bias-variance trade-off. To improve expected test Pass@K, one can either reduce the bias which is the expected error rate or how much the model's error rate varies across problems. The latter term is connected to diversity - more diversity allows models to hedge and do uniformly well across all test questions. In particular, during SFT, Pass@1 improves (bias ↓) at the cost of diversity collapse (variance ↑).
18
+
19
+ Surprisingly, common ways of alleviating diversity collapse, such as early stopping at peak Pass@K or decoding with high temperature, suffer from the reverse trade-off: diversity improves (variance $\downarrow$ ) at the cost of overall Pass@1 degrading (bias $\uparrow$ ). Consequently, in this paper we are concerned with a central question:
20
+
21
+ Figure 1: Pass@k of WiSE-FT versus SFT on GSM8k Gemma-2-2B supervised finetuned and evaluated on GSM8k. At each SFT timestep $t$ , we evaluate Pass@k of checkpoint $w_{t}$ (in dashed) with its WiSE-FT variant $1/2 \cdot w_{t} + 1/2 \cdot w_{0}$ (in solid), where traces are independently sampled with temperature $T = [0.7, 1.0, 1.3, 1.6]$ .
22
+ ![](images/9a00ddd660bf8f8eeda9cd85892cb8a7e3465e904ab2b4e6cf073e2f5f617379.jpg)
23
+ --- SFT T=0.7 --- SFT T=1.0 WiSE-FT T=1.0 SFT T=1.3 SFT T=1.6
24
+
25
+ ![](images/3536f4b1df50cd66ddd3bfbccb80c35fc10834c00925c31728553b31b2fbfd2a.jpg)
26
+
27
+ ![](images/19a01a99597df80ac8df614f4c1787a0a5b99ab4663cc34196f872471af91463.jpg)
28
+
29
+ Is it possible to simultaneously improve both Pass@1 and Pass@K, thereby overcoming the bias-variance tradeoff inherent in current approaches?
30
+
31
+ In our work, we introduce a simple, scalable and effective intervention that allows models to achieve both high Pass@K and Pass@1 across mathematical reasoning tasks GSM8k, MATH, and AIME. The specific technique we use is a variant of WiSE-FT (Wortsman et al., 2022) where we interpolate the weights of the latest SFT checkpoint $\boldsymbol{w}_t$ with an early checkpoint $w_0$ as $\boldsymbol{w}_{\mathrm{WiSE}(t)} = \frac{1}{2} \cdot \boldsymbol{w}_0 + \frac{1}{2} \cdot \boldsymbol{w}_t$ . Our key finding is that WiSE-FT successfully merges the diverse sampling capabilities of earlier checkpoints while retaining or surpassing the Pass@1 of later checkpoints. In Figure 1, we observe that the WiSE-FT model achieves both higher Pass@K and Pass@1 with more SFT steps $t$ , unlike naive SFT which suffers from an early decay in Pass@K. Moreover, the gains with WiSE-FT is unachievable by early-stopping or diversity-aware decoding alone.
32
+
33
+ Thus, we propose a new paradigm of training reasoning models: 1.) Train extensively using SFT as long as Pass@1 improves, 2.) Perform WiSE-FT with an earlier SFT checkpoint, 3.) Continue tuning the WiSE-FT variant using RL. Overall, the WiSE-FT model has the following immediate practical benefits:
34
+
35
+ - Better Test-Time Scaling Across all datasets and base models, the WiSE-FT variant achieves the highest performance with test-time scaling (Majority Vote, ORM) compared to an overtrained SFT model paired with diversity-aware decoding.
36
+ - Better Reinforcement Learning Since RL uses self-generated data to tune models, to generalize reliably, it is important for generations to provide sufficient learning signal while also having high coverage over the data space. We find that continued RL training starting from WiSE-FT weights achieves superior results with less synthetic data compared to initializing RL from the last SFT checkpoint and even early-stopped SFT.
37
+
38
+ In summary, we provide a comprehensive analysis of how reasoning models suffer from diversity collapse during SFT and its negative downstream impact during RL and test-time scaling. We first discuss our WiSE-FT findings in §4. Motivated by this discovery, we investigate two fundamental questions. First, we investigate diversity collapse during SFT and RL of reasoning models in §5. Diversity collapse not only impacts the model's ability to attempt different guesses. In fact, we make an even stronger observation - the generations of reasoning models converge towards a single reasoning trace for each test question. We theoretically prove that standard RL algorithms (i.e., REINFORCE and GRPO) fail to recover lost diversity in a simplified discrete bandit setting.
39
+
40
+ Second, we formalize the competing goals of Pass@1 and Pass@K as a bias-variance trade-off in §6. We empirically measure and compare the bias and variance of WiSE-FT versus early-stopping versus high temperature decoding. Notably, only WiSE-FT reduces both bias and variance. We conclude with a remark on the limitations of decoding strategies such as top-k (Shao et al., 2017), nucleus (Holtzman et al., 2020), and min-p (Nguyen et al., 2024), at eliciting the maximum capabilities with test-time scaling from current reasoning models.
41
+
42
+ # 2 Related Works
43
+
44
+ Diversity collapse with SFT: The standard pipeline for enhancing reasoning in LLMs involves an initial phase of supervised fine-tuning (SFT) followed by reinforcement learning (RL) (Guo et al., 2025; Setlur et al., 2024). SFT is critical for instilling interpretable and readable reasoning chains and ensuring that the model adheres to a consistent rollout template (Guo et al., 2025). However, a number of recent works have identified critical pitfalls of SFT that hinder the model's ability to explore and ultimately its overall problem-solving ability. Notably, Cobbe et al. (2021) observe diversity collapse when finetuning on the GSM8k training dataset, during which the Pass@1 continuously improves whereas Pass@k starts to fall shortly into the training. A similar diversity collapse phenomenon also exists in the self-improvement setting with SFT (Song et al., 2024), and is theoretically investigated as the sharpening effect (Huang et al., 2024). This is not desirable as diverse sampling at inference is important for test-time scaling using majority voting (Wang et al., 2023) or reward model guided search (Setlur et al., 2024; Beeching et al., 2024). Yeo et al. (2025); Chu et al. (2025) attribute this behavior to overfitting, memorization of samples and overfixation to a template style leading to reduced generalization. In our work, we corroborate similar findings and propose ensembling over the course of SFT as a mitigation strategy.
45
+
46
+ Mitigating diversity collapse: Given the importance of diversity for effectively scaling inference-time compute, several recent works have proposed auxiliary finetuning objectives and decoding strategies to mitigate diversity collapse. Li et al. (2025) regularize the SFT process using a game-theoretic framework that encourages sparse updates, thereby preserving output diversity. Zhang et al. (2024b) directly optimizes for diversity during finetuning. Other approaches modify the finetuning procedure to directly optimize for Best-of-N sampling at inference time (Chow et al., 2024b; Sessa et al., 2024; Chen et al., 2025). Another line of work focuses on inference-time decoding, explicitly encouraging diverse solutions through modified beam search strategies (Vijayakumar et al., 2018; Olausson et al., 2024; Chen et al., 2024; Beeching et al., 2024). Li et al. (2023) improve diversity during parallel decoding by appending curated prompts to the input. In formal reasoning settings e.g., Lean, methods such as Monte Carlo tree search have been used to diversify intermediate reasoning steps, as demonstrated in AlphaProof (AlphaProof and AlphaGeometry teams, 2024). In this work, we identify a simple and complementary intervention during the finetuning process to maintain the diversity of generations. We especially care about enforcing diversity while preserving the overall accuracy of generations.
47
+
48
+ # 3 Preliminaries and Experimental Setup
49
+
50
+ # 3.1 Pass@k, Best@k, and Majority Vote
51
+
52
+ Given a reasoning model $f(\cdot)$ , a decoding strategy $D$ , and problem $x$ , the model's solution is obtained by sampling a reasoning trace $r := [x, s^{(1)}, s^{(2)}, \dots, s^{(n)}, \hat{y}]$ consisting of a sequence of intermediate steps $s^{(i)}$ and a final guess $\hat{y}$ . Given $k$ independently sampled traces, Pass@K measures the probability that at least one guess matches the true answer $y$ :
53
+
54
+ $$
55
+ \operatorname{Pass}@\mathrm{K}(x) = \mathbb{E}_{[\boldsymbol{r}_i]_{i=1}^{k} \sim D(f(x))}\left[\mathbb{1}\{\exists\, i \in [k] \text{ s.t. } \hat{y}_i = y\}\right] = 1 - (1 - \rho_x)^{K} \tag{1}
56
+ $$
57
+
58
+ where $\rho_{x} = P(\hat{y} = y\mid x,f,D)$ is the Pass@1 or marginal probability of sampling the ground truth answer. Then $(1 - \rho_x)^K$ is the probability that all $K$ guesses are incorrect. We will refer to Pass@1 as $\rho_{x}$ interchangeably in our paper.
59
+
60
+ In practice, test-time compute is scaled by selecting one of $K$ guesses either by an output reward model (ORM) or Majority Vote. Then we can measure Best@K as
61
+
62
+ $$
63
+ \operatorname{Best}@\mathrm{K}(x) = \mathbb{E}_{[\boldsymbol{r}_i]_{i=1}^{k} \sim D(f(x))}\left[\hat{y}_{i^{*}} = y\right] \quad \text{where } i^{*} = \arg\max_{i \in [K]} \sum_{j=1}^{K} \mathbb{1}\left\{\hat{y}_i = \hat{y}_j\right\} \text{ or } \operatorname{ORM}(\boldsymbol{r}_i)
64
+ $$
65
+
66
+ Notably, Pass@K is equivalent to Best@K using a perfect ORM verifier. As we will observe, WiSE-FT achieves both higher Pass@1 and Pass@K and this directly translates to achieving better Best@K with an ORM verifier and by Majority Vote.
67
+
68
+ # 3.2 Weight-Space Ensembling (WiSE-FT)
69
+
70
+ WiSE-FT is a weight-space ensembling technique proposed by Wortsman et al. (2022) to improve the out-of-distribution accuracy of finetuned models at no extra computational cost. In particular, while models tend to achieve better in-distribution performance after finetuning, they tend to be less robust to distribution shift. Surprisingly, by simply interpolating the weights of the finetuned model $\boldsymbol{w}_t$ with the pretrained weights $\boldsymbol{w}_0$
71
+
72
+ $$
73
+ \boldsymbol {w} _ {\mathrm {W i S E} (t)} = \delta \cdot \boldsymbol {w} _ {0} + (1 - \delta) \cdot \boldsymbol {w} _ {t} \tag {2}
74
+ $$
75
+
76
+ WiSE-FT can achieve the best of both worlds: the out-of-distribution accuracy of models improves without incurring a drop in in-distribution accuracy. Similar to this philosophy, we apply weight ensembling to achieve both the diverse generation ability of early SFT checkpoints while maintaining the high Pass@1 accuracy of later SFT checkpoints.
77
+
78
+ # 3.3 Training and Evaluation Pipeline
79
+
80
+ The majority of our experiments are conducted on Gemma-2-2B and Qwen-2.5-0.5B. We perform SFT on a 30K subset of rephrased augmentations of GSM8k (Cobbe et al., 2021) and MATH (Hendrycks et al., 2021) in MetaMath40k (Yu et al., 2023) for 1710 steps or 10 epochs. We then continue finetuning on another 30K subset of rephrased training questions from MetaMath using Group Relative Policy Optimization (GRPO) with a binary reward of the correctness of the model's final answer. Finally, we evaluate models on GSM8K and MATH500, respectively. To estimate the true Pass@K and Pass@1 marginalized over the distribution of sampled traces, we sample 100 reasoning traces per test example and average over them to estimate Pass@1, i.e. $\rho_{x}$ . Then to calculate Pass@K, we use the theoretical formula $1 - (1 - \rho_{x})^{K}$ in Equation 1. Unless noted otherwise, we employ a naive decoding strategy with top-p threshold 0.9, temperature $T = 0.8$ , and top-k with $K = 50$ .
81
+
82
+ # 4 Improving Diverse Reasoning Capabilities by WiSE-FT
83
+
84
+ We first carefully track Pass@K for $K \in \{1, 4, 32\}$ across the SFT trajectory of Qwen-2.5-0.5B and Gemma-2-2B. Similar to findings from Cobbe et al. (2021); Chen et al. (2025), we observe that Pass@1 continues to improve with longer SFT, whereas for larger $K = 4, 32$ , Pass@K tends to peak much earlier on in training (in Figure 1, 17, and 19). In other words, while later SFT checkpoints achieve higher Pass@1, earlier SFT checkpoint achieve higher Pass@K. This tradeoff in model selection is not ideal downstream for test-time scaling.
85
+
86
+ Building upon this intuition, we propose weight ensembling between earlier and later SFT checkpoints. We apply a variant of WiSE-FT where instead of the pretrained model, we interpolate between the earliest SFT checkpoint (in our case, after 1 epoch of training) and the weights of later checkpoint. As shown in Figure 2, we observe a "sweet spot" of interpolation coefficients $\delta \in (0,1)$ where the WiSE-FT model achieves both higher Pass@1 than the last SFT model and higher Pass@K than the early SFT model. We will fix $\delta = 1/2$ , which generally performs decently for all of the datasets we've tested. In fact, after WiSE-FT $w_{\mathrm{WiSE}(t)}$ , both Pass@1 and Pass@k grow monotonically with SFT steps $t$ (see Figure 1).
87
+
88
+ ![](images/546890eae7b6307835210a35ac4546692989e395b2041aa92121b17022a37557.jpg)
89
+ Figure 2: Pass@1 vs. Pass@K across Interpolation Coefficients We perform WiSEFT with $\delta \in [0.1, 0.9]$ between the first and last checkpoints of model (in legend) finetuned on GSM8K, MATH, and OpenThoughts-114K, then evaluate on GSM8K, MATH500, and AIME24, respectively. Early SFT model observe higher Pass@K (y-axis) while later SFT model observes higher Pass@1 (x-axis). The interpolated model observe best of both metrics.
90
+
91
+ ![](images/6c5fa0e260a30825c41c9e0eaa75949a07f90449475e4e78fc1c1a49f05915b4.jpg)
92
+
93
+ ![](images/5137cb2f59693ab411059d8208f1a96ff60109df3970efcaaad046b30b11e30c.jpg)
94
+
95
+ Better Test-Time Scaling This boost in both Pass@1 and Pass@K directly translates to better performance with test-time scaling. We measure Best@K by Majority Vote and by selecting the reasoning trace with highest reward using an off-the-shelf ORM RLHFlow/Llama3.1-8B-PRM-Deepseek-Data (Xiong et al., 2024). We evaluate the performance of the last SFT checkpoint with highest Pass@1 versus the corresponding WiSE-FT variant with $\delta = 1/2$ . In Figure 3, we see that the performance gap on MATH500 between the final Gemma-2-2B SFT checkpoint and Wise-FT model widens with larger $K$ . The WiSE-FT model achieves $5 - 7\%$ better performance with test-time scaling.
96
+
97
+ Better RL Scaling WiSE-FT's ability to achieve both high Pass@1 and Pass@K is particularly advantageous for continued RL training where models are further trained by policy gradient methods using self-generated data. In particular, WiSE-FT is able to generate data rich in learning signal (high Pass@1) while still having high coverage over the data space (high Pass@K). We continue training on rephrased training questions of GSM8K and MATH using GRPO paired with a binary reward of the correctness of the final guess. Across runs, we observe that continued RL training starting from the final WiSE-FT model improves performance more stably than finetuning starting from the final SFT checkpoint. Notably the final SFT checkpoint suffers low coverage over the data space, causing Pass@1 to improve slowly. We also try continued RL training from an earlier SFT checkpoint with peak Pass@4 performance. While RL scales better over the early SFT checkpoint in comparison to the final checkpoint, the performance still remains subpar compared to WiSE-FT.
98
+
99
+ # 4.1 General Purpose Reasoning Models
100
+
101
+ So far we have studied the effect of WiSE-FT on models tuned on reasoning data for the same specific reasoning task (e.g., train on GSM8k and evaluate on GSM8k). We've additionally tested how well our findings generalize to models trained on general purpose reasoning datasets and tested on an out-of-distribution reasoning task. We take Qwen2.5-7B-Instruct and SFT for 5 epochs on OpenThoughts-114k, a high-quality synthetic dataset of math, science, and coding questions paired with DeepSeek-R1 completions, then evaluate its performance on AIME24 competition problems (with ASY code for figures from Muennighoff et al. (2025)). In this setting, the Pass@K trends during SFT are more subtle. We still observe diversity collapse in Figure 12, but the effect is not strong enough for Pass@K to drop back down. However, we observe that the rate at which Pass@K improves for $K \in \{16,32\}$ slows down early while Pass@1 grows at a constant rate (Figure 10). We then perform WiSE-FT between the final and earlier checkpoint with
102
+
103
+ ![](images/4784db23bc7951ee2fc6656f65f8c2d3d009a5771392f48ebe1e24aa859028e1.jpg)
104
+
105
+ ![](images/281b7e3b04727e31eb48c9f9eb0dac923c0ed6c74f21659a0a4d939eab7dbcdc.jpg)
106
+
107
+ ![](images/e3b0978da07922462da538218a0295754fb4a5a2ab33dcf01d466532d3e49fa5.jpg)
108
+
109
+ ![](images/38159ef7ae6f51b78b1f51b27ac07d7019b9006e97ddbd5054b02d32b076acac.jpg)
110
+ (a)
111
+ (b)
112
+
113
+ ![](images/e8032220c0101b093c7abbf09b817da62b4a68f6faf283ab9fe582fdf70c1c5e.jpg)
114
+ (c)
115
+ Figure 3: Downstream Advantages of WiSE-FT: (a) Best@K on MATH500 of the final SFT Gemma2-2B checkpoint and its WiSE-FT counterpart. (b) Pass@K on AIME24 WiSE-FT after SFT on general purpose reasoning dataset OpenThoughts-114k achieves higher Pass@K on AIME24. (c) RL Scaling Gemma and Qwen SFT checkpoints further tuned by GRPO on GSM8K and MATH, respectively. RL from the final WiSE-FT model achieves higher Pass@1 with less data compared to GRPO starting from both early and late SFT checkpoints.
116
+
117
+ higher diversity. We choose early checkpoint at epoch 3 where improvements in Pass@K begin to slow. Similarly, we observe that WiSE-FT improves both Pass@1 and Pass@K in Figure 2.
118
+
119
+ # 5 Diversity Collapse during Finetuning
120
+
121
+ In previous sections we alluded to the phenomenon where $\mathrm{Pass}@\mathrm{K}$ decreases because SFT and RL induce diversity collapse in reasoning traces. To verify this hypothesis, we sample 100 traces per test GSM8k problem and measure diversity using three metrics:
122
+
123
+ 1. Answer Diversity: The fraction of unique guesses $\hat{y}$ among reasoning traces.
124
+ 2. Operation Diversity: The fraction of unique sequence of arithmetic operations performed among reasoning traces (In GSM8k, each intermediate step consists of a basic arithmetic operation, e.g. $5 + 3 = 8$ ).
125
+ 3. Semantic Diversity: The average cosine similarity between the text embeddings of the reasoning traces, computed using Stella-400M-v5 (Zhang et al., 2024a)
126
+
127
+ ![](images/97906774fe3390f2c8dce9d365178b77721d1991265e137bf86ad95237738532.jpg)
128
+ Figure 4: Diversity Collapse The answer, semantic, and operation diversity of Gemma-2-2B reasoning traces across GSM8k test examples. Colors map to different SFT checkpoints.
129
+
130
+ ![](images/c09374f5d22f90dca1bd1db2225d9246275d5d6c3a44de40384a8f4f0172de11.jpg)
131
+
132
+ ![](images/6b301ba7c226d983f2678b214edd7278f1cfb4320bcae13b40fbe1456b86da77.jpg)
133
+ Diversity Across SFT $[T = 0.8]$
134
+
135
+ ![](images/0ca043e2ceef566089e4e9de31db5711daf3366c9e45fb0dd7cea3659355b3d9.jpg)
136
+
137
+ ![](images/33b82e197b5263aaec61d52cf001bea59054b3e26d2244a7f955f99bbd538652.jpg)
138
+ Figure 5: Pass@k for SFT and RL of Qwen-2.5-0.5B on GSM8K. The purple solid line measures Pass@K across SFT steps, while the dashed lines correspond to further training different checkpoints by Proximal Policy Optimization (PPO). While Pass@1 continues to improve, Pass@k for larger K can decrease even with RL.
139
+
140
+ As shown in Figure 4, we observe a stark trend where longer SFT on Gemma-2-2B incrementally suffers from clear diversity collapse across all diversity metrics. Specifically, the model places most of its probability mass not only on one particular guess, but on a single reasoning trace, as evidenced by the reduced semantic and operation diversity.
141
+
142
+ # 5.1 Theoretical Discussion of Diversity Collapse During SFT and RL
143
+
144
+ We assess theoretically why diversity collapse tends to arise during SFT and RL training. Our analysis reveals that while SFT and RL operate on different principles, they share common pathways that lead to reduced generation diversity when optimizing for accuracy.
145
+
146
+ Diversity Collapse during SFT Overparameterized models are well-known to exhibit overconfidence in their predictions, an effect that has been studied extensively in classification (Guo et al., 2017). In particular, the model's confidence towards the most likely class $P(\hat{y} = k_{\max} \mid x)$ is often much higher than the model's accuracy. In binary classification with linear models $f(x) = \sigma(\langle \boldsymbol{w}, \boldsymbol{x} \rangle)$ and linearly separable training data, gradient descent provably drives the norm of the weights to infinity, causing probabilities to collapse to 0 or 1 (Soudry et al., 2018). We demonstrate this in linear models in Appendix A. A similar phenomenon likely arises in large reasoning models, which may also be prone to overfitting during SFT, ultimately leading to overly confident solutions in spite of limited coverage over the space of traces (Cobbe et al., 2021).
147
+
148
+ Diversity Collapse during RL We further prove why applying reinforcement learning to a low-diversity policy yields suboptimal results—and sometimes even exacerbates diversity collapse—in a discrete bandit setting (see Figure 5). In this scenario, we assume there exist $K$ equally good arms, corresponding to a set of successful strategies, and one bad arm that the policy should learn to avoid. We show two key results in this setting:
149
+
150
+ 1. Implicit Collapse of Policy Diversity without KL Regularization. Our analysis demonstrates that when standard reinforcement learning algorithms—REINFORCE and GRPO—are applied without KL regularization, the training dynamics inevitably lead to a collapse in output diversity. Although multiple arms (actions) are equally optimal, the updates become self-enforcing as training progresses. Once one of the good arms is randomly reinforced, its probability increases at the expense of the others, ultimately driving the policy to converge on a single-arm strategy (Theorem C.1).
151
+ 2. Diversity Does Not Increase with KL Regularization. When KL regularization is incorporated to constrain the divergence from the initial policy in REINFORCE, the final policy no longer collapses into a single-arm strategy. However, the diversity of the converged policy cannot exceed the initial diversity. Concretely, we show that the probability distribution over the good arms remains proportional to the initial distribution when the RL algorithm converges (Theorem C.8). This explains why initializing with a diverse policy is critical for the generalization of reinforcement learning.
152
+
153
+ # 6 Bias-Variance Tradeoff of Pass@K
154
+
155
+ So far, we saw a mismatch in growth of Pass@1 and Pass@K during SFT and alluded to the impact of diversity collapse to Pass@K. We now formalize the relationship between Pass@1, Pass@K, and diversity collapse. Notably, we show that the upper bound of expected Pass@K over the test distribution can be decomposed into bias and variance quantities.
156
+
157
+ # 6.1 Diversity Collapse leads to Bimodal Pass@1 Distribution
158
+
159
+ Consider the expected $\mathrm{Pass}@\mathrm{K}$ over the entire test distribution $x, y \sim \mathcal{D}$ . By Jensen's inequality, we can derive a straightforward upper bound of expected $\mathrm{Pass}@\mathrm{K}$ that decomposes into the bias and variance of $1 - \rho_x$ (See proof in Appendix B). Note that the upper bound falls monotonically with larger bias and variance:
160
+
161
+ $$
162
+ \textbf{Proposition 6.1.} \quad \mathbb{E}_{x, y \sim \mathcal{D}} [\operatorname{Pass}@\mathrm{K}(x)] \leq 1 - \Big( \big( \underbrace{\mathbb{E}_{x, y \sim \mathcal{D}}[1 - \rho_{x}]}_{\text{Bias}} \big)^{2} + \underbrace{\operatorname{Var}(\rho_{x})}_{\text{Variance}} \Big)^{k/2}
163
+ $$
164
+
165
+ In Figure 6b, we plot the distribution of error $1 - \rho_{x}$ , estimated using 100 sampled traces, over GSM8K test examples. We notice two trends with longer SFT. First, bias decreases, i.e., the expected error shifts towards 0. However, the distribution becomes increasingly bimodal with the densities converging towards the two extremes 0 and 1. As a result, the variance increases with longer SFT. This increase in variance directly explains the drop in Pass@k.
166
+
167
+ The bimodality of the $1 - \rho_{x}$ distribution means that the Pass@1 of any test problem is either very high or very low. Interestingly, one explanation for the increased bimodality of the distribution of $1 - \rho_{x}$ is in fact when models suffer from diversity collapse. In other words, diversity collapse causes a particular guess to be oversampled for each test problem. If the model places high probability on an incorrect guess, Pass@1 is very low. On the other hand, if the model places high probability on the correct guess, Pass@1 is very high. We illustrate this relationship in Figure 6a. All in all, Pass@K can be improved in two ways - either reduce bias by improving Pass@1 or reduce variance by increasing diversity.
168
+
169
+ ![](images/105ec9b2f770f3fd0623182efd6b919cba9dbb10c81c24b7a52fedf2e526985a.jpg)
170
+ (a)
171
+
172
+ ![](images/401631f09a461d1e42f689302a470ca43729d0d0654d63edcc98eccaf75cd6ba.jpg)
173
+ (b)
174
+ Figure 6: Histogram of error $1 - \rho_{x}$ of Gemma-2-2B SFT checkpoints across GSM8k test. SFT progressively decreases bias but increases variance of error i.e., $1 - \mathrm{Pass}@1$ , across the test distribution, causing Pass@K to fall. Applying Wise-FT reduces both bias and variance, but temperature scaling trades off decreasing variance with increased bias.
175
+
176
+ # 6.2 WiSE-FT vs. Diverse Decoding
177
+
178
+ While we've proposed inducing diversity by WiSE-FT, another common alternative for inducing diversity is temperature scaling the logits. High temperature smoothens the logits allowing the model to more likely sample low probability tokens. In Figure 1, we see that while high temperatures indeed improve Pass@K, the Pass@K at any SFT timestep notably never reaches the Pass@K of our final WiSE-FT model. If temperature scaling also increases diversity, why does WiSE-FT strictly outperform sampling with high temperature? In Figure 6b, we plot the distribution of $1 - \rho_{x}$ if we sample from the last SFT checkpoint with high temperature $T = 1.5$ . As expected, we see that the model reasons more diversely. This smoothens the bimodal peaks and reduces the variance. However, the average accuracy of the model generations also degrades, causing the bias to go back up. We suspect a bias-variance tradeoff is inherent in diversity-inducing decoding approaches. For example, min-p (Nguyen et al., 2024) combines temperature scaling with adaptive thresholding to not sample outlier tokens. However, this additional control is unable to reduce bias (Figure 16). Surprisingly, WiSE-FT uniquely manages to reduce both bias and variance.
179
+
180
+ # 7 Discussion
181
+
182
+ In this work, we investigated the phenomenon of diversity collapse during the training of reasoning models. Our analysis reveals that standard SFT and RL pipelines can deteriorate in Pass@ $K$ due to the convergence of model generations toward a single reasoning trace. We demonstrated that WiSE-FT, which interpolates between early and late SFT checkpoints, significantly improves both Pass@1 and Pass@ $K$ across multiple math datasets and model scales. This is unlike alternative approaches such as temperature scaling or early
183
+
184
+ stopping, which face an inherent tradeoff. Furthermore, improving on these metrics corresponded with better adaptation to test-time scaling and RL. But other limitations of WiSE-FT may exist at larger scale, which we leave for future work.
185
+
186
+ Overall, our work reveals the importance of maintaining diversity in reasoning models. Current decoding strategies (e.g., min-p, nucleus, and top-k) are still unable to fully extract a model's capabilities. We estimate that a significant gap, of tens of percent, remains compared to the optimal decoding strategy for Pass@K, i.e., top-K sampling over the model's marginal answer distribution $P(\hat{y} \mid x)$ (see Table 1 and Appendix G). We encourage future works to address downstream limitations more carefully in earlier stages of the training pipeline.
187
+
188
+ <table><tr><td>Method</td><td>Pass@2</td><td>Pass@4</td></tr><tr><td>Nucleus</td><td>0.57</td><td>0.67</td></tr><tr><td>Min-p</td><td>0.57</td><td>0.67</td></tr><tr><td>Top-k</td><td>0.56</td><td>0.67</td></tr><tr><td>Optimal</td><td>0.76</td><td>0.83</td></tr></table>
189
+
190
+ Table 1: Best Pass@k of Gemma on GSM8k across SFT checkpoints
191
+
192
+ # 8 Acknowledgements
193
+
194
+ We'd like to thank Aviral Kumar, Sean Welleck, Amrith Setlur and Yiding Jiang for insightful discussions about test-time scaling and reinforcement learning. We'd also like to thank Alex Li, Sachin Goyal, and Jacob Springer for their meaningful contribution to our figures and literature review. We gratefully acknowledge support from Apple, Google, Cisco, OpenAI, NSF, Okawa foundation, the AI2050 program at Schmidt Sciences (Grant #G2264481), and Bosch Center for AI.
195
+
196
+ # References
197
+
198
+ AlphaProof and AlphaGeometry teams. Ai achieves silver-medal standard solving international mathematical olympiad problems, jul 2024. URL https://deepmind.google/discover/blog/ai-solves-imo-problems-at-silver-medal-level/.
199
+ Edward Beeching, Lewis Tunstall, and Sasha Rush. Scaling test-time compute with open models, 2024. URL https://huggingface.co/spaces/HuggingFaceH4/blogpost-scaling-test-time-compute.
200
+ Jeff Bilmes. Submodularity in machine learning and artificial intelligence. arXiv preprint arXiv:2202.00132, 2022.
201
+ Feng Chen, Allan Raventos, Nan Cheng, Surya Ganguli, and Shaul Druckmann. Rethinking fine-tuning when scaling test-time compute: Limiting confidence improves mathematical reasoning. arXiv preprint arXiv:2502.07154, 2025.
202
+ Guoxin Chen, Minpeng Liao, Chengxi Li, and Kai Fan. Alphamath almost zero: Process supervision without process, 2024. URL https://arxiv.org/abs/2405.03553.
203
+ Yinlam Chow, Guy Tennenholtz, Izzeddin Gur, Vincent Zhuang, Bo Dai, Sridhar Thiagarajan, Craig Boutilier, Rishabh Agarwal, Aviral Kumar, and Aleksandra Faust. Inference-aware fine-tuning for best-of-n sampling in large language models. arXiv preprint arXiv:2412.15287, 2024a.
204
+ Yinlam Chow, Guy Tennenholtz, Izzeddin Gur, Vincent Zhuang, Bo Dai, Sridhar Thiagarajan, Craig Boutilier, Rishabh Agarwal, Aviral Kumar, and Aleksandra Faust. Inference-aware fine-tuning for best-of-n sampling in large language models, 2024b. URL https://arxiv.org/abs/2412.15287.
205
+ Tianzhe Chu, Yuexiang Zhai, Jihan Yang, Shengbang Tong, Saining Xie, Dale Schuurmans, Quoc V. Le, Sergey Levine, and Yi Ma. Sft memorizes, rl generalizes: A comparative study of foundation model post-training, 2025. URL https://arxiv.org/abs/2501.17161.
206
+ Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, Christopher Hesse, and John Schulman. Training verifiers to solve math word problems, 2021. URL https://arxiv.org/abs/2110.14168.
207
+
208
+ Chuan Guo, Geoff Pleiss, Yu Sun, and Kilian Q Weinberger. On calibration of modern neural networks. In International conference on machine learning, pp. 1321-1330. PMLR, 2017.
209
+ Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025.
210
+ Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the math dataset, 2021. URL https://arxiv.org/abs/2103.03874.
211
+ Ari Holtzman, Jan Buys, Li Du, Maxwell Forbes, and Yejin Choi. The curious case of neural text degeneration, 2020. URL https://arxiv.org/abs/1904.09751.
212
+ Audrey Huang, Adam Block, Dylan J Foster, Dhruv Rohatgi, Cyril Zhang, Max Simchowitz, Jordan T Ash, and Akshay Krishnamurthy. Self-improvement in language models: The sharpening mechanism. arXiv preprint arXiv:2412.01951, 2024.
213
+ Yifei Li, Zeqi Lin, Shizhuo Zhang, Qiang Fu, Bei Chen, Jian-Guang Lou, and Weizhu Chen. Making large language models better reasoners with step-aware verifier, 2023. URL https://arxiv.org/abs/2206.02336.
214
+ Ziniu Li, Congliang Chen, Tian Xu, Zeyu Qin, Jiancong Xiao, Zhi-Quan Luo, and Ruoyu Sun. Preserving diversity in supervised fine-tuning of large language models. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=NQEe7B7bSw.
215
+ Hunter Lightman, Vineet Kosaraju, Yura Burda, Harri Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step. arXiv preprint arXiv:2305.20050, 2023.
216
+ Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling, 2025. URL https://arxiv.org/abs/2501.19393.
217
+ Minh Nguyen, Andrew Baker, Clement Neo, Allen Roush, Andreas Kirsch, and Ravid Shwartz-Ziv. Turning up the heat: Min-p sampling for creative and coherent llm outputs, 2024. URL https://arxiv.org/abs/2407.01082.
218
+ Theo X. Olausson, Jeevana Priya Inala, Chenglong Wang, Jianfeng Gao, and Armando Solar-Lezama. Is self-repair a silver bullet for code generation?, 2024. URL https://arxiv.org/abs/2306.09896.
219
+ Pier Giuseppe Sessa, Robert Dadashi, Léonard Hussenot, Johan Ferret, Nino Vieillard, Alexandre Ramé, Bobak Shariari, Sarah Perrin, Abe Friesen, Geoffrey Cideron, Sertan Girgin, Piotr Stanczyk, Andrea Michi, Danila Sinopalnikov, Sabela Ramos, Amélie Héliou, Aliaksei Severyn, Matt Hoffman, Nikola Momchev, and Olivier Bachem. Bond: Aligning llms with best-of-n distillation, 2024. URL https://arxiv.org/abs/2407.14622.
220
+ Amrith Setlur, Chirag Nagpal, Adam Fisch, Xinyang Geng, Jacob Eisenstein, Rishabh Agarwal, Alekh Agarwal, Jonathan Berant, and Aviral Kumar. Rewarding progress: Scaling automated process verifiers for llm reasoning, 2024. URL https://arxiv.org/abs/2410.08146.
221
+ Louis Shao, Stephan Gouws, Denny Britz, Anna Goldie, Brian Strope, and Ray Kurzweil. Generating high-quality and informative conversation responses with sequence-to-sequence models. arXiv preprint arXiv:1701.03185, 2017.
222
+ Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling llm test-time compute optimally can be more effective than scaling model parameters, 2024. URL https://arxiv.org/abs/2408.03314.
223
+ Yuda Song, Hanlin Zhang, Carson Eisenach, Sham Kakade, Dean Foster, and Udaya Ghai. Mind the gap: Examining the self-improvement capabilities of large language models. arXiv preprint arXiv:2412.02674, 2024.
224
+
225
+ Daniel Soudry, Elad Hoffer, Mor Shpigel Nacson, Suriya Gunasekar, and Nathan Srebro. The implicit bias of gradient descent on separable data. Journal of Machine Learning Research, 19(70):1-57, 2018.
226
+ Ashwin K Vijayakumar, Michael Cogswell, Ramprasath R. Selvaraju, Qing Sun, Stefan Lee, David Crandall, and Dhruv Batra. Diverse beam search: Decoding diverse solutions from neural sequence models, 2018. URL https://arxiv.org/abs/1610.02424.
227
+ Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc Le, Ed Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. Self-consistency improves chain of thought reasoning in language models, 2023. URL https://arxiv.org/abs/2203.11171.
228
+ Mitchell Wortsman, Gabriel Ilharco, Jong Wook Kim, Mike Li, Simon Kornblith, Rebecca Roelofs, Raphael Gontijo-Lopes, Hannaneh Hajishirzi, Ali Farhadi, Hongseok Namkoong, and Ludwig Schmidt. Robust fine-tuning of zero-shot models, 2022. URL https://arxiv.org/abs/2109.01903.
229
+ Yangzhen Wu, Zhiqing Sun, Shanda Li, Sean Welleck, and Yiming Yang. Inference scaling laws: An empirical analysis of compute-optimal inference for problem-solving with language models. arXiv preprint arXiv:2408.00724, 2024.
230
+ Wei Xiong, Hanning Zhang, Nan Jiang, and Tong Zhang. An implementation of generative prm. https://github.com/RLHFlow/RLHF-Reward-Modeling, 2024.
231
+ Edward Yeo, Yuxuan Tong, Morry Niu, Graham Neubig, and Xiang Yue. Demystifying long chain-of-thought reasoning in llms, 2025. URL https://arxiv.org/abs/2502.03373.
232
+ Longhui Yu, Weisen Jiang, Han Shi, Jincheng Yu, Zhengying Liu, Yu Zhang, James T Kwok, Zhenguo Li, Adrian Weller, and Weiyang Liu. Metamath: Bootstrap your own mathematical questions for large language models. arXiv preprint arXiv:2309.12284, 2023.
233
+ Dun Zhang, Jiacheng Li, Ziyang Zeng, and Fulong Wang. Jasper and stella: distillation of sota embedding models. arXiv preprint arXiv:2412.19048, 2024a.
234
+ Yiming Zhang, Avi Schwarzschild, Nicholas Carlini, Zico Kolter, and Daphne Ippolito. Forcing diffuse distributions out of language models, 2024b. URL https://arxiv.org/abs/2404.10859.
235
+
236
+ # A SFT in Binary Classification
237
+
238
+ Data and Model Setup We train a linear classifier $f(\pmb{x}) = \langle \pmb{w}, \pmb{x} \rangle$ from random initialization over a binary Gaussian mixture distribution:
239
+
240
+ $$
241
+ x \mid y \sim \mathcal {N} (y \boldsymbol {\mu}, I ^ {d \times d}) \tag {3}
242
+ $$
243
+
244
+ $$
245
+ y \in \{1, -1\} \ \text{uniformly} \tag{4}
246
+ $$
247
+
248
+ Given a model, we sample predictions, namely $\hat{y} = 1$ with probability $\sigma (\langle \pmb {w},\pmb {x}\rangle) = (1 + \exp (-\langle \pmb {w},\pmb {x}\rangle))^{-1}$ , or $\hat{y} = -1$ otherwise. Then, per-example Pass@1 is equal to $\rho_{x} = \sigma (y\cdot \langle \pmb {w},\pmb {x}\rangle)$ . Similarly, the expected Pass@k is equal to $1 - (1 - \rho_{x})^{k}$ .
249
+
250
+ In our experiment, we train an overparametrized linear classifier over binary Gaussian data mixture $x \mid y \sim \mathcal{N}(y \cdot \frac{1}{\sqrt{d}} \mathbf{1}, \frac{1}{2} I)$ where $y = \{-1, 1\}$ and $d = 1000$ . We then evaluate $\rho_x$ of 400 test samples. As training progresses, the distribution of $\rho_x$ over the test data becomes bimodal due to the norm of $w$ monotonically increasing once it separates the training examples. Similarly, we observe that this leads to a drop in Pass@k while Pass@1 continues to improve.
251
+
252
+ ![](images/110fab68254d7edd76626c12dee15bae4c5510f3d1620d88c62cdb6cd3e849b2.jpg)
253
+
254
+ ![](images/136c97bd309561e7ca54a0ca2069ad3f0b521147a93b614b1fb712de45f0c740.jpg)
255
+ Figure 8: Pass@k across Training in Binary Classification
256
+
257
+ ![](images/235ace59d9f3a9fd4138d33eda84ec30aa842d50403a6325e09b14c22038f792.jpg)
258
+
259
+ ![](images/bd859f21719d1d07b48f5e32d3cf6033dc8039de333fc86dee2565d05cfa3961.jpg)
260
+
261
+ ![](images/3b4b375b012d324a664828356e9ada89ab2d796bd0c978efdd3c6ccc0233b487.jpg)
262
+ Figure 7: Weight Norm
263
+
264
+ ![](images/09a06b40fa4ea2f11c53a1fb0b61297195528e6be636a1b05e4aa3162e56bcea.jpg)
265
+
266
+ ![](images/672c0c412effe3f158ea40afc7de6980e89697915f6ff25df8aa79757cb6e4a2.jpg)
267
+ Figure 9: Histogram of $\rho_{x}$ across training steps
268
+
269
+ ![](images/9314a20b371980d52ecbb3503a73f93a81b6bb2a673d0623309bc41fbb73253e.jpg)
270
+
271
+ ![](images/525268009e6075c1325653ae3ac1f4d2d550342360d9da06d7710798d251f1d6.jpg)
272
+
273
+ ![](images/5a57a6efe0c941d086713a9c86373744f480001ca15dc55fec49bde54b75585e.jpg)
274
+
275
+ ![](images/b1e638e906973e22c65e08bce79f7b716ac0fa8700587808cf2a8fb6a77abce6.jpg)
276
+
277
+ # B Expected Pass@k
278
+
279
+ Proposition B.1.
280
+
281
+ $$
282
+ \mathbb{E}_{x, y \sim \mathcal{D}} \left[ \mathrm{Pass@K}(x) \right] \leq 1 - \left( \left( \mathbb{E}_{x, y \sim \mathcal{D}} [1 - \rho_{x}] \right)^{2} + \mathrm{Var}(\rho_{x}) \right)^{k/2}
283
+ $$
284
+
285
+ Proof.
286
+
287
+ $$
288
+ \begin{array}{rl} \mathbb{E}\left[(1 - \rho_{x})^{k}\right] & \geq \mathbb{E}\left[(1 - \rho_{x})^{2}\right]^{k/2} \quad (5) \\ & = \left(1 - 2\mathbb{E}\left[\rho_{x}\right] + \mathbb{E}\left[\rho_{x}^{2}\right]\right)^{k/2} \quad (6) \\ & = \left(\left(1 - 2\mathbb{E}[\rho_{x}] + \mathbb{E}[\rho_{x}]^{2}\right) + \left(\mathbb{E}\left[\rho_{x}^{2}\right] - \mathbb{E}[\rho_{x}]^{2}\right)\right)^{k/2} \quad (7) \\ & = \left(\left(1 - \mathbb{E}[\rho_{x}]\right)^{2} + \operatorname{Var}(\rho_{x})\right)^{k/2} \quad (8) \end{array}
289
+ $$
290
+
291
+ ![](images/d6563642ee50cd803af3a0e79797353c9f1d469062b359c5cc621ad0702e4063.jpg)
292
+
293
+ # C RL Theory
294
+
295
+ # C.1 Overview
296
+
297
+ We will prove that in a discrete bandit setting with $K$ equally good arms (each of which is a best arm), both REINFORCE and GRPO without KL regularization will eventually collapse into a single-arm strategy.
298
+
299
+ We will further prove that, with KL regularization with respect to the initial policy, the converged policy of REINFORCE has the same action distribution as the initial policy when restricted to the set of best arms. Therefore, diversity within good actions will not increase through REINFORCE training.
300
+
301
+ # C.2 Notations and Setup
302
+
303
+ Formally we consider the following setting. We consider a $K + 1$ -armed bandit, with arms $\{1,2,\dots ,K + 1\}$ . Arms $1,\ldots ,K$ are "good," each yielding reward 1, and the other arm is "bad," yielding reward 0. We use a softmax parameterization:
304
+
305
+ $$
306
+ p _ {i} = \frac {e ^ {\theta_ {i}}}{\sum_ {j = 1} ^ {K + 1} e ^ {\theta_ {j}}}, \quad i = 1, \dots , K + 1.
307
+ $$
308
+
309
+ to denote the action distribution. We will use $\theta_i^{(t)}$ to denote the parameter at step $t$ .
310
+
311
+ It is standard to consider using the KL divergence between the current policy with a reference policy (which we set as $p_0$ here) as a regularization term.
312
+
313
+ $$
314
+ \mathrm{KL}(p^{(t)} \| p^{(0)}) = \sum_{i=1}^{K+1} p_{i}^{(t)} \log \frac{p_{i}^{(t)}}{p_{i}^{(0)}}
315
+ $$
316
+
317
+ For REINFORCE, we will consider the following training setup. At step $t$ :
318
+
319
+ 1. We sample an arm $I_{t}$ according to $p(\cdot) = (p_1^{(t)},\dots ,p_{K + 1}^{(t)})$ and receive reward $r_t$
320
+ 2. We update using policy gradient.
321
+
322
+ $$
323
+ \theta_{i}^{(t+1)} = \theta_{i}^{(t)} + \eta r_{t} \nabla_{\theta_{i}} (\log p_{I_{t}}^{(t)}) - \eta \beta \nabla_{\theta_{i}} \mathrm{KL}(p^{(t)} \| p^{(0)}), \quad i = 1, \dots, K+1,
324
+ $$
325
+
326
+ where $\eta > 0$ is the step size and $\beta$ is the hyperparameter controlling the strength of KL regularization.
327
+
328
+ For GRPO, we will consider the following simplified training setup. This is equivalent to the empirical version of GRPO with online sampling.
329
+
330
+ 1. Sample $G$ arms $\{I_t^{(1)},\dots ,I_t^{(G)}\}$ i.i.d. from the current policy $p(\cdot)$ and receive rewards $r_t^{(g)}$ .
331
+ 2. Compute
332
+
333
+ $$
334
+ \mu_ {t} = \frac {1}{G} \sum_ {g = 1} ^ {G} r _ {t} ^ {(g)}, \quad \sigma_ {t} = \sqrt {\frac {1}{G} \sum_ {g = 1} ^ {G} \left(r _ {t} ^ {(g)} - \mu_ {t}\right) ^ {2}},
335
+ $$
336
+
337
+ and define the normalized advantage
338
+
339
+ $$
340
+ \begin{array}{r} \tilde {r} _ {t} ^ {(g)} = \left\{ \begin{array}{l l} \frac {r _ {t} ^ {(g)} - \mu_ {t}}{\sigma_ {t}}, & \sigma_ {t} \neq 0, \\ 0, & \sigma_ {t} = 0. \end{array} \right. \end{array}
341
+ $$
342
+
343
+ We will skip the update if $\sigma_t = 0$ .
344
+
345
+ 3. Update each parameter $\theta_{i}$ via
346
+
347
+ $$
348
+ \theta_{i} \gets \theta_{i} + \frac{\eta}{G} \sum_{g=1}^{G} \tilde{r}_{t}^{(g)} \nabla_{\theta_{i}} (\log p_{I_{t}^{(g)}}^{(t)}) - \eta \beta \nabla_{\theta_{i}} \mathrm{KL}(p^{(t)} \| p^{(0)}), \quad i = 1, \ldots, K+1.
349
+ $$
350
+
351
+ # C.3 Implicit Diversity Collapse without KL regularization
352
+
353
+ Theorem C.1 (Collapse to Deterministic Policy). Under REINFORCE or GRPO updates without KL regularization $(\beta = 0)$ , given a sufficiently small $\eta$ , with probability 1:
354
+
355
+ $$
356
+ \limsup_{t\to \infty}\max_{i\in [K]}p_{i}^{(t)} = 1.
357
+ $$
358
+
359
+ Thus, the policy collapses to a single-arm strategy during training.
360
+
361
+ Proof. The proof is two-fold.
362
+
363
+ Using Lemma C.3 and C.4, we can show that bad arm probability diminishes,
364
+
365
+ $$
366
+ \lim _ {t \to \infty} p _ {K + 1} ^ {(t)} = 0
367
+ $$
368
+
369
+ We will then define a property named Self-enforcing Stochastic Policy Update Rule (Definition C.2).
370
+
371
+ Definition C.2 (Self-enforcing Stochastic Policy Update Rule). We define three properties of policy update rule that will lead to diversity collapse
372
+
373
+ 1. The policy update takes the form of $\sum_{k=1}^{B} A_k \nabla \log p_i(\theta_{i_k})$ where $i_k$ is the $k$ -th sampled arm in the batch and $A_k$ is a function determined by (i) the sum of rewards $\sum_{k=1}^{B} r_{i_k}$ within the batch; (ii) the reward $r_{i_k}$ and (iii) the batch size $B$ .
374
+ 2. A policy update rule is said to be self-enforcing, if $\mathbb{E}[\theta_i^{(t + 1)} - \theta_i^{(t)}]$ is monotone in $\theta_{i}^{(t)}$ for all $i\in [K]$ and $t$ . Further $\mathbb{E}[\theta_i^{(t + 1)} - \theta_i^{(t)}]$ is non-positive if $i\geq K + 1$ and is non-negative if $i\leq K$ .
375
+ 3. A policy update rule is said to be self-enforcing stochastic if it is self-enforcing and there exists constants $C_1, C_2 > 0$ such that for any $\epsilon > 0$ , whenever the current policy satisfies $\max_{i \in [K]} p_i^{(t)} \in [1/2K, 1 - \epsilon]$ (i.e., no single good arm dominates), for $i^* = \arg \max_{i \in [K]} p_i^{(t)}$ the conditional second moment of the parameter updates for every arm $i \in [K + 1]$ and $i \neq i^*$ satisfies:
376
+
377
+ $$
378
+ \mathbb {E} \left[ \left(\left(\theta_ {i} ^ {(t + 1)} - \theta_ {i} ^ {(t)}\right) - \left(\theta_ {i ^ {*}} ^ {(t + 1)} - \theta_ {i ^ {*}} ^ {(t)}\right)\right) ^ {2} \mid \theta^ {(t)} \right] \geq C _ {1} \epsilon^ {2}.
379
+ $$
380
+
381
+ and
382
+
383
+ $$
384
+ | \theta_ {i} ^ {(t + 1)} - \theta_ {i} ^ {(t)} | < C _ {2}
385
+ $$
386
+
387
+ Lemma C.5 shows that for any self-enforcing stochastic policy update rule, the final policy collapses into a single-arm policy.
388
+
389
+ Using Lemma C.6 and C.7, we can show that REINFORCE and GRPO are self-enforcing stochastic policy update rules when bad arm probability is lower than $1 / 2$ . The proof is then complete.
390
+
391
+ Lemma C.3 (Bad Arm Probability Diminishes Using REINFORCE). Under the REINFORCE algorithm without KL regularization $(\beta = 0)$ , $\lim_{t\to \infty}p_{K + 1}^{(t)} = 0$ almost surely.
392
+
393
+ Proof. We can first simplify the REINFORCE update rule to
394
+
395
+ $$
396
+ \theta_ {i} ^ {(t + 1)} = \theta_ {i} ^ {(t)} + \eta r _ {t} (\mathbf {1} (I _ {t} = i) - p _ {i} ^ {(t)}), \quad i = 1, \dots , K + 1.
397
+ $$
398
+
399
+ Noted that $\sum_{i}\theta_{i}^{(t)}$ will not change with $t$ , WLOG, assume
400
+
401
+ $$
402
+ \sum_ {i} \theta_ {i} ^ {(t)} = 0.
403
+ $$
404
+
405
+ Because $r_{K + 1} = 0$ , we can then assume without loss of generality, for all $t$ , $I_t \leq K$ .
406
+
407
+ This then suggests that
408
+
409
+ $$
410
+ \theta_ {K + 1} ^ {(t + 1)} = \theta_ {K + 1} ^ {(t)} - \eta p _ {K + 1} ^ {(t)}
411
+ $$
412
+
413
+ decreases monotonically.
414
+
415
+ For any $\epsilon$ , if $p_{K + 1}^{(t)} > \epsilon$ holds for infinite $t$ , then there exists $t_0$ , where $\theta_{K + 1}^t < \log \epsilon$ for any $t > t_0$ . For any $t > t_0$ , there exists $i \in [K]$ , such that $\theta_i^{(t)} > 0$ . This then suggests that
416
+
417
+ $$
418
+ p _ {K + 1} ^ {(t)} \leq \exp (\theta_ {K + 1} ^ {(t)} - \theta_ {i} ^ {(t)}) \leq \epsilon .
419
+ $$
420
+
421
+ This leads to a contradiction. The proof is then complete.
422
+
423
+ Lemma C.4 (Bad Arm Probability Diminishes Using GRPO). Under the GRPO algorithm without KL regularization $(\beta = 0), \lim_{t \to \infty} p_{K+1}^{(t)} = 0$ almost surely.
424
+
425
+ Proof. For GRPO, we can show that $\tilde{r}_t^{(g)}$ is negative iff $I_t^{(g)} = K + 1$ . Therefore, we can show that $\theta_{K+1}^{(t)}$ monotonically decreases, similar to the case in REINFORCE.
426
+
427
+ If $p_{K+1}^{(t)} > \epsilon$ holds for some $t$ , one can prove that $\theta_{K+1}^{(t)}$ will decrease by a constant depending on $\epsilon$ in expectation. Therefore, following the same line as in C.3, we can prove that $\lim_{t \to \infty} p_{K+1}^{(t)} = 0$ almost surely.
428
+
429
+ Lemma C.5 (Collapse Happens for All Self-enforcing Stochastic Policy Update Rules). Consider a policy update process that is self-enforcing stochastic (Definition C.2), then $\limsup_{t\to \infty}\max_{i\in [K]}p_i^{(t)} = 1$ almost surely.
430
+
431
+ Proof. We will inductively prove the following induction hypothesis for different $K$ : for any $\epsilon, \delta > 0$ , there exists $T_{\epsilon, \delta, K} > 0$ such that
432
+
433
+ $$
434
+ \Pr \left(\max _ {t < T _ {\epsilon , \delta , K}} \max _ {i \in [ K ]} p _ {i} ^ {(t)} < 1 - \epsilon\right) < \delta .
435
+ $$
436
+
437
+ We first consider the case where $K = 2$ .
438
+
439
+ Consider the stopping time,
440
+
441
+ $$
442
+ \tau_{\epsilon} = \min \left\{ t : \max_{i \in [K]} p_{i}^{(t)} > 1 - \epsilon \right\}
443
+ $$
444
+
445
+ For any $\mathcal{I} = \{1,2\}$ , define $\Delta_{\mathcal{I}}^{t} = \max_{j\in [K]}\theta_{j}^{t} - \min_{j\in \mathcal{I}}\theta_{j}^{t}$ .
446
+
447
+ Assume $\theta_{i^{*}}^{t} = \max_{j\in [K]}\theta_{j}^{t}$ ; because $|\mathcal{I}|\geq 2$ , there exists $i \in \mathcal{I}$ with $i\neq i^{*}$ . We will show three properties of $\Delta_{\mathcal{I}}^{t}$ .
448
+
449
+ First $\Delta_{\mathcal{I}}^{(t)}$ is a submartingale defined on the filtration of the distribution of $\theta^{(t)}$ because
450
+
451
+ $$
452
+ \mathbb{E}[\Delta_{\mathcal{I}}^{(t+1)} \mid \theta_{t}] - \Delta_{\mathcal{I}}^{(t)} > \mathbb{E}[(\theta_{i^{*}}^{t+1} - \theta_{i^{*}}^{t}) - (\theta_{i}^{t+1} - \theta_{i}^{t}) \mid \theta_{t}] > 0.
453
+ $$
454
+
455
+ as the policy is self-enforcing.
456
+
457
+ Further $\Delta_{\mathcal{I}}^{(t)}$ has bounded growth of $2C_2$ as
458
+
459
+ $$
460
+ \begin{array}{l} | \max _ {j \in [ K ]} \theta_ {j} ^ {(t + 1)} - \max _ {j \in [ K ]} \theta_ {j} ^ {(t)} | < C _ {2}. \\ \bigl|\min_{j\in \mathcal{I}}\theta_{j}^{(t + 1)} - \min_{j\in \mathcal{I}}\theta_{j}^{(t)}\bigr| < C_{2}. \\ \end{array}
461
+ $$
462
+
463
+ Furthermore, the second moment of $\Delta_{\mathcal{I}}^{(t)}$ needs to increase with $t$ by a constant for any $t < \tau_{\epsilon}$ .
464
+
465
+ $$
466
+ \begin{array}{l} \mathbb {E} \left[ \left(\Delta_ {\mathcal {I}} ^ {(t + 1)}\right) ^ {2} \mid \theta_ {t} \right] \geq \left(\Delta_ {\mathcal {I}} ^ {(t)}\right) ^ {2} + \mathbb {E} \left[ \left(\Delta_ {\mathcal {I}} ^ {(t + 1)} - \Delta_ {\mathcal {I}} ^ {(t)}\right) ^ {2} \mid \theta_ {t} \right] \\ \geq \left(\Delta_ {\mathcal {I}} ^ {(t)}\right) ^ {2} + C _ {1} \epsilon^ {2}. \\ \end{array}
467
+ $$
468
+
469
+ When $t < \tau_{\epsilon}$ , it holds that $\Delta_{\mathcal{I}}^{(t)} < \log \frac{2}{\epsilon}$ , otherwise we can prove that
470
+
471
+ $$
472
+ \max _ {i, j \in \{1, 2 \}} p _ {i} / p _ {j} = \exp (\Delta_ {\mathcal {I}} ^ {(t)}) > \frac {2 - 2 \epsilon}{\epsilon} \ \Rightarrow \ \max _ {i \in \{1, 2 \}} p _ {i} > 1 - \epsilon .
473
+ $$
474
+
475
+ This is a contradiction. Further, by Martingale inequality, we have that
476
+
477
+ $$
478
+ \mathbb {E} [ \left(\Delta^ {\min \{t, \tau_ {\epsilon} \}}\right) ^ {2} ] > \mathbb {E} [ \left(\Delta^ {0}\right) ^ {2} ] + C _ {1} \epsilon^ {2} \mathbb {E} [ \min \{t, \tau_ {\epsilon} \} ]
479
+ $$
480
+
481
+ Further, as $\Delta^t$ has bounded growth, we have that
482
+
483
+ $$
484
+ \mathbb {E} \left[ \left(\Delta^ {\min \{t, \tau_ {\epsilon} \}}\right) ^ {2} \right] < (\log \frac {2}{\epsilon} + 2 C _ {2}) ^ {2}.
485
+ $$
486
+
487
+ This implies $\mathbb{E}[\min \{t,\tau_{\epsilon}\}] < \frac{(\log\frac{2}{\epsilon} + 2C_2)^2}{C_1\epsilon^2}$ for all $t$ , which in turn implies
488
+
489
+ $$
490
+ \mathbb {E} [ \tau_ {\epsilon} ] < \frac {(\log \frac {2}{\epsilon} + 2 C _ {2}) ^ {2}}{C _ {1} \epsilon^ {2}}.
491
+ $$
492
+
493
+ Further, by Markov inequality, if we choose
494
+
495
+ $$
496
+ T _ {\epsilon , \delta , 2} = \frac {(\log \frac {2}{\epsilon} + 2 C _ {2}) ^ {2}}{C _ {1} \epsilon^ {2} \delta}.
497
+ $$
498
+
499
+ then,
500
+
501
+ $$
502
+ \Pr \left(\tau_ {\epsilon} > T _ {\epsilon , \delta , 2}\right) < \frac {\mathbb {E} \left[ \tau_ {\epsilon} \right]}{T _ {\epsilon , \delta , 2}} < \delta .
503
+ $$
504
+
505
+ This concludes the proof for $K = 2$ .
506
+
507
+ Now assume the result holds for $K - 1$ and consider the case for $K$ . First, we choose a small enough constant $C_{\delta ,\epsilon ,K,N} > 0$ , such that when $p_{K - 1}^{(0)} < C_{\delta ,\epsilon ,K,N}$ , the following two random processes are close:
508
+
509
+ - Running the algorithm for $N$ steps on the $K$ -armed bandit yields $\theta_i^{(t)}, i \in [K]$
510
+ - Running the algorithm for $N$ steps on a $(K - 1)$ -armed bandit yields $\tilde{\theta}_i^{(t)}, i \in [K - 1]$ with $\tilde{\theta}_i^{(0)} = \theta_i^{(0)}, i < K - 1$ and $\tilde{\theta}_{K - 1}^{(0)} = \theta_K^{(0)}$
511
+
512
+ and there exists a joint measure on $\theta$ and $\tilde{\theta}$ such that
513
+
514
+ $$
515
+ \forall i \in [ K - 2 ], t < N, \Pr (| p _ {i} ^ {t} - \tilde {p} _ {i} ^ {t} | > \epsilon / 2) < \delta / 6.
516
+ $$
517
+
518
+ $$
519
+ \operatorname * {P r} (| p _ {K} ^ {t} - \tilde {p} _ {K - 1} ^ {t} | > \epsilon / 2) < \delta / 6.
520
+ $$
521
+
522
+ $$
523
+ \Pr \left(\left| p _ {K} ^ {t} - p _ {K} ^ {0} \right| > \epsilon / 2\right) < \delta / 6.
524
+ $$
525
+
526
+ This joint measure is constructed by choosing the corresponding arm for the two processes at each sampling step as long as the sampled arm is not $K$ , and uses the uniform convergence of $\nabla_{\theta} \log p_i$ . Now following the same argument as for $K = 2$ , we can show that there exists $\tilde{T}_{\epsilon, \delta, K}$ such that
527
+
528
+ $$
529
+ \operatorname * {P r} (\exists t < \tilde {T} _ {\epsilon , \delta , K}, \min _ {i \in [ K ]} p _ {i} ^ {(t)} < C _ {\delta , \epsilon , K, T _ {\epsilon / 2, \delta / 2, K - 1}}) > 1 - \delta / 2.
530
+ $$
531
+
532
+ Then we can invoke the induction hypothesis and use the coupling shown above to show that if we choose $T_{\epsilon, \delta, K} = \tilde{T}_{\epsilon, \delta, K} + T_{\epsilon/2, \delta/2, K-1}$ , then there exists a time step at which one arm has probability higher than $1 - \epsilon$ with probability at least $1 - \delta$ .
533
+
534
+ ![](images/ca1b539d95c5fb460f2be006fa7134f3f1ba977fdaf654bf1420e28cac93d5b2.jpg)
535
+
536
+ Lemma C.6. The REINFORCE algorithm without KL regularization ( $\beta = 0$ ) is self-enforcing stochastic (Definition C.2) once $p_{K+1}^{(t)} < 1/2$ .
537
+
538
+ Proof. The REINFORCE algorithm is self-enforcing because
539
+
540
+ $$
541
+ \mathbb {E} [ \theta_ {i} ^ {(t + 1)} - \theta_ {i} ^ {(t)} ] = \eta p _ {i} (r _ {i} - \sum_ {j \in [ K + 1 ]} p _ {j} r _ {j}).
542
+ $$
543
+
544
+ Further,
545
+
546
+ $$
547
+ | \theta_ {i} ^ {(t + 1)} - \theta_ {i} ^ {(t)} | \leq 1
548
+ $$
549
+
550
+ and if we consider the distribution of $\Delta_{i,i^*,t} = \frac{\left(\theta_i^{(t + 1)} - \theta_i^{(t)}\right) - \left(\theta_{i^*}^{(t + 1)} - \theta_{i^*}^{(t)}\right)}{\eta}$ , it holds that
551
+
552
+ $$
553
+ \Delta_ {i, i ^ {*}, t} = r _ {I _ {t}} \left(\mathbf {1} (i = I _ {t}) - \mathbf {1} (i ^ {*} = I _ {t}) - p _ {i} + p _ {i ^ {*}}\right)
554
+ $$
555
+
556
+ $$
557
+ \Pr \left(\Delta_ {i, i ^ {*}, t} = - 1 - p _ {i} + p _ {i ^ {*}}\right) \geq \Pr \left(I _ {t} = i ^ {*}\right) = p _ {i ^ {*}}
558
+ $$
559
+
560
+ Therefore
561
+
562
+ $$
563
+ \begin{array}{l} \mathbb {E} \left[ \Delta_ {i, i ^ {*}, t} ^ {2} \right] \geq p _ {i ^ {*}} \left(- 1 - p _ {i} + p _ {i ^ {*}}\right) ^ {2} \\ \geq p _ {i ^ {*}} (1 - p _ {i ^ {*}}) ^ {2} \geq \frac {\epsilon^ {2}}{2 K}. \\ \end{array}
564
+ $$
565
+
566
+ This then concludes the proof with $C_1 = \eta / 2K$ and $C_2 = \eta$ .
567
+
568
+ Lemma C.7. The GRPO algorithm without KL regularization ( $\beta = 0$ ) is self-enforcing stochastic (Definition C.2) once $p_{K+1}^{(t)} < 1/2$ .
569
+
570
+ Proof. The GRPO algorithm is self-enforcing because
571
+
572
+ $$
573
+ \mathbb {E} [ \theta_ {i} ^ {(t + 1)} - \theta_ {i} ^ {(t)} ] = \eta \mathbb {E} [ \tilde {r} _ {t} ^ {(g)} (\mathbf {1} (I _ {t} ^ {(g)} = i) - p _ {i} ^ {(t)}) ] = \eta \mathbb {E} [ \tilde {r} _ {t} ^ {(g)} \mathbf {1} (I _ {t} ^ {(g)} = i) ] = \eta \mathbb {E} _ {\mu_ {t}} [ \mathbb {E} [ \tilde {r} _ {t} ^ {(g)} \mathbf {1} (I _ {t} ^ {(g)} = i) | \mu_ {t} ] ].
574
+ $$
575
+
576
+ Note that $\mathbb{E}[\tilde{r}_t^{(g)}\mathbf{1}(I_t^{(g)} = i)|\mu_t]$ is monotone in $p_i$ , hence monotone in $\theta_{i}$ .
577
+
578
+ Further
579
+
580
+ $$
581
+ \begin{array}{l} | \theta_ {i} ^ {(t + 1)} - \theta_ {i} ^ {(t)} | \leq \eta \max _ {g} | \tilde {r} _ {t} ^ {(g)} (\mathbf {1} (I _ {t} ^ {(g)} = i) - p _ {i} ^ {(t)}) | \\ \leq \eta \max _ {g} | \tilde {r} _ {t} ^ {(g)} | \leq \eta \sqrt {G}. \\ \end{array}
582
+ $$
583
+
584
+ Now we only need to lower bound the second moment of
585
+
586
+ $$
587
+ \Delta_ {i, i ^ {*}, t} = \frac {\left(\theta_ {i} ^ {(t + 1)} - \theta_ {i} ^ {(t)}\right) - \left(\theta_ {i ^ {*}} ^ {(t + 1)} - \theta_ {i ^ {*}} ^ {(t)}\right)}{\eta}
588
+ $$
589
+
590
+ Note that
591
+
592
+ $$
593
+ \theta_ {i} ^ {(t + 1)} - \theta_ {i} ^ {(t)} = \frac {\eta}{G} \sum_ {g = 1} ^ {G} \tilde {r} _ {t} ^ {(g)} \mathbf {1} (I _ {t} ^ {(g)} = i).
594
+ $$
595
+
596
+ Since $r_t^{(g)} \in \{0, 1\}$ and hence $(r_t^{(g)})^2 = r_t^{(g)}$ , it holds that
597
+
598
+ $$
599
+ \sigma_ {t} = \sqrt {\frac {1}{G} \sum_ {g} (r _ {t} ^ {g} - \mu) ^ {2}} = \sqrt {\frac {1}{G} \sum_ {g} \left( (r _ {t} ^ {g}) ^ {2} - 2 \mu r _ {t} ^ {g} + \mu^ {2} \right)} = \sqrt {\mu - \mu^ {2}}.
600
+ $$
601
+
602
+ Therefore when $r_t^{(g)} > 0$
603
+
604
+ $$
605
+ \tilde {r} _ {t} ^ {(g)} = \frac {r _ {t} ^ {(g)} - \mu_ {t}}{\sigma_ {t}} = \frac {1 - \mu_ {t}}{\sigma_ {t}} = \sqrt {\frac {1 - \mu_ {t}}{\mu_ {t}}} \geq \sqrt {\frac {1}{G - 1}}.
606
+ $$
607
+
608
+ Because all $\tilde{r}_t^{(g)}$ are the same when $r_t^{(g)} > 0$ , it holds that when $i \in [K]$
609
+
610
+ $$
611
+ \begin{array}{l} \Delta_ {i, i ^ {*}, t} ^ {2} = \frac {1}{G} \frac {1 - \mu_ {t}}{\mu_ {t}} \left(\sum_ {g = 1} ^ {G} {\bf 1} (I _ {t} ^ {(g)} = i) - {\bf 1} (I _ {t} ^ {(g)} = i ^ {*})\right) ^ {2} \\ \geq \frac {1}{G (G - 1)} \left(\sum_ {g = 1} ^ {G} \mathbf {1} \left(I _ {t} ^ {(g)} = i\right) - \mathbf {1} \left(I _ {t} ^ {(g)} = i ^ {*}\right)\right) ^ {2}. \\ \end{array}
612
+ $$
613
+
614
+ This then implies
615
+
616
+ $$
617
+ \mathbb {E} [ \Delta_ {i, i ^ {*}, t} ^ {2} ] \geq \frac {1}{G (G - 1)} \mathbb {E} \left[ \left(\sum_ {g = 1} ^ {G} {\bf 1} (I _ {t} ^ {(g)} = i) - {\bf 1} (I _ {t} ^ {(g)} = i ^ {*})\right) ^ {2} \Big | \mu_ {t} \neq 1, 0 \right]
618
+ $$
619
+
620
+ One can without loss of generality assume $I_{t}^{(G)} = K + 1$ and show that
621
+
622
+ $$
623
+ \begin{array}{l} \mathbb {E} \left[ \Delta_ {i, i ^ {*}, t} ^ {2} \right] \geq \frac {1}{G (G - 1)} \mathbb {E} \left[ \left(\sum_ {g = 1} ^ {G - 1} \mathbf {1} \left(I _ {t} ^ {(g)} = i\right) - \mathbf {1} \left(I _ {t} ^ {(g)} = i ^ {*}\right)\right) ^ {2} \right] \\ \geq \frac {1}{G} \mathbb {E} \left[ \left(\mathbf {1} \left(I _ {t} ^ {(1)} = i\right) - \mathbf {1} \left(I _ {t} ^ {(1)} = i ^ {*}\right)\right) ^ {2} \right] = \frac {p _ {i} + p _ {i ^ {*}}}{G} \geq \frac {1}{2 K G}. \\ \end{array}
624
+ $$
625
+
626
+ When $i \neq K$ , note that $\left(\theta_{i}^{(t+1)} - \theta_{i}^{(t)}\right) - \left(\theta_{i^{*}}^{(t+1)} - \theta_{i^{*}}^{(t)}\right) > \left(\theta_{i}^{(t+1)} - \theta_{i}^{(t)}\right) > 0$ . Therefore, a similar bound shows that $\mathbb{E}[\Delta_{i,i^{*},t}^{2}] > \frac{1}{2KG}$ . This then concludes the proof with $C_{1} = \eta / 2KG$ and $C_{2} = \eta \sqrt{G}$ .
627
+
628
+
629
+
630
+ # C.4 Diversity Never Improves with KL regularization
631
+
632
+ Theorem C.8 (Diversity Preservation under KL Regularization). With $p_0$ as the initial policy and KL regularization hyperparameter $\beta > 0$ , if the REINFORCE process converges to policy $p^*$ . Then, $p^*$ satisfies:
633
+
634
+ $$
635
+ \frac {p ^ {*} (i)}{\sum_ {j = 1} ^ {K} p ^ {*} (j)} = \frac {p _ {0} (i)}{\sum_ {j = 1} ^ {K} p _ {0} (j)} \quad \forall i \in \{1, \dots , K \}.
636
+ $$
637
+
638
+ Consequently, the distribution over the optimal arms under $p^*$ matches the initial distribution $p_0$ restricted to these arms and renormalized.
639
+
640
+ Proof. Using policy gradient theorem, we know that the converged policy $p^*$ and corresponding parameter $\theta^*$ satisfy that,
641
+
642
+ $$
643
+ \nabla_ {\theta} \left[ \sum_ {i = 1} ^ {K + 1} r _ {i} p _ {i} + \beta \mathrm {K L} \left(p | p ^ {0}\right) \right] \Bigg | _ {\theta = \theta^ {*}} = 0
644
+ $$
645
+
646
+ This then suggests that for any $k$
647
+
648
+ $$
649
+ r _ {k} p _ {k} ^ {*} - p _ {k} ^ {*} \sum_ {i = 1} ^ {K + 1} r _ {i} p _ {i} ^ {*} + \beta \sum_ {i = 1} ^ {K + 1} \nabla_ {\theta_ {k}} [ p _ {i} \log p _ {i} - p _ {i} \log p _ {i} ^ {0} ] = 0
650
+ $$
651
+
652
+ This is equivalent to
653
+
654
+ $$
655
+ r _ {k} p _ {k} ^ {*} - p _ {k} ^ {*} \sum_ {i = 1} ^ {K + 1} r _ {i} p _ {i} ^ {*} + \beta \sum_ {i = 1} ^ {K + 1} (\mathbf {1} (i = k) - p _ {k} ^ {*}) p _ {i} ^ {*} (\log p _ {i} ^ {*} + 1 - \log p _ {i} ^ {0}) = 0
656
+ $$
657
+
658
+ Simplifying
659
+
660
+ $$
661
+ r _ {k} + \beta (\log p _ {k} ^ {*} + 1 - \log p _ {k} ^ {0}) = \sum_ {i = 1} ^ {K + 1} r _ {i} p _ {i} ^ {*} + \beta \sum_ {i = 1} ^ {K + 1} p _ {i} ^ {*} (\log p _ {i} ^ {*} + 1 - \log p _ {i} ^ {0})
662
+ $$
663
+
664
+ For all $k \in [K]$ , we know that $r_k$ is the same, therefore $\frac{p_k^*}{p_k^0}$ is a constant for $k \in [K]$ , concluding our proof.
665
+
666
+ # C.5 Technical Lemma
667
+
668
+ Lemma C.9. For $x\in \mathbb{R}$ $|x| < C$ , it holds that
669
+
670
+ $$
671
+ \exp (x) > 1 + x + A _ {C} x ^ {2}
672
+ $$
673
+
674
+ here $A_{C} = \frac{\exp(-C) + C - 1}{C^{2}}$
675
+
676
+ Proof. Define $g(x) = \frac{\exp(x) - 1 - x}{x^2}$ ; this function is monotonically increasing on $\mathbb{R}$ . Hence for $|x| < C$ we have $g(x) > g(-C) = A_C$ , which rearranges to the claimed inequality.
677
+
678
+ # D Open-Thoughts Evaluation
679
+
680
+ We finetune Qwen2.5-7B-Instruct over OpenThoughts-114k for 5 epochs using BF16 and AdamW with hyperparameters lr=1e-5, bs=128, warmup=150 steps. We sample 40 reasoning traces with temperature set to 0.7 for each of the 30 problems in AIME24. Then we evaluate the following quantities.
681
+
682
+ ![](images/73d34b55c39b755f68e6950c8eebdf29d21cb222617fda1fe97a55a0270a9208.jpg)
683
+ Competition Math (AIME24)
684
+
685
+ ![](images/5aa59d6e2aba258b1051ce25b0904b17cd2e7490d5e852f24f61c5460a902111.jpg)
686
+
687
+ ![](images/b920292dcecb0dc0a1bc2d82f9a4af452fedf7363369eff449048c35d2f2d1b2.jpg)
688
+ Figure 10: Pass@K Evaluated on AIME24 over OpenThoughts-114K SFT checkpoints. We plot the expected Pass@K ± SD. Note that improvements in Pass@K slows down while Pass@1 improves at a constant rate. Furthermore, the confidence interval of Pass@1 widens, meaning the variance increases during SFT.
689
+
690
+ ![](images/5667e64bdc969d25573bf150b13c2046ca2168b091045a68f3bfe49c56c2ea9e.jpg)
691
+ Figure 11: Histogram of Pass@1 over AIME24. Variance of Pass@1 increases over finetuning on OpenThoughts-114K. We note that since AIME24 only has 30 questions, the density plot may not be completely reliable.
692
+
693
+ ![](images/ce0b365e2d0cfdbc6033585c43605d8fc9a10bbe002ac2b4022da555660ffbd8.jpg)
694
+
695
+ ![](images/a68455b7785772599da90654e44c982cadd4ddd78f610dab8af5816a70e0c43c.jpg)
696
+
697
+ ![](images/25ebeb290d7259127374c9c05e2e0ce5075d7b65dfef8be8699ed49268e3b01a.jpg)
698
+
699
+ ![](images/3eb53d8c924bb92658ec5635c7b2ae63fb3273756bce235deb3ec5596f40bd57.jpg)
700
+
701
+ ![](images/bb1d37404a819e2ad64f45cefe623549253879023f3bf030ffe2894bde9a30af.jpg)
702
+ Figure 12: We plot the average number of unique answers sampled over the total number samples i.e. $\left|\left\{y_{i}\right\}_{i=1}^{n}\right| / n$ . Model samples less diverse number of answers as SFT progresses.
703
+
704
+ # E Interpolation Coefficients
705
+
706
+ ![](images/408e81cdfc67395009c30c39aad9c4a31fa71dc028099dd11d2d322e044cc302.jpg)
707
+
708
+ ![](images/5cc7d3e6e8dc1b3da5bcfeaac5e13c685f56aae0ab567a53aa222a84743494fd.jpg)
709
+ WiSE-Step672 on MATH500
710
+
711
+ ![](images/b592a7e7ccb7f34cab0c0fe80f8b46299a170f5e015417c6a260678ca37e45d5.jpg)
712
+ WiSE-Step672 on MATH500
713
+ WiSE-Step672 on MATH500
714
+
715
+ ![](images/e15bf9634190a320923c26e9e93036be5f32c027f6a9256280b04a259db1412f.jpg)
716
+ WiSE-Step672 on MATH500
717
+
718
+ ![](images/a8fef11dae9deb43d79b665e01086d9767e0568d82dec02ac4133daecf6e053a.jpg)
719
+ WiSE-Step896 on MATH500
720
+
721
+ ![](images/b5a0036c7749a83fbea586e2ad026ccfe84a4e95cb087bdcfd3c7171f8cbc3f2.jpg)
722
+ WiSE-Step896 on MATH500
723
+
724
+ ![](images/e0ef05e879f2cabc6cc375cd5dfc6c14399c3e8c637b35a2bdd2dcd0382c64de.jpg)
725
+ WiSE-Step896 on MATH500
726
+
727
+ ![](images/cd383a929ba7e9a9865537886d40e7be10298f21f2a4eee59bb64f1c03895e08.jpg)
728
+ WiSE-Step896 on MATH500
729
+
730
+ ![](images/3d13f320227abed29d629f8ace04823e9059603f82a24fb7c3c0d25ac51e5eb4.jpg)
731
+ WiSE-Step1120 on MATH500
732
+ Figure 13: Pass@1 versus Pass@K of WiSEFT of Qwen-2.5-0.5B trained and evaluated on MATH500. We interpolate between model $\pmb{w}_0$ at Step 112 with $\pmb{w}_t$ for $t\in [672,896,1120]$ as $\delta \pmb{w}_0 + (1 - \delta)\pmb{w}_t$ where $\delta \in [0.1,0.9]$ .
733
+
734
+ ![](images/d05652d4c9f3cc0229b825169a87f0a25576d63a99afa799f8969368cab3b996.jpg)
735
+ WiSE-Step1120 on MATH500
736
+
737
+ ![](images/a79e505ca270a42946351e05193fde44a40c51342de249abe1125406caef19ef.jpg)
738
+ WiSE-Step1120 on MATH500
739
+
740
+ ![](images/4970e2bbeb01a58d65034f7eee2d971620f82a124a24f05a15a0868fd8060784.jpg)
741
+ WiSE-Step1120 on MATH500
742
+
743
+ ![](images/6415bf26abc61b3f3704f1e93f285ad0c1d640bbb79a1978e63f250e5f01c217.jpg)
744
+
745
+ ![](images/cee9d9f66f1ba05e20958f6280cd378d5414554e40cbe63513148f4d200be612.jpg)
746
+
747
+ ![](images/7e11ff93e0b785d9953e3d89bcd12390815907a6dc2f3d35716009e51d01b2a9.jpg)
748
+
749
+ ![](images/e2a337f3f3b316e21bd15dad207ede3c32034de3208f9ee58d3e8e8a316f2a94.jpg)
750
+
751
+ ![](images/c5f007038e4a8539395358cfc078786e75a2500ccebd39ea8dcc0d9f3edd38c9.jpg)
752
+
753
+ ![](images/8ab320b8b9b148e7007304b25bcea6941eee571cdcfb9f2bcb270cc915f41d2e.jpg)
754
+
755
+ ![](images/534a1915e7237bbe8eb59cd3e2becaa163ccc90f8b40a09cb87b02ff2f834f83.jpg)
756
+
757
+ ![](images/88a2aea4db6f0ceb07a3f515c45521b42aef420591197fc4ac31c9e750a6eb8c.jpg)
758
+
759
+ ![](images/d98f4a0d11c754910a2b645db67fdc99936a484cb60dcd83a0344a2437fc161b.jpg)
760
+
761
+ ![](images/5186f02cf14107fb5f45cd06a48b8a84b8d148a57fc1f02fb24e57f2de6f0b2c.jpg)
762
+
763
+ ![](images/6879e45cb82d6e4e3cb71ac588c9076b39549c4f56feda977179787fe237fef8.jpg)
764
+
765
+ ![](images/7c463a6b643c401de38db5cb6513d86e528960fd75d924839fc2a365a4fe7d82.jpg)
766
+
767
+ ![](images/210f985ab8344ab9024b0e8866757143d08c6a402d3b38801bfe06bad03d0471.jpg)
768
+
769
+ ![](images/97b56b7e63b48c137ae6e876983d6e3b63f26d408263648812f7955049209453.jpg)
770
+
771
+ ![](images/6583cb61c58420a22e2b481d3ecd6ab5badcb2086aeae20b3945609becb32131.jpg)
772
+
773
+ ![](images/76cd3796b40caac6ff8a02aec6cb5d704946a539355c961c1da0cde04fcc0f3b.jpg)
774
+
775
+ ![](images/9b0b5f9803b94a0b5f4b2b333482b5acab14a7b4396c642f98d4c63f944bbd16.jpg)
776
+ Figure 14: Pass@1 versus Pass@K of WiSEFT of Gemma-2-2B trained and evaluated on GSM8K. We interpolate between model $\pmb{w}_0$ at Step 171 with $\pmb{w}_t$ for $t \in [342, 684, 1026, 1368, 1710]$ as $\delta \pmb{w}_0 + (1 - \delta) \pmb{w}_t$ where $\delta \in [0.05, 0.9]$ .
777
+
778
+ ![](images/4c43efb60dc7fd63fa7e0bf6fe8153abec532160cb2af6bb071e080da392c5f2.jpg)
779
+
780
+ ![](images/ad7003e3f0b25d46193c9fffc98906a772a124775a56cd8b98a55940824b50a2.jpg)
781
+
782
+ ![](images/b1061168966769f0a706a6cdefd0ca7df762aaf6f5cf2e53465bfdddcf6d261d.jpg)
783
+
784
+ # F Measuring Diversity of Traces
785
+
786
+ We measure the diversity of the 100 sampled traces of Gemma-2-2B across GSM8k test. We measure diversity in terms of 3 different measures.
787
+
788
+ Output Diversity The cardinality or number of unique answers in the set of all model outputs $\left|\{\hat{y}_1,\hat{y}_2,\dots ,\hat{y}_n\}\right|$ over the total number of traces.
789
+
790
+ Operation Diversity In GSM8k, each intermediate step consists of basic arithmetic operations, e.g. $5 + 3 = 8$ . We may simply map each of the traces to the sequence of arithmetic operations the model steps through, i.e. $r_i \rightarrow [o_1, o_2, \ldots, o_t]$ . This mapping is extracted by code. Then, given this set, we measure unique sequence of operations over the number of total traces.
791
+
792
+ Semantic Diversity We measure the similarity of trace using cosine similarities between the text-embeddings (Bilmes, 2022; Yu et al., 2023).
793
+
794
+ # F.1 Does temperature increase diversity?
795
+
796
+ Temperature does increase diversity, but it also increases the chances of sampling outlier answers.
797
+
798
+ ![](images/e5ccfdef5708eae2cdd50fb2c0053f33997475088945e324b1799671240c70ec.jpg)
799
+
800
+ ![](images/6505c814dcb35aa9757d8be7050bd89319b4a0aae6db8d965dfe1bf81985e105.jpg)
801
+ Diversity Across SFT [T=1.0]
802
+
803
+ ![](images/9edcca763df460a84257bc718bc153f2f8f5a4c6a79bbf78ad4b859738b6d86e.jpg)
804
+ Diversity Across SFT [T=0.8]
805
+
806
+ ![](images/6357c2ac829597a5cabbd3f6f2c13751aeaac97b2934f7e1144f19fda33b7246.jpg)
807
+
808
+ ![](images/7ad9bd5e57c1c23dbd6a2fe73d568d72272de9cf3ff807b8a8da06b7e3ec8421.jpg)
809
+
810
+ ![](images/c03ad321d119e163d7d652bd75fdd1be669582b2e192edd3f0c824e236fa1553.jpg)
811
+ Diversity Across SFT [T=1.5]
812
+
813
+ ![](images/946d993abc5cb2f9296711897d897572593e58f08517be74488fe99906faa457.jpg)
814
+
815
+ ![](images/4cca06e5c218704714c26e86a6ac995e438bcee795019154e74edbfe68091d0f.jpg)
816
+
817
+ ![](images/6c18e77dd874f11856943ca580d37e31d9687c5a7be4bc53d7a94208e6f4079a.jpg)
818
+ Figure 15: Diversity of traces sampled with Temperature $\in$ {0.8, 1.0, 1.5} for Gemma-2-2B SFT checkpoints on GSM8k
819
+
820
+ ![](images/445fd20e4c1373f0114643f0a40149f387735f7d61bc9ab734ce91f06f149ec6.jpg)
821
+
822
+ ![](images/c20602e164d649b2cede3ffcbe6d6d45d34e0fbfdb8701a97c0a3495fe22a13a.jpg)
823
+
824
+ ![](images/04a8123274c3fe4a7a0f718441ab029fcec04c65de463ce4d5f4f8ddb1113e96.jpg)
825
+
826
+ # F.2 How well do token-level diverse decoding strategies compare with optimal strategy with oracle?
827
+
828
+ Hyperparameter Tuning Details We grid search for optimal temperature for all baselines over $T = [0.8, 1.0, 1.2, 1.5, 1.8]$ . For nucleus, we choose the best cutoff threshold between $[0.8, 0.9, 0.95]$ . For min-p, we choose the best probability threshold between $[0.01, 0.05, 0.1]$ . For tokenwise top-k, we choose best k between $[12, 25, 50]$ .
829
+
830
+ <table><tr><td>Decoding Strategy</td><td>Pass@2</td><td>Pass@4</td><td>Pass@8</td></tr><tr><td>Naive</td><td>0.565</td><td>0.666</td><td>0.760</td></tr><tr><td>Nucleus</td><td>0.566</td><td>0.668</td><td>0.757</td></tr><tr><td>Min-p</td><td>0.566</td><td>0.668</td><td>0.760</td></tr><tr><td>Top-k</td><td>0.563</td><td>0.666</td><td>0.756</td></tr><tr><td>Top-k w/Oracle</td><td>0.760</td><td>0.832</td><td>0.901</td></tr></table>
831
+
832
+ Table 2: Best Pass@k of Sampling Strategies for Qwen-2.5-0.5B over SFT checkpoints
833
+
834
+ <table><tr><td>Decoding Strategy</td><td>Pass@2</td><td>Pass@4</td><td>Pass@8</td></tr><tr><td>Naive</td><td>0.547</td><td>0.648</td><td>0.737</td></tr><tr><td>Nucleus</td><td>0.528</td><td>0.617</td><td>0.694</td></tr><tr><td>Min-p</td><td>0.550</td><td>0.655</td><td>0.744</td></tr><tr><td>Top-k</td><td>0.538</td><td>0.646</td><td>0.738</td></tr><tr><td>Top-k w/Oracle</td><td>0.730</td><td>0.814</td><td>0.878</td></tr></table>
835
+
836
+ Table 3: Pass@k of Sampling Strategies for Qwen-2.5-0.5B at Last SFT Checkpoint
837
+
838
+ ![](images/c6ea5a13b67d1c489b5482fd1dc7e9c590db2bda35acfbb90c820e3cee9fbbba.jpg)
839
+ Figure 16: Pass@K over different Min-P thresholds $\gamma \in [0,0.3]$ and temperatures $T\in [1,1.6]$ for Gemma2-2B finetuned on GSM8K. Generally, no min-p threshold paired with high temperature $\mathrm{T} = 1.6$ (in light green) is able to surpass the Pass@1 of $\mathrm{T} = 1$ with best min-p threshold (in orange). In other words, unlike WiSE-FT which increases both Pass@1 and Pass@K, Pass@1 tends to still decrease for the diverse decoding strategy of applying min-p with high temperature.
840
+
841
+ ![](images/a63859f2d49730f3fc594d38e64daee690ae73ff3dfdfd4802371d22024209fe.jpg)
842
+
843
+ ![](images/e2ab7f724e76325f770f6dd199c9afc3e5801a548582d0e9ac5362497292c00e.jpg)
844
+
845
+ ![](images/528a40964d3bfda6c2aef41d322a1d87b86f248ba95412cafdaaf3724d5c8979.jpg)
846
+
847
+ ![](images/510bcdc6999edfdaf5ad51d42c4d837bb3ee069afcc24f8a26bd6ee5bc71c4e7.jpg)
848
+
849
+ ![](images/39f305f282f3aa2b8d837c2869699fbd2bb84a64fa117b53ed57a515d2e954a8.jpg)
850
+ Figure 17: Pass@k of Gemma-2-2B GSM8k Naive Sampling with Replacement
851
+
852
+ ![](images/393c4e789004c9b1d25cabc33e6caa251b9a30ac8408881cef3e9408d407b11f.jpg)
853
+ Figure 18: Pass@k of Gemma-2-2B GSM8k Oracle Top K Sampling
854
+
855
+ ![](images/746a4c2b336ab6d80695d60f5f6112ea89a75779153cb6d50e8f7c5219e462ab.jpg)
856
+
857
+ ![](images/1eb6b1fd37997ea936e23fe598e2d89e0cb3ea24361199d8c518915d5a76ffb0.jpg)
858
+
859
+ ![](images/d3bd9e86127b96e78ed2a923eb025ecabea92613c76f789c22579bde4d166df7.jpg)
860
+
861
+ ![](images/a3080dc079b0901c51c02399a1907b2e2c96b0a922a54f5a446ed2ca4860e645.jpg)
862
+
863
+ ![](images/07caf13e3b113102c5e105ffade1b5592d6b69e20a12f6d7be497607fbc435bf.jpg)
864
+
865
+ ![](images/fc1e852587bf0d908433faf87683b609d90596c02a702dd55504a960a15c609c.jpg)
866
+
867
+ ![](images/9e6d23e6964203140d9a54ab2c01c778ff9fc0a06702b556276de27f651dd276.jpg)
868
+
869
+ ![](images/010ca52fb4d6d2f4a74f158e3d4b742d0d60ff44e3344acc665ea390dcd0d87d.jpg)
870
+
871
+ ![](images/34da879b88c056d54890866c39e610bb90307c00176ff655d5e91b80505a2801.jpg)
872
+ Figure 19: Pass@k of Qwen-2.5-0.5B GSM8k Naive Sampling with Replacement
873
+
874
+ # F.3 Diversity Comparison Between SFT and WiSE-FT
875
+
876
+ ![](images/056234a694a700f1c7f01a6e9ed20094d7cd0fdb69eb5de2b5a653d849348ef9.jpg)
877
+
878
+ ![](images/8a3d27f0b455df3c98d77c12e7837d76fbf9b29395a7db957519d3cc75142e50.jpg)
879
+
880
+ ![](images/db799a546fac2266a4fb6e884a368cf79f696cc523c048ac9f7712be215438ae.jpg)
881
+
882
+ ![](images/f3af3a04795cd7ec6e134e72ceb0fd20ab57c4f2f3d29acda347037690cecaa8.jpg)
883
+
884
+ ![](images/335ad53fb8c267ab9f4d31675ae0ac9c056c01642d53698f0a201571736eb81f.jpg)
885
+ Figure 20: Pass@k of Qwen-2.5-0.5B GSM8k Oracle Top K Sampling
886
+
887
+ ![](images/ccfab5cfd7bae8ef782e079cda3aeda4adbe6f786122fc77799b75ec40133ee2.jpg)
888
+
889
+ ![](images/5c78239b846481b7bf9c97f9b381676efebb712f06044215ffc0828a1d3181dd.jpg)
890
+ Figure 21: Operation, Semantic, and Answer Diversity of Gemma-2-2B checkpoints of SFT over GSM8K versus the corresponding WiSE-FT variants (with the earliest checkpoint). We decode with temperature set to 1.0.
891
+
892
+ ![](images/4546a62db2a2c93d5719df6b62375b8b3d804a1758924ed0422076a662d1a358.jpg)
893
+ Figure 22: Operation, Semantic, and Answer Diversity of Gemma-2-2B checkpoints of SFT over GSM8K versus the corresponding WiSE-FT variants (with the earliest checkpoint). We decode with temperature set to 1.6.
894
+
895
+ # G Best of K Evaluation
896
+
897
+ ![](images/9e3fdc034bc19b8587b4804417cfbed97b363f7d3658230fe4584772068195a9.jpg)
898
+
899
+ ![](images/b44675a6666bda0b73ef9e200a9b1d1022ee221c3d4530367974a9e78cee2014.jpg)
900
+
901
+ ![](images/aeb10270cd0a472e542fdfa57315bbbb7b5ba41555291cba501c05e400478d37.jpg)
902
+
903
+ ![](images/b2d2e4fd5b348100cefd256746110d2df91d3244a48b747cbf5561ad703f3c6e.jpg)
904
+
905
+ ![](images/ec5aabdb8afc02d520db433b2ed880cfb460293ab4f514cf1cd416a0e05b8f01.jpg)
906
+ Figure 23: Best@K performance on MATH500 with ORM verifier, comparing different SFT and WiSE-FT checkpoints of Qwen-2.5-0.5B for $K = 2,4,8,32$
907
+
908
+ ![](images/814e37e4a8cfad542f088af46bbd948f458f6686fdea387a041191635dae743d.jpg)
909
+
910
+ ![](images/dcc8297332fec4d4f444504910cf3c945a0afaf6ec9a11be41dfb59a185c4df3.jpg)
911
+
912
+ ![](images/04d3dcc21b958936bdc9a1f91c07188c307c9efefc4ccbe3518792514baf7514.jpg)
913
+
914
+ ![](images/568239dfe0d053038302fc7fbde2c6756bc3312c7e80cec28090883679638505.jpg)
915
+
916
+ ![](images/860ad658417f481695762b3985aa86cc6c8648a27a1974383b58f81738a321ca.jpg)
917
+
918
+ ![](images/b0c17f52c3d8d9f710084b2e71cd83327cb690eb5a0d9b91600ddb7e86032d53.jpg)
919
+
920
+ ![](images/81519a54b17fc6c666684e274696d0ac2127e9fe4ff88d0d9326a87c6149a208.jpg)
921
+
922
+ ![](images/205af94455736f91afbef56faed86d6536d4884d2490f957f228fe6f9d772c60.jpg)
923
+ Figure 24: Best@K performance on MATH500 with ORM (Top) and Majority Vote (Bottom) for early, middle, and late SFT checkpoints and WiSE-FT counterparts, showing Qwen-2.5-0.5B's scaling across K values.
924
+
925
+ # H Diversity Collapse and WiSE-FT Results for the Coding Task
926
+
927
+ To test whether coding tasks exhibit the same diversity collapse observed in reasoning benchmarks, we fine-tuned the Qwen2.5-coder-0.5B model for 10 epochs on the Magicoder-Evol-Instruct-110K dataset, following the Stage 2 SFT recipe from OpenCoder LLM. We then applied WiSE-FT by interpolating the weights of the second SFT checkpoint with the initial model using interpolation ratio 0.5. Both the original SFT checkpoints and their WiSE-FT counterparts were evaluated on HumanEval for pass@k.
928
+
929
+ ![](images/60a8b9a9af69b7127d68729341fe2361371efa2b3e4bacb8eec3f42fc9ec84a0.jpg)
930
+
931
+ ![](images/9dc1172db134cef8ae1e854cc6f68ebfe8e6c96aaeacb27c5b0c870ad9752a67.jpg)
932
+
933
+ ![](images/cafeaf2d7bb4404026a1ba3699040624b79be50df113c5458aaa97d41afc6c76.jpg)
934
+
935
+ ![](images/1e7d7a28f9f5517750d4bcda26579c66a321c8f7a9d9072a3c097f819faa0084.jpg)
936
+
937
+ ![](images/d837c799a3424c44d929865ee8c5c17f2da827b7e87b44e7380e31803ce3e9b3.jpg)
938
+ Figure 25: Best@K performance on MATH500 with majority voting, comparing different SFT and WiSE-FT checkpoints of Qwen-2.5-0.5B for $K = 2, 4, 8, 32$
939
+
940
+ We found that, much like in mathematical reasoning tasks, SFT on coding data indeed suffers from diversity collapse: although pass@1 steadily improves over epochs, pass@k begins to deteriorate. And WiSE-FT still improves performance and mitigates the diversity collapse.
941
+
942
+ ![](images/cb3ca09225ec7f2708663a63adf1bb7d1807c672a937ff5625e5894c7f467191.jpg)
943
+ HumanEval - Pass@k Across SFT Checkpoints
944
+
945
+ ![](images/293bfc0fc27150b8eec52e2f179c7a8bee6d05de89eb6f954bb89407cf535e84.jpg)
946
+ Figure 26: Pass@K performance of SFT checkpoints on HumanEval (temperature = 1.0).
947
+
948
+ ![](images/dc80c75a67d472177dcc63f0074f37ec267e332b7e7317accb544868294cd22e.jpg)
949
+
950
+ ![](images/aa8a4b4c00fe197e0575e11b2fecb88604a4562cbe6fb459230d83e7172326d4.jpg)
951
+ HumanEval - Pass@k Across Checkpoints (SFT vs WiSE-FT)
952
+
953
+ ![](images/6f01ccaf5c53f26ffda8ffe2a095592c21beb991d051e6b3ecc692744b009663.jpg)
954
+ Figure 27: Comparison of pass@K for SFT checkpoints and their WiSE-FT counterparts at $k = 1$ , 16, 64.
955
+
956
+ ![](images/adce33339af950e6097d8809ac7898c79d536d03de13d3b42c8e47246adb4ef3.jpg)
957
+
958
+ ![](images/b5e95e0338cedd9b1f43533a5dbb16642aa6e174015a5f1dd91e1e8a66e77aa3.jpg)
959
+ HumanEval - Last Checkpoint (1700) Comparison: SFT vs WiSE-FT
960
+ Figure 28: Pass@K performance of the final SFT checkpoint versus its WiSE-FT variant.
data/2025/2504_10xxx/2504.10478/images/010ca52fb4d6d2f4a74f158e3d4b742d0d60ff44e3344acc665ea390dcd0d87d.jpg ADDED

Git LFS Details

  • SHA256: b5ab4b3a7da701a35ab2875f0651a90acf1d8b8cded2004eb550b46eab0c477c
  • Pointer size: 130 Bytes
  • Size of remote file: 17.9 kB
data/2025/2504_10xxx/2504.10478/images/010df19603f6a8b8aed4c150ff7104474ebf1313d06df35b56b879fe8d0142f9.jpg ADDED

Git LFS Details

  • SHA256: d3e3ea700e0b550c15c523e78d53b529970f2f3ceb4c81467c1e8aa6aa5b2d91
  • Pointer size: 130 Bytes
  • Size of remote file: 11.5 kB
data/2025/2504_10xxx/2504.10478/images/025400c251313c19885eeda9c411127b9f8264a71c6d8cea574e0029094f187b.jpg ADDED

Git LFS Details

  • SHA256: f32714e1832878ce1b47e6cfda07eb905366672d387539bd7442010afa377fd1
  • Pointer size: 129 Bytes
  • Size of remote file: 4.23 kB
data/2025/2504_10xxx/2504.10478/images/02544de3a14cfeaa6aea139b5b0ab1cbd6ec4f541559f7e7213fbf4c3e2553a9.jpg ADDED

Git LFS Details

  • SHA256: 70942cd36049449ae3847c7e1cddbef73bf6c28c63cc772a86142ccee0db6068
  • Pointer size: 129 Bytes
  • Size of remote file: 4.83 kB
data/2025/2504_10xxx/2504.10478/images/04a8123274c3fe4a7a0f718441ab029fcec04c65de463ce4d5f4f8ddb1113e96.jpg ADDED

Git LFS Details

  • SHA256: fbeb4300f160b6d6aa9f087160af61f2e7e2623a3253ab1e57aeae956eb29d79
  • Pointer size: 130 Bytes
  • Size of remote file: 14.3 kB
data/2025/2504_10xxx/2504.10478/images/04d3dcc21b958936bdc9a1f91c07188c307c9efefc4ccbe3518792514baf7514.jpg ADDED

Git LFS Details

  • SHA256: bea75663cf6a947a81807a3dbed676c2385694d9259a26db556cdab8aeec3b57
  • Pointer size: 130 Bytes
  • Size of remote file: 17.4 kB
data/2025/2504_10xxx/2504.10478/images/056234a694a700f1c7f01a6e9ed20094d7cd0fdb69eb5de2b5a653d849348ef9.jpg ADDED

Git LFS Details

  • SHA256: 123975f30a2e372c7a0b6fe25cd9588e0fa8030408d0f43ca0c8eb416927e951
  • Pointer size: 130 Bytes
  • Size of remote file: 17.3 kB
data/2025/2504_10xxx/2504.10478/images/07caf13e3b113102c5e105ffade1b5592d6b69e20a12f6d7be497607fbc435bf.jpg ADDED

Git LFS Details

  • SHA256: c76d73d75100dffbc9b681d3125a48ff17cf3e2e9bba676497c57aa0eb2936bb
  • Pointer size: 130 Bytes
  • Size of remote file: 15.3 kB
data/2025/2504_10xxx/2504.10478/images/083eced91fb46849603d1ad72fa239d4acbfb899ad82936da65f408e5a04fcc8.jpg ADDED

Git LFS Details

  • SHA256: 2186efb5eaf24cec02084fb5a10e22cf80024340e0a67ff5596031205639fba0
  • Pointer size: 129 Bytes
  • Size of remote file: 7.16 kB
data/2025/2504_10xxx/2504.10478/images/08617bf65e4ebad736f26039f5ce3bc40693f92d1f1c5988d045480873e04806.jpg ADDED

Git LFS Details

  • SHA256: 8c6df7f2e2f4db91dd4776b4e4473f6a38c8f410e1add8448741ebd0ca536d04
  • Pointer size: 129 Bytes
  • Size of remote file: 4.45 kB
data/2025/2504_10xxx/2504.10478/images/09a06b40fa4ea2f11c53a1fb0b61297195528e6be636a1b05e4aa3162e56bcea.jpg ADDED

Git LFS Details

  • SHA256: 467b47c0711fba98c3224a9c3758a88392a0eb3f6f2c4da852434345c9b07675
  • Pointer size: 129 Bytes
  • Size of remote file: 9.7 kB
data/2025/2504_10xxx/2504.10478/images/0c4250ef61690cb6f1d8c4915cae968117a343314b9954e0c0942cc5689c68ec.jpg ADDED

Git LFS Details

  • SHA256: 1493fd060c0c9bb5673f6eaa1c29a912cb87626151c7eee8399497f01662b041
  • Pointer size: 129 Bytes
  • Size of remote file: 5.82 kB
data/2025/2504_10xxx/2504.10478/images/0c94c758263527048a63f3968a11597da849450b1792cda84ac408bad5f98b58.jpg ADDED

Git LFS Details

  • SHA256: 58ce24ec342bb731d2c72e60abd33acc569d728b2f73aa32a4c1e5c478793268
  • Pointer size: 130 Bytes
  • Size of remote file: 17.1 kB
data/2025/2504_10xxx/2504.10478/images/0ca043e2ceef566089e4e9de31db5711daf3366c9e45fb0dd7cea3659355b3d9.jpg ADDED

Git LFS Details

  • SHA256: 8c26d303fe7aa13ca649c8038832a2cedff516d52f9264f04f186f20748b8e41
  • Pointer size: 130 Bytes
  • Size of remote file: 13.7 kB
data/2025/2504_10xxx/2504.10478/images/0d716e431c635ba9b4ddf9553c1ba062384da0054901a2617578c183216748d1.jpg ADDED

Git LFS Details

  • SHA256: cecbfb67267a95ee9a297d5dea5bfea297eadbf52e6a4be767cf1bed7a236c3d
  • Pointer size: 129 Bytes
  • Size of remote file: 2.92 kB
data/2025/2504_10xxx/2504.10478/images/0f296249734c6e78fe73f1f7ad3efd2ffa14c033f2625ceb5f3ec2ec4cadfdf1.jpg ADDED

Git LFS Details

  • SHA256: 6f9870773c41e2818f8d663a85b54b66e0f69e1962f759a6bd8452bba694a0f7
  • Pointer size: 129 Bytes
  • Size of remote file: 6.63 kB
data/2025/2504_10xxx/2504.10478/images/105ec9b2f770f3fd0623182efd6b919cba9dbb10c81c24b7a52fedf2e526985a.jpg ADDED

Git LFS Details

  • SHA256: b9d143aedc76e0e87ef48d81b90f73425ccf2e99a3a45e8d233f6567f451cf26
  • Pointer size: 130 Bytes
  • Size of remote file: 45.7 kB
data/2025/2504_10xxx/2504.10478/images/106be94ca90f21b22c1046ac94af48a64aa8412f6ffde8449c502aa85c296ee4.jpg ADDED

Git LFS Details

  • SHA256: 50245a2b35325b70396a64b103b4ed5287454e5d5fc72d75b822ea2daa9ade0c
  • Pointer size: 129 Bytes
  • Size of remote file: 3.51 kB
data/2025/2504_10xxx/2504.10478/images/110fab68254d7edd76626c12dee15bae4c5510f3d1620d88c62cdb6cd3e849b2.jpg ADDED

Git LFS Details

  • SHA256: 6c54ed73dd762b7e5fd39ff93edfa58d88e47862e4094d5b439b55554f4cea5e
  • Pointer size: 130 Bytes
  • Size of remote file: 10.9 kB
data/2025/2504_10xxx/2504.10478/images/136c97bd309561e7ca54a0ca2069ad3f0b521147a93b614b1fb712de45f0c740.jpg ADDED

Git LFS Details

  • SHA256: a5b9c3fd2ed28d1d63a0a1ec8a32b2f4d7b91669ab5f18af2b839028bbbb0488
  • Pointer size: 129 Bytes
  • Size of remote file: 8.66 kB
data/2025/2504_10xxx/2504.10478/images/19a01a99597df80ac8df614f4c1787a0a5b99ab4663cc34196f872471af91463.jpg ADDED

Git LFS Details

  • SHA256: bec16174219a3aba58e9ec6484c63f279c29e93d6142db21d48673d3e0b2e788
  • Pointer size: 130 Bytes
  • Size of remote file: 23.8 kB
data/2025/2504_10xxx/2504.10478/images/1c07139c31aa052e9802591db451bb95cd466333559e1496c77fb8de1be1c789.jpg ADDED

Git LFS Details

  • SHA256: b37e56bcd3cca667a971890c16d04ba8099e95d6096a8a3e568ca99aa6f7b9df
  • Pointer size: 129 Bytes
  • Size of remote file: 2.9 kB
data/2025/2504_10xxx/2504.10478/images/1e7d7a28f9f5517750d4bcda26579c66a321c8f7a9d9072a3c097f819faa0084.jpg ADDED

Git LFS Details

  • SHA256: 7290eccfac0d26a4818a2f08900807d64251f5be731a8a6b35a5a559f15d719c
  • Pointer size: 130 Bytes
  • Size of remote file: 10.3 kB
data/2025/2504_10xxx/2504.10478/images/1eb6b1fd37997ea936e23fe598e2d89e0cb3ea24361199d8c518915d5a76ffb0.jpg ADDED

Git LFS Details

  • SHA256: 3007c16f9f9ce4985b1d130880751a0581c0c20cb38feef8fc08260f6eb75617
  • Pointer size: 130 Bytes
  • Size of remote file: 17 kB
data/2025/2504_10xxx/2504.10478/images/205af94455736f91afbef56faed86d6536d4884d2490f957f228fe6f9d772c60.jpg ADDED

Git LFS Details

  • SHA256: 6f59259c6cfbc978127acae75413984dcd45805f21874e236c215a2a98ec9b9a
  • Pointer size: 129 Bytes
  • Size of remote file: 7.4 kB
data/2025/2504_10xxx/2504.10478/images/210f985ab8344ab9024b0e8866757143d08c6a402d3b38801bfe06bad03d0471.jpg ADDED

Git LFS Details

  • SHA256: 1142370df49560afeac8574f2a22322501fecc1edb3b10a28b08d0c0f7449e97
  • Pointer size: 130 Bytes
  • Size of remote file: 13.5 kB
data/2025/2504_10xxx/2504.10478/images/2129eafde804b7c6253031f96635d28714155c8f61913d221bec62adc0d05819.jpg ADDED

Git LFS Details

  • SHA256: fb2f9d03649bc11fe37d8d0ac36962a628cc1bd403b94f15cc791dd8244ab911
  • Pointer size: 129 Bytes
  • Size of remote file: 7.9 kB
data/2025/2504_10xxx/2504.10478/images/219b5d70f1361d0524e9e43e4ca877a094875ed4025d0d18eae4a0143396e6c9.jpg ADDED

Git LFS Details

  • SHA256: b23b45a48b8bccfb59fd579f96fd01b57a05b24e052f44eb6e923161a77d597f
  • Pointer size: 129 Bytes
  • Size of remote file: 6.51 kB
data/2025/2504_10xxx/2504.10478/images/235ace59d9f3a9fd4138d33eda84ec30aa842d50403a6325e09b14c22038f792.jpg ADDED

Git LFS Details

  • SHA256: 030791b5f5a5645d6619e4f2e87f3fa5e91dcbf250ca93fbcbe85e848e4a9887
  • Pointer size: 129 Bytes
  • Size of remote file: 9.59 kB
data/2025/2504_10xxx/2504.10478/images/25ebeb290d7259127374c9c05e2e0ce5075d7b65dfef8be8699ed49268e3b01a.jpg ADDED

Git LFS Details

  • SHA256: 37f7d8d89c0e8f3a7a20752934bd17fe4b2faafcf7bd9614b4f2b9014c09a8f6
  • Pointer size: 130 Bytes
  • Size of remote file: 10.8 kB
data/2025/2504_10xxx/2504.10478/images/268a4214b18b790ae18f4c66932eb03706a3662c1ff2bbd2235424d0bcd92783.jpg ADDED

Git LFS Details

  • SHA256: c6004793f1209f4e7972c6d1360aebad19f9d0dfb86194ae16b59918b31ccacd
  • Pointer size: 130 Bytes
  • Size of remote file: 15.6 kB
data/2025/2504_10xxx/2504.10478/images/281b7e3b04727e31eb48c9f9eb0dac923c0ed6c74f21659a0a4d939eab7dbcdc.jpg ADDED

Git LFS Details

  • SHA256: 0df986bce2bc7c9dea0b241422bb903d12049eb4efb9baa1384bd4d1c208b95f
  • Pointer size: 130 Bytes
  • Size of remote file: 20.1 kB
data/2025/2504_10xxx/2504.10478/images/293bfc0fc27150b8eec52e2f179c7a8bee6d05de89eb6f954bb89407cf535e84.jpg ADDED

Git LFS Details

  • SHA256: a04757101998b1b2132ad60f156a177cd08f34ba3212e365e931dda12bfa1e03
  • Pointer size: 130 Bytes
  • Size of remote file: 11.8 kB
data/2025/2504_10xxx/2504.10478/images/2afd9d9121757c862096b1a3d7ace4ff98bfb6719ea911ea906426aa924ec8b7.jpg ADDED

Git LFS Details

  • SHA256: a760a472037bbfe174c33acb195f5f70a926358f2653daadb5c939005b3ca11e
  • Pointer size: 129 Bytes
  • Size of remote file: 5.65 kB
data/2025/2504_10xxx/2504.10478/images/2b5bc1934d7c465d670fdb3f31d7c83014925d80d56f23229733f5fb3a5e4176.jpg ADDED

Git LFS Details

  • SHA256: 21a104152ce30b4df690ac2a885320bd708430322dec1ec3f7eadba22d77b4ef
  • Pointer size: 130 Bytes
  • Size of remote file: 14.7 kB
data/2025/2504_10xxx/2504.10478/images/2ba83d43ee06d46a52d1c0a7af9c34e2f33d4173d7a4793a62c64f013efa1d66.jpg ADDED

Git LFS Details

  • SHA256: 7d0f9531cb8b94d8c5f9c9149279cba99d7a790036cc3a0d64803923ac7945a4
  • Pointer size: 130 Bytes
  • Size of remote file: 11.2 kB
data/2025/2504_10xxx/2504.10478/images/2f678190c34bc263f209d564e733b949a49f0c5b80c07037e411d4cdccc2776b.jpg ADDED

Git LFS Details

  • SHA256: 778d113d121073a9934929ea1b8d8e236bd86bc5ccdfc1e77f5891ba84985084
  • Pointer size: 130 Bytes
  • Size of remote file: 11.1 kB
data/2025/2504_10xxx/2504.10478/images/335ad53fb8c267ab9f4d31675ae0ac9c056c01642d53698f0a201571736eb81f.jpg ADDED

Git LFS Details

  • SHA256: 2b1f27efbf7b470aa82eb2762d299f82ce30d66a9d76560025bc5b4528ec5e2a
  • Pointer size: 129 Bytes
  • Size of remote file: 5.34 kB
data/2025/2504_10xxx/2504.10478/images/33b82e197b5263aaec61d52cf001bea59054b3e26d2244a7f955f99bbd538652.jpg ADDED

Git LFS Details

  • SHA256: f33684418dd300d6c319cea3db6b937b2301159789e178e32d901e22a64162bf
  • Pointer size: 130 Bytes
  • Size of remote file: 62 kB
data/2025/2504_10xxx/2504.10478/images/34da879b88c056d54890866c39e610bb90307c00176ff655d5e91b80505a2801.jpg ADDED

Git LFS Details

  • SHA256: 0f32f7663c3c53afe1c050220e72b151530b57794dbfcaf4713ef3eb9d913632
  • Pointer size: 129 Bytes
  • Size of remote file: 4.79 kB
data/2025/2504_10xxx/2504.10478/images/34dd8473c7bea803c37a13a33b349b2cd610686e846013957bdec7e504f82175.jpg ADDED

Git LFS Details

  • SHA256: 889668c83537d7139d7851978f115f2286f602909b45b6f508faa812c41da722
  • Pointer size: 130 Bytes
  • Size of remote file: 20 kB
data/2025/2504_10xxx/2504.10478/images/3536f4b1df50cd66ddd3bfbccb80c35fc10834c00925c31728553b31b2fbfd2a.jpg ADDED

Git LFS Details

  • SHA256: 278647c0de47cb353b1a1414ebc443a4d903de8c2b5cd63d1fb7b012442ab745
  • Pointer size: 130 Bytes
  • Size of remote file: 21.3 kB
data/2025/2504_10xxx/2504.10478/images/38159ef7ae6f51b78b1f51b27ac07d7019b9006e97ddbd5054b02d32b076acac.jpg ADDED

Git LFS Details

  • SHA256: bd1b0dd7d6ef4d3e8eb9c8093bbfc2318926f843db5c3524e6910b24cbed54f6
  • Pointer size: 130 Bytes
  • Size of remote file: 35.2 kB
data/2025/2504_10xxx/2504.10478/images/393c4e789004c9b1d25cabc33e6caa251b9a30ac8408881cef3e9408d407b11f.jpg ADDED

Git LFS Details

  • SHA256: fc57b86b4221f242f04af291ad7c48ffb9b8ed2753f768e8ca61544eeeb67d3e
  • Pointer size: 130 Bytes
  • Size of remote file: 17.6 kB
data/2025/2504_10xxx/2504.10478/images/39f305f282f3aa2b8d837c2869699fbd2bb84a64fa117b53ed57a515d2e954a8.jpg ADDED

Git LFS Details

  • SHA256: 717a6077123a3cbeaf2852a4568b749877268debfdead4bd4767f9cede25bc09
  • Pointer size: 129 Bytes
  • Size of remote file: 5.55 kB