Chelsea707 commited on
Commit
1a2b7d9
·
verified ·
1 Parent(s): 3a166a1

MinerU Batch e87e1d9f-d17b-4b8a-bbe3-e5d8a7ae5e47 (Part 6/8)

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +8 -0
  2. data/2025/2504_07xxx/2504.07887/2c64acb6-b959-4ce1-8f5e-e00bed67e6e0_content_list.json +0 -0
  3. data/2025/2504_07xxx/2504.07887/2c64acb6-b959-4ce1-8f5e-e00bed67e6e0_model.json +0 -0
  4. data/2025/2504_07xxx/2504.07887/2c64acb6-b959-4ce1-8f5e-e00bed67e6e0_origin.pdf +3 -0
  5. data/2025/2504_07xxx/2504.07887/full.md +0 -0
  6. data/2025/2504_07xxx/2504.07887/images/0ddab2bec0b2a20978d6be92846542bce3e971627d8055fbaf4971088d9a19cb.jpg +3 -0
  7. data/2025/2504_07xxx/2504.07887/images/0e6c825ddf1be386971cb6fb427075703589e1c7a480556178eeffb9bbc05261.jpg +3 -0
  8. data/2025/2504_07xxx/2504.07887/images/197f4697129a620515fd6ef294708ebcc22082e876d139814f9ec72e8e44d128.jpg +3 -0
  9. data/2025/2504_07xxx/2504.07887/images/1e7055f38df96a015f166613e02f9fa6cb21f3fa0efcce5d080ea04f6f451fec.jpg +3 -0
  10. data/2025/2504_07xxx/2504.07887/images/24dba94375c48991a5b9757f3dddd78ca510353a5705e03a39617d8d4e2dbacd.jpg +3 -0
  11. data/2025/2504_07xxx/2504.07887/images/2ae48ee356462a27b6945608bd2e510f91220dbf7f02241a6213fa28e53f4875.jpg +3 -0
  12. data/2025/2504_07xxx/2504.07887/images/309433abbf551714c614bfdc653ebfd557523ca44e7e5cf204c6ecd324420700.jpg +3 -0
  13. data/2025/2504_07xxx/2504.07887/images/35614d9dd8584d8583c6250ffcd91227f9033c77c2570b672b58c5401a95dd03.jpg +3 -0
  14. data/2025/2504_07xxx/2504.07887/images/36fc73495d59cb07e3e2122f83649af4d4fc9e15bfd0bdd919dd2785f97104ef.jpg +3 -0
  15. data/2025/2504_07xxx/2504.07887/images/39e827dba5c5b2f755d5e1573289f740b7811c6b394da6b9195248c85602e5f1.jpg +3 -0
  16. data/2025/2504_07xxx/2504.07887/images/3a5162c7e1f0205ad1c6672b4ba0850996f5b5bcf742ab451e65ba3d2640ca29.jpg +3 -0
  17. data/2025/2504_07xxx/2504.07887/images/3e4f052e6d40e102850d96b1fefa276be1d825793a3e70c79839eaa98b67e0d1.jpg +3 -0
  18. data/2025/2504_07xxx/2504.07887/images/4908f844e3430dfd3db0a7465ce8c8e401b65822a8dd125c6d8954b9dd0be940.jpg +3 -0
  19. data/2025/2504_07xxx/2504.07887/images/4969846a5b2c353a6ef575665e9db6a29df7e1f7d2b5308a6ccbfab2427bb645.jpg +3 -0
  20. data/2025/2504_07xxx/2504.07887/images/55858d49f3b255fc192f14d76abc9b42f1fd45a122e00b9fb84dcc5ec1885b2e.jpg +3 -0
  21. data/2025/2504_07xxx/2504.07887/images/598af86f69710ded6bd851be46d3dd4a7b421687ae61c399e9a4a4bb21c59b94.jpg +3 -0
  22. data/2025/2504_07xxx/2504.07887/images/6fc9c9d64d97023d9bc22d5c59cb10e0e68d02f843d698f371e6a6e93ba0186d.jpg +3 -0
  23. data/2025/2504_07xxx/2504.07887/images/7e8234b6ed19d9c09fdaabca9d85a277334554fba9efb6d9a5cd395fae192f8b.jpg +3 -0
  24. data/2025/2504_07xxx/2504.07887/images/8164ef41fe6892edc8bbe28b2545295283489f182b93f6c12780be5bc281b08d.jpg +3 -0
  25. data/2025/2504_07xxx/2504.07887/images/8e278c9ae6cf1e81062ed5446c1f771ff066dac71881c4add1c494e2065c8aa1.jpg +3 -0
  26. data/2025/2504_07xxx/2504.07887/images/8f763f60e6f2dcb5da611a2a689333b1d992f51f7e355477047d36e7fc2eee60.jpg +3 -0
  27. data/2025/2504_07xxx/2504.07887/images/92065e622739fc241df5ae5a0018eb1e9f09063cc9cb2a2f48f0bcd48607d681.jpg +3 -0
  28. data/2025/2504_07xxx/2504.07887/images/93b625f6770da315744d261c6ebce970fc1d2fe5a9b8bf1de62c49b199631fd0.jpg +3 -0
  29. data/2025/2504_07xxx/2504.07887/images/9b043795303bdaf2196ffee60f33dd56f13ebcee895b8f498a7435576899ee60.jpg +3 -0
  30. data/2025/2504_07xxx/2504.07887/images/a56bbd7ad8737e044b251e6ace1838de31ac3d9726fc88b76cd78492346428c0.jpg +3 -0
  31. data/2025/2504_07xxx/2504.07887/images/a72c91d0de5a9f70ced5473959299594386b6c407599d70301e637cd49c56984.jpg +3 -0
  32. data/2025/2504_07xxx/2504.07887/images/b1f3d2e9b82681591729eb8ba7209f7d146dc6d8695ea599957ab02a4e083797.jpg +3 -0
  33. data/2025/2504_07xxx/2504.07887/images/bf49c59770cc6bb235374b07ae27cf30ce2142f834bfe4869f4c2a3d810d7d52.jpg +3 -0
  34. data/2025/2504_07xxx/2504.07887/images/c06308c4675fbc408080b83228343936c5e4efc7bba657b480fc655d855a97ec.jpg +3 -0
  35. data/2025/2504_07xxx/2504.07887/images/c12a6ddea3560e5f0092437939ab12bb058f9869a2663c71509026fe32e5daa9.jpg +3 -0
  36. data/2025/2504_07xxx/2504.07887/images/c140558bb60ff5e6cd66a968a6fc7c9d75b44a16ea6041681fd4a033ad13ca93.jpg +3 -0
  37. data/2025/2504_07xxx/2504.07887/images/c192ed2e03cf7fd358109d7f0149fed3cbc0d15322d539ec696eedad45e1a0c7.jpg +3 -0
  38. data/2025/2504_07xxx/2504.07887/images/c457be4f8b8056df352ef6902fea71035cda8e7e043230d982019d5b0be57ee2.jpg +3 -0
  39. data/2025/2504_07xxx/2504.07887/images/c554cf3f3cf48e6dbcf54cbd9dda9207a53e537f46232f87a5d18208df859177.jpg +3 -0
  40. data/2025/2504_07xxx/2504.07887/images/c96ccee4736bb221e49b1eee54bed3ff77b9c1020fe8ccb34683792009107f0c.jpg +3 -0
  41. data/2025/2504_07xxx/2504.07887/images/cdb74e46a552bd1ac1b1b9ac2dd40ac5816c3a5617533c9f33b5ac26784dae20.jpg +3 -0
  42. data/2025/2504_07xxx/2504.07887/images/d144173aa8b9714540f4205b373471feeb264d509c964583f0ff4504bf8b2494.jpg +3 -0
  43. data/2025/2504_07xxx/2504.07887/images/d199d1341ed1a587b050aa0c34c7f83b1279e196fcd1d435109147207cbb77fd.jpg +3 -0
  44. data/2025/2504_07xxx/2504.07887/images/d3a464269c90508559e9c76d43d9541091682a54eedc04e608d986593a1a5461.jpg +3 -0
  45. data/2025/2504_07xxx/2504.07887/images/e8629ad6784766aef1cba829366eb3d27d9632181585f089e877212a5e3fa45a.jpg +3 -0
  46. data/2025/2504_07xxx/2504.07887/images/f52dc6278f8e95f4f356bc68263de8c7f8849ff76db79a2825b200c864e9f254.jpg +3 -0
  47. data/2025/2504_07xxx/2504.07887/images/f84ea84db7b67f64a03e0fbbcb66f774bec02d0357fd33d3865e57545f152f16.jpg +3 -0
  48. data/2025/2504_07xxx/2504.07887/images/fadf854244734d90b5cf12f03d9409dc2711c8eccde80e1c1731ff48ff1ea2b9.jpg +3 -0
  49. data/2025/2504_07xxx/2504.07887/layout.json +0 -0
  50. data/2025/2504_07xxx/2504.07891/154d7bd7-bf53-43dc-835d-060e982bbe89_content_list.json +1264 -0
.gitattributes CHANGED
@@ -1244,3 +1244,11 @@ data/2025/2504_07xxx/2504.07958/5d46c20d-6377-4068-b00f-99fd75c9048a_origin.pdf
1244
  data/2025/2504_07xxx/2504.07960/f287b47c-51dc-4176-8688-7dfa564aba51_origin.pdf filter=lfs diff=lfs merge=lfs -text
1245
  data/2025/2504_07xxx/2504.07961/1b218924-4458-4e79-be30-03db9efaf1e7_origin.pdf filter=lfs diff=lfs merge=lfs -text
1246
  data/2025/2504_07xxx/2504.07963/b52d65f3-2baa-49b5-a1c3-87a564685375_origin.pdf filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
1244
  data/2025/2504_07xxx/2504.07960/f287b47c-51dc-4176-8688-7dfa564aba51_origin.pdf filter=lfs diff=lfs merge=lfs -text
1245
  data/2025/2504_07xxx/2504.07961/1b218924-4458-4e79-be30-03db9efaf1e7_origin.pdf filter=lfs diff=lfs merge=lfs -text
1246
  data/2025/2504_07xxx/2504.07963/b52d65f3-2baa-49b5-a1c3-87a564685375_origin.pdf filter=lfs diff=lfs merge=lfs -text
1247
+ data/2025/2504_07xxx/2504.07887/2c64acb6-b959-4ce1-8f5e-e00bed67e6e0_origin.pdf filter=lfs diff=lfs merge=lfs -text
1248
+ data/2025/2504_07xxx/2504.07891/154d7bd7-bf53-43dc-835d-060e982bbe89_origin.pdf filter=lfs diff=lfs merge=lfs -text
1249
+ data/2025/2504_07xxx/2504.07912/2d66932b-db6d-4a30-b705-fea2ed4cbe19_origin.pdf filter=lfs diff=lfs merge=lfs -text
1250
+ data/2025/2504_07xxx/2504.07934/22a9c7de-6a20-46b4-9243-0619fe1a084a_origin.pdf filter=lfs diff=lfs merge=lfs -text
1251
+ data/2025/2504_07xxx/2504.07943/9854c588-fbd0-47a1-b560-4e8c5b07fb00_origin.pdf filter=lfs diff=lfs merge=lfs -text
1252
+ data/2025/2504_07xxx/2504.07956/233f7388-cf46-41c3-99bf-1eb30e12bcd2_origin.pdf filter=lfs diff=lfs merge=lfs -text
1253
+ data/2025/2504_08xxx/2504.08837/8cb49279-0a74-44c0-aaf5-baf8779e12d9_origin.pdf filter=lfs diff=lfs merge=lfs -text
1254
+ data/2025/2504_13xxx/2504.13914/ff11ce5d-6bb3-4214-9c75-cd867f0e0926_origin.pdf filter=lfs diff=lfs merge=lfs -text
data/2025/2504_07xxx/2504.07887/2c64acb6-b959-4ce1-8f5e-e00bed67e6e0_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2504_07xxx/2504.07887/2c64acb6-b959-4ce1-8f5e-e00bed67e6e0_model.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2504_07xxx/2504.07887/2c64acb6-b959-4ce1-8f5e-e00bed67e6e0_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0f44ff697bc40e7bc6409b80a1876bddebf9d2672faa85e44131986daff89910
3
+ size 3010444
data/2025/2504_07xxx/2504.07887/full.md ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2504_07xxx/2504.07887/images/0ddab2bec0b2a20978d6be92846542bce3e971627d8055fbaf4971088d9a19cb.jpg ADDED

Git LFS Details

  • SHA256: 6f3a6b9fb41318f68f669484bd3a137be8427296b67b3bad77ec17278320a65f
  • Pointer size: 130 Bytes
  • Size of remote file: 25 kB
data/2025/2504_07xxx/2504.07887/images/0e6c825ddf1be386971cb6fb427075703589e1c7a480556178eeffb9bbc05261.jpg ADDED

Git LFS Details

  • SHA256: 2f771acca9d3056136afcd3015acfbbc25354f7b97d4d035e6a48cf2dc10c95d
  • Pointer size: 130 Bytes
  • Size of remote file: 72.9 kB
data/2025/2504_07xxx/2504.07887/images/197f4697129a620515fd6ef294708ebcc22082e876d139814f9ec72e8e44d128.jpg ADDED

Git LFS Details

  • SHA256: 3a365c5ee35856190cf10ae56db8fcacc60e300f26e0588415861f5004001b35
  • Pointer size: 131 Bytes
  • Size of remote file: 125 kB
data/2025/2504_07xxx/2504.07887/images/1e7055f38df96a015f166613e02f9fa6cb21f3fa0efcce5d080ea04f6f451fec.jpg ADDED

Git LFS Details

  • SHA256: a8666feb3050f40aa35e97ac869d74e4d3fc4dd92a85da5edcc465e109a041d1
  • Pointer size: 129 Bytes
  • Size of remote file: 8.18 kB
data/2025/2504_07xxx/2504.07887/images/24dba94375c48991a5b9757f3dddd78ca510353a5705e03a39617d8d4e2dbacd.jpg ADDED

Git LFS Details

  • SHA256: 53b3d220c040a4d89519956b5a5a2fc43ce425afc956e05bb82de303bfe4bb27
  • Pointer size: 129 Bytes
  • Size of remote file: 5.5 kB
data/2025/2504_07xxx/2504.07887/images/2ae48ee356462a27b6945608bd2e510f91220dbf7f02241a6213fa28e53f4875.jpg ADDED

Git LFS Details

  • SHA256: 5cb5badc7561dc9f07fca773f8e275b19d7a7a57f94967c0c6389a0572d01b26
  • Pointer size: 130 Bytes
  • Size of remote file: 12.1 kB
data/2025/2504_07xxx/2504.07887/images/309433abbf551714c614bfdc653ebfd557523ca44e7e5cf204c6ecd324420700.jpg ADDED

Git LFS Details

  • SHA256: 3a4ebabd3385ec7bf2675fe2d4f456aafb230842ce27e420da852c425908775c
  • Pointer size: 131 Bytes
  • Size of remote file: 207 kB
data/2025/2504_07xxx/2504.07887/images/35614d9dd8584d8583c6250ffcd91227f9033c77c2570b672b58c5401a95dd03.jpg ADDED

Git LFS Details

  • SHA256: fac2d4b105aa2e09461ec8c8768762acd615404d359d44f7234291083b25c215
  • Pointer size: 130 Bytes
  • Size of remote file: 12.1 kB
data/2025/2504_07xxx/2504.07887/images/36fc73495d59cb07e3e2122f83649af4d4fc9e15bfd0bdd919dd2785f97104ef.jpg ADDED

Git LFS Details

  • SHA256: cc7d4798aa7c3719b4533af70075a009dbaaffb66b54338ae06fa7ceff2915e2
  • Pointer size: 130 Bytes
  • Size of remote file: 46.9 kB
data/2025/2504_07xxx/2504.07887/images/39e827dba5c5b2f755d5e1573289f740b7811c6b394da6b9195248c85602e5f1.jpg ADDED

Git LFS Details

  • SHA256: 7bb450a30aacb15f1a77ec2402022b62c14f0748dc2fb8ba5a7bab2b41096a62
  • Pointer size: 131 Bytes
  • Size of remote file: 160 kB
data/2025/2504_07xxx/2504.07887/images/3a5162c7e1f0205ad1c6672b4ba0850996f5b5bcf742ab451e65ba3d2640ca29.jpg ADDED

Git LFS Details

  • SHA256: 9bd1a99bae0b22746d75bfec443bf17f696cad29b20fe53ea62a0c36827be46c
  • Pointer size: 129 Bytes
  • Size of remote file: 5.78 kB
data/2025/2504_07xxx/2504.07887/images/3e4f052e6d40e102850d96b1fefa276be1d825793a3e70c79839eaa98b67e0d1.jpg ADDED

Git LFS Details

  • SHA256: 2453eb1d96f374aa5144bb4ebe83f86ce6974778bce042df20052d287c0e9c17
  • Pointer size: 130 Bytes
  • Size of remote file: 10.9 kB
data/2025/2504_07xxx/2504.07887/images/4908f844e3430dfd3db0a7465ce8c8e401b65822a8dd125c6d8954b9dd0be940.jpg ADDED

Git LFS Details

  • SHA256: 5f5fd65a689ecd22d4256aacc12c3ce87263c577663d729ece6855af1d793a5c
  • Pointer size: 129 Bytes
  • Size of remote file: 5.65 kB
data/2025/2504_07xxx/2504.07887/images/4969846a5b2c353a6ef575665e9db6a29df7e1f7d2b5308a6ccbfab2427bb645.jpg ADDED

Git LFS Details

  • SHA256: f2d78f51e5c449c002107337a1254c8b8928b18a9aac98964382b57c444a3092
  • Pointer size: 130 Bytes
  • Size of remote file: 12.3 kB
data/2025/2504_07xxx/2504.07887/images/55858d49f3b255fc192f14d76abc9b42f1fd45a122e00b9fb84dcc5ec1885b2e.jpg ADDED

Git LFS Details

  • SHA256: 10ce42c86b258e7bf45c9e4a19da3d169b079a85d5c08f1ad04481e90302151b
  • Pointer size: 129 Bytes
  • Size of remote file: 8.55 kB
data/2025/2504_07xxx/2504.07887/images/598af86f69710ded6bd851be46d3dd4a7b421687ae61c399e9a4a4bb21c59b94.jpg ADDED

Git LFS Details

  • SHA256: 07ce2a8c973bc63a260b4bd4377a2696bf9bb87172162a4185f4e00211770179
  • Pointer size: 129 Bytes
  • Size of remote file: 6.2 kB
data/2025/2504_07xxx/2504.07887/images/6fc9c9d64d97023d9bc22d5c59cb10e0e68d02f843d698f371e6a6e93ba0186d.jpg ADDED

Git LFS Details

  • SHA256: 6a327077703ce2b85cd853f075fe9ba9afb6e8a248494241b20cff05f43889bd
  • Pointer size: 130 Bytes
  • Size of remote file: 20.5 kB
data/2025/2504_07xxx/2504.07887/images/7e8234b6ed19d9c09fdaabca9d85a277334554fba9efb6d9a5cd395fae192f8b.jpg ADDED

Git LFS Details

  • SHA256: 8bbb57f2d46ce8541deba23e5bed9500a85b84830f77b8417d2f6ddaa1d68c08
  • Pointer size: 130 Bytes
  • Size of remote file: 13.5 kB
data/2025/2504_07xxx/2504.07887/images/8164ef41fe6892edc8bbe28b2545295283489f182b93f6c12780be5bc281b08d.jpg ADDED

Git LFS Details

  • SHA256: 377ee1bb627312e7b06e1c97658bce716bd7c6a1b116911e0e61cfe49aa894e2
  • Pointer size: 130 Bytes
  • Size of remote file: 12.2 kB
data/2025/2504_07xxx/2504.07887/images/8e278c9ae6cf1e81062ed5446c1f771ff066dac71881c4add1c494e2065c8aa1.jpg ADDED

Git LFS Details

  • SHA256: 2639ce1add4ba6051a1154ef308dd6059c2b94596a8b54f106d3e00e18999f3f
  • Pointer size: 130 Bytes
  • Size of remote file: 73.5 kB
data/2025/2504_07xxx/2504.07887/images/8f763f60e6f2dcb5da611a2a689333b1d992f51f7e355477047d36e7fc2eee60.jpg ADDED

Git LFS Details

  • SHA256: a940e43dd8577ecc9db9e26133035506794362f4cd39326a83280b28e19643aa
  • Pointer size: 130 Bytes
  • Size of remote file: 72.7 kB
data/2025/2504_07xxx/2504.07887/images/92065e622739fc241df5ae5a0018eb1e9f09063cc9cb2a2f48f0bcd48607d681.jpg ADDED

Git LFS Details

  • SHA256: 82e337156405bcd6df07628bc17a67fd3385bb7e1cc4a861601a696689a0b411
  • Pointer size: 130 Bytes
  • Size of remote file: 28.6 kB
data/2025/2504_07xxx/2504.07887/images/93b625f6770da315744d261c6ebce970fc1d2fe5a9b8bf1de62c49b199631fd0.jpg ADDED

Git LFS Details

  • SHA256: 37f5ff62485614a794d1657af9887ab695089ddd83a65e0dbb2ae41de1958d7e
  • Pointer size: 130 Bytes
  • Size of remote file: 62.9 kB
data/2025/2504_07xxx/2504.07887/images/9b043795303bdaf2196ffee60f33dd56f13ebcee895b8f498a7435576899ee60.jpg ADDED

Git LFS Details

  • SHA256: 59c089917fc4cc638ff33fa2b1c1527bcd7c26ed600573d962125a67ce713bf0
  • Pointer size: 131 Bytes
  • Size of remote file: 462 kB
data/2025/2504_07xxx/2504.07887/images/a56bbd7ad8737e044b251e6ace1838de31ac3d9726fc88b76cd78492346428c0.jpg ADDED

Git LFS Details

  • SHA256: 22f84901c6a0fa13b8e1c6ffe690feaef0053fbd5d45bd51dd4f2f957a3a1015
  • Pointer size: 130 Bytes
  • Size of remote file: 14.7 kB
data/2025/2504_07xxx/2504.07887/images/a72c91d0de5a9f70ced5473959299594386b6c407599d70301e637cd49c56984.jpg ADDED

Git LFS Details

  • SHA256: 59b49119dd270c0557ad2b966c94496660941556d70e34950c87915d3e56a720
  • Pointer size: 129 Bytes
  • Size of remote file: 8.82 kB
data/2025/2504_07xxx/2504.07887/images/b1f3d2e9b82681591729eb8ba7209f7d146dc6d8695ea599957ab02a4e083797.jpg ADDED

Git LFS Details

  • SHA256: 4595028e4e802194b43af4454dd25295ac45254acc5e2494ee9d418228821cdf
  • Pointer size: 130 Bytes
  • Size of remote file: 50.4 kB
data/2025/2504_07xxx/2504.07887/images/bf49c59770cc6bb235374b07ae27cf30ce2142f834bfe4869f4c2a3d810d7d52.jpg ADDED

Git LFS Details

  • SHA256: c6e19b402578f51dea3e7b17d28061c58a16c6b3773c34e13897701a34d9a9e9
  • Pointer size: 129 Bytes
  • Size of remote file: 9.39 kB
data/2025/2504_07xxx/2504.07887/images/c06308c4675fbc408080b83228343936c5e4efc7bba657b480fc655d855a97ec.jpg ADDED

Git LFS Details

  • SHA256: 5863864b02a7b71298101fae76288d78eb114cff825f3edf72aadeffa9b73044
  • Pointer size: 130 Bytes
  • Size of remote file: 22.5 kB
data/2025/2504_07xxx/2504.07887/images/c12a6ddea3560e5f0092437939ab12bb058f9869a2663c71509026fe32e5daa9.jpg ADDED

Git LFS Details

  • SHA256: f5e8f3c26049ad7e0759de065049e0f7b2a7d3f77925b1c849bbe150cc0c75c0
  • Pointer size: 130 Bytes
  • Size of remote file: 93.1 kB
data/2025/2504_07xxx/2504.07887/images/c140558bb60ff5e6cd66a968a6fc7c9d75b44a16ea6041681fd4a033ad13ca93.jpg ADDED

Git LFS Details

  • SHA256: 60f129a7abae954e899c6580321fa01302b8e504d2fd82b3d6aff3ab44c13b1f
  • Pointer size: 130 Bytes
  • Size of remote file: 12.5 kB
data/2025/2504_07xxx/2504.07887/images/c192ed2e03cf7fd358109d7f0149fed3cbc0d15322d539ec696eedad45e1a0c7.jpg ADDED

Git LFS Details

  • SHA256: 0b5ef0ce6b49de886a9ad9ca4814e39ffe16ad1432307c976532e14c632c80af
  • Pointer size: 130 Bytes
  • Size of remote file: 94.9 kB
data/2025/2504_07xxx/2504.07887/images/c457be4f8b8056df352ef6902fea71035cda8e7e043230d982019d5b0be57ee2.jpg ADDED

Git LFS Details

  • SHA256: bac9af33071ed0b282165cb779243ee3cf9469f6e286ab661e3aacb782b049df
  • Pointer size: 129 Bytes
  • Size of remote file: 7.5 kB
data/2025/2504_07xxx/2504.07887/images/c554cf3f3cf48e6dbcf54cbd9dda9207a53e537f46232f87a5d18208df859177.jpg ADDED

Git LFS Details

  • SHA256: 15f46b0ee85f77e77c3b15cb0c64469d438151688cc5698c760cb28badf8c86b
  • Pointer size: 130 Bytes
  • Size of remote file: 71.5 kB
data/2025/2504_07xxx/2504.07887/images/c96ccee4736bb221e49b1eee54bed3ff77b9c1020fe8ccb34683792009107f0c.jpg ADDED

Git LFS Details

  • SHA256: 0886374b2c38a7125d39def9962ad735337d9a314431e063bb639ed4d0127636
  • Pointer size: 130 Bytes
  • Size of remote file: 42.6 kB
data/2025/2504_07xxx/2504.07887/images/cdb74e46a552bd1ac1b1b9ac2dd40ac5816c3a5617533c9f33b5ac26784dae20.jpg ADDED

Git LFS Details

  • SHA256: d209003f4eed8425a6aebfd9d7e2543b113ea2c50c0b077160b60603882dd654
  • Pointer size: 130 Bytes
  • Size of remote file: 23.7 kB
data/2025/2504_07xxx/2504.07887/images/d144173aa8b9714540f4205b373471feeb264d509c964583f0ff4504bf8b2494.jpg ADDED

Git LFS Details

  • SHA256: 846af1c448cc4b332278f56784ed3efe43b1ec0157dffd1ca158aceeb03f88b6
  • Pointer size: 130 Bytes
  • Size of remote file: 20.7 kB
data/2025/2504_07xxx/2504.07887/images/d199d1341ed1a587b050aa0c34c7f83b1279e196fcd1d435109147207cbb77fd.jpg ADDED

Git LFS Details

  • SHA256: 25ed12c6b3214979c47ffc80b49686f4ce38d5e530a17d21f42231ba4d7b3e9e
  • Pointer size: 129 Bytes
  • Size of remote file: 5.97 kB
data/2025/2504_07xxx/2504.07887/images/d3a464269c90508559e9c76d43d9541091682a54eedc04e608d986593a1a5461.jpg ADDED

Git LFS Details

  • SHA256: d6e5b53e339ab0b353322e2613a77d5db6d2d35778b378790b03b08285782c22
  • Pointer size: 129 Bytes
  • Size of remote file: 7.56 kB
data/2025/2504_07xxx/2504.07887/images/e8629ad6784766aef1cba829366eb3d27d9632181585f089e877212a5e3fa45a.jpg ADDED

Git LFS Details

  • SHA256: 70f15900bd369834af04050382789cbccccbd913721dd654d6e94eff04ef0152
  • Pointer size: 130 Bytes
  • Size of remote file: 14.7 kB
data/2025/2504_07xxx/2504.07887/images/f52dc6278f8e95f4f356bc68263de8c7f8849ff76db79a2825b200c864e9f254.jpg ADDED

Git LFS Details

  • SHA256: c9f9ae1267dce5cd358c599eb9386fa2daed354b32b0d6de27543c49397e3cc3
  • Pointer size: 130 Bytes
  • Size of remote file: 53.7 kB
data/2025/2504_07xxx/2504.07887/images/f84ea84db7b67f64a03e0fbbcb66f774bec02d0357fd33d3865e57545f152f16.jpg ADDED

Git LFS Details

  • SHA256: 0a2a8f48539798359b6d8a4c9ac6748bbe63658e31e51d763672e56aba5a7a03
  • Pointer size: 129 Bytes
  • Size of remote file: 9.64 kB
data/2025/2504_07xxx/2504.07887/images/fadf854244734d90b5cf12f03d9409dc2711c8eccde80e1c1731ff48ff1ea2b9.jpg ADDED

Git LFS Details

  • SHA256: 281fb858f213a7b7b215d51a6484e869cd0ce6ec7f5bd4973d6adb79dc80d8e1
  • Pointer size: 129 Bytes
  • Size of remote file: 9.7 kB
data/2025/2504_07xxx/2504.07887/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2504_07xxx/2504.07891/154d7bd7-bf53-43dc-835d-060e982bbe89_content_list.json ADDED
@@ -0,0 +1,1264 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "SpecReason: Fast and Accurate Inference-Time Compute via Speculative Reasoning",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 207,
8
+ 122,
9
+ 789,
10
+ 174
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Rui Pan§ Yinwei Dai§ Zhihao Zhang† Gabriele Oliaro† Zhihao Jia† Ravi Netravali§",
17
+ "bbox": [
18
+ 285,
19
+ 223,
20
+ 715,
21
+ 253
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "$^{\\S}$ Princeton University $\\dagger$ Carnegie Mellon University {ruipan,yinweid}@princeton.edu,{zhihaoz3,goliaro}@cs.cmu.edu, zhihao@cmu.edu, rnetravali@cs.princeton.edu",
28
+ "bbox": [
29
+ 238,
30
+ 255,
31
+ 759,
32
+ 297
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "Abstract",
39
+ "text_level": 1,
40
+ "bbox": [
41
+ 459,
42
+ 333,
43
+ 537,
44
+ 349
45
+ ],
46
+ "page_idx": 0
47
+ },
48
+ {
49
+ "type": "text",
50
+ "text": "Recent advances in inference-time compute have significantly improved performance on complex tasks by generating long chains of thought (CoTs) using Large Reasoning Models (LRMs). However, this improved accuracy comes at the cost of high inference latency due to the length of generated reasoning sequences and the autoregressive nature of decoding. Our key insight in tackling these overheads is that LRM inference, and the reasoning that it embeds, is highly tolerant of approximations: complex tasks are typically broken down into simpler steps, each of which brings utility based on the semantic insight it provides for downstream steps rather than the exact tokens it generates. Accordingly, we introduce SpecReason, a system that automatically accelerates LRM inference by using a lightweight model to (speculatively) carry out simpler intermediate reasoning steps and reserving the costly base model only to assess (and potentially correct) the speculated outputs. Importantly, SpecReason's focus on exploiting the semantic flexibility of thinking tokens in preserving final-answer accuracy is complementary to prior speculation techniques, most notably speculative decoding, which demands token-level equivalence at each step. Across a variety of reasoning benchmarks, SpecReason achieves $1.4 - 3.0 \\times$ speedup over vanilla LRM inference while improving accuracy by $0.4 - 9.0\\%$ . Compared to speculative decoding without SpecReason, their combination yields an additional $8.8 - 58.0\\%$ latency reduction. We open-source SpecReason at https://github.com/ruipeterpan/specreason.",
51
+ "bbox": [
52
+ 228,
53
+ 364,
54
+ 767,
55
+ 643
56
+ ],
57
+ "page_idx": 0
58
+ },
59
+ {
60
+ "type": "text",
61
+ "text": "1 Introduction",
62
+ "text_level": 1,
63
+ "bbox": [
64
+ 171,
65
+ 667,
66
+ 313,
67
+ 684
68
+ ],
69
+ "page_idx": 0
70
+ },
71
+ {
72
+ "type": "text",
73
+ "text": "Inference-time compute has unlocked a new axis for scaling AI capabilities. Recent advancements in Large Reasoning Models (LRMs) such as OpenAI o1/o3 [Jaech et al., 2024, ope, 2025] and DeepSeek R1 [Guo et al., 2025] have demonstrated state-of-the-art performance across a wide range of complex tasks. Although these LRMs share the architectural backbones as traditional large language models (LLMs), their inference behavior differs significantly: LRMs first \"think\" by generating internal thinking tokens—tokens that decompose a task into a sequence of composable reasoning steps via a long chain-of-thought (CoT) [Wei et al., 2022] before producing the final tokens that summarize the reasoning process.",
74
+ "bbox": [
75
+ 169,
76
+ 699,
77
+ 826,
78
+ 811
79
+ ],
80
+ "page_idx": 0
81
+ },
82
+ {
83
+ "type": "text",
84
+ "text": "Despite their promise, LRMs incur substantial inference latency due to the length of the reasoning sequences they generate. This challenge is primarily driven by the autoregressive nature of LLMs, where decoding time scales linearly with sequence length. As a result, final output generation can routinely take minutes, if not hours, to answer a single query; such delays far exceed those of typical LLMs and are prohibitively slow for many interactive applications, ultimately degrading user experience [Fu et al., 2024b].",
85
+ "bbox": [
86
+ 169,
87
+ 816,
88
+ 826,
89
+ 902
90
+ ],
91
+ "page_idx": 0
92
+ },
93
+ {
94
+ "type": "aside_text",
95
+ "text": "arXiv:2504.07891v2 [cs.LG] 16 May 2025",
96
+ "bbox": [
97
+ 22,
98
+ 255,
99
+ 60,
100
+ 708
101
+ ],
102
+ "page_idx": 0
103
+ },
104
+ {
105
+ "type": "footer",
106
+ "text": "Preprint. Under review.",
107
+ "bbox": [
108
+ 171,
109
+ 922,
110
+ 315,
111
+ 936
112
+ ],
113
+ "page_idx": 0
114
+ },
115
+ {
116
+ "type": "text",
117
+ "text": "Question: Every morning Aya goes for a \\(9\\)-kilometer-long walk and stops at a coffee shop afterwards. When she walks at a constant speed of \\(\\$ 8\\)/kilometers per hour, the walk takes her 4 hours, including \\(\\$ 8\\)minutes spent in the coffee shop. When she walks \\(\\$ 5+\\)2\\(kilometers per hour, the walk takes her 2 hours and 24 minutes, including \\(\\$ t\\)\\(8 minutes spent in the coffee shop. Suppose Aya walks at \\(\\$ s+\\) \\(\\backslash\\)frac{1}{2}\\)kilometers per hour. Find the number of minutes the walk takes her, including the \\(\\$ t\\)\\(8 minutes spent in the coffee shop.",
118
+ "bbox": [
119
+ 181,
120
+ 90,
121
+ 808,
122
+ 132
123
+ ],
124
+ "page_idx": 1
125
+ },
126
+ {
127
+ "type": "image",
128
+ "img_path": "images/deea8423b9d8d8040b1873ccbe564eeda45c43155574c10044106489d1999587.jpg",
129
+ "image_caption": [
130
+ "Figure 1: SpecReason leverages a smaller reasoning model to speculate individual reasoning steps, deferring to the base model only for assessment (and optionally as a fallback), enabling faster yet accurate reasoning. For illustration, we show a math question as an example; our evaluation includes more general reasoning workloads."
131
+ ],
132
+ "image_footnote": [],
133
+ "bbox": [
134
+ 178,
135
+ 137,
136
+ 821,
137
+ 299
138
+ ],
139
+ "page_idx": 1
140
+ },
141
+ {
142
+ "type": "text",
143
+ "text": "Our approach to tackling reasoning delays—without compromising accuracy—is rooted in two fundamental properties of LRMs: (1) LRMs tackle difficult tasks by generating long CoTs that decompose them into many simpler, sequential steps. For example, in mathematical problem solving, a few key reasoning steps require complex long-term planning and have a major influence on downstream reasoning, while most subsequent steps simply execute the plan through straightforward calculations or case analyses (Fig. 1); (2) The utility of an individual reasoning step hinges less on the exact wording of the thinking tokens but more on the semantic insight it provides. That is, as long as a step contributes meaningfully to advancing the CoT, it remains effective—even if phrased imprecisely or differently (Fig. 2). Moreover, LRMs possess self-reflection capabilities that enable them to revise or correct occasional missteps from earlier steps.",
144
+ "bbox": [
145
+ 169,
146
+ 393,
147
+ 826,
148
+ 532
149
+ ],
150
+ "page_idx": 1
151
+ },
152
+ {
153
+ "type": "text",
154
+ "text": "Taken together, these properties make the decoding of thinking tokens—the dominant source of inference latency in LRMs—inherently more approximation tolerant than typical LLM decoding. A large fraction of intermediate reasoning steps can be effectively handled by lightweight reasoning models, which both align with the nature of these steps and can tolerate minor inaccuracies. As shown in Fig. 3, this opens the door to significantly faster inference without sacrificing output quality.",
155
+ "bbox": [
156
+ 169,
157
+ 539,
158
+ 826,
159
+ 609
160
+ ],
161
+ "page_idx": 1
162
+ },
163
+ {
164
+ "type": "text",
165
+ "text": "Building on these insights, we propose SpecReason, a system for accelerating LRM inference by selectively offloading easier intermediate steps to be speculated by a smaller model without compromising final output accuracy. SpecReason employs a lightweight reasoning model to generate individual reasoning steps, while reserving the slower but more capable base model to efficiently verify these speculated steps (§4.1) and guide the reasoning process along the correct trajectory (Fig. 1). Consistent with prior findings [Song et al., 2025], we observe that base models can be prompted to act as critic models—assessing the utility of intermediate steps and accepting or rejecting them as needed (Fig. 7).",
166
+ "bbox": [
167
+ 169,
168
+ 614,
169
+ 826,
170
+ 727
171
+ ],
172
+ "page_idx": 1
173
+ },
174
+ {
175
+ "type": "text",
176
+ "text": "Speculative reasoning vs. speculative decoding. While SpecReason is conceptually related to speculative decoding [Leviathan et al., 2023], which accelerates LLM inference by using a smaller draft model to predict future tokens, there are key distinctions between the two. Most notably, speculative decoding is an exact optimization: it relies on token-level equivalence between the small and base models, i.e., focusing on typical LLM serving where all generated tokens are part of the final model output being assessed. In contrast, SpecReason explicitly leverages the approximation tolerance inherent in reasoning: it targets thinking tokens—intermediate steps in the reasoning process—where semantic alignment, rather than token-level equivalence, is sufficient. This relaxation enables substantial latency savings during LRM inference, as semantically similar intermediate steps (Fig. 2) are often adequate to preserve end-task accuracy (Fig. 3). In many cases, SpecReason even improves final accuracy over the base model by generating fewer unnecessary tokens (Fig. 4). To further address the high inference cost of LRMs, SpecReason also exposes a user-configurable knob that allows trading off accuracy for latency by adjusting the tolerance level",
177
+ "bbox": [
178
+ 169,
179
+ 731,
180
+ 828,
181
+ 912
182
+ ],
183
+ "page_idx": 1
184
+ },
185
+ {
186
+ "type": "page_number",
187
+ "text": "2",
188
+ "bbox": [
189
+ 493,
190
+ 935,
191
+ 504,
192
+ 946
193
+ ],
194
+ "page_idx": 1
195
+ },
196
+ {
197
+ "type": "image",
198
+ "img_path": "images/ab95ef55f5f4105d8b27dead040c57b87a55cd1d3a072a4b863a5c031d3b4f2c.jpg",
199
+ "image_caption": [
200
+ "Figure 2: The spectrum of approximations of one example reasoning step (equation 1 in Fig. 1). SpecReason can control the exactness of reasoning approximations by adjusting its acceptance threshold to navigate through the accuracy-latency tradeoff space ( $\\S 5.3$ )."
201
+ ],
202
+ "image_footnote": [],
203
+ "bbox": [
204
+ 205,
205
+ 92,
206
+ 790,
207
+ 154
208
+ ],
209
+ "page_idx": 2
210
+ },
211
+ {
212
+ "type": "text",
213
+ "text": "for speculative approximations. Finally and most importantly, because speculative reasoning and speculative decoding operate at different levels, we show that they are complementary techniques (§4.2), and when combined in a hierarchical speculation framework, achieve even greater reductions in inference latency.",
214
+ "bbox": [
215
+ 169,
216
+ 234,
217
+ 823,
218
+ 290
219
+ ],
220
+ "page_idx": 2
221
+ },
222
+ {
223
+ "type": "text",
224
+ "text": "We evaluate SpecReason across a wide range of reasoning workloads spanning tasks of varying complexity [aim, 2025, Hendrycks et al., 2021, Rein et al., 2024]. Overall, SpecReason reduces end-to-end inference latency by $1.4 - 3.0 \\times$ compared to vanilla LRM inference while improving accuracy by $0.4 - 9.0\\%$ . Moreover, SpecReason can be combined with speculative decoding to provide an additional $8.8 - 58.0\\%$ improvement over speculative decoding alone.",
225
+ "bbox": [
226
+ 169,
227
+ 296,
228
+ 826,
229
+ 367
230
+ ],
231
+ "page_idx": 2
232
+ },
233
+ {
234
+ "type": "text",
235
+ "text": "2 Background",
236
+ "text_level": 1,
237
+ "bbox": [
238
+ 171,
239
+ 386,
240
+ 308,
241
+ 402
242
+ ],
243
+ "page_idx": 2
244
+ },
245
+ {
246
+ "type": "text",
247
+ "text": "Inference-time scaling. LRMs introduce a structured problem-solving approach that breaks down complex problems into multiple simpler reasoning steps, commonly referred to as a long chain of thought (CoT) [Wei et al., 2022]. This enables the model to generate intermediate reasoning steps before progressing further, reflect, and backtrack to correct errors if needed. LRMs that output long CoTs have been a popular approach to scale inference-time compute [Guo et al., 2025, Jaech et al., 2024, ope, 2025], and there also exist other schemes like Tree of Thoughts [Yao et al., 2023], process-reward-model-guided tree search [Lightman et al., 2023, Qi et al., 2024, Guan et al., 2025], and repeated sampling for scaling inference-time compute [Brown et al., 2024].",
248
+ "bbox": [
249
+ 169,
250
+ 417,
251
+ 826,
252
+ 529
253
+ ],
254
+ "page_idx": 2
255
+ },
256
+ {
257
+ "type": "text",
258
+ "text": "Speculative decoding. Speculation has long been a classic concept in the literature of computer architecture [Burton, 1985]. Due to the memory-bound nature of LLM decoding, recent work has also leveraged the technique of speculation to accelerate the decoding phase [Stern et al., 2018, Leviathan et al., 2023, Yan et al., 2024] of LLM inference. The speculative decoding process alternates between speculation and verification steps to ensure correctness while achieving speed-ups. The speculation phase usually consists of either a standalone draft model [Leviathan et al., 2023, Miao et al., 2024], a trainable module on top of the base model [Cai et al., 2024, Li et al., 2025], a tree-based token cache [Oliaro et al., 2024, Luo et al., 2024, Zhao et al., 2024], an n-gram lookup table [Fu et al., 2024a], or a retrieval-based data store [He et al., 2023] to make efficient but less accurate speculations. The verification process, on the other hand, is a base model chunked-prefill over the speculation results, which usually consists of either a single sequence of tokens as in Leviathan et al. [2023] or tree-like structures to further boost the accuracy of speculation [Miao et al., 2024, Cai et al., 2024, Li et al., 2025, Chen et al., 2024]. The verification process then accepts the longest matched sequences on the token level from the speculation results and repeats the process. As a result, the speculation length is usually conservative to maintain an optimal trade-off between the speculation overhead and accuracy.",
259
+ "bbox": [
260
+ 169,
261
+ 534,
262
+ 828,
263
+ 755
264
+ ],
265
+ "page_idx": 2
266
+ },
267
+ {
268
+ "type": "text",
269
+ "text": "Existing approaches for reducing latency. Sky-T1-Flash Team [2025] reduces unnecessary thinking tokens by fine-tuning models to curb overthinking, thereby reducing the length of reasoning chains and, consequently, latency. Dynasor-CoT Fu et al. [2024b, 2025] takes a different approach by probing intermediate model confidence and terminating the reasoning process early when the model exhibits sufficient confidence in its current output.",
270
+ "bbox": [
271
+ 169,
272
+ 762,
273
+ 825,
274
+ 832
275
+ ],
276
+ "page_idx": 2
277
+ },
278
+ {
279
+ "type": "text",
280
+ "text": "3 Motivation",
281
+ "text_level": 1,
282
+ "bbox": [
283
+ 171,
284
+ 852,
285
+ 299,
286
+ 867
287
+ ],
288
+ "page_idx": 2
289
+ },
290
+ {
291
+ "type": "text",
292
+ "text": "In this work, we show that reasoning workloads executed by LRMs exhibit unique opportunities for latency reduction due to their inherent tolerance to approximation—setting them apart from",
293
+ "bbox": [
294
+ 169,
295
+ 883,
296
+ 823,
297
+ 912
298
+ ],
299
+ "page_idx": 2
300
+ },
301
+ {
302
+ "type": "page_number",
303
+ "text": "3",
304
+ "bbox": [
305
+ 493,
306
+ 935,
307
+ 503,
308
+ 946
309
+ ],
310
+ "page_idx": 2
311
+ },
312
+ {
313
+ "type": "text",
314
+ "text": "traditional generation tasks in LLMs. We illustrate these properties using a representative example from the AIME dataset, selected for its clarity and ease of exposition.",
315
+ "bbox": [
316
+ 169,
317
+ 90,
318
+ 823,
319
+ 119
320
+ ],
321
+ "page_idx": 3
322
+ },
323
+ {
324
+ "type": "text",
325
+ "text": "Intermediate steps are easier than end-to-end reasoning. A key observation in LRM behavior is that reasoning difficulty is not uniform across the steps in a long chain-of-thought (CoT). As shown in Fig. 1, while the overall task might be too challenging for a small model to solve end-to-end, only a few critical steps—such as problem analysis, decomposition through formulations or case analyses, and high-level planning—are critical to the overall reasoning progress. In contrast, many other steps are significantly easier.",
326
+ "bbox": [
327
+ 169,
328
+ 126,
329
+ 826,
330
+ 209
331
+ ],
332
+ "page_idx": 3
333
+ },
334
+ {
335
+ "type": "text",
336
+ "text": "This behavior is intentional by design: LRMs are often trained with reinforcement learning to generate CoTs that decompose complex problems into sequences of simpler, more tractable reasoning steps. These intermediate steps often include routine reasoning such as arithmetic calculations, case enumeration, or basic logical deductions—operators that are much easier to decode than synthesizing a full solution directly. This heterogeneity in step difficulty and importance creates an opportunity for lightweight models to handle a substantial portion of the reasoning process both efficiently and accurately.",
337
+ "bbox": [
338
+ 169,
339
+ 215,
340
+ 826,
341
+ 314
342
+ ],
343
+ "page_idx": 3
344
+ },
345
+ {
346
+ "type": "text",
347
+ "text": "Reasoning progress depends on insights, not exact tokens. Another key takeaway from our work is that the utility of a reasoning step lies in the semantic contribution it makes to the overall reasoning process, rather than the precise tokens it uses. Unlike tasks like translation in traditional LLM inference, where fidelity to exact combinations of tokens matters more, reasoning CoTs within LRM's thinking tokens care more about the information that advances the reasoning chain. As illustrated in Fig. 2, a spectrum of valid phrasings often exists for a given step: semantically equivalent or similar expressions can convey the same insight and lead to the same downstream reasoning trajectory. This semantic flexibility is a key enabler for approximation-tolerant inference.",
348
+ "bbox": [
349
+ 169,
350
+ 319,
351
+ 826,
352
+ 431
353
+ ],
354
+ "page_idx": 3
355
+ },
356
+ {
357
+ "type": "text",
358
+ "text": "Occasional mistakes can be corrected via self-reflection. LRMs exhibit strong self-reflection capabilities, enabling them to recover from earlier reasoning errors. Even when an earlier step contains a factual or logical mistake, the model often revises its trajectory in subsequent steps, marked by tokens like \"Wait\" or \"Hmm\". Moreover, unlike LLM inference where all output tokens contribute to the final answer, in LRM inference, only the tokens generated after the thinking tokens determine the final outcome. Therefore, LRM inference can tolerate occasional mistakes during the reasoning phase, as the model can often identify and correct these mistakes during self-reflection. This inherent fault tolerance further underscores the viability and effectiveness of approximation-based acceleration.",
359
+ "bbox": [
360
+ 169,
361
+ 436,
362
+ 826,
363
+ 561
364
+ ],
365
+ "page_idx": 3
366
+ },
367
+ {
368
+ "type": "text",
369
+ "text": "In summary, compared to traditional LLM inference, LRM inference is inherently more tolerant of approximations that do not require token-level equivalence as long as the overall reasoning trajectory is preserved. This property is not limited to a single, linear CoT; rather, it extends naturally to more general inference-time compute scaling paradigms such as tree-based search strategies and other structured reasoning approaches.",
370
+ "bbox": [
371
+ 169,
372
+ 566,
373
+ 826,
374
+ 638
375
+ ],
376
+ "page_idx": 3
377
+ },
378
+ {
379
+ "type": "text",
380
+ "text": "4 Method",
381
+ "text_level": 1,
382
+ "bbox": [
383
+ 171,
384
+ 655,
385
+ 272,
386
+ 671
387
+ ],
388
+ "page_idx": 3
389
+ },
390
+ {
391
+ "type": "text",
392
+ "text": "4.1 Speculative Reasoning",
393
+ "text_level": 1,
394
+ "bbox": [
395
+ 171,
396
+ 685,
397
+ 372,
398
+ 700
399
+ ],
400
+ "page_idx": 3
401
+ },
402
+ {
403
+ "type": "text",
404
+ "text": "Due to its reliance on autoregressive decoding, LRM inference incurs significantly higher latency than typical LLMs—often to the point of being prohibitively slow for interactive applications and degrading user experience [Fu et al., 2025]. Existing approaches for latency reduction include using a distilled version of the base model [Guo et al., 2025], limiting the number of thinking tokens via a predefined token budget, or disabling the reasoning process altogether by omitting the thinking tokens (<think> and </think>) during generation [qwe, 2025]. However, these approaches impose a harsh trade-off between accuracy and latency: they either limit the model's capacity to reason or apply a lower-quality model uniformly across all reasoning steps. In contrast, SpecReason takes a more fine-grained and adaptive approach. Instead of explicitly restricting output length, it selectively offloads only the easier reasoning steps to a lightweight model, preserving overall reasoning quality while substantially reducing inference latency.",
405
+ "bbox": [
406
+ 169,
407
+ 710,
408
+ 826,
409
+ 864
410
+ ],
411
+ "page_idx": 3
412
+ },
413
+ {
414
+ "type": "text",
415
+ "text": "The approximation-tolerant nature of LRM reasoning enables a new form of speculative execution: tentatively carrying out reasoning steps using a lightweight model, assessing their utility with a stronger base model, and selectively accepting them. SpecReason leverages this flexibility to reduce",
416
+ "bbox": [
417
+ 169,
418
+ 869,
419
+ 826,
420
+ 912
421
+ ],
422
+ "page_idx": 3
423
+ },
424
+ {
425
+ "type": "page_number",
426
+ "text": "4",
427
+ "bbox": [
428
+ 493,
429
+ 935,
430
+ 504,
431
+ 946
432
+ ],
433
+ "page_idx": 3
434
+ },
435
+ {
436
+ "type": "text",
437
+ "text": "decoding latency while preserving output quality. To achieve this goal, SpecReason offloads easier or less critical reasoning steps—defined as semantically self-contained units such as complete sentences or logical steps—to a smaller, faster speculator model. Each step is decoded in two stages: (1) the lightweight speculator proposes the next reasoning step based on the current context, and (2) the base model evaluates the proposed step for semantic utility. If the step is accepted, SpecReason proceeds to the next step; otherwise, SpecReason falls back to the base model to regenerate the step. While our implementation uses a simple static-threshold mechanism for verification, the framework supports richer, customizable decision strategies. We outline key design principles below.",
438
+ "bbox": [
439
+ 169,
440
+ 90,
441
+ 823,
442
+ 203
443
+ ],
444
+ "page_idx": 4
445
+ },
446
+ {
447
+ "type": "text",
448
+ "text": "Navigating the Pareto frontier of the latency-accuracy tradeoff. SpecReason expands the Pareto frontier of the latency-accuracy tradeoff by exposing fine-grained control knobs to navigate through this space. The key knob SpecReason employs is the acceptance threshold: after each speculated reasoning step, the base model is prompted to generate a single-token utility score (e.g., an integer from 0 to 9) indicating the quality of the step. If the utility score is above a static acceptance threshold (e.g., score $\\geq 7$ ), the speculated reasoning step is accepted; otherwise, it is discarded and regenerated by the base model.",
449
+ "bbox": [
450
+ 169,
451
+ 208,
452
+ 823,
453
+ 306
454
+ ],
455
+ "page_idx": 4
456
+ },
457
+ {
458
+ "type": "text",
459
+ "text": "Adjusting this threshold allows users to control the strictness of speculation (Fig. 5): a higher threshold requires speculated steps to be closer to token-level equivalence on the equivalence spectrum (Fig. 2), improving accuracy but reducing the acceptance rate and thereby increasing latency. Conversely, a lower threshold increases speculation efficiency at the cost of potential accuracy degradation.",
460
+ "bbox": [
461
+ 169,
462
+ 311,
463
+ 823,
464
+ 368
465
+ ],
466
+ "page_idx": 4
467
+ },
468
+ {
469
+ "type": "text",
470
+ "text": "An additional knob involves forcing the first $n$ reasoning steps to be decoded by the base model. Since LRMs often use the initial steps to analyze the problem and formulate a high-level plan, assigning these initial steps to the base model can steer the overall reasoning trajectory toward higher quality. We show in Fig. 6 that this knob also allows SpecReason to manage latency-accuracy tradeoff, though with less impact than the acceptance threshold knob.",
471
+ "bbox": [
472
+ 169,
473
+ 375,
474
+ 823,
475
+ 444
476
+ ],
477
+ "page_idx": 4
478
+ },
479
+ {
480
+ "type": "text",
481
+ "text": "While our current implementation uses a simple, discrete threshold-based scoring scheme—offering only a coarse-grained configuration space—it establishes a lower bound on verification quality. Future work can explore more sophisticated strategies, such as logprob-based confidence estimates or dynamic thresholds, to enable finer-grained tradeoffs without incurring additional runtime cost, and may further improve overall performance.",
482
+ "bbox": [
483
+ 169,
484
+ 450,
485
+ 823,
486
+ 518
487
+ ],
488
+ "page_idx": 4
489
+ },
490
+ {
491
+ "type": "text",
492
+ "text": "Efficient verification. Because each step requires verification by the base model, it's crucial to keep verification overhead low to avoid compounding latency. Instead of autoregressively decoding or reranking multiple candidate steps, SpecReason evaluates each speculated step in a single `prefill-only` pass of the base model. The verification prompt is templated to reuse most of the CoT prefix, so each verification requires prefilling only $\\sim 70$ new tokens. Since short-prefix forward passes are memory-bound, the overhead is comparable to decoding just 1-2 tokens, making verification highly efficient in practice.",
493
+ "bbox": [
494
+ 169,
495
+ 525,
496
+ 823,
497
+ 625
498
+ ],
499
+ "page_idx": 4
500
+ },
501
+ {
502
+ "type": "text",
503
+ "text": "Implementation details. Since the small model is lightweight, we colocate both the small and base models on the same GPU. The memory reserved for Key-Value caches [Kwon et al., 2023] is statically partitioned between the two models. They do not share any internal model states—only the token IDs of the generated reasoning steps are managed and shared by SpecReason. If a speculative step is rejected, the corresponding KV cache entries are discarded.",
504
+ "bbox": [
505
+ 169,
506
+ 628,
507
+ 823,
508
+ 699
509
+ ],
510
+ "page_idx": 4
511
+ },
512
+ {
513
+ "type": "text",
514
+ "text": "Inference is performed sequentially: the small and base models take turns, avoiding kernel-level interference. In future work, we plan to explore pipelining to overlap the small model's decoding with the base model's inference. While this may introduce mild resource contention, it could further reduce end-to-end latency.",
515
+ "bbox": [
516
+ 169,
517
+ 705,
518
+ 823,
519
+ 761
520
+ ],
521
+ "page_idx": 4
522
+ },
523
+ {
524
+ "type": "text",
525
+ "text": "4.2 Hierarchical Speculation across Semantic Similarity and Token Equivalence",
526
+ "text_level": 1,
527
+ "bbox": [
528
+ 169,
529
+ 785,
530
+ 740,
531
+ 801
532
+ ],
533
+ "page_idx": 4
534
+ },
535
+ {
536
+ "type": "text",
537
+ "text": "At a high level, SpecReason's speculative reasoning resembles the philosophy behind traditional speculative decoding, but differs in two important ways. First, speculative decoding guarantees token-level equivalence between draft and verified outputs, making it a form of exact acceleration. In contrast, SpecReason targets semantic-level similarity, accepting steps that carry the same insight even if phrased differently, and exposes knobs to control the exactness of reasoning approximations. Second, speculative decoding is typically applied to output generation tasks (e.g., text continuation or translation), where the fidelity of each token matters. SpecReason, on the other hand, is designed",
538
+ "bbox": [
539
+ 169,
540
+ 814,
541
+ 823,
542
+ 912
543
+ ],
544
+ "page_idx": 4
545
+ },
546
+ {
547
+ "type": "page_number",
548
+ "text": "5",
549
+ "bbox": [
550
+ 493,
551
+ 935,
552
+ 503,
553
+ 946
554
+ ],
555
+ "page_idx": 4
556
+ },
557
+ {
558
+ "type": "text",
559
+ "text": "specifically for internal thinking tokens in reasoning tasks, where intermediate steps are approximate and interchangeable as long as they preserve the logical progression of thought.",
560
+ "bbox": [
561
+ 169,
562
+ 90,
563
+ 823,
564
+ 119
565
+ ],
566
+ "page_idx": 5
567
+ },
568
+ {
569
+ "type": "text",
570
+ "text": "Further, because SpecReason and speculative decoding operate at different levels (semantic-level similarity vs. token-level equivalence), these two approaches are complementary and can be combined into a unified, hierarchical system - SpecReason+Decode first applies step-level speculative reasoning to draft and verify reasoning steps. If a step is rejected and regenerated by the base model, standard token-level speculative decoding can be applied during the base model regeneration to further accelerate decoding.",
571
+ "bbox": [
572
+ 169,
573
+ 126,
574
+ 826,
575
+ 209
576
+ ],
577
+ "page_idx": 5
578
+ },
579
+ {
580
+ "type": "text",
581
+ "text": "5 Evaluation",
582
+ "text_level": 1,
583
+ "bbox": [
584
+ 171,
585
+ 250,
586
+ 297,
587
+ 266
588
+ ],
589
+ "page_idx": 5
590
+ },
591
+ {
592
+ "type": "text",
593
+ "text": "The overview of our evaluation results includes:",
594
+ "bbox": [
595
+ 171,
596
+ 294,
597
+ 483,
598
+ 308
599
+ ],
600
+ "page_idx": 5
601
+ },
602
+ {
603
+ "type": "list",
604
+ "sub_type": "text",
605
+ "list_items": [
606
+ "- Reducing end-to-end latency. Because many intermediate steps are easier than end-to-end reasoning, many (up to $80\\%$ ) of the speculated steps are accepted. SpecReason achieves a $1.4 - 3.0 \\times$ speedup over vanilla LRM inference. Additionally, when combined with speculative decoding, SpecReason further reduces latency by $8.8 - 58.0\\%$ over speculative decoding alone, highlighting the complementary nature of these optimizations.",
607
+ "- Improving token-budget-aware accuracy. Beyond latency reduction, SpecReason also improves accuracy over the base model by $0.4 - 9.0\\%$ under the same token budget. We empirically find that small, lightweight models typically have shorter output sequence lengths – meaning, they need fewer thinking tokens before deriving an answer. Thus, by accepting many of the small model's speculated reasoning steps, SpecReason reduces the token consumption compared to the base model's vanilla inference. When the token budget is low – a common setup to curb inference cost and latency – SpecReason helps improve accuracy as the base model would need more tokens to get to an answer (Fig. 4)."
608
+ ],
609
+ "bbox": [
610
+ 171,
611
+ 318,
612
+ 823,
613
+ 501
614
+ ],
615
+ "page_idx": 5
616
+ },
617
+ {
618
+ "type": "text",
619
+ "text": "5.1 Setup",
620
+ "text_level": 1,
621
+ "bbox": [
622
+ 171,
623
+ 539,
624
+ 253,
625
+ 554
626
+ ],
627
+ "page_idx": 5
628
+ },
629
+ {
630
+ "type": "text",
631
+ "text": "Models. In our main results, we use two base models: QwQ-32B [qwq, 2025] and Skywork-OR1-Preview-32B [sky, 2025]. We also use two different small models for speculation: DeepSeek-R1-1.5B [Guo et al., 2025] and Zyphra's ZR1-1.5B [zyp, 2025] - both of which are based on Qwen-2.5 [Yang et al., 2024] and embed the capability of reasoning with long CoTs - and evaluate all four different model combinations. We evaluate an additional base model with a different size and architecture, R1-70B [Guo et al., 2025], a distilled version of DeepSeek-R1 onto Llama3.3-70B [Grattafiori et al., 2024], in §A.1.",
632
+ "bbox": [
633
+ 169,
634
+ 573,
635
+ 826,
636
+ 671
637
+ ],
638
+ "page_idx": 5
639
+ },
640
+ {
641
+ "type": "text",
642
+ "text": "Datasets. We evaluate SpecReason on three diverse reasoning benchmarks: AIME [aim, 2025] for high-school competition-level mathematical problems, MATH500 [Hendrycks et al., 2021] for high-school competition-level mathematical problems sampled from AMC 10, AMC 12, and AIME, and GPQA Diamond [Rein et al., 2024] for graduate-level questions in general domains like biology, physics, and chemistry. The accuracy metric we evaluate on is $\\text{pass} @ 1$ . Similar to prior work [Guo et al., 2025], we set $k = 16$ when calculating $\\text{pass} @ 1$ - i.e., we generate 16 responses with temperature $= 0.6$ for every query and calculate the average accuracy - and set the token budget to be 8192 tokens to ensure an apples-to-apples comparison between baselines.",
643
+ "bbox": [
644
+ 169,
645
+ 676,
646
+ 823,
647
+ 787
648
+ ],
649
+ "page_idx": 5
650
+ },
651
+ {
652
+ "type": "text",
653
+ "text": "Baselines. We run vanilla inference using the small and base models as the latency and accuracy baseline, respectively. Aside from SpecReason, we also run speculative decoding (\"SpecDecode\") with the smaller model as the draft model, speculating five tokens at a time. To demonstrate SpecReason's compatibility with speculative decoding, we also run a \"SpecReason+Decode\" baseline that employs the hierarchical speculation described in §4.2.",
654
+ "bbox": [
655
+ 169,
656
+ 794,
657
+ 823,
658
+ 863
659
+ ],
660
+ "page_idx": 5
661
+ },
662
+ {
663
+ "type": "text",
664
+ "text": "Hardware. We run our evaluations on two NVIDIA A6000-48GB GPUs. We use vLLM Kwon et al. [2023] 0.8.2 as the underlying inference engine and enable prefix caching. Both models are served with a tensor parallelism degree of two.",
665
+ "bbox": [
666
+ 169,
667
+ 869,
668
+ 826,
669
+ 912
670
+ ],
671
+ "page_idx": 5
672
+ },
673
+ {
674
+ "type": "page_number",
675
+ "text": "6",
676
+ "bbox": [
677
+ 493,
678
+ 936,
679
+ 504,
680
+ 946
681
+ ],
682
+ "page_idx": 5
683
+ },
684
+ {
685
+ "type": "image",
686
+ "img_path": "images/42ce2ba3b0ccdf8bdad6a6cfefe9d5472776dce81650da0d29af9f4a86ad8e5e.jpg",
687
+ "image_caption": [
688
+ "(a) QwQ-32B + R1-1.5B"
689
+ ],
690
+ "image_footnote": [],
691
+ "bbox": [
692
+ 174,
693
+ 92,
694
+ 816,
695
+ 200
696
+ ],
697
+ "page_idx": 6
698
+ },
699
+ {
700
+ "type": "image",
701
+ "img_path": "images/5ea80fa982e4974a46560c29d455b93016a34533133d72786c72477dd0623097.jpg",
702
+ "image_caption": [
703
+ "(b) QwQ-32B + Zyphra-1.5B"
704
+ ],
705
+ "image_footnote": [],
706
+ "bbox": [
707
+ 173,
708
+ 218,
709
+ 815,
710
+ 315
711
+ ],
712
+ "page_idx": 6
713
+ },
714
+ {
715
+ "type": "image",
716
+ "img_path": "images/4a97bf8307eff9b736a6ffa0a070722b00a5cfe583e3b3882d71b7c67a78ede0.jpg",
717
+ "image_caption": [
718
+ "(c) Skywork-Preview-32B + R1-1.5B"
719
+ ],
720
+ "image_footnote": [],
721
+ "bbox": [
722
+ 173,
723
+ 333,
724
+ 815,
725
+ 429
726
+ ],
727
+ "page_idx": 6
728
+ },
729
+ {
730
+ "type": "image",
731
+ "img_path": "images/d04850115bce9d3521fc151f7caa3829edcaa4826b3c059f93200a7094ed1cc9.jpg",
732
+ "image_caption": [
733
+ "(d) Skywork-Preview-32B + Zyphra-1.5B",
734
+ "Figure 3: Comparison of the accuracy and latency of different schemes on different model combinations. SpecReason significantly reduces latency while improving accuracy over vanilla inference. When combined with speculative decoding, SpecReason outperforms speculative decoding in both latency and accuracy on all datasets and model combinations."
735
+ ],
736
+ "image_footnote": [],
737
+ "bbox": [
738
+ 173,
739
+ 446,
740
+ 815,
741
+ 545
742
+ ],
743
+ "page_idx": 6
744
+ },
745
+ {
746
+ "type": "text",
747
+ "text": "5.2 Main Results",
748
+ "text_level": 1,
749
+ "bbox": [
750
+ 171,
751
+ 638,
752
+ 305,
753
+ 652
754
+ ],
755
+ "page_idx": 6
756
+ },
757
+ {
758
+ "type": "text",
759
+ "text": "We compare SpecReason against baseline methods in Fig. 3. Across the four model combinations, SpecReason achieves a $1.5 \\times -2.5 \\times$ , $1.6 \\times -3.0 \\times$ , $1.4 \\times -2.5 \\times$ , $1.7 \\times -2.4 \\times$ reduction in latency, respectively, compared to vanilla inference with the base model.",
760
+ "bbox": [
761
+ 169,
762
+ 669,
763
+ 826,
764
+ 712
765
+ ],
766
+ "page_idx": 6
767
+ },
768
+ {
769
+ "type": "text",
770
+ "text": "Accuracy improvement. Alongside these efficiency gains, SpecReason also yields modest accuracy improvements of $1.3\\% - 3.6\\%$ , $4.0\\% - 9.0\\%$ , $0.4\\% - 1.7\\%$ , and $1.4\\% - 5.0\\%$ compared to the base model. The key reason behind this accuracy improvement is the reduction in token consumption required for reasoning. In Fig. 4, we focus on the model combination with the highest overall accuracy improvement, QwQ-32B + Zyphra-1.5B, and compare the average number of thinking tokens needed to derive an answer between the base model, the small model, and SpecReason. As seen in Fig. 4a, the small model is generally less verbose than the base model, and because SpecReason adopts many speculated steps from the small model, its token consumption is also reduced by $1.2 \\times -2.0 \\times$ . We also focus on the AIME dataset and vary the token budget to study its effect on the difference in accuracy between SpecReason and the base model in Fig. 4b. The effect of token reduction on accuracy is the most significant for tighter output token budgets (16.2% at 4096 tokens) but shrinks as the base model is allowed to generate more thinking tokens (4.7% at 8192 tokens). We also attribute these accuracy gains to SpecReason's explicit judgment and scoring mechanism at each reasoning step, which augments the model's internal self-reflection with more structured assessment.",
771
+ "bbox": [
772
+ 169,
773
+ 717,
774
+ 826,
775
+ 912
776
+ ],
777
+ "page_idx": 6
778
+ },
779
+ {
780
+ "type": "page_number",
781
+ "text": "7",
782
+ "bbox": [
783
+ 493,
784
+ 935,
785
+ 504,
786
+ 946
787
+ ],
788
+ "page_idx": 6
789
+ },
790
+ {
791
+ "type": "image",
792
+ "img_path": "images/e4972be21ed40e3644fca3eea2b8c6276d3864bdde5164f0d33d6a1a456a2f52.jpg",
793
+ "image_caption": [],
794
+ "image_footnote": [
795
+ "(a) Output length comparison. SpecReason reduces the token consumption needed to answer queries by adopting speculated steps from small models that are less verbose."
796
+ ],
797
+ "bbox": [
798
+ 176,
799
+ 94,
800
+ 488,
801
+ 239
802
+ ],
803
+ "page_idx": 7
804
+ },
805
+ {
806
+ "type": "image",
807
+ "img_path": "images/75d22779f905d0dee6f72f7f10e4e971383aff1242ad17644db3915a81a9543e.jpg",
808
+ "image_caption": [],
809
+ "image_footnote": [
810
+ "(b) [AIME] Accuracy gap under different token budgets."
811
+ ],
812
+ "bbox": [
813
+ 509,
814
+ 94,
815
+ 821,
816
+ 238
817
+ ],
818
+ "page_idx": 7
819
+ },
820
+ {
821
+ "type": "image",
822
+ "img_path": "images/e73d3d5a62360175e128de98cef9f1262dba43316d7a9da9aa118db35ee517ed.jpg",
823
+ "image_caption": [
824
+ "Figure 4: [QwQ-32B + Zyphra-1.5B] Intuition behind SpecReason's accuracy improvement. See Fig. 9 in §A for the full set of results.",
825
+ "Figure 5: [QwQ-32B + R1-1.5B] SpecReason allows trading off latency for accuracy via adjusting the acceptance threshold (from left to right, the thresholds are: 3, 5, 7, and 9 out of 9)."
826
+ ],
827
+ "image_footnote": [],
828
+ "bbox": [
829
+ 178,
830
+ 356,
831
+ 818,
832
+ 465
833
+ ],
834
+ "page_idx": 7
835
+ },
836
+ {
837
+ "type": "text",
838
+ "text": "When compared with speculative decoding, SpecReason lies on the Pareto frontier of the accuracy-latency tradeoff. More importantly, combining SpecReason with speculative decoding (SpecReason+Decode) results in further latency reductions of $19.4\\% - 44.2\\%$ , $30.8\\% - 58.0\\%$ , $8.8\\% - 52.2\\%$ , and $25.1\\% - 51.8\\%$ over speculative decoding alone. The most significant performance gains for SpecReason when the base model is QwQ-32B occur on the MATH dataset, where both models achieve relatively high accuracies and the capability gap between the small and base models is the narrowest. This makes intermediate steps easier for the small model to speculate correctly, increasing the acceptance rate of speculated steps and thereby lowering end-to-end latency. In comparison, Skywork-Preview-32B is slightly inferior at instruction following, so SpecReason has to adopt a higher threshold to avoid an accuracy loss, reducing SpecReason's latency wins.",
839
+ "bbox": [
840
+ 169,
841
+ 518,
842
+ 826,
843
+ 657
844
+ ],
845
+ "page_idx": 7
846
+ },
847
+ {
848
+ "type": "text",
849
+ "text": "Finally, when comparing SpecReason+Decode with SpecReason, SpecReason+Decode reduces latency by $1.7 \\times -1.9 \\times$ , $1.7 \\times -1.8 \\times$ , $1.6 \\times -2.2 \\times$ , and $1.6 \\times -2.1 \\times$ , demonstrating the difference in ease of speculation across varying tasks. On these three datasets, the ratio of steps carried out by small models in SpecReason is $38.1\\% - 80.0\\%$ , $36.5\\% - 71.3\\%$ , $39.3\\% - 70.2\\%$ , and $41.4\\% - 66.6\\%$ , respectively.",
850
+ "bbox": [
851
+ 169,
852
+ 662,
853
+ 826,
854
+ 734
855
+ ],
856
+ "page_idx": 7
857
+ },
858
+ {
859
+ "type": "text",
860
+ "text": "5.3 Controlling the Accuracy-Latency Tradeoff",
861
+ "text_level": 1,
862
+ "bbox": [
863
+ 169,
864
+ 753,
865
+ 516,
866
+ 768
867
+ ],
868
+ "page_idx": 7
869
+ },
870
+ {
871
+ "type": "text",
872
+ "text": "In Fig. 5, we illustrate how SpecReason enables flexible control over the accuracy-latency tradeoff, using a representative, randomly selected subdataset from the full datasets in §5.2 on QwQ-32B + R1-1.5B for ease of evaluation. During the base model's evaluation of each reasoning step, we vary the acceptance threshold for the utility score between 3, 5, 7, and 9, and report the resulting accuracy and latency.",
873
+ "bbox": [
874
+ 169,
875
+ 779,
876
+ 826,
877
+ 851
878
+ ],
879
+ "page_idx": 7
880
+ },
881
+ {
882
+ "type": "text",
883
+ "text": "On the MATH subdataset, increasing the acceptance threshold from 3 to 7 results in fewer speculative steps from the small model being accepted. This leads to a latency increase from 35.7s to 69.2s, while accuracy improves from $59.4\\%$ to $63.7\\%$ , due to tighter control over the approximation level of intermediate reasoning steps. Notably, the gap between SpecReason+Decode and SpecRea",
884
+ "bbox": [
885
+ 169,
886
+ 854,
887
+ 826,
888
+ 912
889
+ ],
890
+ "page_idx": 7
891
+ },
892
+ {
893
+ "type": "page_number",
894
+ "text": "8",
895
+ "bbox": [
896
+ 493,
897
+ 935,
898
+ 503,
899
+ 946
900
+ ],
901
+ "page_idx": 7
902
+ },
903
+ {
904
+ "type": "image",
905
+ "img_path": "images/42ce6370e2c1264bccef1f1e65adfdd167a9df90c7b38a8c7a8a79057169e152.jpg",
906
+ "image_caption": [
907
+ "Figure 6: Effect of the alternative knob: forcing the first $n$ steps for base model decoding."
908
+ ],
909
+ "image_footnote": [],
910
+ "bbox": [
911
+ 176,
912
+ 95,
913
+ 444,
914
+ 219
915
+ ],
916
+ "page_idx": 8
917
+ },
918
+ {
919
+ "type": "image",
920
+ "img_path": "images/2dbe9d816d5fb6d47276d85c41db508c28497fe59c1be9e557f476f2e50def44.jpg",
921
+ "image_caption": [
922
+ "Figure 7: The utility scores in SpecReason closely reflect the quality score judgements from a process reward model. $x$ on the x-axis denotes PRM scores in the range $[x, x + 0.1)$ ."
923
+ ],
924
+ "image_footnote": [],
925
+ "bbox": [
926
+ 465,
927
+ 90,
928
+ 821,
929
+ 195
930
+ ],
931
+ "page_idx": 8
932
+ },
933
+ {
934
+ "type": "text",
935
+ "text": "son widens from 8.1s to 28.8s, since more reasoning steps are delegated to the base model, and SpecReason+Decode reduces only the base model's decoding time compared to SpecReason.",
936
+ "bbox": [
937
+ 169,
938
+ 297,
939
+ 823,
940
+ 325
941
+ ],
942
+ "page_idx": 8
943
+ },
944
+ {
945
+ "type": "text",
946
+ "text": "A similar trend is observed on the AIME and GPQA subdatasets: as the acceptance threshold increases from 3 to 7, latency grows from 109.4s to 261.9s and from 72.7s to 223.0s, and accuracy improves from $22.3\\%$ to $39.3\\%$ and from $33.1\\%$ to $50.7\\%$ . However, the accuracy degrades less gracefully as the threshold is relaxed compared to the MATH subdataset. This is because the small model exhibits a larger performance gap relative to the base model on AIME and GPQA, making aggressive acceptance of its speculative steps more costly in terms of accuracy.",
947
+ "bbox": [
948
+ 169,
949
+ 330,
950
+ 823,
951
+ 415
952
+ ],
953
+ "page_idx": 8
954
+ },
955
+ {
956
+ "type": "text",
957
+ "text": "In Fig. 6, we also study the effect of the alternative knob, forcing the first $n$ reasoning steps to be decoded by the base model, on the accuracy-latency tradeoff. As we change $n$ from 0 to 10, 20, 30, and 40, SpecReason's accuracy increases from $33.2\\%$ to $37.3\\%$ while the latency increases from 270.4s to 292.6s, showcasing an alternative approach to improve accuracy with a slight increase in latency.",
958
+ "bbox": [
959
+ 169,
960
+ 421,
961
+ 826,
962
+ 491
963
+ ],
964
+ "page_idx": 8
965
+ },
966
+ {
967
+ "type": "text",
968
+ "text": "5.4 Base Model's Judgement Capability",
969
+ "text_level": 1,
970
+ "bbox": [
971
+ 171,
972
+ 517,
973
+ 464,
974
+ 534
975
+ ],
976
+ "page_idx": 8
977
+ },
978
+ {
979
+ "type": "text",
980
+ "text": "The base model's ability to assess the quality of intermediate reasoning steps is a crucial cornerstone of SpecReason's performance. In this experiment, we compare the scores generated by a process reward model (PRM) – which assigns a reward score to each step within the solution to a math problem – with those given by the QwQ-32B base model on the AIME dataset. Specifically, we use Math-Shepherd [Wang et al., 2023], a PRM trained via reinforcement learning from the Mistral-7B base model on math problems, to score each speculated step produced by the R1-1.5B small model.",
981
+ "bbox": [
982
+ 169,
983
+ 546,
984
+ 823,
985
+ 631
986
+ ],
987
+ "page_idx": 8
988
+ },
989
+ {
990
+ "type": "text",
991
+ "text": "In Fig. 7, we bin the reward scores (a float from 0 to 1) into ten bins. Within each bin, we calculate the mean utility score given by the base model in SpecReason. This analysis demonstrates a strong correlation between the base model's and the PRM's assessments, particularly for lower-quality reasoning steps, where both models assign low scores. The results suggest that the base model can effectively approximate the PRM's judgments, making it a viable option for evaluating reasoning step quality in SpecReason.",
992
+ "bbox": [
993
+ 169,
994
+ 635,
995
+ 823,
996
+ 720
997
+ ],
998
+ "page_idx": 8
999
+ },
1000
+ {
1001
+ "type": "text",
1002
+ "text": "6 Conclusion",
1003
+ "text_level": 1,
1004
+ "bbox": [
1005
+ 171,
1006
+ 750,
1007
+ 302,
1008
+ 766
1009
+ ],
1010
+ "page_idx": 8
1011
+ },
1012
+ {
1013
+ "type": "text",
1014
+ "text": "In this work, we introduce SpecReason, a novel approach that accelerates LRM inference by leveraging speculative reasoning. By offloading simpler intermediate reasoning steps to a smaller, lightweight model and reserving the base model for assessment, SpecReason significantly reduces inference latency while maintaining or even improving accuracy. Our results demonstrate that SpecReason achieves a $1.4 - 3.0 \\times$ speedup over vanilla LRM inference, with accuracy improvements ranging from $0.4 - 9.0\\%$ . Additionally, when combined with speculative decoding, SpecReason further reduces latency by $8.8 - 58.0\\%$ , highlighting the complementary nature of these optimizations. We believe this work opens up new angles for efficient LRM inference acceleration, making it especially valuable for scenarios that demand both high accuracy and low latency.",
1015
+ "bbox": [
1016
+ 169,
1017
+ 787,
1018
+ 826,
1019
+ 912
1020
+ ],
1021
+ "page_idx": 8
1022
+ },
1023
+ {
1024
+ "type": "page_number",
1025
+ "text": "9",
1026
+ "bbox": [
1027
+ 493,
1028
+ 935,
1029
+ 504,
1030
+ 946
1031
+ ],
1032
+ "page_idx": 8
1033
+ },
1034
+ {
1035
+ "type": "text",
1036
+ "text": "Acknowledgments and Disclosure of Funding",
1037
+ "text_level": 1,
1038
+ "bbox": [
1039
+ 171,
1040
+ 89,
1041
+ 555,
1042
+ 108
1043
+ ],
1044
+ "page_idx": 9
1045
+ },
1046
+ {
1047
+ "type": "text",
1048
+ "text": "We thank Princeton's Systems for Artificial Intelligence Lab (SAIL) and Princeton Language and Intelligence (PLI) for providing the hardware resources for running experiments. This work was supported by NSF CNS grants 2147909, 2151630, 2140552, 2153449, and 2152313.",
1049
+ "bbox": [
1050
+ 171,
1051
+ 119,
1052
+ 826,
1053
+ 162
1054
+ ],
1055
+ "page_idx": 9
1056
+ },
1057
+ {
1058
+ "type": "text",
1059
+ "text": "References",
1060
+ "text_level": 1,
1061
+ "bbox": [
1062
+ 173,
1063
+ 179,
1064
+ 267,
1065
+ 196
1066
+ ],
1067
+ "page_idx": 9
1068
+ },
1069
+ {
1070
+ "type": "list",
1071
+ "sub_type": "ref_text",
1072
+ "list_items": [
1073
+ "Aime 2024 dataset card. https://huggingface.co/datasets/HuggingFaceH4/aime_2024, 2025.",
1074
+ "Openai o3-mini system card. https://cdn.openai.com/o3-mini-system-card-feb10.pdf, 2025.",
1075
+ "Qwen3: Think deeper, act faster. https://qwenlm.github.io/blog/qwen3/, 2025.",
1076
+ "Qwq-32b: Embracing the power of reinforcement learning. https://qwenlm.github.io/blog/qwq-32b/, 2025.",
1077
+ "Skywork-or1 (open reasoner 1). https://github.com/SkyworkAI/Skywork-OR1, 2025.",
1078
"Introducing zr1-1.5b, a small but powerful reasoning model for math and code. https://www.zyphra.com/post/introducing-zr1-1-5b-a-small-but-powerful-math-code-reasoning-model, 2025.",
1079
+ "Bradley Brown, Jordan Juravsky, Ryan Ehrlich, Ronald Clark, Quoc V Le, Christopher Ré, and Azalia Mirhoseini. Large language monkeys: Scaling inference compute with repeated sampling. arXiv preprint arXiv:2407.21787, 2024.",
1080
+ "F Warren Burton. Speculative computation, parallelism, and functional programming. IEEE Transactions on Computers, 100(12):1190-1193, 1985.",
1081
+ "Tianle Cai, Yuhong Li, Zhengyang Geng, Hongwu Peng, Jason D Lee, Deming Chen, and Tri Dao. Medusa: Simple llm inference acceleration framework with multiple decoding heads. arXiv preprint arXiv:2401.10774, 2024.",
1082
+ "Zhuoming Chen, Avner May, Ruslan Svirschevski, Yu-Hsun Huang, Max Ryabinin, Zhihao Jia, and Beidi Chen. Sequoia: Scalable and robust speculative decoding. Advances in Neural Information Processing Systems, 37: 129531-129563, 2024.",
1083
+ "Yichao Fu, Peter Bailis, Ion Stoica, and Hao Zhang. Break the sequential dependency of llm inference using lookahead decoding. arXiv preprint arXiv:2402.02057, 2024a.",
1084
+ "Yichao Fu, Junda Chen, Siqi Zhu, Zheyu Fu, Zhongdongming Dai, Aurick Qiao, and Hao Zhang. Efficiently serving llm reasoning programs with certainindex. arXiv preprint arXiv:2412.20993, 2024b.",
1085
+ "Yichao Fu, Junda Chen, Yonghao Zhuang, Zheyu Fu, Ion Stoica, and Hao Zhang. Reasoning without self-doubt: More efficient chain-of-thought through certainty probing. In ICLR 2025 Workshop on Foundation Models in the Wild, 2025.",
1086
+ "Aaron Grattafori, Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Alex Vaughan, et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024.",
1087
+ "Xinyu Guan, Li Lyna Zhang, Yifei Liu, Ning Shang, Youran Sun, Yi Zhu, Fan Yang, and Mao Yang. rstar-math: Small llms can master math reasoning with self-evolved deep thinking. arXiv preprint arXiv:2501.04519, 2025.",
1088
+ "Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025.",
1089
+ "Zhenyu He, Zexuan Zhong, Tianle Cai, Jason D Lee, and Di He. Rest: Retrieval-based speculative decoding. arXiv preprint arXiv:2311.08252, 2023.",
1090
+ "Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the math dataset. arXiv preprint arXiv:2103.03874, 2021.",
1091
+ "Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. Openai o1 system card. arXiv preprint arXiv:2412.16720, 2024."
1092
+ ],
1093
+ "bbox": [
1094
+ 173,
1095
+ 202,
1096
+ 825,
1097
+ 910
1098
+ ],
1099
+ "page_idx": 9
1100
+ },
1101
+ {
1102
+ "type": "page_number",
1103
+ "text": "10",
1104
+ "bbox": [
1105
+ 490,
1106
+ 935,
1107
+ 508,
1108
+ 946
1109
+ ],
1110
+ "page_idx": 9
1111
+ },
1112
+ {
1113
+ "type": "list",
1114
+ "sub_type": "ref_text",
1115
+ "list_items": [
1116
+ "Woosuk Kwon, Zhuohan Li, Siyuan Zhuang, Ying Sheng, Lianmin Zheng, Cody Hao Yu, Joseph Gonzalez, Hao Zhang, and Ion Stoica. Efficient memory management for large language model serving with pagedattention. In Proceedings of the 29th Symposium on Operating Systems Principles, pages 611-626, 2023.",
1117
+ "Yaniv Leviathan, Matan Kalman, and Yossi Matias. Fast inference from transformers via speculative decoding. In International Conference on Machine Learning, pages 19274-19286. PMLR, 2023.",
1118
+ "Yuhui Li, Fangyun Wei, Chao Zhang, and Hongyang Zhang. Eagle-3: Scaling up inference acceleration of large language models via training-time test. arXiv preprint arXiv:2503.01840, 2025.",
1119
+ "Hunter Lightman, Vineet Kosaraju, Yuri Burda, Harrison Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step. In The Twelfth International Conference on Learning Representations, 2023.",
1120
+ "Xianzhen Luo, Yixuan Wang, Qingfu Zhu, Zhiming Zhang, Xuanyu Zhang, Qing Yang, Dongliang Xu, and Wanxiang Che. Turning trash into treasure: Accelerating inference of large language models with token recycling, 2024. URL https://arxiv.org/abs/2408.08696.",
1121
+ "Xupeng Miao, Gabriele Oliaro, Zhihao Zhang, Xinhao Cheng, Zeyu Wang, Zhengxin Zhang, Rae Ying Yee Wong, Alan Zhu, Lijie Yang, Xiaoxiang Shi, et al. Specinfer: Accelerating large language model serving with tree-based speculative inference and verification. In Proceedings of the 29th ACM International Conference on Architectural Support for Programming Languages and Operating Systems, Volume 3, pages 932-949, 2024.",
1122
+ "Gabriele Oliaro, Zhihao Jia, Daniel Campos, and Aurick Qiao. Suffixdecoding: A model-free approach to speeding up large language model inference, 2024. URL https://arxiv.org/abs/2411.04975.",
1123
+ "Zhenting Qi, Mingyuan Ma, Jiahang Xu, Li Lyna Zhang, Fan Yang, and Mao Yang. Mutual reasoning makes smaller llms stronger problem-solvers. arXiv preprint arXiv:2408.06195, 2024.",
1124
+ "David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, and Samuel R Bowman. Gpqa: A graduate-level google-proof q&a benchmark. In First Conference on Language Modeling, 2024.",
1125
+ "Mingyang Song, Zhaochen Su, Xiaoye Qu, Jiawei Zhou, and Yu Cheng. Prmbench: A fine-grained and challenging benchmark for process-level reward models. arXiv preprint arXiv:2501.03124, 2025.",
1126
+ "Mitchell Stern, Noam Shazeer, and Jakob Uszkoreit. Blockwise parallel decoding for deep autoregressive models. Advances in Neural Information Processing Systems, 31, 2018.",
1127
+ "NovaSky Team. Think less, achieve more: Cut reasoning costs by $50\\%$ without sacrificing accuracy. https://novasky-ai.github.io/posts/reduce-overthinking, 2025. Accessed: 2025-01-23.",
1128
+ "Peiyi Wang, Lei Li, Zhihong Shao, RX Xu, Damai Dai, Yifei Li, Deli Chen, Yu Wu, and Zhifang Sui. Math-shepherd: Verify and reinforce llms step-by-step without human annotations. arXiv preprint arXiv:2312.08935, 2023.",
1129
+ "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, 35:24824-24837, 2022.",
1130
+ "Minghao Yan, Saurabh Agarwal, and Shivaram Venkataraman. Decoding speculative decoding. arXiv preprint arXiv:2402.01528, 2024.",
1131
+ "An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024.",
1132
+ "Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Tom Griffiths, Yuan Cao, and Karthik Narasimhan. Tree of thoughts: Deliberate problem solving with large language models. Advances in neural information processing systems, 36:11809-11822, 2023.",
1133
+ "Yao Zhao, Zhitian Xie, Chen Liang, Chenyi Zhuang, and Jinjie Gu. Lookahead: An inference acceleration framework for large language model with lossless generation accuracy. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining, KDD '24, page 6344-6355. Association for Computing Machinery, 2024. ISBN 9798400704901. doi: 10.1145/3637528.3671614."
1134
+ ],
1135
+ "bbox": [
1136
+ 171,
1137
+ 90,
1138
+ 826,
1139
+ 869
1140
+ ],
1141
+ "page_idx": 10
1142
+ },
1143
+ {
1144
+ "type": "page_number",
1145
+ "text": "11",
1146
+ "bbox": [
1147
+ 490,
1148
+ 935,
1149
+ 506,
1150
+ 946
1151
+ ],
1152
+ "page_idx": 10
1153
+ },
1154
+ {
1155
+ "type": "text",
1156
+ "text": "A Appendix",
1157
+ "text_level": 1,
1158
+ "bbox": [
1159
+ 171,
1160
+ 89,
1161
+ 292,
1162
+ 108
1163
+ ],
1164
+ "page_idx": 11
1165
+ },
1166
+ {
1167
+ "type": "text",
1168
+ "text": "A.1 Base Models of Varying Sizes and Architectures",
1169
+ "text_level": 1,
1170
+ "bbox": [
1171
+ 171,
1172
+ 119,
1173
+ 550,
1174
+ 136
1175
+ ],
1176
+ "page_idx": 11
1177
+ },
1178
+ {
1179
+ "type": "image",
1180
+ "img_path": "images/290379407ac5e9ab4ceb682a6e2000822a825449423b37126bffb48036a9acd5.jpg",
1181
+ "image_caption": [
1182
+ "Figure 8: SpecReason's results on the model combination (R1-70B, R1-1.5B)."
1183
+ ],
1184
+ "image_footnote": [],
1185
+ "bbox": [
1186
+ 318,
1187
+ 157,
1188
+ 684,
1189
+ 313
1190
+ ],
1191
+ "page_idx": 11
1192
+ },
1193
+ {
1194
+ "type": "text",
1195
+ "text": "To demonstrate the generality of SpecReason, we replace the QwQ-32B base model with DeepSeek's R1-70B and evaluate on the same representative subdatasets as in §5.3. Given the size of the R1-70B model, we deploy it across four A100-80GB GPUs using a tensor parallelism degree of 4.",
1196
+ "bbox": [
1197
+ 169,
1198
+ 353,
1199
+ 823,
1200
+ 397
1201
+ ],
1202
+ "page_idx": 11
1203
+ },
1204
+ {
1205
+ "type": "text",
1206
+ "text": "On the AIME subdataset, SpecReason achieves a $1.5 \\times$ latency reduction compared to vanilla R1-70B inference. This speedup is smaller than the gains observed with the QwQ-32B model in our main results $(1.9 \\times)$ due to two key factors. First, the R1-70B model benefits from both stronger hardware and greater parallelism (4-way TP on A100s), resulting in a $1.5 \\times$ lower time-per-token (TPT) compared to QwQ-32B (2-way TP on A6000s). In contrast, the smaller model R1-1.5B sees only a modest $1.1 \\times$ TPT improvement on stronger hardware, which narrows the performance gap between base and small models and thus diminishes latency savings. Second, QwQ-32B is empirically a stronger model – outperforming R1-70B across many reasoning benchmarks qwq [2025] – and this performance gap impacts their respective abilities to assess intermediate steps. To maintain accuracy, we adopt a stricter acceptance threshold when using R1-70B as the base model, which reduces the fraction of steps offloaded to the small model (23.2% compared to 40.8% in the main results).",
1207
+ "bbox": [
1208
+ 169,
1209
+ 401,
1210
+ 826,
1211
+ 555
1212
+ ],
1213
+ "page_idx": 11
1214
+ },
1215
+ {
1216
+ "type": "text",
1217
+ "text": "A.2 Intuition behind Accuracy Improvement",
1218
+ "text_level": 1,
1219
+ "bbox": [
1220
+ 171,
1221
+ 570,
1222
+ 501,
1223
+ 585
1224
+ ],
1225
+ "page_idx": 11
1226
+ },
1227
+ {
1228
+ "type": "image",
1229
+ "img_path": "images/93f3bf2d7d6f800b3a006bdcd5211eed5d6eb2e0a359233d0a356f0aae3f0c98.jpg",
1230
+ "image_caption": [
1231
+ "Figure 9: Intuition behind SpecReason's accuracy improvement on all datasets and model combinations."
1232
+ ],
1233
+ "image_footnote": [],
1234
+ "bbox": [
1235
+ 178,
1236
+ 604,
1237
+ 821,
1238
+ 728
1239
+ ],
1240
+ "page_idx": 11
1241
+ },
1242
+ {
1243
+ "type": "text",
1244
+ "text": "In Fig. 9, we evaluate the average thinking token count of SpecReason and two vanilla inference baselines on a wide range of datasets and model combinations. We observe that the small model is generally less verbose than the base model, and because SpecReason adopts many speculated steps from the small model, its token consumption is reduced by $1.0 - 1.3 \\times$ , $1.2 - 2.0 \\times$ , $1.0 - 1.8 \\times$ , and $1.1 - 2.3 \\times$ on the four model combinations, respectively.",
1245
+ "bbox": [
1246
+ 169,
1247
+ 779,
1248
+ 825,
1249
+ 849
1250
+ ],
1251
+ "page_idx": 11
1252
+ },
1253
+ {
1254
+ "type": "page_number",
1255
+ "text": "12",
1256
+ "bbox": [
1257
+ 490,
1258
+ 935,
1259
+ 509,
1260
+ 946
1261
+ ],
1262
+ "page_idx": 11
1263
+ }
1264
+ ]