Chelsea707 committed
Commit 6e466a1 · verified · 1 Parent(s): f54485d

MinerU Batch c640cf33-b3f3-4963-a1ce-f2b8601d96e1 (Part 5/8)

This view is limited to 50 files because it contains too many changes. See the raw diff for the full changeset.

Files changed (50)
  1. .gitattributes +8 -0
  2. data/2025/2504_12xxx/2504.12285/2c3f7ef8-ab61-4b87-a7bf-c49da203744d_content_list.json +1518 -0
  3. data/2025/2504_12xxx/2504.12285/2c3f7ef8-ab61-4b87-a7bf-c49da203744d_model.json +2384 -0
  4. data/2025/2504_12xxx/2504.12285/2c3f7ef8-ab61-4b87-a7bf-c49da203744d_origin.pdf +3 -0
  5. data/2025/2504_12xxx/2504.12285/full.md +298 -0
  6. data/2025/2504_12xxx/2504.12285/images/571146886c535edf30d81d1772d84f416f8ac854969e5314285b8b400728c4d3.jpg +3 -0
  7. data/2025/2504_12xxx/2504.12285/images/7fdcbcc3b50ac408ac7c07af7c01e1a337e1a44a092a38fa5c43f53314bca52d.jpg +3 -0
  8. data/2025/2504_12xxx/2504.12285/images/c2ea347c586a5437a02e09c5396b1bc21f19fa3a3f5ae4fc75ee151f66b801d8.jpg +3 -0
  9. data/2025/2504_12xxx/2504.12285/images/e9b0504f3305e06d140af96f6c0e0d1ce952c56b2f03e24d6adcb32b50b7eb16.jpg +3 -0
  10. data/2025/2504_12xxx/2504.12285/images/ef5eaeee5358d095388e3666899372dcd05f08f5db7a5f88b8a1fcf76af24244.jpg +3 -0
  11. data/2025/2504_12xxx/2504.12285/layout.json +0 -0
  12. data/2025/2504_12xxx/2504.12369/3a2c027d-7926-4954-8a0d-d286f9d6a3ea_content_list.json +0 -0
  13. data/2025/2504_12xxx/2504.12369/3a2c027d-7926-4954-8a0d-d286f9d6a3ea_model.json +0 -0
  14. data/2025/2504_12xxx/2504.12369/3a2c027d-7926-4954-8a0d-d286f9d6a3ea_origin.pdf +3 -0
  15. data/2025/2504_12xxx/2504.12369/full.md +542 -0
  16. data/2025/2504_12xxx/2504.12369/images/051a668f07afe27adca49a42fba69f683663d42d43addef7ff5276c78d55d7e8.jpg +3 -0
  17. data/2025/2504_12xxx/2504.12369/images/0eaba4c9b0918d5cb17309e5aac57ca03240e9a5a335a15ff79f2279e7e8be2c.jpg +3 -0
  18. data/2025/2504_12xxx/2504.12369/images/128fabee19e5abaad9587da0de6cd970dc5cc8944b3b1196aad6e7166dc04fe7.jpg +3 -0
  19. data/2025/2504_12xxx/2504.12369/images/137374daba42cc29ee3827d4a155e71a28fcefdc5271bea03e2c5223a0b3ef72.jpg +3 -0
  20. data/2025/2504_12xxx/2504.12369/images/13ddfe8d2bb188aff88dfc8e860cf494addddb6a22e5b2f076a89b7033c0e483.jpg +3 -0
  21. data/2025/2504_12xxx/2504.12369/images/1554543ce78d3b089e0c1e1756fb0bc850201acde2f34528a5b9ac40bfc5306d.jpg +3 -0
  22. data/2025/2504_12xxx/2504.12369/images/17f283519eda9a5331b73da78c30e9f49bf3b0344d40c5194698866ef6a8043e.jpg +3 -0
  23. data/2025/2504_12xxx/2504.12369/images/2cedaf771a3bc9c255e1950c8a7a8826919dba3fb6d4f8b211d37dc47c3d69f4.jpg +3 -0
  24. data/2025/2504_12xxx/2504.12369/images/30d17f42dbaa4ca8d8b12815ea604146efbf347f4ae14367a292fdb24ea2af4b.jpg +3 -0
  25. data/2025/2504_12xxx/2504.12369/images/311186cc1d836831fefdca576808fd26c822e109d2ddfda303da4ad7c48f137b.jpg +3 -0
  26. data/2025/2504_12xxx/2504.12369/images/33a6d94605ecfe71ff82a31473937beb82fd235cd731a85bcb700378ff2ddd3a.jpg +3 -0
  27. data/2025/2504_12xxx/2504.12369/images/3549a7c6280ea8f465ef040bf31d25e38f052420b7d8d952435c0d665c12cb43.jpg +3 -0
  28. data/2025/2504_12xxx/2504.12369/images/3dae4109068741e9358f878bbc3dc4031d853cea0cc5c6c8a1494fcebea62548.jpg +3 -0
  29. data/2025/2504_12xxx/2504.12369/images/43ccd54139ef24f20c1aefc610fed777c3dd8ace9ca8755f9903a916ced4749f.jpg +3 -0
  30. data/2025/2504_12xxx/2504.12369/images/4cd14cfef9f9a857f1658afa482563f9f9aae5ff0fde6e994ca27c3ce2daf2f1.jpg +3 -0
  31. data/2025/2504_12xxx/2504.12369/images/5019318d9bedd2a41eeab53f93ae9a8dc4075660cc387a33e5ac9e4fd4af8336.jpg +3 -0
  32. data/2025/2504_12xxx/2504.12369/images/54085ce17ba039df16122eec09ce0693f531d932564155b51c6ddc1fd60662ac.jpg +3 -0
  33. data/2025/2504_12xxx/2504.12369/images/55568ec2d9052a84d2f43f5fd983fa65c403765847b1eb321dd4a5371fac8f43.jpg +3 -0
  34. data/2025/2504_12xxx/2504.12369/images/56325b6b2dbc279e3cdfcac989057aba84971aad1af70291e761a6af0e60f513.jpg +3 -0
  35. data/2025/2504_12xxx/2504.12369/images/57ab4223a27e3897885abbdfe8c890272849a9e1349679df129a8d7cc0014606.jpg +3 -0
  36. data/2025/2504_12xxx/2504.12369/images/5be7f40ca0170cf0931349adf148b2063a4d2a61d782d7c6200d3dc6a412a8d7.jpg +3 -0
  37. data/2025/2504_12xxx/2504.12369/images/6570f481c382fe021ed13888148b604c0b603bb4d891f79cec1de0d0a488be05.jpg +3 -0
  38. data/2025/2504_12xxx/2504.12369/images/6d83deba8dc1fb557d72f7e206ad8763aaf1db95ce734260003174404ea4cd47.jpg +3 -0
  39. data/2025/2504_12xxx/2504.12369/images/710060b8d65f17b785353128df68a37c04d5ccfe3c20236be522f6805024dbe3.jpg +3 -0
  40. data/2025/2504_12xxx/2504.12369/images/7260ec179d4330f3a596be59f60ebb624909fec6a3bdbace805bf1f660641908.jpg +3 -0
  41. data/2025/2504_12xxx/2504.12369/images/767f4bcd7f8825e3ca7df0605b4a362e6098d0785328a13ad2ac10801d30be44.jpg +3 -0
  42. data/2025/2504_12xxx/2504.12369/images/7e60e4ac83e5a8851f5a56840f7cc4b18e041198f40aa3ce69ed935d26ae78dc.jpg +3 -0
  43. data/2025/2504_12xxx/2504.12369/images/80be7710b7aac22f2f910ef78e2582ba42b65a4d9eacce9bebbb6f7e2b7ed9dd.jpg +3 -0
  44. data/2025/2504_12xxx/2504.12369/images/82b66f7e4a39cf80e04885bbb128c8ee9424241e8ae642b1dd992428acd71103.jpg +3 -0
  45. data/2025/2504_12xxx/2504.12369/images/8316f3b67686e102155ac6518c8ee82688e923ff0be476e4fe46781c9090d2df.jpg +3 -0
  46. data/2025/2504_12xxx/2504.12369/images/87369e340a68364c85e6e43c777c9d3474916f9a9513dff25ee3cb2472787016.jpg +3 -0
  47. data/2025/2504_12xxx/2504.12369/images/9208587645d921b10c68d516401d5b030a2fcaee02b18f07e376e379a667fee0.jpg +3 -0
  48. data/2025/2504_12xxx/2504.12369/images/9491f18c075d1aadae6c839aeb0789004bda41f81162625333e60688614b9348.jpg +3 -0
  49. data/2025/2504_12xxx/2504.12369/images/9759da0bae6bff88da79c18c7517e84bdbc403c95500c5810822ec675e10eb60.jpg +3 -0
  50. data/2025/2504_12xxx/2504.12369/images/9e63e34502d19e1a9660587fe1b448ca4cbb22eb6399eea90032701c88909b36.jpg +3 -0
.gitattributes CHANGED
@@ -1109,3 +1109,11 @@ data/2025/2504_12xxx/2504.12532/64bfdedb-3c59-46bb-be2f-711a9c591fc4_origin.pdf
1109   data/2025/2504_12xxx/2504.12597/6bba0264-76cf-48c3-b57c-10e72c522273_origin.pdf filter=lfs diff=lfs merge=lfs -text
1110   data/2025/2504_12xxx/2504.12609/9901d2ed-1a6e-4cf8-b052-065c8865ea5c_origin.pdf filter=lfs diff=lfs merge=lfs -text
1111   data/2025/2504_12xxx/2504.12636/9a12e4e5-302c-4454-96ab-c5e493e2dce0_origin.pdf filter=lfs diff=lfs merge=lfs -text
1112 + data/2025/2504_12xxx/2504.12285/2c3f7ef8-ab61-4b87-a7bf-c49da203744d_origin.pdf filter=lfs diff=lfs merge=lfs -text
1113 + data/2025/2504_12xxx/2504.12369/3a2c027d-7926-4954-8a0d-d286f9d6a3ea_origin.pdf filter=lfs diff=lfs merge=lfs -text
1114 + data/2025/2504_12xxx/2504.12401/344c7717-406a-4426-bbde-928913ffd40c_origin.pdf filter=lfs diff=lfs merge=lfs -text
1115 + data/2025/2504_12xxx/2504.12408/afaa1baf-15c0-4d8f-949b-b6edf18db129_origin.pdf filter=lfs diff=lfs merge=lfs -text
1116 + data/2025/2504_12xxx/2504.12459/9956eeb3-00c8-414d-bf51-c94c41a6c788_origin.pdf filter=lfs diff=lfs merge=lfs -text
1117 + data/2025/2504_12xxx/2504.12492/7b5fffe0-9106-451e-9d83-f8c53a7c05c0_origin.pdf filter=lfs diff=lfs merge=lfs -text
1118 + data/2025/2504_12xxx/2504.12680/04e3b704-b5e8-4635-9499-209e4f014c85_origin.pdf filter=lfs diff=lfs merge=lfs -text
1119 + data/2025/2504_13xxx/2504.13958/a08029cb-87cf-4923-9173-7614aa70a4ec_origin.pdf filter=lfs diff=lfs merge=lfs -text
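
For orientation, each *_content_list.json added in this batch (the next file in the diff is one of them) is a JSON array of layout blocks, each carrying a "type" ("text", "image", "table", "list", "code", ...), a "bbox", and a "page_idx". A minimal sketch of walking one such file, assuming a local checkout of this dataset; the field names are taken from the diff below, and blocks are printed in whatever order MinerU emitted them:

import json

# One of the content_list.json files added in this commit (assumes a local checkout).
path = "data/2025/2504_12xxx/2504.12285/2c3f7ef8-ab61-4b87-a7bf-c49da203744d_content_list.json"

with open(path, encoding="utf-8") as f:
    blocks = json.load(f)

for block in blocks:
    page = block["page_idx"]
    kind = block["type"]
    if kind == "text":
        # Heading blocks carry a "text_level" key; body paragraphs do not.
        prefix = "#" * block.get("text_level", 0)
        line = f"{prefix} {block['text']}".strip()
        print(f"[p{page}] {line[:80]}")
    elif kind == "list":
        for item in block["list_items"]:
            print(f"[p{page}]   {item[:80]}")
    elif kind in ("image", "table"):
        # Figures and tables reference extracted crops under images/.
        print(f"[p{page}] <{kind}> {block.get('img_path', '')}")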
data/2025/2504_12xxx/2504.12285/2c3f7ef8-ab61-4b87-a7bf-c49da203744d_content_list.json ADDED
@@ -0,0 +1,1518 @@
[
  {"type": "text", "text": "Abstract", "text_level": 1, "bbox": [459, 250, 537, 265], "page_idx": 0},
  {"type": "text", "text": "We introduce BitNet b1.58 2B4T, the first open-source, native 1-bit Large Language Model (LLM) at the 2-billion parameter scale. Trained on a corpus of 4 trillion tokens, the model has been rigorously evaluated across benchmarks covering language understanding, mathematical reasoning, coding proficiency, and conversational ability. Our results demonstrate that BitNet b1.58 2B4T achieves performance on par with leading open-weight, full-precision LLMs of similar size, while offering significant advantages in computational efficiency, including substantially reduced memory footprint, energy consumption, and decoding latency. To facilitate further research and adoption, the model weights are released via Hugging Face along with open-source inference implementations for both GPU and CPU architectures.", "bbox": [228, 279, 769, 434], "page_idx": 0},
  {"type": "text", "text": "BitNet b1.58 2B4T (1.58-bit): bitnet-b1.58-2B-4T", "bbox": [233, 441, 580, 454], "page_idx": 0},
  {"type": "text", "text": "The packed weight of BitNet b1.58 2B4T, used for inference only", "bbox": [254, 455, 638, 468], "page_idx": 0},
  {"type": "text", "text": "BitNet b1.58 2B4T (bf16): bitnet-b1.58-2B-4T-bf16", "bbox": [233, 473, 599, 487], "page_idx": 0},
  {"type": "text", "text": "The master weight of BitNet b1.58 2B4T, used for training only", "bbox": [254, 487, 630, 501], "page_idx": 0},
  {"type": "text", "text": "BitNet b1.58 2B4T (gguf): bitnet-b1.58-2B-4T-gguf", "bbox": [233, 505, 598, 518], "page_idx": 0},
  {"type": "text", "text": "The GGUF format of BitNet b1.58 2B4T, used for bitnet.cpp", "bbox": [254, 518, 614, 534], "page_idx": 0},
  {"type": "text", "text": "BitNet b1.58 2B4T Code: bitnet.cpp Demo: aka.ms/bitnet-demo", "bbox": [233, 539, 710, 554], "page_idx": 0},
  {"type": "image", "img_path": "images/e9b0504f3305e06d140af96f6c0e0d1ce952c56b2f03e24d6adcb32b50b7eb16.jpg", "image_caption": ["Figure 1: BitNet b1.58 2B4T advances the Pareto frontier defined by leading open-weight LLMs under 3B parameters in terms of performance versus memory, demonstrating superior efficiency."], "image_footnote": [], "bbox": [222, 561, 769, 824], "page_idx": 0},
  {"type": "header", "text": "BitNet b1.58 2B4T Technical Report", "bbox": [277, 122, 720, 148], "page_idx": 0},
  {"type": "header", "text": "Shuming Ma* Hongyu Wang* Shaohan Huang Xingxing Zhang Ying Hu Ting Song Yan Xia Furu Wei https://aka.ms/GeneralAI", "bbox": [266, 200, 733, 243], "page_idx": 0},
  {"type": "page_footnote", "text": "* Equal contribution. ⋆ Corresponding author. S. Ma, S. Huang, X. Zhang, T. Song, Y. Xia and F. Wei are with Microsoft Research. H. Wang is with University of Chinese Academy of Sciences. Y. Hu is with Tsinghua University.", "bbox": [169, 872, 823, 912], "page_idx": 0},
  {"type": "aside_text", "text": "arXiv:2504.12285v2 [cs.CL] 25 Apr 2025", "bbox": [22, 265, 60, 705], "page_idx": 0},
  {"type": "text", "text": "1 Introduction", "text_level": 1, "bbox": [173, 89, 312, 104], "page_idx": 1},
  {"type": "text", "text": "Open-source large language models (LLMs) have become pivotal in democratizing access to advanced AI capabilities, fostering innovation, and enabling research across diverse fields such as natural language processing, code generation, and vision computing (Dubey et al., 2024; Yang et al., 2024; Bai et al., 2025). Their public availability allows for widespread experimentation and adaptation. However, a significant barrier hinders their broader adoption: the substantial computational resources required for deployment and inference. State-of-the-art open LLMs typically require large memory footprints, consume considerable energy, and exhibit notable inference latency, rendering them impractical for many edge devices, resource-constrained environments, and real-time applications.", "bbox": [169, 119, 826, 232], "page_idx": 1},
  {"type": "text", "text": "1-bit LLMs, representing an extreme yet promising form of model quantization where weights and potentially activations are constrained to binary $\\{-1, +1\\}$ or ternary $\\{-1, 0, +1\\}$, offer a compelling solution to the efficiency challenges. By drastically reducing the memory required to store weights and enabling highly efficient bitwise computations, they have the potential to significantly lower deployment costs, reduce energy consumption, and accelerate inference speeds. While prior work has explored 1-bit models, existing open efforts often fall into two categories: 1) post-training quantization (PTQ) methods applied to pre-trained full-precision models, which can lead to significant performance degradation (Xu et al., 2024b; Team, 2024), or 2) native 1-bit models (trained from scratch with 1-bit weights) that have been developed at relatively smaller scales (e.g., OLMo-Bitnet-1B<sup>2</sup>) and may not yet match the capabilities of larger, full-precision counterparts. This performance gap has limited the practical impact of 1-bit LLMs thus far.", "bbox": [169, 237, 826, 391], "page_idx": 1},
  {"type": "text", "text": "To bridge this gap between efficiency and performance, we introduce BitNet b1.58 2B4T, the first open-source, native 1-bit LLM trained at scale. This model, comprising 2 billion parameters, was trained from scratch on a substantial dataset of 4 trillion tokens, leveraging architectural and training innovations specific to the 1-bit paradigm. The core contribution of this work is to demonstrate that a native 1-bit LLM, when trained effectively at scale, can achieve performance comparable to leading open-weight, full-precision models of similar size across a wide range of tasks.", "bbox": [169, 396, 823, 481], "page_idx": 1},
  {"type": "text", "text": "This technical report details the development and evaluation of BitNet b1.58 2B4T. We describe the architecture and training methodology, and then present comprehensive evaluation results on standard benchmarks assessing language understanding, mathematical reasoning, coding proficiency, and multi-turn conversational abilities. Our findings confirm its strong performance relative to established full-precision baselines, coupled with significant advantages in efficiency. Finally, we announce the public release of the BitNet b1.58 2B4T model weights via Hugging Face and provide open-source inference code optimized for both GPU and CPU execution, aiming to facilitate further research and the practical deployment of highly efficient LLMs.", "bbox": [169, 484, 825, 599], "page_idx": 1},
  {"type": "text", "text": "2 Architecture", "text_level": 1, "bbox": [171, 617, 313, 633], "page_idx": 1},
  {"type": "text", "text": "The architecture of BitNet b1.58 2B4T is derived from the standard Transformer model (Vaswani et al., 2017), incorporating significant modifications based on the BitNet framework (Wang et al., 2023a; Ma et al., 2024). The model is trained entirely from scratch.", "bbox": [169, 648, 826, 691], "page_idx": 1},
  {"type": "text", "text": "The core architectural innovation lies in replacing the standard full-precision linear layers (torch.nn.Linear) with custom BitLinear layers. This constitutes the foundation of the BitNet approach. Within these BitLinear layers:", "bbox": [169, 696, 823, 739], "page_idx": 1},
  {"type": "list", "sub_type": "text", "list_items": [
    "- Weight Quantization: Model weights are quantized to 1.58 bits during the forward pass. This is achieved using an absolute mean (absmean) quantization scheme, which maps weights to ternary values $\\{-1, 0, +1\\}$. This drastically reduces the model size and enables efficient mathematical operations.",
    "- Activation Quantization: Activations flowing through the linear projection are quantized to 8-bit integers. This employs an absolute maximum (absmax) quantization strategy, applied per-token.",
    "- Normalization: We incorporate subln normalization (Wang et al., 2022) to further enhance training stability, which can be particularly beneficial in quantized training regimes."
  ], "bbox": [215, 750, 823, 885], "page_idx": 1},
  {"type": "page_footnote", "text": "<sup>2</sup>https://huggingface.co/NousResearch/OLMo-Bitnet-1B", "bbox": [192, 896, 589, 911], "page_idx": 1},
  {"type": "page_number", "text": "2", "bbox": [493, 935, 504, 946], "page_idx": 1},
  {"type": "text", "text": "Beyond the BitLinear layers, several established LLM techniques are integrated to enhance performance and stability:", "bbox": [171, 90, 823, 119], "page_idx": 2},
  {"type": "list", "sub_type": "text", "list_items": [
    "- Activation Function (FFN): Within the feed-forward network (FFN) sub-layers, instead of the commonly used SwiGLU activation (Shazeer, 2020), BitNet b1.58 2B4T employs squared ReLU $(\\mathrm{ReLU}^2)$. This choice is motivated by its potential to improve model sparsity and computational characteristics within the 1-bit context (Wang et al., 2024b,a).",
    "- Positional Embeddings: Rotary Position Embeddings (RoPE) (Su et al., 2024) are used to inject positional information, a standard practice in modern high-performance LLMs.",
    "- Bias Removal: Consistent with architectures like LLaMA, all bias terms are removed from the linear layers and normalization layers throughout the network, reducing parameter count and potentially simplifying quantization."
  ], "bbox": [215, 131, 821, 266], "page_idx": 2},
  {"type": "text", "text": "For tokenization, we adopt the tokenizer developed for LLaMA 3 (Dubey et al., 2024). This tokenizer implements a byte-level Byte-Pair Encoding (BPE) scheme with a vocabulary size of 128,256 tokens. This choice ensures robust handling of diverse text and code, and its widespread adoption facilitates straightforward integration with existing open-source tooling and ecosystems.", "bbox": [169, 280, 823, 335], "page_idx": 2},
  {"type": "text", "text": "3 Training", "text_level": 1, "bbox": [171, 357, 279, 375], "page_idx": 2},
  {"type": "text", "text": "The training process for BitNet b1.58 2B4T involved three distinct phases: large-scale pre-training followed by supervised fine-tuning (SFT) and direct preference optimization (DPO). While advanced techniques like Proximal Policy Optimization (PPO) or Group Relative Policy Optimization (GRPO) can further enhance capabilities such as mathematics and chain-of-thought reasoning (Schulman et al., 2017; Shao et al., 2024), the current version of BitNet b1.58 2B4T relies solely on pre-training, SFT, and DPO. The exploration of reinforcement learning methods remains a direction for future work.", "bbox": [169, 388, 823, 484], "page_idx": 2},
  {"type": "text", "text": "3.1 Pre-training", "text_level": 1, "bbox": [171, 503, 300, 518], "page_idx": 2},
  {"type": "text", "text": "The pre-training phase aimed to imbue the model with broad world knowledge and foundational language capabilities. We adapted general training strategies from established LLM practices (Dubey et al., 2024), with specific adjustments tailored for the 1-bit architecture.", "bbox": [169, 529, 823, 571], "page_idx": 2},
  {"type": "text", "text": "3.1.1 Learning Rate Schedule", "text_level": 1, "bbox": [171, 587, 393, 602], "page_idx": 2},
  {"type": "text", "text": "A two-stage learning rate schedule was employed.", "bbox": [171, 612, 503, 627], "page_idx": 2},
  {"type": "list", "sub_type": "text", "list_items": [
    "1. **Stage 1 (High Learning Rate):** The initial phase utilized a standard cosine decay schedule but commenced with a relatively high peak learning rate. This decision was informed by the observation that 1-bit models often exhibit greater training stability compared to their full-precision counterparts, allowing for more aggressive initial learning steps.",
    "2. **Stage 2 (Cooldown):** Approximately midway through the planned training token count, the learning rate was abruptly decayed and subsequently maintained via a cosine schedule with a significantly lower peak value. This \"cooldown\" phase allows the model to refine its representations on higher-quality data (see Section 3.1.3)."
  ], "bbox": [207, 638, 821, 752], "page_idx": 2},
  {"type": "text", "text": "3.1.2 Weight Decay Schedule", "text_level": 1, "bbox": [171, 771, 388, 785], "page_idx": 2},
  {"type": "text", "text": "Complementing the learning rate adjustments, a two-stage weight decay strategy was implemented.", "bbox": [171, 795, 821, 810], "page_idx": 2},
  {"type": "list", "sub_type": "text", "list_items": [
    "1. **Stage 1:** During the first training stage, weight decay followed a cosine schedule, reaching a peak value of 0.1. This regularization helps prevent overfitting during the initial high learning-rate phase.",
    "2. **Stage 2:** In the second stage, weight decay was effectively disabled (set to zero). This allows the model parameters to settle into finer-grained optima guided by the lower learning rate and curated data."
  ], "bbox": [207, 821, 821, 907], "page_idx": 2},
  {"type": "page_number", "text": "3", "bbox": [493, 935, 503, 946], "page_idx": 2},
  {"type": "text", "text": "3.1.3 Pre-training Data", "text_level": 1, "bbox": [171, 90, 349, 106], "page_idx": 3},
  {"type": "text", "text": "The pre-training corpus comprised a mixture of publicly available text and code datasets, including large web crawls like DCLM (Li et al., 2024b) and educational web pages like FineWeb-EDU (Penedo et al., 2024). To enhance mathematical reasoning abilities, we also incorporated synthetically generated mathematical data. The data presentation strategy aligned with the two-stage training: the bulk of general web data was processed during Stage 1, while higher-quality curated datasets were emphasized during the Stage 2 cooldown phase, coinciding with the reduced learning rate.", "bbox": [169, 114, 823, 199], "page_idx": 3},
  {"type": "text", "text": "3.2 Supervised Fine-tuning (SFT)", "text_level": 1, "bbox": [171, 214, 419, 229], "page_idx": 3},
  {"type": "text", "text": "Following pre-training, the model underwent supervised fine-tuning (SFT) to enhance its instruction-following capabilities and improve its performance in conversational interaction formats.", "bbox": [169, 239, 826, 268], "page_idx": 3},
  {"type": "text", "text": "3.2.1 SFT Data", "text_level": 1, "bbox": [171, 282, 294, 297], "page_idx": 3},
  {"type": "text", "text": "The SFT phase utilized a diverse collection of publicly available instruction-following and conversational datasets. These included, but were not limited to, WildChat (Zhao et al., 2024), LMSYS-Chat-1M (Zheng et al., 2024), WizardLM Evol-Instruct (Xu et al., 2024a), and SlimOrca (Lian et al., 2023). To further bolster specific capabilities, particularly in reasoning and complex instruction adherence, we supplemented these with synthetic datasets generated using methodologies like GLAN (Li et al., 2024a) and MathScale (Tang et al., 2024).", "bbox": [169, 306, 826, 391], "page_idx": 3},
  {"type": "text", "text": "3.2.2 Chat Template", "text_level": 1, "bbox": [171, 405, 330, 420], "page_idx": 3},
  {"type": "text", "text": "For conversational tasks during SFT and inference, the following chat template structure was employed:", "bbox": [169, 429, 826, 458], "page_idx": 3},
  {"type": "code", "sub_type": "code", "code_caption": [], "code_body": "<|begin_of_text|>System: {system_message}<|eot_id|>\nUser: {user_message_1}<|eot_id|>\nAssistant: {assistant_message_1}<|eot_id|>\nUser: {user_message_2}<|eot_id|>\nAssistant: {assistant_message_2}<|eot_id|>...", "guess_lang": "txt", "bbox": [169, 468, 612, 539], "page_idx": 3},
  {"type": "text", "text": "3.2.3 Optimization Details", "text_level": 1, "bbox": [171, 553, 370, 568], "page_idx": 3},
  {"type": "text", "text": "Several optimization choices were key during SFT:", "bbox": [171, 577, 509, 592], "page_idx": 3},
  {"type": "list", "sub_type": "text", "list_items": [
    "- Loss Aggregation: Instead of averaging the cross-entropy loss across tokens within a batch (mean reduction), we employed summation. Empirically, we observed that summing the losses led to improved convergence and better final performance for this model.",
    "- Hyperparameter Tuning: Careful tuning of the learning rate and the number of training epochs was performed. Consistent with our pre-training findings, the 1-bit model benefited from a relatively larger learning rate during SFT compared to typical full-precision model fine-tuning. Furthermore, achieving optimal convergence required extending the fine-tuning duration over a larger number of epochs than full-precision models of similar size."
  ], "bbox": [215, 603, 825, 719], "page_idx": 3},
  {"type": "text", "text": "3.3 Direct Preference Optimization (DPO)", "text_level": 1, "bbox": [171, 734, 480, 751], "page_idx": 3},
  {"type": "text", "text": "To further align the model's behavior with human preferences regarding helpfulness and safety, we applied Direct Preference Optimization (DPO) (Rafailov et al., 2023) following the SFT phase. DPO offers an efficient alternative to traditional RLHF by directly optimizing the language model using preference data, thereby circumventing the need to train a separate reward model. This DPO stage served to refine the model's conversational prowess and overall alignment with desired interaction patterns in practical use cases.", "bbox": [169, 761, 825, 844], "page_idx": 3},
  {"type": "text", "text": "3.3.1 Training Data", "text_level": 1, "bbox": [171, 859, 323, 875], "page_idx": 3},
  {"type": "text", "text": "The preference dataset used for DPO training was constructed from a combination of publicly available resources recognized for capturing diverse human judgments on model outputs. Specifically,", "bbox": [169, 883, 826, 912], "page_idx": 3},
  {"type": "page_number", "text": "4", "bbox": [493, 935, 504, 946], "page_idx": 3},
  {"type": "text", "text": "we utilized UltraFeedback (Cui et al., 2024) and MagPie (Xu et al., 2024c). The aggregation of these datasets provided a robust and multifaceted preference signal, guiding the model towards generating responses more aligned with human expectations.", "bbox": [169, 90, 823, 133], "page_idx": 4},
  {"type": "text", "text": "3.3.2 Training Details", "text_level": 1, "bbox": [171, 147, 339, 162], "page_idx": 4},
  {"type": "text", "text": "The DPO training phase was conducted for 2 epochs. We employed a learning rate of $2 \\times 10^{-7}$ and set the DPO beta parameter, which controls the divergence from the reference policy, to 0.1. To enhance training efficiency during this phase, we integrated optimized kernels from the Liger Kernel library (Hsu et al., 2024). Qualitatively, our observations indicate that the DPO process effectively steered the model towards preferred response styles without inducing significant degradation in the core capabilities established during pre-training and SFT.", "bbox": [169, 170, 826, 255], "page_idx": 4},
  {"type": "text", "text": "4 Evaluation", "text_level": 1, "bbox": [171, 273, 297, 289], "page_idx": 4},
  {"type": "text", "text": "We measure performance on a wide variety of benchmarks classified as follows:", "bbox": [169, 303, 699, 319], "page_idx": 4},
  {"type": "list", "sub_type": "text", "list_items": [
    "- Language understanding and reasoning: ARC-Easy (Yadav et al., 2019), ARC-Challenge (Yadav et al., 2019), HellaSwag (Zellers et al., 2019), WinoGrande (Sakaguchi et al., 2020), PIQA (Bisk et al., 2019), OpenbookQA (Mihaylov et al., 2018), and CommonsenseQA (Talmor et al., 2019)",
    "- World knowledge: TruthfulQA (Lin et al., 2022) and MMLU (Hendrycks et al., 2021a)",
    "- Reading comprehension: TriviaQA (Joshi et al., 2017) and BoolQ (Clark et al., 2019)",
    "- Math and code: GSM8K (Cobbe et al., 2021), MATH-500 (Hendrycks et al., 2021b) and HumanEval+ (Liu et al., 2023)",
    "- Instruction following and conversation: IFEval (Zhou et al., 2023) and MT-bench (Zheng et al., 2023)"
  ], "bbox": [215, 329, 823, 484], "page_idx": 4},
  {"type": "text", "text": "We compare BitNet b1.58 2B4T with leading open-weight full-precision LLMs of similar size, including LLaMA 3.2 1B (Dubey et al., 2024), Gemma-3 1B (Team et al., 2025), Qwen2.5 1.5B (Yang et al., 2024), SmolLM2 1.7B (Allal et al., 2025) and MiniCPM 2B (Hu et al., 2024). All models are instruction-tuned versions. We re-run all benchmarks with a public evaluation pipeline for a fair comparison. More evaluation details are available in the appendix. The main results are presented in Table 1.", "bbox": [169, 496, 826, 580], "page_idx": 4},
  {"type": "text", "text": "4.1 Main Results", "text_level": 1, "bbox": [171, 595, 305, 609], "page_idx": 4},
  {"type": "text", "text": "As shown in Table 1, BitNet b1.58 2B4T demonstrates remarkable resource efficiency. Its non-embedding memory footprint and estimated energy consumption (Horowitz, 2014; Zhang et al., 2022) during decoding are substantially lower compared to all the full-precision models evaluated, highlighting a significant advantage in operational cost and deployability on resource-constrained devices.", "bbox": [169, 619, 826, 690], "page_idx": 4},
  {"type": "text", "text": "In terms of task performance, BitNet b1.58 2B4T proves highly competitive. It achieves the best results among the compared models on several benchmarks spanning reasoning, knowledge, and math capabilities. On other benchmarks, its performance is closely comparable to the top-performing full-precision models. While some full-precision models show slight advantages on specific tasks or the overall average, BitNet b1.58 2B4T delivers strong performance across the board. The results indicate that BitNet b1.58 2B4T achieves capabilities nearly on par with leading models in its size class while offering dramatically improved efficiency.", "bbox": [169, 696, 826, 794], "page_idx": 4},
  {"type": "text", "text": "4.2 Comparison with Post-training Quantized Models", "text_level": 1, "bbox": [169, 809, 562, 825], "page_idx": 4},
  {"type": "text", "text": "We further investigate the efficiency-performance trade-off by comparing BitNet b1.58 2B4T against post-training quantized (PTQ) versions of a leading competitor, Qwen2.5 1.5B, using standard INT4 methods (GPTQ and AWQ). The results are summarized in Table 2.", "bbox": [169, 834, 825, 878], "page_idx": 4},
  {"type": "text", "text": "While INT4 quantization successfully reduces the memory footprint of the full-precision model, BitNet b1.58 2B4T achieves an even lower memory requirement due to its native 1-bit architecture.", "bbox": [169, 882, 828, 912], "page_idx": 4},
  {"type": "page_number", "text": "5", "bbox": [493, 935, 504, 946], "page_idx": 4},
  {"type": "table", "img_path": "images/571146886c535edf30d81d1772d84f416f8ac854969e5314285b8b400728c4d3.jpg", "table_caption": [], "table_footnote": [], "table_body": "<table><tr><td>Benchmark (Metric)</td><td>LLaMA 3.2 1B</td><td>Gemma-3 1B</td><td>Qwen2.5 1.5B</td><td>SmolLM2 1.7B</td><td>MiniCPM 2B</td><td>BitNet b1.58 2B</td></tr><tr><td>Memory (Non-emb)</td><td>2GB</td><td>1.4GB</td><td>2.6GB</td><td>3.2GB</td><td>4.8GB</td><td>0.4GB</td></tr><tr><td>Latency (CPU; TPOT)</td><td>48ms</td><td>41ms</td><td>65ms</td><td>67ms</td><td>124ms</td><td>29ms</td></tr><tr><td>Energy (Estimated)</td><td>0.258J</td><td>0.186J</td><td>0.347J</td><td>0.425J</td><td>0.649J</td><td>0.028J</td></tr><tr><td>Training Tokens (Pre-training)</td><td>9T (pruning &amp; distillation)</td><td>2T (distillation)</td><td>18T</td><td>11T</td><td>1.1T</td><td>4T</td></tr><tr><td>ARC-Challenge (0-shot; Acc, norm)</td><td>37.80</td><td>38.40</td><td>46.67</td><td>43.52</td><td>44.80</td><td>49.91</td></tr><tr><td>ARC-Easy (0-shot; Acc, norm)</td><td>63.17</td><td>63.13</td><td>76.01</td><td>62.92</td><td>72.14</td><td>74.79</td></tr><tr><td>OpenbookQA (0-shot; Acc, norm)</td><td>34.80</td><td>38.80</td><td>40.80</td><td>46.00</td><td>40.20</td><td>41.60</td></tr><tr><td>BoolQ (0-shot; Acc)</td><td>64.65</td><td>74.22</td><td>78.04</td><td>75.78</td><td>80.67</td><td>80.18</td></tr><tr><td>HellaSwag (0-shot; Acc, norm)</td><td>60.80</td><td>57.69</td><td>68.28</td><td>71.71</td><td>70.81</td><td>68.44</td></tr><tr><td>PIQA (0-shot; Acc, norm)</td><td>74.21</td><td>71.93</td><td>76.12</td><td>76.12</td><td>76.66</td><td>77.09</td></tr><tr><td>WinoGrande (0-shot; Acc)</td><td>59.51</td><td>58.48</td><td>62.83</td><td>68.98</td><td>61.80</td><td>71.90</td></tr><tr><td>CommonsenseQA (10-shot; Acc)</td><td>58.48</td><td>42.10</td><td>76.41</td><td>63.55</td><td>71.74</td><td>71.58</td></tr><tr><td>TruthfulQA (10-shot; MC2)</td><td>43.80</td><td>38.66</td><td>46.67</td><td>39.90</td><td>41.41</td><td>45.31</td></tr><tr><td>TriviaQA (5-shot; EM)</td><td>37.60</td><td>23.49</td><td>38.37</td><td>45.97</td><td>34.13</td><td>33.57</td></tr><tr><td>MMLU (5-shot; Acc)</td><td>45.58</td><td>39.91</td><td>60.25</td><td>49.24</td><td>51.82</td><td>53.17</td></tr><tr><td>HumanEval+ (0-shot; Pass@1)</td><td>31.10</td><td>37.20</td><td>50.60</td><td>28.00</td><td>43.90</td><td>38.40</td></tr><tr><td>GSM8K (4-shot; EM)</td><td>38.21</td><td>31.16</td><td>56.79</td><td>45.11</td><td>4.40</td><td>58.38</td></tr><tr><td>MATH-500 (0-shot; EM)</td><td>23.00</td><td>42.00</td><td>53.00</td><td>17.60</td><td>14.80</td><td>43.40</td></tr><tr><td>IFEval (0-shot; Instruct-Strict)</td><td>62.71</td><td>66.67</td><td>50.12</td><td>57.91</td><td>36.81</td><td>53.48</td></tr><tr><td>MT-bench (0-shot; Average)</td><td>5.43</td><td>6.40</td><td>6.12</td><td>5.50</td><td>6.57</td><td>5.85</td></tr><tr><td>Average</td><td>44.90</td><td>43.74</td><td>55.23</td><td>48.70</td><td>42.05</td><td>54.19</td></tr></table>", "bbox": [181, 87, 812, 672], "page_idx": 5},
  {"type": "text", "text": "Table 1: Comparison of BitNet b1.58 2B4T with leading open-weight full-precision LLMs of similar size (1B-2B parameters) on efficiency metrics and performance across a wide range of benchmarks. All models compared are instruction-tuned versions.", "bbox": [169, 676, 823, 719], "page_idx": 5},
  {"type": "text", "text": "More importantly, this superior memory efficiency does not compromise performance relative to the quantized models. Standard PTQ techniques lead to a noticeable degradation in performance compared to the original full-precision model. In contrast, BitNet b1.58 2B4T maintains stronger overall performance than the INT4 quantized versions of Qwen2.5-1.5B on the evaluated benchmarks. This comparison suggests that BitNet b1.58 2B4T represents a more favorable point on the efficiency-performance curve than applying conventional INT4 PTQ to existing architectures, offering better performance with lower resource usage.", "bbox": [169, 813, 823, 912], "page_idx": 5},
  {"type": "page_number", "text": "6", "bbox": [493, 936, 503, 946], "page_idx": 5},
873
+ {
874
+ "type": "table",
875
+ "img_path": "images/ef5eaeee5358d095388e3666899372dcd05f08f5db7a5f88b8a1fcf76af24244.jpg",
876
+ "table_caption": [],
877
+ "table_footnote": [],
878
+ "table_body": "<table><tr><td rowspan=\"2\">Benchmark (Metric)</td><td colspan=\"3\">Qwen2.5</td><td>BitNet b1.58</td></tr><tr><td>1.5B-bf16</td><td>1.5B-GPTQ-int4</td><td>1.5B-AWQ-int4</td><td>2B</td></tr><tr><td>Memory \n(Non-emb)</td><td>2.6GB</td><td>0.7GB</td><td>0.7GB</td><td>0.4GB</td></tr><tr><td>Activation</td><td>bf16</td><td>bf16</td><td>bf16</td><td>int8</td></tr><tr><td>MMLU \n(5-shot; Acc)</td><td>60.25</td><td>58.06</td><td>57.43</td><td>53.17</td></tr><tr><td>GSM8K \n(4-shot; EM)</td><td>56.79</td><td>50.57</td><td>50.64</td><td>58.38</td></tr><tr><td>IFEval \n(0-shot; Instruct-Strict)</td><td>50.12</td><td>47.84</td><td>45.44</td><td>53.48</td></tr><tr><td>Average</td><td>55.72</td><td>52.15</td><td>51.17</td><td>55.01</td></tr></table>",
879
+ "bbox": [
880
+ 204,
881
+ 88,
882
+ 790,
883
+ 282
884
+ ],
885
+ "page_idx": 6
886
+ },
887
+ {
888
+ "type": "table",
889
+ "img_path": "images/7fdcbcc3b50ac408ac7c07af7c01e1a337e1a44a092a38fa5c43f53314bca52d.jpg",
890
+ "table_caption": [
891
+ "Table 2: Comparison of BitNet b1.58 (2B) against Qwen2.5 1.5B in its original bf16 precision and after INT4 post-training quantization (GPTQ and AWQ). All models shown are based on instruction-tuned checkpoints."
892
+ ],
893
+ "table_footnote": [],
894
+ "table_body": "<table><tr><td>Benchmark (Metric)</td><td>Bonsai 0.5B</td><td>OLMo-Bitnet 1B</td><td>Falcon3-1.58bit 7B</td><td>Llama3-8B-1.58 8B</td><td>BitNet b1.58 2B</td></tr><tr><td>Native 1-bit</td><td>✓</td><td>✓</td><td>✘</td><td>✘</td><td>✓</td></tr><tr><td>ARC-Challange (0-shot; Acc,norm)</td><td>33.19</td><td>26.54</td><td>37.80</td><td>43.69</td><td>49.91</td></tr><tr><td>ARC-Easy (0-shot; Acc,norm)</td><td>58.25</td><td>25.38</td><td>65.03</td><td>70.71</td><td>74.79</td></tr><tr><td>OpenbookQA (0-shot; Acc,norm)</td><td>33.60</td><td>28.20</td><td>38.20</td><td>37.20</td><td>41.60</td></tr><tr><td>BoolQ (0-shot; Acc)</td><td>58.44</td><td>52.48</td><td>72.14</td><td>68.38</td><td>80.18</td></tr><tr><td>HellaSwag (0-shot; Acc,norm)</td><td>48.01</td><td>25.88</td><td>59.46</td><td>68.56</td><td>68.44</td></tr><tr><td>PIQA (0-shot; Acc,norm)</td><td>70.02</td><td>50.49</td><td>72.36</td><td>75.30</td><td>77.09</td></tr><tr><td>WinoGrande (0-shot; Acc)</td><td>54.46</td><td>51.54</td><td>60.14</td><td>60.93</td><td>71.90</td></tr><tr><td>CommonsenseQA (10-shot; Acc)</td><td>18.43</td><td>19.49</td><td>67.08</td><td>28.50</td><td>71.58</td></tr><tr><td>TruthfulQA (10-shot; MC2)</td><td>40.65</td><td>49.05</td><td>43.29</td><td>39.13</td><td>45.31</td></tr><tr><td>TriviaQA (5-shot; EM)</td><td>10.84</td><td>0.00</td><td>0.00</td><td>19.82</td><td>33.57</td></tr><tr><td>MMLU (5-shot; Acc)</td><td>25.74</td><td>25.47</td><td>42.79</td><td>35.04</td><td>53.17</td></tr><tr><td>Average</td><td>41.06</td><td>32.22</td><td>50.76</td><td>49.75</td><td>60.68</td></tr></table>",
895
+ "bbox": [
896
+ 192,
897
+ 354,
898
+ 802,
899
+ 712
900
+ ],
901
+ "page_idx": 6
902
+ },
903
+ {
904
+ "type": "text",
905
+ "text": "Table 3: Performance comparison of BitNet b1.58 2B4T against other open-weight 1-bit models. This includes natively trained 1-bit models (Bonsai-0.5B, OLMo-Bitnet-1B) and larger models posttraining quantized to 1.58-bit (Falcon3-1.58bit-7B, Llama3-8B-1.58).",
906
+ "bbox": [
907
+ 169,
908
+ 715,
909
+ 828,
910
+ 760
911
+ ],
912
+ "page_idx": 6
913
+ },
914
+ {
915
+ "type": "text",
916
+ "text": "4.3 Comparison with Open-weight 1-bit Models",
917
+ "text_level": 1,
918
+ "bbox": [
919
+ 169,
920
+ 794,
921
+ 521,
922
+ 810
923
+ ],
924
+ "page_idx": 6
925
+ },
926
+ {
927
+ "type": "text",
928
+ "text": "Finally, we situate BitNet b1.58 2B4T within the landscape of other models designed for or quantized to near 1-bit precision. We compare it against natively trained 1-bit models of smaller scale and significantly larger models post-training quantized to 1.58-bit precision. The comparative results are presented in Table 3.",
929
+ "bbox": [
930
+ 169,
931
+ 820,
932
+ 823,
933
+ 878
934
+ ],
935
+ "page_idx": 6
936
+ },
937
+ {
938
+ "type": "text",
939
+ "text": "The evaluation clearly positions BitNet b1.58 2B4T as the leading model in this category. It demonstrates significantly stronger overall performance than all other compared 1-bit models, achieving",
940
+ "bbox": [
941
+ 169,
942
+ 883,
943
+ 828,
944
+ 912
945
+ ],
946
+ "page_idx": 6
947
+ },
948
+ {
949
+ "type": "page_number",
950
+ "text": "7",
951
+ "bbox": [
952
+ 493,
953
+ 935,
954
+ 504,
955
+ 946
956
+ ],
957
+ "page_idx": 6
958
+ },
959
+ {
960
+ "type": "text",
961
+ "text": "the highest scores on the vast majority of benchmarks. Notably, BitNet b1.58 2B4T substantially outperforms not only the smaller, natively trained 1-bit models but also the much larger models (in terms of parameter count) that were quantized to 1-bit. This highlights the effectiveness of the native training approach employed by BitNet b1.58 2B4T, allowing it to set a new state-of-the-art performance level for models operating at this extreme level of quantization, even surpassing larger models subjected to post-training quantization.",
962
+ "bbox": [
963
+ 169,
964
+ 90,
965
+ 823,
966
+ 175
967
+ ],
968
+ "page_idx": 7
969
+ },
970
+ {
971
+ "type": "text",
972
+ "text": "5 Inference Implementation",
973
+ "text_level": 1,
974
+ "bbox": [
975
+ 171,
976
+ 205,
977
+ 421,
978
+ 222
979
+ ],
980
+ "page_idx": 7
981
+ },
982
+ {
983
+ "type": "text",
984
+ "text": "Efficient inference is crucial for deploying Large Language Models, particularly for resource-constrained environments. The unique quantization scheme of BitNet b1.58 2B4T, employing 1.58-bit weights and 8-bit activations (W1.58A8), necessitates specialized implementations, as standard deep learning libraries often lack optimized kernels for such mixed-precision, low-bit formats. To address this, we developed and open-sourced dedicated inference libraries for both GPU and CPU platforms. The code is publicly available at https://aka.ms/bitnet.",
985
+ "bbox": [
986
+ 169,
987
+ 243,
988
+ 826,
989
+ 327
990
+ ],
991
+ "page_idx": 7
992
+ },
993
+ {
994
+ "type": "text",
995
+ "text": "5.1 GPU Inference",
996
+ "text_level": 1,
997
+ "bbox": [
998
+ 171,
999
+ 354,
1000
+ 316,
1001
+ 368
1002
+ ],
1003
+ "page_idx": 7
1004
+ },
1005
+ {
1006
+ "type": "text",
1007
+ "text": "Current GPU architectures and their associated software libraries (e.g., cuBLAS, PyTorch kernels) are primarily optimized for operations involving standard data types like FP16, BF16, and INT8/INT4. Native, high-performance support for the specific W1.58A8 matrix multiplication required by BitNet b1.58 2B4T is generally unavailable. This limitation can hinder the realization of the theoretical efficiency gains offered by 1-bit models on existing hardware.",
1008
+ "bbox": [
1009
+ 169,
1010
+ 383,
1011
+ 823,
1012
+ 454
1013
+ ],
1014
+ "page_idx": 7
1015
+ },
1016
+ {
1017
+ "type": "text",
1018
+ "text": "To enable efficient GPU inference, we developed a custom CUDA kernel specifically designed for the W1.58A8 matrix multiplication. Since ternary weights $\\{-1,0, + 1\\}$ , representing 1.58 bits) cannot be stored efficiently using standard data types, we pack multiple weight values into a single 8-bit integer ('int8') for storage in High Bandwidth Memory (HBM). Specifically, four ternary values are encoded into one 'int8' value. During computation, the CUDA kernel loads the packed 'int8' weights from HBM into the GPU's faster on-chip Shared Memory (SRAM). It then unpacks these values back into a representation suitable for efficient ternary computation (e.g., reconstructing the -1, 0, +1 values) immediately before performing the matrix multiplication with the 8-bit activations. This 'pack-store-load-unpack-compute' strategy minimizes memory bandwidth usage while leveraging custom compute instructions. Further implementation details and optimization strategies are elaborated in the Ladder framework (Wang et al., 2023b).",
1019
+ "bbox": [
1020
+ 169,
1021
+ 459,
1022
+ 825,
1023
+ 612
1024
+ ],
1025
+ "page_idx": 7
1026
+ },
1027
+ {
1028
+ "type": "text",
1029
+ "text": "While our custom kernel significantly improves performance compared to naive implementations, we note that current commodity GPU architectures are not optimally designed for the 1-bit models. We believe that future hardware innovations, potentially incorporating dedicated logic for low-bit operations, will be essential to fully unlock the performance and energy efficiency potential of models like BitNet b1.58.",
1030
+ "bbox": [
1031
+ 169,
1032
+ 618,
1033
+ 823,
1034
+ 686
1035
+ ],
1036
+ "page_idx": 7
1037
+ },
1038
+ {
1039
+ "type": "text",
1040
+ "text": "5.2 CPU Inference",
1041
+ "text_level": 1,
1042
+ "bbox": [
1043
+ 171,
1044
+ 715,
1045
+ 316,
1046
+ 729
1047
+ ],
1048
+ "page_idx": 7
1049
+ },
1050
+ {
1051
+ "type": "text",
1052
+ "text": "To ensure broad accessibility and enable deployment on devices lacking powerful GPUs (e.g., edge devices, laptops, standard servers), we developed bitnet.cpp. This C++ library serves as an official reference implementation for CPU inference of 1-bit LLMs, including BitNet b1.58.",
1053
+ "bbox": [
1054
+ 169,
1055
+ 744,
1056
+ 823,
1057
+ 787
1058
+ ],
1059
+ "page_idx": 7
1060
+ },
1061
+ {
1062
+ "type": "text",
1063
+ "text": "bitnet.cpp provides optimized kernels tailored for efficient execution on standard CPU architectures. The kernels are designed to operate efficiently with the model's specific quantization scheme, avoiding the overhead of generic quantization libraries or intricate low-level bit manipulation where possible. It processes the weight elements in a manner consistent with the BitNet b1.58 training methodology, ensuring numerical accuracy (lossless inference relative to the training procedure).",
1064
+ "bbox": [
1065
+ 169,
1066
+ 792,
1067
+ 823,
1068
+ 863
1069
+ ],
1070
+ "page_idx": 7
1071
+ },
1072
+ {
1073
+ "type": "text",
1074
+ "text": "This approach delivers fast and accurate inference of 1.58-bit models directly on CPUs. More technical details and usage instructions can be found in the bitnet.cpp repository and associated technical report (Wang et al., 2025).",
1075
+ "bbox": [
1076
+ 169,
1077
+ 869,
1078
+ 823,
1079
+ 912
1080
+ ],
1081
+ "page_idx": 7
1082
+ },
1083
+ {
1084
+ "type": "page_number",
1085
+ "text": "8",
1086
+ "bbox": [
1087
+ 493,
1088
+ 935,
1089
+ 503,
1090
+ 946
1091
+ ],
1092
+ "page_idx": 7
1093
+ },
1094
+ {
1095
+ "type": "text",
1096
+ "text": "6 Conclusion",
1097
+ "text_level": 1,
1098
+ "bbox": [
1099
+ 171,
1100
+ 90,
1101
+ 302,
1102
+ 107
1103
+ ],
1104
+ "page_idx": 8
1105
+ },
1106
+ {
1107
+ "type": "text",
1108
+ "text": "This technical report introduced BitNet b1.58 2B4T, a significant step towards highly efficient yet capable Large Language Models. As the first open-source, native 1-bit LLM trained at the 2-billion parameter scale on 4 trillion tokens, our work demonstrates the viability of extreme quantization directly within the training process.",
1109
+ "bbox": [
1110
+ 169,
1111
+ 122,
1112
+ 823,
1113
+ 178
1114
+ ],
1115
+ "page_idx": 8
1116
+ },
1117
+ {
1118
+ "type": "text",
1119
+ "text": "Comprehensive evaluations across benchmarks assessing language understanding, reasoning, mathematics, coding, and dialogue revealed that BitNet b1.58 2B4T achieves performance comparable to state-of-the-art open-weight, full-precision models of similar size. Crucially, this performance parity is achieved with dramatically reduced computational requirements, offering substantial savings in memory footprint, energy consumption, and inference latency. To facilitate practical use and further research, we developed and released optimized inference implementations for both GPU (via custom CUDA kernels) and CPU (via the 'bitnet.cpp' library), alongside the model weights available on Hugging Face.",
1120
+ "bbox": [
1121
+ 169,
1122
+ 184,
1123
+ 826,
1124
+ 297
1125
+ ],
1126
+ "page_idx": 8
1127
+ },
1128
+ {
1129
+ "type": "text",
1130
+ "text": "BitNet b1.58 2B4T represents a compelling proof-of-concept that challenges the necessity of full-precision weights for achieving high performance in LLMs at scale. It opens avenues for deploying powerful language models in resource-constrained environments where previous models were prohibitive, potentially democratizing access to advanced AI capabilities.",
1131
+ "bbox": [
1132
+ 169,
1133
+ 301,
1134
+ 826,
1135
+ 358
1136
+ ],
1137
+ "page_idx": 8
1138
+ },
1139
+ {
1140
+ "type": "text",
1141
+ "text": "7 Future Directions",
1142
+ "text_level": 1,
1143
+ "bbox": [
1144
+ 171,
1145
+ 378,
1146
+ 354,
1147
+ 396
1148
+ ],
1149
+ "page_idx": 8
1150
+ },
1151
+ {
1152
+ "type": "text",
1153
+ "text": "While BitNet b1.58 2B4T demonstrates promising results, several exciting research directions remain:",
1154
+ "bbox": [
1155
+ 169,
1156
+ 412,
1157
+ 826,
1158
+ 429
1159
+ ],
1160
+ "page_idx": 8
1161
+ },
1162
+ {
1163
+ "type": "list",
1164
+ "sub_type": "text",
1165
+ "list_items": [
1166
+ "- Scaling Laws and Larger Models: Investigating the scaling properties of native 1-bit LLMs is crucial. Future work will explore training larger models (e.g., 7B, 13B parameters and beyond) and training on even larger datasets to understand if the performance parity with full-precision models holds.",
1167
+ "- Hardware Co-Design and Optimization: The full potential of 1-bit models is likely hindered by current hardware limitations. Continued development of highly optimized kernels for existing hardware (GPUs, CPUs, NPUs) is needed. Furthermore, co-designing future hardware accelerators specifically optimized for 1-bit computations and data movement could unlock orders-of-magnitude improvements in speed and energy efficiency.",
1168
+ "- Extended Sequence Length: Extending the maximum sequence length of BitNet b1.58 2B4T can process is crucial. This enhancement is vital for tasks demanding long-context understanding, such as summarizing lengthy documents or engaging in complex problem-solving, and is particularly critical for improving performance on long chain-of-thought reasoning tasks. Investigating efficient attention mechanisms suitable for low-bit models at longer sequence lengths will be key.",
1169
+ "- Multilingual Capabilities: The current model is primarily trained on English-centric data. Extending the pre-training corpus and potentially adapting the architecture to effectively support multiple languages is a key direction for broader applicability.",
1170
+ "- Multimodal Integration: Exploring the integration of 1-bit principles into multimodal architectures is another promising frontier. Developing efficient ways to process and fuse information from different modalities (e.g., text and images) within a low-bit framework could enable new applications.",
1171
+ "- Theoretical Understanding: Delving deeper into the theoretical underpinnings of why 1-bit training at scale is effective remains an open area. Analyzing the learning dynamics, loss landscapes, and representational properties of these models could yield valuable insights for future development."
1172
+ ],
1173
+ "bbox": [
1174
+ 215,
1175
+ 441,
1176
+ 826,
1177
+ 840
1178
+ ],
1179
+ "page_idx": 8
1180
+ },
1181
+ {
1182
+ "type": "text",
1183
+ "text": "By pursuing these directions, we aim to further advance the capability and efficiency of 1-bit LLMs, paving the way for more sustainable and accessible artificial intelligence. The open-source release of BitNet b1.58 2B4T and its associated tools provides a foundation for the community to build upon these efforts.",
1184
+ "bbox": [
1185
+ 169,
1186
+ 854,
1187
+ 826,
1188
+ 912
1189
+ ],
1190
+ "page_idx": 8
1191
+ },
1192
+ {
1193
+ "type": "page_number",
1194
+ "text": "9",
1195
+ "bbox": [
1196
+ 493,
1197
+ 935,
1198
+ 504,
1199
+ 946
1200
+ ],
1201
+ "page_idx": 8
1202
+ },
1203
+ {
1204
+ "type": "text",
1205
+ "text": "References",
1206
+ "text_level": 1,
1207
+ "bbox": [
1208
+ 173,
1209
+ 89,
1210
+ 269,
1211
+ 106
1212
+ ],
1213
+ "page_idx": 9
1214
+ },
1215
+ {
1216
+ "type": "list",
1217
+ "sub_type": "ref_text",
1218
+ "list_items": [
1219
+ "Allal, L. B., Lozhkov, A., Bakouch, E., Blázquez, G. M., Penedo, G., Tunstall, L., Marafioti, A., Kydlíček, H., Lajarín, A. P., Srivastav, V., Lochner, J., Fahlgren, C., Nguyen, X.-S., Fourrier, C., Burtenshaw, B., Larcher, H., Zhao, H., Zakka, C., Morlon, M., Raffel, C., von Werra, L., and Wolf, T. (2025). Smollm2: When smol goes big - data-centric training of a small language model. CoRR, abs/2502.02737.",
1220
+ "Bai, S., Chen, K., Liu, X., Wang, J., Ge, W., Song, S., Dang, K., Wang, P., Wang, S., Tang, J., Zhong, H., Zhu, Y., Yang, M.-H., Li, Z., Wan, J., Wang, P., Ding, W., Fu, Z., Xu, Y., Ye, J., Zhang, X., Xie, T., Cheng, Z., Zhang, H., Yang, Z., Xu, H., and Lin, J. (2025). Qwen2.5-vl technical report. CoRR, abs/2502.13923.",
1221
+ "Bisk, Y., Zellers, R., Bras, R. L., Gao, J., and Choi, Y. (2019). PIQA: reasoning about physical commonsense in natural language. CoRR, abs/1911.11641.",
1222
+ "Clark, C., Lee, K., Chang, M.-W., Kwiatkowski, T., Collins, M., and Toutanova, K. (2019). Boolq: Exploring the surprising difficulty of natural yes/no questions. CoRR, abs/1905.10044.",
1223
+ "Cobbe, K., Kosaraju, V., Bavarian, M., Chen, M., Jun, H., Kaiser, L., Plappert, M., Tworek, J., Hilton, J., Nakano, R., Hesse, C., and Schulman, J. (2021). Training verifiers to solve math word problems. CoRR, abs/2110.14168.",
1224
+ "Cui, G., Yuan, L., Ding, N., Yao, G., He, B., Zhu, W., Ni, Y., Xie, G., Xie, R., Lin, Y., Liu, Z., and Sun, M. (2024). ULTRAFEEDBACK: boosting language models with scaled AI feedback. In ICML. OpenReview.net.",
1225
+ "Dubey, A., Jauhri, A., Pandey, A., Kadian, A., Al-Dahle, A., Letman, A., Mathur, A., Schelten, A., Yang, A., Fan, A., Goyal, A., Hartshorn, A., Yang, A., Mitra, A., Sravankumar, A., Korenev, A., Hinsvark, A., Rao, A., Zhang, A., Rodriguez, A., Gregerson, A., Spataru, A., Rozière, B., Biron, B., Tang, B., Chern, B., Caucheteux, C., Nayak, C., Bi, C., Marra, C., McConnell, C., Keller, C., Touret, C., Wu, C., Wong, C., Ferrer, C. C., Nikolaidis, C., Allonsius, D., Song, D., Pintz, D., Livshits, D., Esiobu, D., Choudhary, D., Mahajan, D., Garcia-Olano, D., Perino, D., Hupkes, D., Lakomkin, E., AlBadawy, E., Lobanova, E., Dinan, E., Smith, E. M., Radenovic, F., Zhang, F., Synnaeve, G., Lee, G., Anderson, G. L., Nail, G., Mialon, G., Pang, G., Cucurell, G., Nguyen, H., Korevaar, H., Xu, H., Touvron, H., Zarov, I., Ibarra, I. A., Kloumann, I. M., Misra, I., Evtimov, I., Copet, J., Lee, J., Geffert, J., Vranes, J., Park, J., Mahadeokar, J., Shah, J., van der Linde, J., Billock, J., Hong, J., Lee, J., Fu, J., Chi, J., Huang, J., Liu, J., Wang, J., Yu, J., Bitton, J., Spisak, J., Park, J., Rocca, J., Johnstun, J., Saxe, J., Jia, J., Alwala, K. V., Upasani, K., Plawiak, K., Li, K., Heafield, K., Stone, K., and et al. (2024). The llama 3 herd of models. CoRR, abs/2407.21783.",
1226
+ "Hendrycks, D., Burns, C., Basart, S., Zou, A., Mazeika, M., Song, D., and Steinhardt, J. (2021a). Measuring massive multitask language understanding. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021.",
1227
+ "Hendrycks, D., Burns, C., Kadavath, S., Arora, A., Basart, S., Tang, E., Song, D., and Steinhardt, J. (2021b). Measuring mathematical problem solving with the MATH dataset. In Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks 1, NeurIPS Datasets and Benchmarks 2021, December 2021, virtual.",
1228
+ "Horowitz, M. (2014). 1.1 computing's energy problem (and what we can do about it). In 2014 IEEE International Conference on Solid-State Circuits Conference, ISSCC 2014, Digest of Technical Papers, San Francisco, CA, USA, February 9-13, 2014, pages 10-14.",
1229
+ "Hsu, P.-L., Dai, Y., Kothapalli, V., Song, Q., Tang, S., Zhu, S., Shimizu, S., Sahni, S., Ning, H., and Chen, Y. (2024). Liger kernel: Efficient triton kernels for LLM training. CoRR, abs/2410.10989.",
1230
+ "Hu, S., Tu, Y., Han, X., He, C., Cui, G., Long, X., Zheng, Z., Fang, Y., Huang, Y., Zhao, W., Zhang, X., Thai, Z. L., Zhang, K., Wang, C., Yao, Y., Zhao, C., Zhou, J., Cai, J., Zhai, Z., Ding, N., Jia, C., Zeng, G., Li, D., Liu, Z., and Sun, M. (2024). Minicpm: Unveiling the potential of small language models with scalable training strategies. CoRR, abs/2404.06395."
1231
+ ],
1232
+ "bbox": [
1233
+ 173,
1234
+ 114,
1235
+ 828,
1236
+ 911
1237
+ ],
1238
+ "page_idx": 9
1239
+ },
1240
+ {
1241
+ "type": "page_number",
1242
+ "text": "10",
1243
+ "bbox": [
1244
+ 490,
1245
+ 935,
1246
+ 509,
1247
+ 946
1248
+ ],
1249
+ "page_idx": 9
1250
+ },
1251
+ {
1252
+ "type": "list",
1253
+ "sub_type": "ref_text",
1254
+ "list_items": [
1255
+ "Joshi, M., Choi, E., Weld, D. S., and Zettlemoyer, L. (2017). Triviaqa: A large scale distantly supervised challenge dataset for reading comprehension. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics, ACL 2017, Vancouver, Canada, July 30 - August 4, Volume 1: Long Papers, pages 1601-1611.",
1256
+ "Li, H., Dong, Q., Tang, Z., Wang, C., Zhang, X., Huang, H., Huang, S., Huang, X., Huang, Z., Zhang, D., Gu, Y., Cheng, X., Wang, X., Chen, S.-Q., Dong, L., Lu, W., Sui, Z., Wang, B., Lam, W., and Wei, F. (2024a). Synthetic data (almost) from scratch: Generalized instruction tuning for language models. CoRR, abs/2402.13064.",
1257
+ "Li, J., Fang, A., Smyrnis, G., Ivgi, M., Jordan, M., Gadre, S. Y., Bansal, H., Guha, E., Keh, S. S., Arora, K., Garg, S., Xin, R., Muennighoff, N., Heckel, R., Mercat, J., Chen, M. F., Gururangan, S., Wortsman, M., Albalak, A., Bitton, Y., Nezhurina, M., Abbas, A., Hsieh, C.-Y., Ghosh, D., Gardner, J., Kilian, M., Zhang, H., Shao, R., Pratt, S. M., Sanyal, S., Ilharco, G., Daras, G., Marathe, K., Gokaslan, A., Zhang, J., Chandu, K. R., Nguyen, T., Vasiljevic, I., Kakade, S. M., Song, S., Sanghavi, S., Faghri, F., Oh, S., Zettlemoyer, L., Lo, K., El-Nouby, A., Pouransari, H., Toshev, A., Wang, S., Groeneveld, D., Soldaini, L., Koh, P. W., Jitsev, J., Kollar, T., Dimakis, A., Carmon, Y., Dave, A., Schmidt, L., and Shankar, V. (2024b). Datacomp-lm: In search of the next generation of training sets for language models. In Globersons, A., Mackey, L., Belgrave, D., Fan, A., Paquet, U., Tomczak, J. M., and Zhang, C., editors, Advances in Neural Information Processing Systems 38: Annual Conference on Neural Information Processing Systems 2024, NeurIPS 2024, Vancouver, BC, Canada, December 10 - 15, 2024.",
1258
+ "Lian, W., Wang, G., Goodson, B., Pentland, E., Cook, A., Vong, C., and \"Teknium\" (2023). Slimorca: An open dataset of gpt-4 augmented flan reasoning traces, with verification.",
1259
+ "Lin, S., Hilton, J., and Evans, O. (2022). Truthfulqa: Measuring how models mimic human falsehoods. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), ACL 2022, Dublin, Ireland, May 22-27, 2022, pages 3214-3252.",
1260
+ "Liu, J., Xia, C. S., Wang, Y., and Zhang, L. (2023). Is your code generated by chatgpt really correct? rigorous evaluation of large language models for code generation. Advances in Neural Information Processing Systems, 36:21558-21572.",
1261
+ "Ma, S., Wang, H., Ma, L., Wang, L., Wang, W., Huang, S., Dong, L., Wang, R., Xue, J., and Wei, F. (2024). The era of 1-bit llms: All large language models are in 1.58 bits. CoRR, abs/2402.17764.",
1262
+ "Mihaylov, T., Clark, P., Khot, T., and Sabharwal, A. (2018). Can a suit of armor conduct electricity? A new dataset for open book question answering. CoRR, abs/1809.02789.",
1263
+ "Penedo, G., Kydlícek, H., Allal, L. B., Lozhkov, A., Mitchell, M., Raffel, C. A., von Werra, L., and Wolf, T. (2024). The fineweb datasets: Decanting the web for the finest text data at scale. In Globersons, A., Mackey, L., Belgrave, D., Fan, A., Paquet, U., Tomczak, J. M., and Zhang, C., editors, Advances in Neural Information Processing Systems 38: Annual Conference on Neural Information Processing Systems 2024, NeurIPS 2024, Vancouver, BC, Canada, December 10 - 15, 2024.",
1264
+ "Rafailov, R., Sharma, A., Mitchell, E., Manning, C. D., Ermon, S., and Finn, C. (2023). Direct preference optimization: Your language model is secretly a reward model. In Oh, A., Naumann, T., Globerson, A., Saenko, K., Hardt, M., and Levine, S., editors, Advances in Neural Information Processing Systems 36.",
1265
+ "Sakaguchi, K., Bras, R. L., Bhagavatula, C., and Choi, Y. (2020). WinoGrande: an adversarial winograd schema challenge at scale. In AAAI, pages 8732-8740.",
1266
+ "Schulman, J., Wolski, F., Dhariwal, P., Radford, A., and Klimov, O. (2017). Proximal policy optimization algorithms. CoRR, abs/1707.06347.",
1267
+ "Shao, Z., Wang, P., Zhu, Q., Xu, R., Song, J., Zhang, M., Li, Y. K., Wu, Y., and Guo, D. (2024). Deepseekmath: Pushing the limits of mathematical reasoning in open language models. CoRR, abs/2402.03300.",
1268
+ "Shazeer, N. (2020). GLU variants improve transformer. CoRR, abs/2002.05202."
1269
+ ],
1270
+ "bbox": [
1271
+ 171,
1272
+ 90,
1273
+ 826,
1274
+ 912
1275
+ ],
1276
+ "page_idx": 10
1277
+ },
1278
+ {
1279
+ "type": "page_number",
1280
+ "text": "11",
1281
+ "bbox": [
1282
+ 490,
1283
+ 935,
1284
+ 506,
1285
+ 946
1286
+ ],
1287
+ "page_idx": 10
1288
+ },
1289
+ {
1290
+ "type": "list",
1291
+ "sub_type": "ref_text",
1292
+ "list_items": [
1293
+ "Su, J., Ahmed, M. H. M., Lu, Y., Pan, S., Bo, W., and Liu, Y. (2024). Roformer: Enhanced transformer with rotary position embedding. Neurocomputing, 568:127063.",
1294
+ "Talmor, A., Herzig, J., Lourie, N., and Berant, J. (2019). Commonsenseqa: A question answering challenge targeting commonsense knowledge. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL-HLT 2019, Minneapolis, MN, USA, June 2-7, 2019, Volume 1 (Long and Short Papers), pages 4149-4158.",
1295
+ "Tang, Z., Zhang, X., Wang, B., and Wei, F. (2024). Mathscale: Scaling instruction tuning for mathematical reasoning. In *Forty-first International Conference on Machine Learning*, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net.",
1296
+ "Team, F.-L. (2024). The falcon 3 family of open models.",
1297
+ "Team, G., Kamath, A., Ferret, J., Pathak, S., Vieillard, N., Merhej, R., Perrin, S., Matejovicova, T., Ram'e, A., Rivi'ere, M., et al. (2025). Gemma 3 technical report. arXiv preprint arXiv:2503.19786.",
1298
+ "Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., Kaiser, L., and Polosukhin, I. (2017). Attention is all you need. In Guyon, I., von Luxburg, U., Bengio, S., Wallach, H. M., Fergus, R., Vishwanathan, S. V. N., and Garnett, R., editors, Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, pages 5998-6008.",
1299
+ "Wang, H., Ma, S., Dong, L., Huang, S., Wang, H., Ma, L., Yang, F., Wang, R., Wu, Y., and Wei, F. (2023a). Bitnet: Scaling 1-bit transformers for large language models. CoRR, abs/2310.11453.",
1300
+ "Wang, H., Ma, S., Huang, S., Dong, L., Wang, W., Peng, Z., Wu, Y., Bajaj, P., Singhal, S., Benhaim, A., Patra, B., Liu, Z., Chaudhary, V., Song, X., and Wei, F. (2022). Foundation transformers. CoRR.",
1301
+ "Wang, H., Ma, S., Wang, R., and Wei, F. (2024a). Q-sparse: All large language models can be fully sparsely-activated. CoRR, abs/2407.10969.",
1302
+ "Wang, H., Ma, S., and Wei, F. (2024b). Bitnet a4.8: 4-bit activations for 1-bit llms. CoRR, abs/2411.04965.",
1303
+ "Wang, J., Zhou, H., Song, T., Cao, S., Xia, Y., Cao, T., Wei, J., Ma, S., Wang, H., and Wei, F. (2025). Bitnet.cpp: Efficient edge inference for ternary lms. CoRR, abs/2502.11880.",
1304
+ "Wang, L., Ma, L., Cao, S., Zheng, N., Zhang, Q., Xue, J., Miao, Z., Cao, T., and Yang, Y. (2023b). Ladder: Efficient tensor compilation on customized data format. In OSDI.",
1305
+ "Xu, C., Sun, Q., Zheng, K., Geng, X., Zhao, P., Feng, J., Tao, C., Lin, Q., and Jiang, D. (2024a). Wizardlm: Empowering large pre-trained language models to follow complex instructions. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net.",
1306
+ "Xu, Y., Han, X., Yang, Z., Wang, S., Zhu, Q., Liu, Z., Liu, W., and Che, W. (2024b). Onebit: Towards extremely low-bit large language models. In Globersons, A., Mackey, L., Belgrave, D., Fan, A., Paquet, U., Tomczak, J. M., and Zhang, C., editors, Advances in Neural Information Processing Systems 38: Annual Conference on Neural Information Processing Systems 2024, NeurIPS 2024, Vancouver, BC, Canada, December 10 - 15, 2024.",
1307
+ "Xu, Z., Jiang, F., Niu, L., Deng, Y., Poovendran, R., Choi, Y., and Lin, B. Y. (2024c). Magpie: Alignment data synthesis from scratch by prompting aligned lms with nothing. CoRR, abs/2406.08464.",
1308
+ "Yadav, V., Bethard, S., and Surdeanu, M. (2019). Quick and (not so) dirty: Unsupervised selection of justification sentences for multi-hop question answering. In EMNLP-IJCNLP.",
1309
+ "Yang, A., Yang, B., Zhang, B., Hui, B., Zheng, B., Yu, B., Li, C., Liu, D., Huang, F., Wei, H., Lin, H., Yang, J., Tu, J., Zhang, J., Yang, J., Yang, J., Zhou, J., Lin, J., Dang, K., Lu, K., Bao, K., Yang, K., Yu, L., Li, M., Xue, M., Zhang, P., Zhu, Q., Men, R., Lin, R., Li, T., Xia, T., Ren, X., Ren, X., Fan, Y., Su, Y., Zhang, Y., Wan, Y., Liu, Y., Cui, Z., Zhang, Z., and Qiu, Z. (2024). Qwen2.5 technical report. CoRR, abs/2412.15115."
1310
+ ],
1311
+ "bbox": [
1312
+ 173,
1313
+ 90,
1314
+ 828,
1315
+ 911
1316
+ ],
1317
+ "page_idx": 11
1318
+ },
1319
+ {
1320
+ "type": "page_number",
1321
+ "text": "12",
1322
+ "bbox": [
1323
+ 490,
1324
+ 935,
1325
+ 508,
1326
+ 946
1327
+ ],
1328
+ "page_idx": 11
1329
+ },
1330
+ {
1331
+ "type": "list",
1332
+ "sub_type": "ref_text",
1333
+ "list_items": [
1334
+ "Zellers, R., Holtzman, A., Bisk, Y., Farhadi, A., and Choi, Y. (2019). HellaSwag: can a machine really finish your sentence? In Proceedings of the 57th Conference of the Association for Computational Linguistics, pages 4791-4800.",
1335
+ "Zhang, Y., Zhang, Z., and Lew, L. (2022). PokeBNN: A binary pursuit of lightweight accuracy. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12465-12475. IEEE.",
1336
+ "Zhao, W., Ren, X., Hessel, J., Cardie, C., Choi, Y., and Deng, Y. (2024). Wildchat: 1m chatgpt interaction logs in the wild. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net.",
1337
+ "Zheng, L., Chiang, W.-L., Sheng, Y., Li, T., Zhuang, S., Wu, Z., Zhuang, Y., Li, Z., Lin, Z., Xing, E. P., Gonzalez, J. E., Stoica, I., and Zhang, H. (2024). Lmsys-chat-1m: A large-scale real-world LLM conversation dataset. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net.",
1338
+ "Zheng, L., Chiang, W.-L., Sheng, Y., Zhuang, S., Wu, Z., Zhuang, Y., Lin, Z., Li, Z., Li, D., Xing, E. P., Zhang, H., Gonzalez, J. E., and Stoica, I. (2023). Judging lvm-as-a-judge with mt-bench and chatbot arena. In Advances in Neural Information Processing Systems 36.",
1339
+ "Zhou, J., Lu, T., Mishra, S., Brahma, S., Basu, S., Luan, Y., Zhou, D., and Hou, L. (2023). Instruction-following evaluation for large language models. CoRR, abs/2311.07911."
1340
+ ],
1341
+ "bbox": [
1342
+ 171,
1343
+ 90,
1344
+ 826,
1345
+ 380
1346
+ ],
1347
+ "page_idx": 12
1348
+ },
1349
+ {
1350
+ "type": "text",
1351
+ "text": "A Open-weight Baselines",
1352
+ "text_level": 1,
1353
+ "bbox": [
1354
+ 171,
1355
+ 405,
1356
+ 401,
1357
+ 422
1358
+ ],
1359
+ "page_idx": 12
1360
+ },
1361
+ {
1362
+ "type": "text",
1363
+ "text": "We summarize the links to the open-weight LLMs evaluated in this work as below:",
1364
+ "bbox": [
1365
+ 171,
1366
+ 436,
1367
+ 718,
1368
+ 452
1369
+ ],
1370
+ "page_idx": 12
1371
+ },
1372
+ {
1373
+ "type": "list",
1374
+ "sub_type": "text",
1375
+ "list_items": [
1376
+ "- LLaMA 3.2 1B: meta-llama/Llama-3.2-1B-Instruct",
1377
+ "- Gemma-3 1B: google/gemma-3-1b-it",
1378
+ "Qwen2.5 0.5B: Qwen/Qwen2.5-0.5B-Instruct",
1379
+ "- Qwen2.5 1.5B: Qwen/Qwen2.5-1.5B-Instruct",
1380
+ "- Qwen2.5 3B: Qwen/Qwen2.5-3B-Instruct",
1381
+ "- SmolLM2 1.7B: HuggingFaceTB/SmolLM2-1.7B-Instruct",
1382
+ "- MiniCPM 2B: openbmb/MiniCPM-2B-dpo-bf16",
1383
+ "- Qwen2.5 1.5B-GPTQ-int4: Qwen/Qwen2.5-1.5B-Instruct-GPTQ-Int4",
1384
+ "Qwen2.5 1.5B-AWQ-int4: Qwen/Qwen2.5-1.5B-Instruct-AWQ",
1385
+ "- Bonsai 0.5B: deepgrove/Bonsai",
1386
+ "- OLMo-Bitnet 1B: NousResearch/OLMo-Bitnet-1B",
1387
+ "- Falcon3-1.58bit 7B: tiiuae/Falcon3-7B-Instruct-1.58bit",
1388
+ "- Llama3-8B-1.58 8B: HF1BitLLM/Llama3-8B-1.58-100B-tokens"
1389
+ ],
1390
+ "bbox": [
1391
+ 215,
1392
+ 464,
1393
+ 730,
1394
+ 707
1395
+ ],
1396
+ "page_idx": 12
1397
+ },
1398
+ {
1399
+ "type": "text",
1400
+ "text": "B Evaluation Pipeline Details",
1401
+ "text_level": 1,
1402
+ "bbox": [
1403
+ 171,
1404
+ 727,
1405
+ 434,
1406
+ 744
1407
+ ],
1408
+ "page_idx": 12
1409
+ },
1410
+ {
1411
+ "type": "text",
1412
+ "text": "To ensure standardized evaluation, we employed established toolkits for different benchmark categories. Specifically:",
1413
+ "bbox": [
1414
+ 169,
1415
+ 758,
1416
+ 826,
1417
+ 787
1418
+ ],
1419
+ "page_idx": 12
1420
+ },
1421
+ {
1422
+ "type": "list",
1423
+ "sub_type": "text",
1424
+ "list_items": [
1425
+ "- For the HumanEval+ coding benchmark, we utilized the evalplus toolkit.",
1426
+ "- For the MATH-500 mathematical reasoning benchmark, we used a customized version of the math-evaluation-harness toolkit.",
1427
+ "- For the MT-Bench conversational benchmark, evaluation was performed using the official LLM Judge open-source codebase.",
1428
+ "- For all other benchmarks assessing language understanding, reasoning, knowledge, and comprehension, we used the standard lm-evaluation-harness framework."
1429
+ ],
1430
+ "bbox": [
1431
+ 215,
1432
+ 797,
1433
+ 825,
1434
+ 911
1435
+ ],
1436
+ "page_idx": 12
1437
+ },
1438
+ {
1439
+ "type": "page_number",
1440
+ "text": "13",
1441
+ "bbox": [
1442
+ 490,
1443
+ 935,
1444
+ 508,
1445
+ 946
1446
+ ],
1447
+ "page_idx": 12
1448
+ },
1449
+ {
1450
+ "type": "table",
1451
+ "img_path": "images/c2ea347c586a5437a02e09c5396b1bc21f19fa3a3f5ae4fc75ee151f66b801d8.jpg",
1452
+ "table_caption": [],
1453
+ "table_footnote": [],
1454
+ "table_body": "<table><tr><td>Bits</td><td>ADD Energy</td><td>MUL Energy</td></tr><tr><td>FP16</td><td>0.16</td><td>0.34</td></tr><tr><td>INT8</td><td>0.007</td><td>0.07</td></tr></table>",
1455
+ "bbox": [
1456
+ 362,
1457
+ 88,
1458
+ 633,
1459
+ 147
1460
+ ],
1461
+ "page_idx": 13
1462
+ },
1463
+ {
1464
+ "type": "text",
1465
+ "text": "Table 4: ADD and MUL energy consumption (in pJ) of different precision at $7\\mathrm{nm}$ process nodes.",
1466
+ "bbox": [
1467
+ 181,
1468
+ 152,
1469
+ 815,
1470
+ 167
1471
+ ],
1472
+ "page_idx": 13
1473
+ },
1474
+ {
1475
+ "type": "text",
1476
+ "text": "Models were prompted using a chat format for generative tasks (e.g., GSM8K, IFEval, and MT-Bench), while default settings from the respective toolkits were used for other tasks.",
1477
+ "bbox": [
1478
+ 174,
1479
+ 193,
1480
+ 823,
1481
+ 220
1482
+ ],
1483
+ "page_idx": 13
1484
+ },
1485
+ {
1486
+ "type": "text",
1487
+ "text": "For energy consumption, we utilize the energy model in (Horowitz, 2014; Zhang et al., 2022) to estimate the arithmetic operations energy (AOE) of matrix multiplication. The sequence length is set as 512 tokens. We present the energy consumption for ADD and MUL operation at $7\\mathrm{nm}$ process nodes in Table 4.",
1488
+ "bbox": [
1489
+ 174,
1490
+ 227,
1491
+ 823,
1492
+ 282
1493
+ ],
1494
+ "page_idx": 13
1495
+ },
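The arithmetic-operation energy estimate described in the extracted paragraph above can be reproduced with a simple MAC-counting model. The sketch below is illustrative only: it assumes energy ≈ (number of multiply-accumulates) × (E_MUL + E_ADD), using the per-operation 7nm values from Table 4; the function name and the example dimensions are ours, not part of the paper's pipeline.

```python
# Per-operation energy in pJ at a 7nm node, taken from Table 4.
ENERGY_PJ = {
    "fp16": {"add": 0.16, "mul": 0.34},
    "int8": {"add": 0.007, "mul": 0.07},
}

def matmul_energy_pj(m: int, k: int, n: int, dtype: str) -> float:
    # An (m x k) @ (k x n) product performs m * n * k multiply-accumulates,
    # each costing one MUL plus one ADD at the chosen precision.
    e = ENERGY_PJ[dtype]
    return m * n * k * (e["mul"] + e["add"])

# Illustrative: a 512-token sequence through a 2048 x 2048 projection in INT8.
print(matmul_energy_pj(512, 2048, 2048, "int8") / 1e12, "J")
```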
1496
+ {
1497
+ "type": "text",
1498
+ "text": "To assess CPU decoding performance, latency measurements were conducted on a Surface Laptop Studio 2 system powered by a 13th Gen Intel Core i7-13800H processor. The benchmarking process utilized 8 CPU threads. Specifically, the BitNet b1.58 2B4T model was tested using its bitnet.cpp implementation, whereas other models were evaluated using the llama.cpp framework. For each model, we generated 128 tokens and report the average latency per token for this task.",
1499
+ "bbox": [
1500
+ 174,
1501
+ 289,
1502
+ 823,
1503
+ 359
1504
+ ],
1505
+ "page_idx": 13
1506
+ },
1507
+ {
1508
+ "type": "page_number",
1509
+ "text": "14",
1510
+ "bbox": [
1511
+ 491,
1512
+ 935,
1513
+ 506,
1514
+ 946
1515
+ ],
1516
+ "page_idx": 13
1517
+ }
1518
+ ]
data/2025/2504_12xxx/2504.12285/2c3f7ef8-ab61-4b87-a7bf-c49da203744d_model.json ADDED
@@ -0,0 +1,2384 @@
1
+ [
2
+ [
3
+ {
4
+ "type": "header",
5
+ "bbox": [
6
+ 0.279,
7
+ 0.123,
8
+ 0.721,
9
+ 0.149
10
+ ],
11
+ "angle": 0,
12
+ "content": "BitNet b1.58 2B4T Technical Report"
13
+ },
14
+ {
15
+ "type": "header",
16
+ "bbox": [
17
+ 0.267,
18
+ 0.201,
19
+ 0.735,
20
+ 0.244
21
+ ],
22
+ "angle": 0,
23
+ "content": "Shuming Ma* Hongyu Wang* Shaohan Huang Xingxing Zhang Ying Hu Ting Song Yan Xia Furu Wei https://aka.ms/GeneralAI"
24
+ },
25
+ {
26
+ "type": "title",
27
+ "bbox": [
28
+ 0.46,
29
+ 0.251,
30
+ 0.538,
31
+ 0.266
32
+ ],
33
+ "angle": 0,
34
+ "content": "Abstract"
35
+ },
36
+ {
37
+ "type": "text",
38
+ "bbox": [
39
+ 0.229,
40
+ 0.28,
41
+ 0.77,
42
+ 0.435
43
+ ],
44
+ "angle": 0,
45
+ "content": "We introduce BitNet b1.58 2B4T, the first open-source, native 1-bit Large Language Model (LLM) at the 2-billion parameter scale. Trained on a corpus of 4 trillion tokens, the model has been rigorously evaluated across benchmarks covering language understanding, mathematical reasoning, coding proficiency, and conversational ability. Our results demonstrate that BitNet b1.58 2B4T achieves performance on par with leading open-weight, full-precision LLMs of similar size, while offering significant advantages in computational efficiency, including substantially reduced memory footprint, energy consumption, and decoding latency. To facilitate further research and adoption, the model weights are released via Hugging Face along with open-source inference implementations for both GPU and CPU architectures."
46
+ },
47
+ {
48
+ "type": "text",
49
+ "bbox": [
50
+ 0.235,
51
+ 0.442,
52
+ 0.581,
53
+ 0.455
54
+ ],
55
+ "angle": 0,
56
+ "content": "BitNet b1.58 2B4T (1.58-bit): bitnet-b1.58-2B-4T"
57
+ },
58
+ {
59
+ "type": "text",
60
+ "bbox": [
61
+ 0.255,
62
+ 0.456,
63
+ 0.64,
64
+ 0.469
65
+ ],
66
+ "angle": 0,
67
+ "content": "The packed weight of BitNet b1.58 2B4T, used for inference only"
68
+ },
69
+ {
70
+ "type": "text",
71
+ "bbox": [
72
+ 0.235,
73
+ 0.474,
74
+ 0.6,
75
+ 0.488
76
+ ],
77
+ "angle": 0,
78
+ "content": "BitNet b1.58 2B4T (bf16): bitnet-b1.58-2B-4T-bf16"
79
+ },
80
+ {
81
+ "type": "text",
82
+ "bbox": [
83
+ 0.256,
84
+ 0.488,
85
+ 0.632,
86
+ 0.502
87
+ ],
88
+ "angle": 0,
89
+ "content": "The master weight of BitNet b1.58 2B4T, used for training only"
90
+ },
91
+ {
92
+ "type": "text",
93
+ "bbox": [
94
+ 0.235,
95
+ 0.506,
96
+ 0.599,
97
+ 0.52
98
+ ],
99
+ "angle": 0,
100
+ "content": "BitNet b1.58 2B4T (gguf): bitnet-b1.58-2B-4T-gguf"
101
+ },
102
+ {
103
+ "type": "text",
104
+ "bbox": [
105
+ 0.256,
106
+ 0.52,
107
+ 0.615,
108
+ 0.535
109
+ ],
110
+ "angle": 0,
111
+ "content": "The GGUF format of BitNet b1.58 2B4T, used for bitnet.cpp"
112
+ },
113
+ {
114
+ "type": "text",
115
+ "bbox": [
116
+ 0.235,
117
+ 0.54,
118
+ 0.711,
119
+ 0.555
120
+ ],
121
+ "angle": 0,
122
+ "content": "BitNet b1.58 2B4T Code: bitnet.cpp Demo: aka.ms/bitnet-demo"
123
+ },
124
+ {
125
+ "type": "image",
126
+ "bbox": [
127
+ 0.223,
128
+ 0.563,
129
+ 0.771,
130
+ 0.825
131
+ ],
132
+ "angle": 0,
133
+ "content": null
134
+ },
135
+ {
136
+ "type": "image_caption",
137
+ "bbox": [
138
+ 0.171,
139
+ 0.83,
140
+ 0.825,
141
+ 0.861
142
+ ],
143
+ "angle": 0,
144
+ "content": "Figure 1: BitNet b1.58 2B4T advances the Pareto frontier defined by leading open-weight LLMs under 3B parameters in terms of performance versus memory, demonstrating superior efficiency."
145
+ },
146
+ {
147
+ "type": "page_footnote",
148
+ "bbox": [
149
+ 0.171,
150
+ 0.873,
151
+ 0.825,
152
+ 0.914
153
+ ],
154
+ "angle": 0,
155
+ "content": "* Equal contribution. ⋆ Corresponding author. S. Ma, S. Huang, X. Zhang, T. Song, Y. Xia and F. Wei are with Microsoft Research. H. Wang is with University of Chinese Academy of Sciences. Y. Hu is with Tsinghua University."
156
+ },
157
+ {
158
+ "type": "aside_text",
159
+ "bbox": [
160
+ 0.023,
161
+ 0.266,
162
+ 0.061,
163
+ 0.707
164
+ ],
165
+ "angle": 270,
166
+ "content": "arXiv:2504.12285v2 [cs.CL] 25 Apr 2025"
167
+ }
168
+ ],
169
+ [
170
+ {
171
+ "type": "title",
172
+ "bbox": [
173
+ 0.174,
174
+ 0.09,
175
+ 0.313,
176
+ 0.106
177
+ ],
178
+ "angle": 0,
179
+ "content": "1 Introduction"
180
+ },
181
+ {
182
+ "type": "text",
183
+ "bbox": [
184
+ 0.171,
185
+ 0.121,
186
+ 0.827,
187
+ 0.233
188
+ ],
189
+ "angle": 0,
190
+ "content": "Open-source large language models (LLMs) have become pivotal in democratizing access to advanced AI capabilities, fostering innovation, and enabling research across diverse fields such as natural language processing, code generation, and vision computing (Dubey et al., 2024; Yang et al., 2024; Bai et al., 2025). Their public availability allows for widespread experimentation and adaptation. However, a significant barrier hinders their broader adoption: the substantial computational resources required for deployment and inference. State-of-the-art open LLMs typically require large memory footprints, consume considerable energy, and exhibit notable inference latency, rendering them impractical for many edge devices, resource-constrained environments, and real-time applications."
191
+ },
192
+ {
193
+ "type": "text",
194
+ "bbox": [
195
+ 0.171,
196
+ 0.238,
197
+ 0.828,
198
+ 0.392
199
+ ],
200
+ "angle": 0,
201
+ "content": "1-bit LLMs, representing an extreme yet promising form of model quantization where weights and potentially activations are constrained to binary \\(\\{-1, +1\\}\\) or ternary \\(\\{-1, 0, +1\\}\\), offer a compelling solution to the efficiency challenges. By drastically reducing the memory required to store weights and enabling highly efficient bitwise computations, they have the potential to significantly lower deployment costs, reduce energy consumption, and accelerate inference speeds. While prior work has explored 1-bit models, existing open efforts often fall into two categories: 1) post-training quantization (PTQ) methods applied to pre-trained full-precision models, which can lead to significant performance degradation (Xu et al., 2024b; Team, 2024), or 2) native 1-bit models (trained from scratch with 1-bit weights) that have been developed at relatively smaller scales (e.g., OLMo-Bitnet-1B²]) and may not yet match the capabilities of larger, full-precision counterparts. This performance gap has limited the practical impact of 1-bit LLMs thus far."
202
+ },
203
+ {
204
+ "type": "text",
205
+ "bbox": [
206
+ 0.171,
207
+ 0.397,
208
+ 0.825,
209
+ 0.482
210
+ ],
211
+ "angle": 0,
212
+ "content": "To bridge this gap between efficiency and performance, we introduce BitNet b1.58 2B4T, the first open-source, native 1-bit LLM trained at scale. This model, comprising 2 billion parameters, was trained from scratch on a substantial dataset of 4 trillion tokens, leveraging architectural and training innovations specific to the 1-bit paradigm. The core contribution of this work is to demonstrate that a native 1-bit LLM, when trained effectively at scale, can achieve performance comparable to leading open-weight, full-precision models of similar size across a wide range of tasks."
213
+ },
214
+ {
215
+ "type": "text",
216
+ "bbox": [
217
+ 0.171,
218
+ 0.486,
219
+ 0.826,
220
+ 0.6
221
+ ],
222
+ "angle": 0,
223
+ "content": "This technical report details the development and evaluation of BitNet b1.58 2B4T. We describe the architecture and training methodology, and then present comprehensive evaluation results on standard benchmarks assessing language understanding, mathematical reasoning, coding proficiency, and multi-turn conversational abilities. Our findings confirm its strong performance relative to established full-precision baselines, coupled with significant advantages in efficiency. Finally, we announce the public release of the BitNet b1.58 2B4T model weights via Hugging Face and provide open-source inference code optimized for both GPU and CPU execution, aiming to facilitate further research and the practical deployment of highly efficient LLMs."
224
+ },
225
+ {
226
+ "type": "title",
227
+ "bbox": [
228
+ 0.172,
229
+ 0.618,
230
+ 0.314,
231
+ 0.634
232
+ ],
233
+ "angle": 0,
234
+ "content": "2 Architecture"
235
+ },
236
+ {
237
+ "type": "text",
238
+ "bbox": [
239
+ 0.171,
240
+ 0.649,
241
+ 0.827,
242
+ 0.692
243
+ ],
244
+ "angle": 0,
245
+ "content": "The architecture of BitNet b1.58 2B4T is derived from the standard Transformer model (Vaswani et al., 2017), incorporating significant modifications based on the BitNet framework (Wang et al., 2023a; Ma et al., 2024). The model is trained entirely from scratch."
246
+ },
247
+ {
248
+ "type": "text",
249
+ "bbox": [
250
+ 0.171,
251
+ 0.697,
252
+ 0.825,
253
+ 0.74
254
+ ],
255
+ "angle": 0,
256
+ "content": "The core architectural innovation lies in replacing the standard full-precision linear layers (torch(nn.Linear) with custom BitLinear layers. This constitutes the foundation of the BitNet approach. Within these BitLinear layers:"
257
+ },
258
+ {
259
+ "type": "text",
260
+ "bbox": [
261
+ 0.217,
262
+ 0.751,
263
+ 0.825,
264
+ 0.806
265
+ ],
266
+ "angle": 0,
267
+ "content": "- Weight Quantization: Model weights are quantized to 1.58 bits during the forward pass. This is achieved using an absolute mean (absmean) quantization scheme, which maps weights to ternary values \\(\\{-1,0, + 1\\}\\). This drastically reduces the model size and enables efficient mathematical operations."
268
+ },
269
+ {
270
+ "type": "text",
271
+ "bbox": [
272
+ 0.217,
273
+ 0.812,
274
+ 0.825,
275
+ 0.853
276
+ ],
277
+ "angle": 0,
278
+ "content": "- Activation Quantization: Activations flowing through the linear projection are quantized to 8-bit integers. This employs an absolute maximum (absmax) quantization strategy, applied per-token."
279
+ },
280
+ {
281
+ "type": "text",
282
+ "bbox": [
283
+ 0.217,
284
+ 0.858,
285
+ 0.825,
286
+ 0.886
287
+ ],
288
+ "angle": 0,
289
+ "content": "- Normalization: We incorporate subln normalization (Wang et al., 2022) to further enhance training stability, which can be particularly beneficial in quantized training regimes."
290
+ },
291
+ {
292
+ "type": "list",
293
+ "bbox": [
294
+ 0.217,
295
+ 0.751,
296
+ 0.825,
297
+ 0.886
298
+ ],
299
+ "angle": 0,
300
+ "content": null
301
+ },
302
+ {
303
+ "type": "page_footnote",
304
+ "bbox": [
305
+ 0.193,
306
+ 0.897,
307
+ 0.591,
308
+ 0.912
309
+ ],
310
+ "angle": 0,
311
+ "content": "<sup>2</sup>https://huggingface.co/NousResearch/OLMo-Bitnet-1B"
312
+ },
313
+ {
314
+ "type": "page_number",
315
+ "bbox": [
316
+ 0.494,
317
+ 0.936,
318
+ 0.505,
319
+ 0.948
320
+ ],
321
+ "angle": 0,
322
+ "content": "2"
323
+ }
324
+ ],
325
+ [
326
+ {
327
+ "type": "text",
328
+ "bbox": [
329
+ 0.172,
330
+ 0.092,
331
+ 0.825,
332
+ 0.121
333
+ ],
334
+ "angle": 0,
335
+ "content": "Beyond the BitLinear layers, several established LLM techniques are integrated to enhance performance and stability:"
336
+ },
337
+ {
338
+ "type": "text",
339
+ "bbox": [
340
+ 0.217,
341
+ 0.132,
342
+ 0.822,
343
+ 0.185
344
+ ],
345
+ "angle": 0,
346
+ "content": "- Activation Function (FFN): Within the feed-forward network (FFN) sub-layers, instead of the commonly used SwiGLU activation (Shazeer, 2020), BitNet b1.58 2B4T employs squared ReLU \\((\\mathrm{ReLU}^2)\\). This choice is motivated by its potential to improve model sparsity and computational characteristics within the 1-bit context (Wang et al., 2024b,a)."
347
+ },
348
+ {
349
+ "type": "text",
350
+ "bbox": [
351
+ 0.217,
352
+ 0.193,
353
+ 0.822,
354
+ 0.22
355
+ ],
356
+ "angle": 0,
357
+ "content": "- **Positional Embeddings:** Rotary Position Embeddings (RoPE) (Su et al., 2024) are used to inject positional information, a standard practice in modern high-performance LLMs."
358
+ },
359
+ {
360
+ "type": "text",
361
+ "bbox": [
362
+ 0.217,
363
+ 0.227,
364
+ 0.822,
365
+ 0.267
366
+ ],
367
+ "angle": 0,
368
+ "content": "- Bias Removal: Consistent with architectures like LLaMA, all bias terms are removed from the linear layers and normalization layers throughout the network, reducing parameter count and potentially simplifying quantization."
369
+ },
370
+ {
371
+ "type": "list",
372
+ "bbox": [
373
+ 0.217,
374
+ 0.132,
375
+ 0.822,
376
+ 0.267
377
+ ],
378
+ "angle": 0,
379
+ "content": null
380
+ },
381
+ {
382
+ "type": "text",
383
+ "bbox": [
384
+ 0.171,
385
+ 0.281,
386
+ 0.825,
387
+ 0.337
388
+ ],
389
+ "angle": 0,
390
+ "content": "For tokenization, we adopt the tokenizer developed for LLaMA 3 (Dubey et al., 2024). This tokenizer implements a byte-level Byte-Pair Encoding (BPE) scheme with a vocabulary size of 128,256 tokens. This choice ensures robust handling of diverse text and code, and its widespread adoption facilitates straightforward integration with existing open-source tooling and ecosystems."
391
+ },
392
+ {
393
+ "type": "title",
394
+ "bbox": [
395
+ 0.172,
396
+ 0.358,
397
+ 0.281,
398
+ 0.375
399
+ ],
400
+ "angle": 0,
401
+ "content": "3 Training"
402
+ },
403
+ {
404
+ "type": "text",
405
+ "bbox": [
406
+ 0.171,
407
+ 0.39,
408
+ 0.825,
409
+ 0.485
410
+ ],
411
+ "angle": 0,
412
+ "content": "The training process for BitNet b1.58 2B4T involved three distinct phases: large-scale pre-training followed by supervised fine-tuning (SFT) and direct preference optimization (DPO). While advanced techniques like Proximal Policy Optimization (PPO) or Group Relative Policy Optimization (GRPO) can further enhance capabilities such as mathematics and chain-of-thought reasoning (Schulman et al., 2017; Shao et al., 2024), the current version of BitNet b1.58 2B4T relies solely on pre-training, SFT, and DPO. The exploration of reinforcement learning methods remains a direction for future work."
413
+ },
414
+ {
415
+ "type": "title",
416
+ "bbox": [
417
+ 0.172,
418
+ 0.504,
419
+ 0.301,
420
+ 0.519
421
+ ],
422
+ "angle": 0,
423
+ "content": "3.1 Pre-training"
424
+ },
425
+ {
426
+ "type": "text",
427
+ "bbox": [
428
+ 0.171,
429
+ 0.53,
430
+ 0.825,
431
+ 0.573
432
+ ],
433
+ "angle": 0,
434
+ "content": "The pre-training phase aimed to imbue the model with broad world knowledge and foundational language capabilities. We adapted general training strategies from established LLM practices (Dubey et al., 2024), with specific adjustments tailored for the 1-bit architecture."
435
+ },
436
+ {
437
+ "type": "title",
438
+ "bbox": [
439
+ 0.172,
440
+ 0.588,
441
+ 0.394,
442
+ 0.603
443
+ ],
444
+ "angle": 0,
445
+ "content": "3.1.1 Learning Rate Schedule"
446
+ },
447
+ {
448
+ "type": "text",
449
+ "bbox": [
450
+ 0.172,
451
+ 0.613,
452
+ 0.504,
453
+ 0.628
454
+ ],
455
+ "angle": 0,
456
+ "content": "A two-stage learning rate schedule was employed."
457
+ },
458
+ {
459
+ "type": "text",
460
+ "bbox": [
461
+ 0.211,
462
+ 0.639,
463
+ 0.822,
464
+ 0.692
465
+ ],
466
+ "angle": 0,
467
+ "content": "1. **Stage 1 (High Learning Rate):** The initial phase utilized a standard cosine decay schedule but commenced with a relatively high peak learning rate. This decision was informed by the observation that 1-bit models often exhibit greater training stability compared to their full-precision counterparts, allowing for more aggressive initial learning steps."
468
+ },
469
+ {
470
+ "type": "text",
471
+ "bbox": [
472
+ 0.209,
473
+ 0.7,
474
+ 0.822,
475
+ 0.753
476
+ ],
477
+ "angle": 0,
478
+ "content": "2. **Stage 2 (Cooldown):** Approximately midway through the planned training token count, the learning rate was abruptly decayed and subsequently maintained via a cosine schedule with a significantly lower peak value. This \"cooldown\" phase allows the model to refine its representations on higher-quality data (see Section 3.1.3)."
479
+ },
480
+ {
481
+ "type": "list",
482
+ "bbox": [
483
+ 0.209,
484
+ 0.639,
485
+ 0.822,
486
+ 0.753
487
+ ],
488
+ "angle": 0,
489
+ "content": null
490
+ },
491
+ {
492
+ "type": "title",
493
+ "bbox": [
494
+ 0.172,
495
+ 0.772,
496
+ 0.389,
497
+ 0.786
498
+ ],
499
+ "angle": 0,
500
+ "content": "3.1.2 Weight Decay Schedule"
501
+ },
502
+ {
503
+ "type": "text",
504
+ "bbox": [
505
+ 0.172,
506
+ 0.796,
507
+ 0.822,
508
+ 0.811
509
+ ],
510
+ "angle": 0,
511
+ "content": "Complementing the learning rate adjustments, a two-stage weight decay strategy was implemented."
512
+ },
513
+ {
514
+ "type": "text",
515
+ "bbox": [
516
+ 0.211,
517
+ 0.822,
518
+ 0.822,
519
+ 0.861
520
+ ],
521
+ "angle": 0,
522
+ "content": "1. **Stage 1:** During the first training stage, weight decay followed a cosine schedule, reaching a peak value of 0.1. This regularization helps prevent overfitting during the initial high learning-rate phase."
523
+ },
524
+ {
525
+ "type": "text",
526
+ "bbox": [
527
+ 0.209,
528
+ 0.87,
529
+ 0.822,
530
+ 0.908
531
+ ],
532
+ "angle": 0,
533
+ "content": "2. **Stage 2:** In the second stage, weight decay was effectively disabled (set to zero). This allows the model parameters to settle into finer-grained optima guided by the lower learning rate and curated data."
534
+ },
535
+ {
536
+ "type": "list",
537
+ "bbox": [
538
+ 0.209,
539
+ 0.822,
540
+ 0.822,
541
+ 0.908
542
+ ],
543
+ "angle": 0,
544
+ "content": null
545
+ },
546
+ {
547
+ "type": "page_number",
548
+ "bbox": [
549
+ 0.494,
550
+ 0.936,
551
+ 0.504,
552
+ 0.948
553
+ ],
554
+ "angle": 0,
555
+ "content": "3"
556
+ }
557
+ ],
558
+ [
559
+ {
560
+ "type": "title",
561
+ "bbox": [
562
+ 0.172,
563
+ 0.092,
564
+ 0.35,
565
+ 0.107
566
+ ],
567
+ "angle": 0,
568
+ "content": "3.1.3 Pre-training Data"
569
+ },
570
+ {
571
+ "type": "text",
572
+ "bbox": [
573
+ 0.171,
574
+ 0.115,
575
+ 0.825,
576
+ 0.2
577
+ ],
578
+ "angle": 0,
579
+ "content": "The pre-training corpus comprised a mixture of publicly available text and code datasets, including large web crawls like DCLM (Li et al., 2024b) and educational web pages like FineWeb-EDU (Penedo et al., 2024). To enhance mathematical reasoning abilities, we also incorporated synthetically generated mathematical data. The data presentation strategy aligned with the two-stage training: the bulk of general web data was processed during Stage 1, while higher-quality curated datasets were emphasized during the Stage 2 cooldown phase, coinciding with the reduced learning rate."
580
+ },
581
+ {
582
+ "type": "title",
583
+ "bbox": [
584
+ 0.172,
585
+ 0.215,
586
+ 0.421,
587
+ 0.231
588
+ ],
589
+ "angle": 0,
590
+ "content": "3.2 Supervised Fine-tuning (SFT)"
591
+ },
592
+ {
593
+ "type": "text",
594
+ "bbox": [
595
+ 0.171,
596
+ 0.241,
597
+ 0.827,
598
+ 0.27
599
+ ],
600
+ "angle": 0,
601
+ "content": "Following pre-training, the model underwent supervised fine-tuning (SFT) to enhance its instruction-following capabilities and improve its performance in conversational interaction formats."
602
+ },
603
+ {
604
+ "type": "title",
605
+ "bbox": [
606
+ 0.172,
607
+ 0.284,
608
+ 0.295,
609
+ 0.299
610
+ ],
611
+ "angle": 0,
612
+ "content": "3.2.1 SFT Data"
613
+ },
614
+ {
615
+ "type": "text",
616
+ "bbox": [
617
+ 0.171,
618
+ 0.308,
619
+ 0.828,
620
+ 0.392
621
+ ],
622
+ "angle": 0,
623
+ "content": "The SFT phase utilized a diverse collection of publicly available instruction-following and conversational datasets. These included, but were not limited to, WildChat (Zhao et al., 2024), LMSYS-Chat1M (Zheng et al., 2024), WizardLM Evol-Instruct (Xu et al., 2024a), and SlimOrca (Lian et al., 2023). To further bolster specific capabilities, particularly in reasoning and complex instruction adherence, we supplemented these with synthetic datasets generated using methodologies like GLAN (Li et al., 2024a) and MathScale (Tang et al., 2024)."
624
+ },
625
+ {
626
+ "type": "title",
627
+ "bbox": [
628
+ 0.172,
629
+ 0.406,
630
+ 0.331,
631
+ 0.421
632
+ ],
633
+ "angle": 0,
634
+ "content": "3.2.2 Chat Template"
635
+ },
636
+ {
637
+ "type": "text",
638
+ "bbox": [
639
+ 0.171,
640
+ 0.43,
641
+ 0.827,
642
+ 0.459
643
+ ],
644
+ "angle": 0,
645
+ "content": "For conversational tasks during SFT and inference, the following chat template structure was employed:"
646
+ },
647
+ {
648
+ "type": "code",
649
+ "bbox": [
650
+ 0.171,
651
+ 0.469,
652
+ 0.613,
653
+ 0.54
654
+ ],
655
+ "angle": 0,
656
+ "content": "<|begin_of_text|>System: {system_message}<|eot_id|>\nUser: {user_message_1}<|eot_id|\nAssistant: {assistant_message_1}<|eot_id|\nUser: {user_message_2}<|eot_id|\nAssistant: {assistant_message_2}<|eot_id|..."
657
+ },
658
+ {
659
+ "type": "title",
660
+ "bbox": [
661
+ 0.172,
662
+ 0.554,
663
+ 0.371,
664
+ 0.569
665
+ ],
666
+ "angle": 0,
667
+ "content": "3.2.3 Optimization Details"
668
+ },
669
+ {
670
+ "type": "text",
671
+ "bbox": [
672
+ 0.172,
673
+ 0.578,
674
+ 0.51,
675
+ 0.593
676
+ ],
677
+ "angle": 0,
678
+ "content": "Several optimization choices were key during SFT:"
679
+ },
680
+ {
681
+ "type": "text",
682
+ "bbox": [
683
+ 0.217,
684
+ 0.604,
685
+ 0.825,
686
+ 0.647
687
+ ],
688
+ "angle": 0,
689
+ "content": "- Loss Aggregation: Instead of averaging the cross-entropy loss across tokens within a batch (mean reduction), we employed summation. Empirically, we observed that summing the losses led to improved convergence and better final performance for this model."
690
+ },
691
+ {
692
+ "type": "text",
693
+ "bbox": [
694
+ 0.217,
695
+ 0.65,
696
+ 0.826,
697
+ 0.72
698
+ ],
699
+ "angle": 0,
700
+ "content": "- Hyperparameter Tuning: Careful tuning of the learning rate and the number of training epochs was performed. Consistent with our pre-training findings, the 1-bit model benefited from a relatively larger learning rate during SFT compared to typical full-precision model fine-tuning. Furthermore, achieving optimal convergence required extending the fine-tuning duration over a larger number of epochs than full-precision models of similar size."
701
+ },
702
+ {
703
+ "type": "list",
704
+ "bbox": [
705
+ 0.217,
706
+ 0.604,
707
+ 0.826,
708
+ 0.72
709
+ ],
710
+ "angle": 0,
711
+ "content": null
712
+ },
713
+ {
714
+ "type": "title",
715
+ "bbox": [
716
+ 0.172,
717
+ 0.736,
718
+ 0.482,
719
+ 0.752
720
+ ],
721
+ "angle": 0,
722
+ "content": "3.3 Direct Preference Optimization (DPO)"
723
+ },
724
+ {
725
+ "type": "text",
726
+ "bbox": [
727
+ 0.171,
728
+ 0.762,
729
+ 0.826,
730
+ 0.845
731
+ ],
732
+ "angle": 0,
733
+ "content": "To further align the model's behavior with human preferences regarding helpfulness and safety, we applied Direct Preference Optimization (DPO) (Rafailov et al., 2023) following the SFT phase. DPO offers an efficient alternative to traditional RLHF by directly optimizing the language model using preference data, thereby circumventing the need to train a separate reward model. This DPO stage served to refine the model's conversational prowess and overall alignment with desired interaction patterns in practical use cases."
734
+ },
735
+ {
736
+ "type": "title",
737
+ "bbox": [
738
+ 0.172,
739
+ 0.86,
740
+ 0.325,
741
+ 0.875
742
+ ],
743
+ "angle": 0,
744
+ "content": "3.3.1 Training Data"
745
+ },
746
+ {
747
+ "type": "text",
748
+ "bbox": [
749
+ 0.171,
750
+ 0.884,
751
+ 0.827,
752
+ 0.914
753
+ ],
754
+ "angle": 0,
755
+ "content": "The preference dataset used for DPO training was constructed from a combination of publicly available resources recognized for capturing diverse human judgments on model outputs. Specifically,"
756
+ },
757
+ {
758
+ "type": "page_number",
759
+ "bbox": [
760
+ 0.494,
761
+ 0.936,
762
+ 0.505,
763
+ 0.948
764
+ ],
765
+ "angle": 0,
766
+ "content": "4"
767
+ }
768
+ ],
769
+ [
770
+ {
771
+ "type": "text",
772
+ "bbox": [
773
+ 0.171,
774
+ 0.092,
775
+ 0.825,
776
+ 0.135
777
+ ],
778
+ "angle": 0,
779
+ "content": "we utilized UltraFeedback (Cui et al., 2024) and MagPie (Xu et al., 2024c). The aggregation of these datasets provided a robust and multifaceted preference signal, guiding the model towards generating responses more aligned with human expectations."
780
+ },
781
+ {
782
+ "type": "title",
783
+ "bbox": [
784
+ 0.172,
785
+ 0.148,
786
+ 0.34,
787
+ 0.163
788
+ ],
789
+ "angle": 0,
790
+ "content": "3.3.2 Training Details"
791
+ },
792
+ {
793
+ "type": "text",
794
+ "bbox": [
795
+ 0.171,
796
+ 0.171,
797
+ 0.827,
798
+ 0.256
799
+ ],
800
+ "angle": 0,
801
+ "content": "The DPO training phase was conducted for 2 epochs. We employed a learning rate of \\(2 \\times 10^{-7}\\) and set the DPO beta parameter, which controls the divergence from the reference policy, to 0.1. To enhance training efficiency during this phase, we integrated optimized kernels from the Liger Kernel library (Hsu et al., 2024). Qualitatively, our observations indicate that the DPO process effectively steered the model towards preferred response styles without inducing significant degradation in the core capabilities established during pre-training and SFT."
802
+ },
803
+ {
804
+ "type": "title",
805
+ "bbox": [
806
+ 0.172,
807
+ 0.274,
808
+ 0.298,
809
+ 0.29
810
+ ],
811
+ "angle": 0,
812
+ "content": "4 Evaluation"
813
+ },
814
+ {
815
+ "type": "text",
816
+ "bbox": [
817
+ 0.171,
818
+ 0.304,
819
+ 0.7,
820
+ 0.32
821
+ ],
822
+ "angle": 0,
823
+ "content": "We measure performance on a wide variety of benchmarks classified as follows:"
824
+ },
825
+ {
826
+ "type": "text",
827
+ "bbox": [
828
+ 0.217,
829
+ 0.33,
830
+ 0.825,
831
+ 0.385
832
+ ],
833
+ "angle": 0,
834
+ "content": "- Language understanding and reasoning: ARC-Easy (Yadav et al., 2019), ARC-Challenge (Yadav et al., 2019), HellaSwag (Zellers et al., 2019), WinoGrande (Sakaguchi et al., 2020), PIQA (Bisk et al., 2019), OpenbookQA (Mihaylov et al., 2018), and CommonsenseQA (Talmor et al., 2019)"
835
+ },
836
+ {
837
+ "type": "text",
838
+ "bbox": [
839
+ 0.217,
840
+ 0.389,
841
+ 0.81,
842
+ 0.404
843
+ ],
844
+ "angle": 0,
845
+ "content": "- World knowledge: TruthfulQA (Lin et al., 2022) and MMLU (Hendrycks et al., 2021a)"
846
+ },
847
+ {
848
+ "type": "text",
849
+ "bbox": [
850
+ 0.217,
851
+ 0.407,
852
+ 0.804,
853
+ 0.423
854
+ ],
855
+ "angle": 0,
856
+ "content": "- Reading comprehension: TriviaQA (Joshi et al., 2017) and BoolQ (Clark et al., 2019)"
857
+ },
858
+ {
859
+ "type": "text",
860
+ "bbox": [
861
+ 0.217,
862
+ 0.426,
863
+ 0.825,
864
+ 0.454
865
+ ],
866
+ "angle": 0,
867
+ "content": "- Math and code: GSM8K (Cobbe et al., 2021), MATH-500 (Hendrycks et al., 2021b) and HumanEval+ (Liu et al., 2023)"
868
+ },
869
+ {
870
+ "type": "text",
871
+ "bbox": [
872
+ 0.217,
873
+ 0.458,
874
+ 0.825,
875
+ 0.485
876
+ ],
877
+ "angle": 0,
878
+ "content": "- Instruction following and conversation: IFEval (Zhou et al., 2023) and MT-bench (Zheng et al., 2023)"
879
+ },
880
+ {
881
+ "type": "list",
882
+ "bbox": [
883
+ 0.217,
884
+ 0.33,
885
+ 0.825,
886
+ 0.485
887
+ ],
888
+ "angle": 0,
889
+ "content": null
890
+ },
891
+ {
892
+ "type": "text",
893
+ "bbox": [
894
+ 0.171,
895
+ 0.497,
896
+ 0.827,
897
+ 0.581
898
+ ],
899
+ "angle": 0,
900
+ "content": "We compare BitNet b1.58 2B4T with leading open-weight full precision LLMs of similar size, including LLaMA 3.2 1B (Dubey et al., 2024), Gemma-3 1B (Team et al., 2025), Qwen2.5 1.5B (Yang et al., 2024), SmolLM2 1.7B (Allal et al., 2025) and MiniCPM 2B (Hu et al., 2024). All models are instruction-tuned versions. We re-run all benchmarks with a public evaluation pipeline for a fair comparison. More evaluation details are available at the appendix. The main results are presented in Table 1."
901
+ },
902
+ {
903
+ "type": "title",
904
+ "bbox": [
905
+ 0.172,
906
+ 0.596,
907
+ 0.307,
908
+ 0.61
909
+ ],
910
+ "angle": 0,
911
+ "content": "4.1 Main Results"
912
+ },
913
+ {
914
+ "type": "text",
915
+ "bbox": [
916
+ 0.171,
917
+ 0.621,
918
+ 0.827,
919
+ 0.691
920
+ ],
921
+ "angle": 0,
922
+ "content": "As shown in Table 1, BitNet b1.58 2B4T demonstrates remarkable resource efficiency. Its non-embedding memory footprint and estimated energy consumption (Horowitz, 2014; Zhang et al., 2022) during decoding are substantially lower compared to all the full-precision models evaluated, highlighting a significant advantage in operational cost and deployability on resource-constrained devices."
923
+ },
924
+ {
925
+ "type": "text",
926
+ "bbox": [
927
+ 0.171,
928
+ 0.697,
929
+ 0.827,
930
+ 0.795
931
+ ],
932
+ "angle": 0,
933
+ "content": "In terms of task performance, BitNet b1.58 2B4T proves highly competitive. It achieves the best results among the compared models on several benchmarks spanning reasoning, knowledge, and math capabilities. On other benchmarks, its performance is closely comparable to the top-performing full-precision models. While some full-precision models show slight advantages on specific tasks or the overall average, BitNet b1.58 2B4T delivers strong performance across the board. The results indicate that BitNet b1.58 2B4T achieves capabilities nearly on par with leading models in its size class while offering dramatically improved efficiency."
934
+ },
935
+ {
936
+ "type": "title",
937
+ "bbox": [
938
+ 0.171,
939
+ 0.81,
940
+ 0.563,
941
+ 0.826
942
+ ],
943
+ "angle": 0,
944
+ "content": "4.2 Comparison with Post-training Quantized Models"
945
+ },
946
+ {
947
+ "type": "text",
948
+ "bbox": [
949
+ 0.171,
950
+ 0.835,
951
+ 0.826,
952
+ 0.879
953
+ ],
954
+ "angle": 0,
955
+ "content": "We further investigate the efficiency-performance trade-off by comparing BitNet b1.58 2B4T against post-training quantized (PTQ) versions of a leading competitor, Qwen2.5 1.5B, using standard INT4 methods (GPTQ and AWQ). The results are summarized in Table 2."
956
+ },
957
+ {
958
+ "type": "text",
959
+ "bbox": [
960
+ 0.171,
961
+ 0.883,
962
+ 0.829,
963
+ 0.913
964
+ ],
965
+ "angle": 0,
966
+ "content": "While INT4 quantization successfully reduces the memory footprint of the full-precision model, BitNet b1.58 2B4T achieves an even lower memory requirement due to its native 1-bit architecture."
967
+ },
968
+ {
969
+ "type": "page_number",
970
+ "bbox": [
971
+ 0.494,
972
+ 0.936,
973
+ 0.505,
974
+ 0.948
975
+ ],
976
+ "angle": 0,
977
+ "content": "5"
978
+ }
979
+ ],
980
+ [
981
+ {
982
+ "type": "table",
983
+ "bbox": [
984
+ 0.182,
985
+ 0.088,
986
+ 0.813,
987
+ 0.673
988
+ ],
989
+ "angle": 0,
990
+ "content": "<table><tr><td>Benchmark (Metric)</td><td>LLaMA 3.21B</td><td>Gemma-31B</td><td>Qwen2.51.5B</td><td>SmolLM21.7B</td><td>MiniCPM2B</td><td>BitNet b1.582B</td></tr><tr><td>Memory(Non-emb)</td><td>2GB</td><td>1.4GB</td><td>2.6GB</td><td>3.2GB</td><td>4.8GB</td><td>0.4GB</td></tr><tr><td>Latency(CPU; TPOT)</td><td>48ms</td><td>41ms</td><td>65ms</td><td>67ms</td><td>124ms</td><td>29ms</td></tr><tr><td>Energy(Estimated)</td><td>0.258J</td><td>0.186J</td><td>0.347J</td><td>0.425J</td><td>0.649J</td><td>0.028J</td></tr><tr><td>Training Tokens(Pre-training)</td><td>9T(pruning &amp; distillation)</td><td>2T(distillation)</td><td>18T</td><td>11T</td><td>1.1T</td><td>4T</td></tr><tr><td>ARC-Challange(0-shot; Acc,norm)</td><td>37.80</td><td>38.40</td><td>46.67</td><td>43.52</td><td>44.80</td><td>49.91</td></tr><tr><td>ARC-Easy(0-shot; Acc,norm)</td><td>63.17</td><td>63.13</td><td>76.01</td><td>62.92</td><td>72.14</td><td>74.79</td></tr><tr><td>OpenbookQA(0-shot; Acc,norm)</td><td>34.80</td><td>38.80</td><td>40.80</td><td>46.00</td><td>40.20</td><td>41.60</td></tr><tr><td>BoolQ(0-shot; Acc)</td><td>64.65</td><td>74.22</td><td>78.04</td><td>75.78</td><td>80.67</td><td>80.18</td></tr><tr><td>HellaSwag(0-shot; Acc,norm)</td><td>60.80</td><td>57.69</td><td>68.28</td><td>71.71</td><td>70.81</td><td>68.44</td></tr><tr><td>PIQA(0-shot; Acc,norm)</td><td>74.21</td><td>71.93</td><td>76.12</td><td>76.12</td><td>76.66</td><td>77.09</td></tr><tr><td>WinoGrande(0-shot; Acc)</td><td>59.51</td><td>58.48</td><td>62.83</td><td>68.98</td><td>61.80</td><td>71.90</td></tr><tr><td>CommonsenseQA(10-shot; Acc)</td><td>58.48</td><td>42.10</td><td>76.41</td><td>63.55</td><td>71.74</td><td>71.58</td></tr><tr><td>TruthfulQA(10-shot; MC2)</td><td>43.80</td><td>38.66</td><td>46.67</td><td>39.90</td><td>41.41</td><td>45.31</td></tr><tr><td>TriviaQA(5-shot; EM)</td><td>37.60</td><td>23.49</td><td>38.37</td><td>45.97</td><td>34.13</td><td>33.57</td></tr><tr><td>MMLU(5-shot; Acc)</td><td>45.58</td><td>39.91</td><td>60.25</td><td>49.24</td><td>51.82</td><td>53.17</td></tr><tr><td>HumanEval+(0-shot; Pass@1)</td><td>31.10</td><td>37.20</td><td>50.60</td><td>28.00</td><td>43.90</td><td>38.40</td></tr><tr><td>GSM8K(4-shot; EM)</td><td>38.21</td><td>31.16</td><td>56.79</td><td>45.11</td><td>4.40</td><td>58.38</td></tr><tr><td>MATH-500(0-shot; EM)</td><td>23.00</td><td>42.00</td><td>53.00</td><td>17.60</td><td>14.80</td><td>43.40</td></tr><tr><td>IFEval(0-shot; Instruct-Strict)</td><td>62.71</td><td>66.67</td><td>50.12</td><td>57.91</td><td>36.81</td><td>53.48</td></tr><tr><td>MT-bench(0-shot; Average)</td><td>5.43</td><td>6.40</td><td>6.12</td><td>5.50</td><td>6.57</td><td>5.85</td></tr><tr><td>Average</td><td>44.90</td><td>43.74</td><td>55.23</td><td>48.70</td><td>42.05</td><td>54.19</td></tr></table>"
991
+ },
992
+ {
993
+ "type": "table_caption",
994
+ "bbox": [
995
+ 0.171,
996
+ 0.678,
997
+ 0.825,
998
+ 0.72
999
+ ],
1000
+ "angle": 0,
1001
+ "content": "Table 1: Comparison of BitNet b1.58 2B4T with leading open-weight full-precision LLMs of similar size (1B-2B parameters) on efficiency metrics and performance across a wide range of benchmarks. All models compared are instruction-tuned versions."
1002
+ },
1003
+ {
1004
+ "type": "text",
1005
+ "bbox": [
1006
+ 0.171,
1007
+ 0.814,
1008
+ 0.825,
1009
+ 0.913
1010
+ ],
1011
+ "angle": 0,
1012
+ "content": "More importantly, this superior memory efficiency does not compromise performance relative to the quantized models. Standard PTQ techniques lead to a noticeable degradation in performance compared to the original full-precision model. In contrast, BitNet b1.58 2B4T maintains stronger overall performance than the INT4 quantized versions of Qwen2.5-1.5B on the evaluated benchmarks. This comparison suggests that BitNet b1.58 2B4T represents a more favorable point on the efficiency-performance curve than applying conventional INT4 PTQ to existing architectures, offering better performance with lower resource usage."
1013
+ },
1014
+ {
1015
+ "type": "page_number",
1016
+ "bbox": [
1017
+ 0.494,
1018
+ 0.937,
1019
+ 0.504,
1020
+ 0.947
1021
+ ],
1022
+ "angle": 0,
1023
+ "content": "6"
1024
+ }
1025
+ ],
1026
+ [
1027
+ {
1028
+ "type": "table",
1029
+ "bbox": [
1030
+ 0.205,
1031
+ 0.089,
1032
+ 0.791,
1033
+ 0.284
1034
+ ],
1035
+ "angle": 0,
1036
+ "content": "<table><tr><td rowspan=\"2\">Benchmark (Metric)</td><td colspan=\"3\">Qwen2.5</td><td>BitNet b1.58</td></tr><tr><td>1.5B-bf16</td><td>1.5B-GPTQ-int4</td><td>1.5B-AWQ-int4</td><td>2B</td></tr><tr><td>Memory \n(Non-emb)</td><td>2.6GB</td><td>0.7GB</td><td>0.7GB</td><td>0.4GB</td></tr><tr><td>Activation</td><td>bf16</td><td>bf16</td><td>bf16</td><td>int8</td></tr><tr><td>MMLU \n(5-shot; Acc)</td><td>60.25</td><td>58.06</td><td>57.43</td><td>53.17</td></tr><tr><td>GSM8K \n(4-shot; EM)</td><td>56.79</td><td>50.57</td><td>50.64</td><td>58.38</td></tr><tr><td>IFEval \n(0-shot; Instruct-Strict)</td><td>50.12</td><td>47.84</td><td>45.44</td><td>53.48</td></tr><tr><td>Average</td><td>55.72</td><td>52.15</td><td>51.17</td><td>55.01</td></tr></table>"
1037
+ },
1038
+ {
1039
+ "type": "table_caption",
1040
+ "bbox": [
1041
+ 0.171,
1042
+ 0.289,
1043
+ 0.828,
1044
+ 0.334
1045
+ ],
1046
+ "angle": 0,
1047
+ "content": "Table 2: Comparison of BitNet b1.58 (2B) against Qwen2.5 1.5B in its original bf16 precision and after INT4 post-training quantization (GPTQ and AWQ). All models shown are based on instruction-tuned checkpoints."
1048
+ },
1049
+ {
1050
+ "type": "table",
1051
+ "bbox": [
1052
+ 0.194,
1053
+ 0.355,
1054
+ 0.803,
1055
+ 0.713
1056
+ ],
1057
+ "angle": 0,
1058
+ "content": "<table><tr><td>Benchmark (Metric)</td><td>Bonsai 0.5B</td><td>OLMo-Bitnet 1B</td><td>Falcon3-1.58bit 7B</td><td>Llama3-8B-1.58 8B</td><td>BitNet b1.58 2B</td></tr><tr><td>Native 1-bit</td><td>✓</td><td>✓</td><td>✘</td><td>✘</td><td>✓</td></tr><tr><td>ARC-Challange (0-shot; Acc,norm)</td><td>33.19</td><td>26.54</td><td>37.80</td><td>43.69</td><td>49.91</td></tr><tr><td>ARC-Easy (0-shot; Acc,norm)</td><td>58.25</td><td>25.38</td><td>65.03</td><td>70.71</td><td>74.79</td></tr><tr><td>OpenbookQA (0-shot; Acc,norm)</td><td>33.60</td><td>28.20</td><td>38.20</td><td>37.20</td><td>41.60</td></tr><tr><td>BoolQ (0-shot; Acc)</td><td>58.44</td><td>52.48</td><td>72.14</td><td>68.38</td><td>80.18</td></tr><tr><td>HellaSwag (0-shot; Acc,norm)</td><td>48.01</td><td>25.88</td><td>59.46</td><td>68.56</td><td>68.44</td></tr><tr><td>PIQA (0-shot; Acc,norm)</td><td>70.02</td><td>50.49</td><td>72.36</td><td>75.30</td><td>77.09</td></tr><tr><td>WinoGrande (0-shot; Acc)</td><td>54.46</td><td>51.54</td><td>60.14</td><td>60.93</td><td>71.90</td></tr><tr><td>CommonsenseQA (10-shot; Acc)</td><td>18.43</td><td>19.49</td><td>67.08</td><td>28.50</td><td>71.58</td></tr><tr><td>TruthfulQA (10-shot; MC2)</td><td>40.65</td><td>49.05</td><td>43.29</td><td>39.13</td><td>45.31</td></tr><tr><td>TriviaQA (5-shot; EM)</td><td>10.84</td><td>0.00</td><td>0.00</td><td>19.82</td><td>33.57</td></tr><tr><td>MMLU (5-shot; Acc)</td><td>25.74</td><td>25.47</td><td>42.79</td><td>35.04</td><td>53.17</td></tr><tr><td>Average</td><td>41.06</td><td>32.22</td><td>50.76</td><td>49.75</td><td>60.68</td></tr></table>"
1059
+ },
1060
+ {
1061
+ "type": "table_caption",
1062
+ "bbox": [
1063
+ 0.171,
1064
+ 0.717,
1065
+ 0.829,
1066
+ 0.761
1067
+ ],
1068
+ "angle": 0,
1069
+ "content": "Table 3: Performance comparison of BitNet b1.58 2B4T against other open-weight 1-bit models. This includes natively trained 1-bit models (Bonsai-0.5B, OLMo-Bitnet-1B) and larger models posttraining quantized to 1.58-bit (Falcon3-1.58bit-7B, Llama3-8B-1.58)."
1070
+ },
1071
+ {
1072
+ "type": "title",
1073
+ "bbox": [
1074
+ 0.171,
1075
+ 0.795,
1076
+ 0.522,
1077
+ 0.811
1078
+ ],
1079
+ "angle": 0,
1080
+ "content": "4.3 Comparison with Open-weight 1-bit Models"
1081
+ },
1082
+ {
1083
+ "type": "text",
1084
+ "bbox": [
1085
+ 0.171,
1086
+ 0.821,
1087
+ 0.825,
1088
+ 0.879
1089
+ ],
1090
+ "angle": 0,
1091
+ "content": "Finally, we situate BitNet b1.58 2B4T within the landscape of other models designed for or quantized to near 1-bit precision. We compare it against natively trained 1-bit models of smaller scale and significantly larger models post-training quantized to 1.58-bit precision. The comparative results are presented in Table 3."
1092
+ },
1093
+ {
1094
+ "type": "text",
1095
+ "bbox": [
1096
+ 0.171,
1097
+ 0.884,
1098
+ 0.829,
1099
+ 0.914
1100
+ ],
1101
+ "angle": 0,
1102
+ "content": "The evaluation clearly positions BitNet b1.58 2B4T as the leading model in this category. It demonstrates significantly stronger overall performance than all other compared 1-bit models, achieving"
1103
+ },
1104
+ {
1105
+ "type": "page_number",
1106
+ "bbox": [
1107
+ 0.494,
1108
+ 0.936,
1109
+ 0.506,
1110
+ 0.948
1111
+ ],
1112
+ "angle": 0,
1113
+ "content": "7"
1114
+ }
1115
+ ],
1116
+ [
1117
+ {
1118
+ "type": "text",
1119
+ "bbox": [
1120
+ 0.171,
1121
+ 0.092,
1122
+ 0.825,
1123
+ 0.176
1124
+ ],
1125
+ "angle": 0,
1126
+ "content": "the highest scores on the vast majority of benchmarks. Notably, BitNet b1.58 2B4T substantially outperforms not only the smaller, natively trained 1-bit models but also the much larger models (in terms of parameter count) that were quantized to 1-bit. This highlights the effectiveness of the native training approach employed by BitNet b1.58 2B4T, allowing it to set a new state-of-the-art performance level for models operating at this extreme level of quantization, even surpassing larger models subjected to post-training quantization."
1127
+ },
1128
+ {
1129
+ "type": "title",
1130
+ "bbox": [
1131
+ 0.172,
1132
+ 0.207,
1133
+ 0.423,
1134
+ 0.223
1135
+ ],
1136
+ "angle": 0,
1137
+ "content": "5 Inference Implementation"
1138
+ },
1139
+ {
1140
+ "type": "text",
1141
+ "bbox": [
1142
+ 0.171,
1143
+ 0.244,
1144
+ 0.827,
1145
+ 0.328
1146
+ ],
1147
+ "angle": 0,
1148
+ "content": "Efficient inference is crucial for deploying Large Language Models, particularly for resource-constrained environments. The unique quantization scheme of BitNet b1.58 2B4T, employing 1.58-bit weights and 8-bit activations (W1.58A8), necessitates specialized implementations, as standard deep learning libraries often lack optimized kernels for such mixed-precision, low-bit formats. To address this, we developed and open-sourced dedicated inference libraries for both GPU and CPU platforms. The code is publicly available at https://aka.ms/bitnet."
1149
+ },
1150
+ {
1151
+ "type": "title",
1152
+ "bbox": [
1153
+ 0.172,
1154
+ 0.355,
1155
+ 0.318,
1156
+ 0.369
1157
+ ],
1158
+ "angle": 0,
1159
+ "content": "5.1 GPU Inference"
1160
+ },
1161
+ {
1162
+ "type": "text",
1163
+ "bbox": [
1164
+ 0.171,
1165
+ 0.385,
1166
+ 0.825,
1167
+ 0.455
1168
+ ],
1169
+ "angle": 0,
1170
+ "content": "Current GPU architectures and their associated software libraries (e.g., cuBLAS, PyTorch kernels) are primarily optimized for operations involving standard data types like FP16, BF16, and INT8/INT4. Native, high-performance support for the specific W1.58A8 matrix multiplication required by BitNet b1.58 2B4T is generally unavailable. This limitation can hinder the realization of the theoretical efficiency gains offered by 1-bit models on existing hardware."
1171
+ },
1172
+ {
1173
+ "type": "text",
1174
+ "bbox": [
1175
+ 0.171,
1176
+ 0.46,
1177
+ 0.826,
1178
+ 0.613
1179
+ ],
1180
+ "angle": 0,
1181
+ "content": "To enable efficient GPU inference, we developed a custom CUDA kernel specifically designed for the W1.58A8 matrix multiplication. Since ternary weights \\(\\{-1,0, + 1\\}\\), representing 1.58 bits) cannot be stored efficiently using standard data types, we pack multiple weight values into a single 8-bit integer ('int8') for storage in High Bandwidth Memory (HBM). Specifically, four ternary values are encoded into one 'int8' value. During computation, the CUDA kernel loads the packed 'int8' weights from HBM into the GPU's faster on-chip Shared Memory (SRAM). It then unpacks these values back into a representation suitable for efficient ternary computation (e.g., reconstructing the -1, 0, +1 values) immediately before performing the matrix multiplication with the 8-bit activations. This 'pack-store-load-unpack-compute' strategy minimizes memory bandwidth usage while leveraging custom compute instructions. Further implementation details and optimization strategies are elaborated in the Ladder framework (Wang et al., 2023b)."
1182
+ },
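+ A minimal NumPy sketch of the two-bits-per-weight packing described in the block above, assuming a simple (w + 1) two-bit encoding; the actual bit layout used by the released CUDA kernel (per the Ladder framework) may differ:
+
+ ```python
+ import numpy as np
+
+ def pack_ternary(w: np.ndarray) -> np.ndarray:
+     """Pack ternary weights in {-1, 0, +1} (length divisible by 4) into uint8."""
+     codes = (w + 1).astype(np.uint8).reshape(-1, 4)  # {-1,0,+1} -> 2-bit codes {0,1,2}
+     return codes[:, 0] | (codes[:, 1] << 2) | (codes[:, 2] << 4) | (codes[:, 3] << 6)
+
+ def unpack_ternary(packed: np.ndarray) -> np.ndarray:
+     """Recover the ternary values from the packed uint8 representation."""
+     codes = np.stack([(packed >> s) & 0b11 for s in (0, 2, 4, 6)], axis=1)
+     return codes.astype(np.int8).reshape(-1) - 1
+
+ w = np.random.randint(-1, 2, size=16)  # example ternary weight vector
+ assert np.array_equal(unpack_ternary(pack_ternary(w)), w)
+ ```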
1183
+ {
1184
+ "type": "text",
1185
+ "bbox": [
1186
+ 0.171,
1187
+ 0.619,
1188
+ 0.825,
1189
+ 0.687
1190
+ ],
1191
+ "angle": 0,
1192
+ "content": "While our custom kernel significantly improves performance compared to naive implementations, we note that current commodity GPU architectures are not optimally designed for the 1-bit models. We believe that future hardware innovations, potentially incorporating dedicated logic for low-bit operations, will be essential to fully unlock the performance and energy efficiency potential of models like BitNet b1.58."
1193
+ },
1194
+ {
1195
+ "type": "title",
1196
+ "bbox": [
1197
+ 0.172,
1198
+ 0.716,
1199
+ 0.317,
1200
+ 0.73
1201
+ ],
1202
+ "angle": 0,
1203
+ "content": "5.2 CPU Inference"
1204
+ },
1205
+ {
1206
+ "type": "text",
1207
+ "bbox": [
1208
+ 0.171,
1209
+ 0.746,
1210
+ 0.825,
1211
+ 0.789
1212
+ ],
1213
+ "angle": 0,
1214
+ "content": "To ensure broad accessibility and enable deployment on devices lacking powerful GPUs (e.g., edge devices, laptops, standard servers), we developed bitnet.cpp. This C++ library serves as an official reference implementation for CPU inference of 1-bit LLMs, including BitNet b1.58."
1215
+ },
1216
+ {
1217
+ "type": "text",
1218
+ "bbox": [
1219
+ 0.171,
1220
+ 0.794,
1221
+ 0.825,
1222
+ 0.864
1223
+ ],
1224
+ "angle": 0,
1225
+ "content": "bitnet.cpp provides optimized kernels tailored for efficient execution on standard CPU architectures. The kernels are designed to operate efficiently with the model's specific quantization scheme, avoiding the overhead of generic quantization libraries or intricate low-level bit manipulation where possible. It processes the weight elements in a manner consistent with the BitNet b1.58 training methodology, ensuring numerical accuracy (lossless inference relative to the training procedure)."
1226
+ },
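+ For reference, a non-optimized Python sketch of the W1.58A8 product such kernels compute: with ternary weights, each output element reduces to additions and subtractions of int8 activations followed by a rescale. The scale handling and function name below are illustrative, not the library's actual code:
+
+ ```python
+ import numpy as np
+
+ def w158a8_matvec(w_ternary, x_int8, w_scale, x_scale):
+     """w_ternary: (out, in) in {-1, 0, +1}; x_int8: (in,) integer activations."""
+     acc = np.zeros(w_ternary.shape[0], dtype=np.int64)
+     for i, row in enumerate(w_ternary):
+         acc[i] = x_int8[row == 1].sum() - x_int8[row == -1].sum()  # adds/subs only
+     return acc.astype(np.float32) * w_scale * x_scale  # rescale to real values
+
+ w = np.random.randint(-1, 2, size=(4, 64))
+ x = np.random.randint(-128, 128, size=64)
+ y = w158a8_matvec(w, x, w_scale=0.02, x_scale=0.05)
+ ```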
1227
+ {
1228
+ "type": "text",
1229
+ "bbox": [
1230
+ 0.171,
1231
+ 0.87,
1232
+ 0.825,
1233
+ 0.913
1234
+ ],
1235
+ "angle": 0,
1236
+ "content": "This approach delivers fast and accurate inference of 1.58-bit models directly on CPUs. More technical details and usage instructions can be found in the bitnet.cpp repository and associated technical report (Wang et al., 2025)."
1237
+ },
1238
+ {
1239
+ "type": "page_number",
1240
+ "bbox": [
1241
+ 0.494,
1242
+ 0.936,
1243
+ 0.504,
1244
+ 0.948
1245
+ ],
1246
+ "angle": 0,
1247
+ "content": "8"
1248
+ }
1249
+ ],
1250
+ [
1251
+ {
1252
+ "type": "title",
1253
+ "bbox": [
1254
+ 0.172,
1255
+ 0.091,
1256
+ 0.303,
1257
+ 0.108
1258
+ ],
1259
+ "angle": 0,
1260
+ "content": "6 Conclusion"
1261
+ },
1262
+ {
1263
+ "type": "text",
1264
+ "bbox": [
1265
+ 0.171,
1266
+ 0.123,
1267
+ 0.825,
1268
+ 0.179
1269
+ ],
1270
+ "angle": 0,
1271
+ "content": "This technical report introduced BitNet b1.58 2B4T, a significant step towards highly efficient yet capable Large Language Models. As the first open-source, native 1-bit LLM trained at the 2-billion parameter scale on 4 trillion tokens, our work demonstrates the viability of extreme quantization directly within the training process."
1272
+ },
1273
+ {
1274
+ "type": "text",
1275
+ "bbox": [
1276
+ 0.171,
1277
+ 0.185,
1278
+ 0.827,
1279
+ 0.298
1280
+ ],
1281
+ "angle": 0,
1282
+ "content": "Comprehensive evaluations across benchmarks assessing language understanding, reasoning, mathematics, coding, and dialogue revealed that BitNet b1.58 2B4T achieves performance comparable to state-of-the-art open-weight, full-precision models of similar size. Crucially, this performance parity is achieved with dramatically reduced computational requirements, offering substantial savings in memory footprint, energy consumption, and inference latency. To facilitate practical use and further research, we developed and released optimized inference implementations for both GPU (via custom CUDA kernels) and CPU (via the 'bitnet.cpp' library), alongside the model weights available on Hugging Face."
1283
+ },
1284
+ {
1285
+ "type": "text",
1286
+ "bbox": [
1287
+ 0.171,
1288
+ 0.302,
1289
+ 0.828,
1290
+ 0.359
1291
+ ],
1292
+ "angle": 0,
1293
+ "content": "BitNet b1.58 2B4T represents a compelling proof-of-concept that challenges the necessity of full-precision weights for achieving high performance in LLMs at scale. It opens avenues for deploying powerful language models in resource-constrained environments where previous models were prohibitive, potentially democratizing access to advanced AI capabilities."
1294
+ },
1295
+ {
1296
+ "type": "title",
1297
+ "bbox": [
1298
+ 0.172,
1299
+ 0.38,
1300
+ 0.356,
1301
+ 0.397
1302
+ ],
1303
+ "angle": 0,
1304
+ "content": "7 Future Directions"
1305
+ },
1306
+ {
1307
+ "type": "text",
1308
+ "bbox": [
1309
+ 0.171,
1310
+ 0.414,
1311
+ 0.827,
1312
+ 0.43
1313
+ ],
1314
+ "angle": 0,
1315
+ "content": "While BitNet b1.58 2B4T demonstrates promising results, several exciting research directions remain:"
1316
+ },
1317
+ {
1318
+ "type": "text",
1319
+ "bbox": [
1320
+ 0.217,
1321
+ 0.443,
1322
+ 0.825,
1323
+ 0.498
1324
+ ],
1325
+ "angle": 0,
1326
+ "content": "- Scaling Laws and Larger Models: Investigating the scaling properties of native 1-bit LLMs is crucial. Future work will explore training larger models (e.g., 7B, 13B parameters and beyond) and training on even larger datasets to understand if the performance parity with full-precision models holds."
1327
+ },
1328
+ {
1329
+ "type": "text",
1330
+ "bbox": [
1331
+ 0.217,
1332
+ 0.506,
1333
+ 0.825,
1334
+ 0.577
1335
+ ],
1336
+ "angle": 0,
1337
+ "content": "- Hardware Co-Design and Optimization: The full potential of 1-bit models is likely hindered by current hardware limitations. Continued development of highly optimized kernels for existing hardware (GPUs, CPUs, NPUs) is needed. Furthermore, co-designing future hardware accelerators specifically optimized for 1-bit computations and data movement could unlock orders-of-magnitude improvements in speed and energy efficiency."
1338
+ },
1339
+ {
1340
+ "type": "text",
1341
+ "bbox": [
1342
+ 0.217,
1343
+ 0.583,
1344
+ 0.827,
1345
+ 0.667
1346
+ ],
1347
+ "angle": 0,
1348
+ "content": "- Extended Sequence Length: Extending the maximum sequence length of BitNet b1.58 2B4T can process is crucial. This enhancement is vital for tasks demanding long-context understanding, such as summarizing lengthy documents or engaging in complex problem-solving, and is particularly critical for improving performance on long chain-of-thought reasoning tasks. Investigating efficient attention mechanisms suitable for low-bit models at longer sequence lengths will be key."
1349
+ },
1350
+ {
1351
+ "type": "text",
1352
+ "bbox": [
1353
+ 0.217,
1354
+ 0.673,
1355
+ 0.827,
1356
+ 0.716
1357
+ ],
1358
+ "angle": 0,
1359
+ "content": "- Multilingual Capabilities: The current model is primarily trained on English-centric data. Extending the pre-training corpus and potentially adapting the architecture to effectively support multiple languages is a key direction for broader applicability."
1360
+ },
1361
+ {
1362
+ "type": "text",
1363
+ "bbox": [
1364
+ 0.217,
1365
+ 0.722,
1366
+ 0.825,
1367
+ 0.779
1368
+ ],
1369
+ "angle": 0,
1370
+ "content": "- Multimodal Integration: Exploring the integration of 1-bit principles into multimodal architectures is another promising frontier. Developing efficient ways to process and fuse information from different modalities (e.g., text and images) within a low-bit framework could enable new applications."
1371
+ },
1372
+ {
1373
+ "type": "text",
1374
+ "bbox": [
1375
+ 0.217,
1376
+ 0.786,
1377
+ 0.827,
1378
+ 0.842
1379
+ ],
1380
+ "angle": 0,
1381
+ "content": "- Theoretical Understanding: Delving deeper into the theoretical underpinnings of why 1-bit training at scale is effective remains an open area. Analyzing the learning dynamics, loss landscapes, and representational properties of these models could yield valuable insights for future development."
1382
+ },
1383
+ {
1384
+ "type": "list",
1385
+ "bbox": [
1386
+ 0.217,
1387
+ 0.443,
1388
+ 0.827,
1389
+ 0.842
1390
+ ],
1391
+ "angle": 0,
1392
+ "content": null
1393
+ },
1394
+ {
1395
+ "type": "text",
1396
+ "bbox": [
1397
+ 0.171,
1398
+ 0.856,
1399
+ 0.827,
1400
+ 0.913
1401
+ ],
1402
+ "angle": 0,
1403
+ "content": "By pursuing these directions, we aim to further advance the capability and efficiency of 1-bit LLMs, paving the way for more sustainable and accessible artificial intelligence. The open-source release of BitNet b1.58 2B4T and its associated tools provides a foundation for the community to build upon these efforts."
1404
+ },
1405
+ {
1406
+ "type": "page_number",
1407
+ "bbox": [
1408
+ 0.494,
1409
+ 0.936,
1410
+ 0.506,
1411
+ 0.948
1412
+ ],
1413
+ "angle": 0,
1414
+ "content": "9"
1415
+ }
1416
+ ],
1417
+ [
1418
+ {
1419
+ "type": "title",
1420
+ "bbox": [
1421
+ 0.174,
1422
+ 0.09,
1423
+ 0.27,
1424
+ 0.107
1425
+ ],
1426
+ "angle": 0,
1427
+ "content": "References"
1428
+ },
1429
+ {
1430
+ "type": "ref_text",
1431
+ "bbox": [
1432
+ 0.174,
1433
+ 0.115,
1434
+ 0.829,
1435
+ 0.185
1436
+ ],
1437
+ "angle": 0,
1438
+ "content": "Allal, L. B., Lozhkov, A., Bakouch, E., Blázquez, G. M., Penedo, G., Tunstall, L., Marafioti, A., Kydlíček, H., Lajarín, A. P., Srivastav, V., Lochner, J., Fahlgren, C., Nguyen, X.-S., Fourrier, C., Burtenshaw, B., Larcher, H., Zhao, H., Zakka, C., Morlon, M., Raffel, C., von Werra, L., and Wolf, T. (2025). Smollm2: When smol goes big - data-centric training of a small language model. CoRR, abs/2502.02737."
1439
+ },
1440
+ {
1441
+ "type": "ref_text",
1442
+ "bbox": [
1443
+ 0.174,
1444
+ 0.196,
1445
+ 0.829,
1446
+ 0.252
1447
+ ],
1448
+ "angle": 0,
1449
+ "content": "Bai, S., Chen, K., Liu, X., Wang, J., Ge, W., Song, S., Dang, K., Wang, P., Wang, S., Tang, J., Zhong, H., Zhu, Y., Yang, M.-H., Li, Z., Wan, J., Wang, P., Ding, W., Fu, Z., Xu, Y., Ye, J., Zhang, X., Xie, T., Cheng, Z., Zhang, H., Yang, Z., Xu, H., and Lin, J. (2025). Qwen2.5-vl technical report. CoRR, abs/2502.13923."
1450
+ },
1451
+ {
1452
+ "type": "ref_text",
1453
+ "bbox": [
1454
+ 0.174,
1455
+ 0.263,
1456
+ 0.826,
1457
+ 0.293
1458
+ ],
1459
+ "angle": 0,
1460
+ "content": "Bisk, Y., Zellers, R., Bras, R. L., Gao, J., and Choi, Y. (2019). PIQA: reasoning about physical commonsense in natural language. CoRR, abs/1911.11641."
1461
+ },
1462
+ {
1463
+ "type": "ref_text",
1464
+ "bbox": [
1465
+ 0.174,
1466
+ 0.303,
1467
+ 0.826,
1468
+ 0.333
1469
+ ],
1470
+ "angle": 0,
1471
+ "content": "Clark, C., Lee, K., Chang, M.-W., Kwiatkowski, T., Collins, M., and Toutanova, K. (2019). Boolq: Exploring the surprising difficulty of natural yes/no questions. CoRR, abs/1905.10044."
1472
+ },
1473
+ {
1474
+ "type": "ref_text",
1475
+ "bbox": [
1476
+ 0.174,
1477
+ 0.343,
1478
+ 0.829,
1479
+ 0.385
1480
+ ],
1481
+ "angle": 0,
1482
+ "content": "Cobbe, K., Kosaraju, V., Bavarian, M., Chen, M., Jun, H., Kaiser, L., Plappert, M., Tworek, J., Hilton, J., Nakano, R., Hesse, C., and Schulman, J. (2021). Training verifiers to solve math word problems. CoRR, abs/2110.14168."
1483
+ },
1484
+ {
1485
+ "type": "ref_text",
1486
+ "bbox": [
1487
+ 0.174,
1488
+ 0.396,
1489
+ 0.826,
1490
+ 0.44
1491
+ ],
1492
+ "angle": 0,
1493
+ "content": "Cui, G., Yuan, L., Ding, N., Yao, G., He, B., Zhu, W., Ni, Y., Xie, G., Xie, R., Lin, Y., Liu, Z., and Sun, M. (2024). ULTRAFEEDBACK: boosting language models with scaled AI feedback. In ICML. OpenReview.net."
1494
+ },
1495
+ {
1496
+ "type": "ref_text",
1497
+ "bbox": [
1498
+ 0.174,
1499
+ 0.45,
1500
+ 0.829,
1501
+ 0.631
1502
+ ],
1503
+ "angle": 0,
1504
+ "content": "Dubey, A., Jauhri, A., Pandey, A., Kadian, A., Al-Dahle, A., Letman, A., Mathur, A., Schelten, A., Yang, A., Fan, A., Goyal, A., Hartshorn, A., Yang, A., Mitra, A., Sravankumar, A., Korenev, A., Hinsvark, A., Rao, A., Zhang, A., Rodriguez, A., Gregerson, A., Spataru, A., Rozière, B., Biron, B., Tang, B., Chern, B., Caucheteux, C., Nayak, C., Bi, C., Marra, C., McConnell, C., Keller, C., Touret, C., Wu, C., Wong, C., Ferrer, C. C., Nikolaidis, C., Allonsius, D., Song, D., Pintz, D., Livshits, D., Esiobu, D., Choudhary, D., Mahajan, D., Garcia-Olano, D., Perino, D., Hupkes, D., Lakomkin, E., AlBadawy, E., Lobanova, E., Dinan, E., Smith, E. M., Radenovic, F., Zhang, F., Synnaeve, G., Lee, G., Anderson, G. L., Nail, G., Mialon, G., Pang, G., Cucurell, G., Nguyen, H., Korevaar, H., Xu, H., Touvron, H., Zarov, I., Ibarra, I. A., Kloumann, I. M., Misra, I., Evtimov, I., Copet, J., Lee, J., Geffert, J., Vranes, J., Park, J., Mahadeokar, J., Shah, J., van der Linde, J., Billock, J., Hong, J., Lee, J., Fu, J., Chi, J., Huang, J., Liu, J., Wang, J., Yu, J., Bitton, J., Spisak, J., Park, J., Rocca, J., Johnstun, J., Saxe, J., Jia, J., Alwala, K. V., Upasani, K., Plawiak, K., Li, K., Heafield, K., Stone, K., and et al. (2024). The llama 3 herd of models. CoRR, abs/2407.21783."
1505
+ },
1506
+ {
1507
+ "type": "ref_text",
1508
+ "bbox": [
1509
+ 0.174,
1510
+ 0.641,
1511
+ 0.829,
1512
+ 0.684
1513
+ ],
1514
+ "angle": 0,
1515
+ "content": "Hendrycks, D., Burns, C., Basart, S., Zou, A., Mazeika, M., Song, D., and Steinhardt, J. (2021a). Measuring massive multitask language understanding. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021."
1516
+ },
1517
+ {
1518
+ "type": "ref_text",
1519
+ "bbox": [
1520
+ 0.174,
1521
+ 0.695,
1522
+ 0.829,
1523
+ 0.751
1524
+ ],
1525
+ "angle": 0,
1526
+ "content": "Hendrycks, D., Burns, C., Kadavath, S., Arora, A., Basart, S., Tang, E., Song, D., and Steinhardt, J. (2021b). Measuring mathematical problem solving with the MATH dataset. In Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks 1, NeurIPS Datasets and Benchmarks 2021, December 2021, virtual."
1527
+ },
1528
+ {
1529
+ "type": "ref_text",
1530
+ "bbox": [
1531
+ 0.174,
1532
+ 0.762,
1533
+ 0.829,
1534
+ 0.805
1535
+ ],
1536
+ "angle": 0,
1537
+ "content": "Horowitz, M. (2014). 1.1 computing's energy problem (and what we can do about it). In 2014 IEEE International Conference on Solid-State Circuits Conference, ISSCC 2014, Digest of Technical Papers, San Francisco, CA, USA, February 9-13, 2014, pages 10-14."
1538
+ },
1539
+ {
1540
+ "type": "ref_text",
1541
+ "bbox": [
1542
+ 0.174,
1543
+ 0.816,
1544
+ 0.829,
1545
+ 0.845
1546
+ ],
1547
+ "angle": 0,
1548
+ "content": "Hsu, P.-L., Dai, Y., Kothapalli, V., Song, Q., Tang, S., Zhu, S., Shimizu, S., Sahni, S., Ning, H., and Chen, Y. (2024). Liger kernel: Efficient triton kernels for LLM training. CoRR, abs/2410.10989."
1549
+ },
1550
+ {
1551
+ "type": "ref_text",
1552
+ "bbox": [
1553
+ 0.174,
1554
+ 0.856,
1555
+ 0.829,
1556
+ 0.912
1557
+ ],
1558
+ "angle": 0,
1559
+ "content": "Hu, S., Tu, Y., Han, X., He, C., Cui, G., Long, X., Zheng, Z., Fang, Y., Huang, Y., Zhao, W., Zhang, X., Thai, Z. L., Zhang, K., Wang, C., Yao, Y., Zhao, C., Zhou, J., Cai, J., Zhai, Z., Ding, N., Jia, C., Zeng, G., Li, D., Liu, Z., and Sun, M. (2024). Minicpm: Unveiling the potential of small language models with scalable training strategies. CoRR, abs/2404.06395."
1560
+ },
1561
+ {
1562
+ "type": "list",
1563
+ "bbox": [
1564
+ 0.174,
1565
+ 0.115,
1566
+ 0.829,
1567
+ 0.912
1568
+ ],
1569
+ "angle": 0,
1570
+ "content": null
1571
+ },
1572
+ {
1573
+ "type": "page_number",
1574
+ "bbox": [
1575
+ 0.491,
1576
+ 0.936,
1577
+ 0.51,
1578
+ 0.948
1579
+ ],
1580
+ "angle": 0,
1581
+ "content": "10"
1582
+ }
1583
+ ],
1584
+ [
1585
+ {
1586
+ "type": "ref_text",
1587
+ "bbox": [
1588
+ 0.174,
1589
+ 0.091,
1590
+ 0.828,
1591
+ 0.15
1592
+ ],
1593
+ "angle": 0,
1594
+ "content": "Joshi, M., Choi, E., Weld, D. S., and Zettlemoyer, L. (2017). Triviaqa: A large scale distantly supervised challenge dataset for reading comprehension. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics, ACL 2017, Vancouver, Canada, July 30 - August 4, Volume 1: Long Papers, pages 1601-1611."
1595
+ },
1596
+ {
1597
+ "type": "ref_text",
1598
+ "bbox": [
1599
+ 0.173,
1600
+ 0.156,
1601
+ 0.827,
1602
+ 0.213
1603
+ ],
1604
+ "angle": 0,
1605
+ "content": "Li, H., Dong, Q., Tang, Z., Wang, C., Zhang, X., Huang, H., Huang, S., Huang, X., Huang, Z., Zhang, D., Gu, Y., Cheng, X., Wang, X., Chen, S.-Q., Dong, L., Lu, W., Sui, Z., Wang, B., Lam, W., and Wei, F. (2024a). Synthetic data (almost) from scratch: Generalized instruction tuning for language models. CoRR, abs/2402.13064."
1606
+ },
1607
+ {
1608
+ "type": "ref_text",
1609
+ "bbox": [
1610
+ 0.173,
1611
+ 0.221,
1612
+ 0.828,
1613
+ 0.389
1614
+ ],
1615
+ "angle": 0,
1616
+ "content": "Li, J., Fang, A., Smyrnis, G., Ivgi, M., Jordan, M., Gadre, S. Y., Bansal, H., Guha, E., Keh, S. S., Arora, K., Garg, S., Xin, R., Muennighoff, N., Heckel, R., Mercat, J., Chen, M. F., Gururangan, S., Wortsman, M., Albalak, A., Bitton, Y., Nezhurina, M., Abbas, A., Hsieh, C.-Y., Ghosh, D., Gardner, J., Kilian, M., Zhang, H., Shao, R., Pratt, S. M., Sanyal, S., Ilharco, G., Daras, G., Marathe, K., Gokaslan, A., Zhang, J., Chandu, K. R., Nguyen, T., Vasiljevic, I., Kakade, S. M., Song, S., Sanghavi, S., Faghri, F., Oh, S., Zettlemoyer, L., Lo, K., El-Nouby, A., Pouransari, H., Toshev, A., Wang, S., Groeneveld, D., Soldaini, L., Koh, P. W., Jitsev, J., Kollar, T., Dimakis, A., Carmon, Y., Dave, A., Schmidt, L., and Shankar, V. (2024b). Datacomp-lm: In search of the next generation of training sets for language models. In Globersons, A., Mackey, L., Belgrave, D., Fan, A., Paquet, U., Tomczak, J. M., and Zhang, C., editors, Advances in Neural Information Processing Systems 38: Annual Conference on Neural Information Processing Systems 2024, NeurIPS 2024, Vancouver, BC, Canada, December 10 - 15, 2024."
1617
+ },
1618
+ {
1619
+ "type": "ref_text",
1620
+ "bbox": [
1621
+ 0.173,
1622
+ 0.397,
1623
+ 0.827,
1624
+ 0.427
1625
+ ],
1626
+ "angle": 0,
1627
+ "content": "Lian, W., Wang, G., Goodson, B., Pentland, E., Cook, A., Vong, C., and \"Teknium\" (2023). Slimorca: An open dataset of gpt-4 augmented flan reasoning traces, with verification."
1628
+ },
1629
+ {
1630
+ "type": "ref_text",
1631
+ "bbox": [
1632
+ 0.173,
1633
+ 0.434,
1634
+ 0.827,
1635
+ 0.478
1636
+ ],
1637
+ "angle": 0,
1638
+ "content": "Lin, S., Hilton, J., and Evans, O. (2022). Truthfulqa: Measuring how models mimic human falsehoods. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), ACL 2022, Dublin, Ireland, May 22-27, 2022, pages 3214-3252."
1639
+ },
1640
+ {
1641
+ "type": "ref_text",
1642
+ "bbox": [
1643
+ 0.173,
1644
+ 0.485,
1645
+ 0.827,
1646
+ 0.529
1647
+ ],
1648
+ "angle": 0,
1649
+ "content": "Liu, J., Xia, C. S., Wang, Y., and Zhang, L. (2023). Is your code generated by chatgpt really correct? rigorous evaluation of large language models for code generation. Advances in Neural Information Processing Systems, 36:21558-21572."
1650
+ },
1651
+ {
1652
+ "type": "ref_text",
1653
+ "bbox": [
1654
+ 0.173,
1655
+ 0.537,
1656
+ 0.827,
1657
+ 0.567
1658
+ ],
1659
+ "angle": 0,
1660
+ "content": "Ma, S., Wang, H., Ma, L., Wang, L., Wang, W., Huang, S., Dong, L., Wang, R., Xue, J., and Wei, F. (2024). The era of 1-bit llms: All large language models are in 1.58 bits. CoRR, abs/2402.17764."
1661
+ },
1662
+ {
1663
+ "type": "ref_text",
1664
+ "bbox": [
1665
+ 0.173,
1666
+ 0.575,
1667
+ 0.827,
1668
+ 0.605
1669
+ ],
1670
+ "angle": 0,
1671
+ "content": "Mihaylov, T., Clark, P., Khot, T., and Sabharwal, A. (2018). Can a suit of armor conduct electricity? A new dataset for open book question answering. CoRR, abs/1809.02789."
1672
+ },
1673
+ {
1674
+ "type": "ref_text",
1675
+ "bbox": [
1676
+ 0.173,
1677
+ 0.612,
1678
+ 0.828,
1679
+ 0.697
1680
+ ],
1681
+ "angle": 0,
1682
+ "content": "Penedo, G., Kydlícek, H., Allal, L. B., Lozhkov, A., Mitchell, M., Raffel, C. A., von Werra, L., and Wolf, T. (2024). The fineweb datasets: Decanting the web for the finest text data at scale. In Globersons, A., Mackey, L., Belgrave, D., Fan, A., Paquet, U., Tomczak, J. M., and Zhang, C., editors, Advances in Neural Information Processing Systems 38: Annual Conference on Neural Information Processing Systems 2024, NeurIPS 2024, Vancouver, BC, Canada, December 10 - 15, 2024."
1683
+ },
1684
+ {
1685
+ "type": "ref_text",
1686
+ "bbox": [
1687
+ 0.173,
1688
+ 0.705,
1689
+ 0.828,
1690
+ 0.763
1691
+ ],
1692
+ "angle": 0,
1693
+ "content": "Rafailov, R., Sharma, A., Mitchell, E., Manning, C. D., Ermon, S., and Finn, C. (2023). Direct preference optimization: Your language model is secretly a reward model. In Oh, A., Naumann, T., Globerson, A., Saenko, K., Hardt, M., and Levine, S., editors, Advances in Neural Information Processing Systems 36."
1694
+ },
1695
+ {
1696
+ "type": "ref_text",
1697
+ "bbox": [
1698
+ 0.173,
1699
+ 0.77,
1700
+ 0.825,
1701
+ 0.8
1702
+ ],
1703
+ "angle": 0,
1704
+ "content": "Sakaguchi, K., Bras, R. L., Bhagavatula, C., and Choi, Y. (2020). WinoGrande: an adversarial winograd schema challenge at scale. In AAAI, pages 8732-8740."
1705
+ },
1706
+ {
1707
+ "type": "ref_text",
1708
+ "bbox": [
1709
+ 0.173,
1710
+ 0.807,
1711
+ 0.825,
1712
+ 0.838
1713
+ ],
1714
+ "angle": 0,
1715
+ "content": "Schulman, J., Wolski, F., Dhariwal, P., Radford, A., and Klimov, O. (2017). Proximal policy optimization algorithms. CoRR, abs/1707.06347."
1716
+ },
1717
+ {
1718
+ "type": "ref_text",
1719
+ "bbox": [
1720
+ 0.173,
1721
+ 0.845,
1722
+ 0.827,
1723
+ 0.888
1724
+ ],
1725
+ "angle": 0,
1726
+ "content": "Shao, Z., Wang, P., Zhu, Q., Xu, R., Song, J., Zhang, M., Li, Y. K., Wu, Y., and Guo, D. (2024). Deepseekmath: Pushing the limits of mathematical reasoning in open language models. CoRR, abs/2402.03300."
1727
+ },
1728
+ {
1729
+ "type": "ref_text",
1730
+ "bbox": [
1731
+ 0.173,
1732
+ 0.897,
1733
+ 0.7,
1734
+ 0.913
1735
+ ],
1736
+ "angle": 0,
1737
+ "content": "Shazeer, N. (2020). GLU variants improve transformer. CoRR, abs/2002.05202."
1738
+ },
1739
+ {
1740
+ "type": "list",
1741
+ "bbox": [
1742
+ 0.173,
1743
+ 0.091,
1744
+ 0.828,
1745
+ 0.913
1746
+ ],
1747
+ "angle": 0,
1748
+ "content": null
1749
+ },
1750
+ {
1751
+ "type": "page_number",
1752
+ "bbox": [
1753
+ 0.491,
1754
+ 0.936,
1755
+ 0.508,
1756
+ 0.948
1757
+ ],
1758
+ "angle": 0,
1759
+ "content": "11"
1760
+ }
1761
+ ],
1762
+ [
1763
+ {
1764
+ "type": "ref_text",
1765
+ "bbox": [
1766
+ 0.174,
1767
+ 0.091,
1768
+ 0.826,
1769
+ 0.121
1770
+ ],
1771
+ "angle": 0,
1772
+ "content": "Su, J., Ahmed, M. H. M., Lu, Y., Pan, S., Bo, W., and Liu, Y. (2024). Roformer: Enhanced transformer with rotary position embedding. Neurocomputing, 568:127063."
1773
+ },
1774
+ {
1775
+ "type": "ref_text",
1776
+ "bbox": [
1777
+ 0.174,
1778
+ 0.127,
1779
+ 0.826,
1780
+ 0.199
1781
+ ],
1782
+ "angle": 0,
1783
+ "content": "Talmor, A., Herzig, J., Lourie, N., and Berant, J. (2019). Commonsenseqa: A question answering challenge targeting commonsense knowledge. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL-HLT 2019, Minneapolis, MN, USA, June 2-7, 2019, Volume 1 (Long and Short Papers), pages 4149-4158."
1784
+ },
1785
+ {
1786
+ "type": "ref_text",
1787
+ "bbox": [
1788
+ 0.174,
1789
+ 0.205,
1790
+ 0.826,
1791
+ 0.248
1792
+ ],
1793
+ "angle": 0,
1794
+ "content": "Tang, Z., Zhang, X., Wang, B., and Wei, F. (2024). Mathscale: Scaling instruction tuning for mathematical reasoning. In *Forty-first International Conference on Machine Learning*, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net."
1795
+ },
1796
+ {
1797
+ "type": "ref_text",
1798
+ "bbox": [
1799
+ 0.175,
1800
+ 0.255,
1801
+ 0.547,
1802
+ 0.272
1803
+ ],
1804
+ "angle": 0,
1805
+ "content": "Team, F.-L. (2024). The falcon 3 family of open models."
1806
+ },
1807
+ {
1808
+ "type": "ref_text",
1809
+ "bbox": [
1810
+ 0.175,
1811
+ 0.278,
1812
+ 0.829,
1813
+ 0.31
1814
+ ],
1815
+ "angle": 0,
1816
+ "content": "Team, G., Kamath, A., Ferret, J., Pathak, S., Vieillard, N., Merhej, R., Perrin, S., Matejovicova, T., Ram'e, A., Rivi'ere, M., et al. (2025). Gemma 3 technical report. arXiv preprint arXiv:2503.19786."
1817
+ },
1818
+ {
1819
+ "type": "ref_text",
1820
+ "bbox": [
1821
+ 0.174,
1822
+ 0.315,
1823
+ 0.826,
1824
+ 0.386
1825
+ ],
1826
+ "angle": 0,
1827
+ "content": "Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., Kaiser, L., and Polosukhin, I. (2017). Attention is all you need. In Guyon, I., von Luxburg, U., Bengio, S., Wallach, H. M., Fergus, R., Vishwanathan, S. V. N., and Garnett, R., editors, Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, pages 5998-6008."
1828
+ },
1829
+ {
1830
+ "type": "ref_text",
1831
+ "bbox": [
1832
+ 0.175,
1833
+ 0.393,
1834
+ 0.827,
1835
+ 0.424
1836
+ ],
1837
+ "angle": 0,
1838
+ "content": "Wang, H., Ma, S., Dong, L., Huang, S., Wang, H., Ma, L., Yang, F., Wang, R., Wu, Y., and Wei, F. (2023a). Bitnet: Scaling 1-bit transformers for large language models. CoRR, abs/2310.11453."
1839
+ },
1840
+ {
1841
+ "type": "ref_text",
1842
+ "bbox": [
1843
+ 0.175,
1844
+ 0.43,
1845
+ 0.827,
1846
+ 0.472
1847
+ ],
1848
+ "angle": 0,
1849
+ "content": "Wang, H., Ma, S., Huang, S., Dong, L., Wang, W., Peng, Z., Wu, Y., Bajaj, P., Singhal, S., Benhaim, A., Patra, B., Liu, Z., Chaudhary, V., Song, X., and Wei, F. (2022). Foundation transformers. CoRR."
1850
+ },
1851
+ {
1852
+ "type": "ref_text",
1853
+ "bbox": [
1854
+ 0.175,
1855
+ 0.48,
1856
+ 0.825,
1857
+ 0.51
1858
+ ],
1859
+ "angle": 0,
1860
+ "content": "Wang, H., Ma, S., Wang, R., and Wei, F. (2024a). Q-sparse: All large language models can be fully sparsely-activated. CoRR, abs/2407.10969."
1861
+ },
1862
+ {
1863
+ "type": "ref_text",
1864
+ "bbox": [
1865
+ 0.175,
1866
+ 0.516,
1867
+ 0.827,
1868
+ 0.546
1869
+ ],
1870
+ "angle": 0,
1871
+ "content": "Wang, H., Ma, S., and Wei, F. (2024b). Bitnet a4.8: 4-bit activations for 1-bit llms. CoRR, abs/2411.04965."
1872
+ },
1873
+ {
1874
+ "type": "ref_text",
1875
+ "bbox": [
1876
+ 0.175,
1877
+ 0.553,
1878
+ 0.827,
1879
+ 0.583
1880
+ ],
1881
+ "angle": 0,
1882
+ "content": "Wang, J., Zhou, H., Song, T., Cao, S., Xia, Y., Cao, T., Wei, J., Ma, S., Wang, H., and Wei, F. (2025). Bitnet.cpp: Efficient edge inference for ternary lms. CoRR, abs/2502.11880."
1883
+ },
1884
+ {
1885
+ "type": "ref_text",
1886
+ "bbox": [
1887
+ 0.175,
1888
+ 0.59,
1889
+ 0.827,
1890
+ 0.62
1891
+ ],
1892
+ "angle": 0,
1893
+ "content": "Wang, L., Ma, L., Cao, S., Zheng, N., Zhang, Q., Xue, J., Miao, Z., Cao, T., and Yang, Y. (2023b). Ladder: Efficient tensor compilation on customized data format. In OSDI."
1894
+ },
1895
+ {
1896
+ "type": "ref_text",
1897
+ "bbox": [
1898
+ 0.174,
1899
+ 0.626,
1900
+ 0.826,
1901
+ 0.683
1902
+ ],
1903
+ "angle": 0,
1904
+ "content": "Xu, C., Sun, Q., Zheng, K., Geng, X., Zhao, P., Feng, J., Tao, C., Lin, Q., and Jiang, D. (2024a). Wizardlm: Empowering large pre-trained language models to follow complex instructions. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net."
1905
+ },
1906
+ {
1907
+ "type": "ref_text",
1908
+ "bbox": [
1909
+ 0.174,
1910
+ 0.69,
1911
+ 0.826,
1912
+ 0.761
1913
+ ],
1914
+ "angle": 0,
1915
+ "content": "Xu, Y., Han, X., Yang, Z., Wang, S., Zhu, Q., Liu, Z., Liu, W., and Che, W. (2024b). Onebit: Towards extremely low-bit large language models. In Globersons, A., Mackey, L., Belgrave, D., Fan, A., Paquet, U., Tomczak, J. M., and Zhang, C., editors, Advances in Neural Information Processing Systems 38: Annual Conference on Neural Information Processing Systems 2024, NeurIPS 2024, Vancouver, BC, Canada, December 10 - 15, 2024."
1916
+ },
1917
+ {
1918
+ "type": "ref_text",
1919
+ "bbox": [
1920
+ 0.175,
1921
+ 0.768,
1922
+ 0.827,
1923
+ 0.799
1924
+ ],
1925
+ "angle": 0,
1926
+ "content": "Xu, Z., Jiang, F., Niu, L., Deng, Y., Poovendran, R., Choi, Y., and Lin, B. Y. (2024c). Magpie: Alignment data synthesis from scratch by prompting aligned lms with nothing. CoRR, abs/2406.08464."
1927
+ },
1928
+ {
1929
+ "type": "ref_text",
1930
+ "bbox": [
1931
+ 0.175,
1932
+ 0.805,
1933
+ 0.825,
1934
+ 0.835
1935
+ ],
1936
+ "angle": 0,
1937
+ "content": "Yadav, V., Bethard, S., and Surdeanu, M. (2019). Quick and (not so) dirty: Unsupervised selection of justification sentences for multi-hop question answering. In EMNLP-IJCNLP."
1938
+ },
1939
+ {
1940
+ "type": "ref_text",
1941
+ "bbox": [
1942
+ 0.174,
1943
+ 0.842,
1944
+ 0.826,
1945
+ 0.912
1946
+ ],
1947
+ "angle": 0,
1948
+ "content": "Yang, A., Yang, B., Zhang, B., Hui, B., Zheng, B., Yu, B., Li, C., Liu, D., Huang, F., Wei, H., Lin, H., Yang, J., Tu, J., Zhang, J., Yang, J., Yang, J., Zhou, J., Lin, J., Dang, K., Lu, K., Bao, K., Yang, K., Yu, L., Li, M., Xue, M., Zhang, P., Zhu, Q., Men, R., Lin, R., Li, T., Xia, T., Ren, X., Ren, X., Fan, Y., Su, Y., Zhang, Y., Wan, Y., Liu, Y., Cui, Z., Zhang, Z., and Qiu, Z. (2024). Qwen2.5 technical report. CoRR, abs/2412.15115."
1949
+ },
1950
+ {
1951
+ "type": "list",
1952
+ "bbox": [
1953
+ 0.174,
1954
+ 0.091,
1955
+ 0.829,
1956
+ 0.912
1957
+ ],
1958
+ "angle": 0,
1959
+ "content": null
1960
+ },
1961
+ {
1962
+ "type": "page_number",
1963
+ "bbox": [
1964
+ 0.491,
1965
+ 0.936,
1966
+ 0.509,
1967
+ 0.948
1968
+ ],
1969
+ "angle": 0,
1970
+ "content": "12"
1971
+ }
1972
+ ],
1973
+ [
1974
+ {
1975
+ "type": "ref_text",
1976
+ "bbox": [
1977
+ 0.172,
1978
+ 0.091,
1979
+ 0.826,
1980
+ 0.135
1981
+ ],
1982
+ "angle": 0,
1983
+ "content": "Zellers, R., Holtzman, A., Bisk, Y., Farhadi, A., and Choi, Y. (2019). HellaSwag: can a machine really finish your sentence? In Proceedings of the 57th Conference of the Association for Computational Linguistics, pages 4791-4800."
1984
+ },
1985
+ {
1986
+ "type": "ref_text",
1987
+ "bbox": [
1988
+ 0.173,
1989
+ 0.144,
1990
+ 0.826,
1991
+ 0.174
1992
+ ],
1993
+ "angle": 0,
1994
+ "content": "Zhang, Y., Zhang, Z., and Lew, L. (2022). PokeBNN: A binary pursuit of lightweight accuracy. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12465-12475. IEEE."
1995
+ },
1996
+ {
1997
+ "type": "ref_text",
1998
+ "bbox": [
1999
+ 0.173,
2000
+ 0.182,
2001
+ 0.827,
2002
+ 0.226
2003
+ ],
2004
+ "angle": 0,
2005
+ "content": "Zhao, W., Ren, X., Hessel, J., Cardie, C., Choi, Y., and Deng, Y. (2024). Wildchat: 1m chatgpt interaction logs in the wild. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net."
2006
+ },
2007
+ {
2008
+ "type": "ref_text",
2009
+ "bbox": [
2010
+ 0.173,
2011
+ 0.233,
2012
+ 0.827,
2013
+ 0.291
2014
+ ],
2015
+ "angle": 0,
2016
+ "content": "Zheng, L., Chiang, W.-L., Sheng, Y., Li, T., Zhuang, S., Wu, Z., Zhuang, Y., Li, Z., Lin, Z., Xing, E. P., Gonzalez, J. E., Stoica, I., and Zhang, H. (2024). Lmsys-chat-1m: A large-scale real-world LLM conversation dataset. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net."
2017
+ },
2018
+ {
2019
+ "type": "ref_text",
2020
+ "bbox": [
2021
+ 0.173,
2022
+ 0.299,
2023
+ 0.827,
2024
+ 0.343
2025
+ ],
2026
+ "angle": 0,
2027
+ "content": "Zheng, L., Chiang, W.-L., Sheng, Y., Zhuang, S., Wu, Z., Zhuang, Y., Lin, Z., Li, Z., Li, D., Xing, E. P., Zhang, H., Gonzalez, J. E., and Stoica, I. (2023). Judging lvm-as-a-judge with mt-bench and chatbot arena. In Advances in Neural Information Processing Systems 36."
2028
+ },
2029
+ {
2030
+ "type": "ref_text",
2031
+ "bbox": [
2032
+ 0.173,
2033
+ 0.351,
2034
+ 0.827,
2035
+ 0.381
2036
+ ],
2037
+ "angle": 0,
2038
+ "content": "Zhou, J., Lu, T., Mishra, S., Brahma, S., Basu, S., Luan, Y., Zhou, D., and Hou, L. (2023). Instruction-following evaluation for large language models. CoRR, abs/2311.07911."
2039
+ },
2040
+ {
2041
+ "type": "list",
2042
+ "bbox": [
2043
+ 0.172,
2044
+ 0.091,
2045
+ 0.827,
2046
+ 0.381
2047
+ ],
2048
+ "angle": 0,
2049
+ "content": null
2050
+ },
2051
+ {
2052
+ "type": "title",
2053
+ "bbox": [
2054
+ 0.173,
2055
+ 0.406,
2056
+ 0.402,
2057
+ 0.424
2058
+ ],
2059
+ "angle": 0,
2060
+ "content": "A Open-weight Baselines"
2061
+ },
2062
+ {
2063
+ "type": "text",
2064
+ "bbox": [
2065
+ 0.172,
2066
+ 0.437,
2067
+ 0.719,
2068
+ 0.453
2069
+ ],
2070
+ "angle": 0,
2071
+ "content": "We summarize the links to the open-weight LLMs evaluated in this work as below:"
2072
+ },
2073
+ {
2074
+ "type": "text",
2075
+ "bbox": [
2076
+ 0.217,
2077
+ 0.465,
2078
+ 0.622,
2079
+ 0.479
2080
+ ],
2081
+ "angle": 0,
2082
+ "content": "- LLaMA 3.2 1B: meta-llama/Llama-3.2-1B-Instruct"
2083
+ },
2084
+ {
2085
+ "type": "text",
2086
+ "bbox": [
2087
+ 0.217,
2088
+ 0.484,
2089
+ 0.508,
2090
+ 0.499
2091
+ ],
2092
+ "angle": 0,
2093
+ "content": "- Gemma-3 1B: google/gemma-3-1b-it"
2094
+ },
2095
+ {
2096
+ "type": "text",
2097
+ "bbox": [
2098
+ 0.217,
2099
+ 0.503,
2100
+ 0.563,
2101
+ 0.518
2102
+ ],
2103
+ "angle": 0,
2104
+ "content": "Qwen2.5 0.5B: Qwen/Qwen2.5-0.5B-Instruct"
2105
+ },
2106
+ {
2107
+ "type": "text",
2108
+ "bbox": [
2109
+ 0.217,
2110
+ 0.522,
2111
+ 0.563,
2112
+ 0.537
2113
+ ],
2114
+ "angle": 0,
2115
+ "content": "- Qwen2.5 1.5B: Qwen/Qwen2.5-1.5B-Instruct"
2116
+ },
2117
+ {
2118
+ "type": "text",
2119
+ "bbox": [
2120
+ 0.217,
2121
+ 0.541,
2122
+ 0.534,
2123
+ 0.556
2124
+ ],
2125
+ "angle": 0,
2126
+ "content": "- Qwen2.5 3B: Qwen/Qwen2.5-3B-Instruct"
2127
+ },
2128
+ {
2129
+ "type": "text",
2130
+ "bbox": [
2131
+ 0.217,
2132
+ 0.56,
2133
+ 0.647,
2134
+ 0.575
2135
+ ],
2136
+ "angle": 0,
2137
+ "content": "- SmolLM2 1.7B: HuggingFaceTB/SmolLM2-1.7B-Instruct"
2138
+ },
2139
+ {
2140
+ "type": "text",
2141
+ "bbox": [
2142
+ 0.217,
2143
+ 0.579,
2144
+ 0.57,
2145
+ 0.594
2146
+ ],
2147
+ "angle": 0,
2148
+ "content": "- MiniCPM 2B: openbmb/MiniCPM-2B-dpo-bf16"
2149
+ },
2150
+ {
2151
+ "type": "text",
2152
+ "bbox": [
2153
+ 0.217,
2154
+ 0.598,
2155
+ 0.731,
2156
+ 0.613
2157
+ ],
2158
+ "angle": 0,
2159
+ "content": "- Qwen2.5 1.5B-GPTQ-int4: Qwen/Qwen2.5-1.5B-Instruct-GPTQ-Int4"
2160
+ },
2161
+ {
2162
+ "type": "text",
2163
+ "bbox": [
2164
+ 0.217,
2165
+ 0.618,
2166
+ 0.673,
2167
+ 0.633
2168
+ ],
2169
+ "angle": 0,
2170
+ "content": "Qwen2.5 1.5B-AWQ-int4: Qwen/Qwen2.5-1.5B-Instruct-AWQ"
2171
+ },
2172
+ {
2173
+ "type": "text",
2174
+ "bbox": [
2175
+ 0.217,
2176
+ 0.637,
2177
+ 0.464,
2178
+ 0.652
2179
+ ],
2180
+ "angle": 0,
2181
+ "content": "- Bonsai 0.5B: deepgrove/Bonsai"
2182
+ },
2183
+ {
2184
+ "type": "text",
2185
+ "bbox": [
2186
+ 0.217,
2187
+ 0.656,
2188
+ 0.593,
2189
+ 0.67
2190
+ ],
2191
+ "angle": 0,
2192
+ "content": "- OLMo-Bitnet 1B: NousResearch/OLMo-Bitnet-1B"
2193
+ },
2194
+ {
2195
+ "type": "text",
2196
+ "bbox": [
2197
+ 0.217,
2198
+ 0.675,
2199
+ 0.664,
2200
+ 0.689
2201
+ ],
2202
+ "angle": 0,
2203
+ "content": "- Falcon3-1.58bit 7B: tiiuae/Falcon3-7B-Instruct-1.58bit"
2204
+ },
2205
+ {
2206
+ "type": "text",
2207
+ "bbox": [
2208
+ 0.217,
2209
+ 0.694,
2210
+ 0.686,
2211
+ 0.708
2212
+ ],
2213
+ "angle": 0,
2214
+ "content": "- Llama3-8B-1.58 8B: HF1BitLLM/Llama3-8B-1.58-100B-tokens"
2215
+ },
2216
+ {
2217
+ "type": "list",
2218
+ "bbox": [
2219
+ 0.217,
2220
+ 0.465,
2221
+ 0.731,
2222
+ 0.708
2223
+ ],
2224
+ "angle": 0,
2225
+ "content": null
2226
+ },
2227
+ {
2228
+ "type": "title",
2229
+ "bbox": [
2230
+ 0.172,
2231
+ 0.728,
2232
+ 0.436,
2233
+ 0.746
2234
+ ],
2235
+ "angle": 0,
2236
+ "content": "B Evaluation Pipeline Details"
2237
+ },
2238
+ {
2239
+ "type": "text",
2240
+ "bbox": [
2241
+ 0.171,
2242
+ 0.759,
2243
+ 0.828,
2244
+ 0.789
2245
+ ],
2246
+ "angle": 0,
2247
+ "content": "To ensure standardized evaluation, we employed established toolkits for different benchmark categories. Specifically:"
2248
+ },
2249
+ {
2250
+ "type": "text",
2251
+ "bbox": [
2252
+ 0.217,
2253
+ 0.799,
2254
+ 0.724,
2255
+ 0.814
2256
+ ],
2257
+ "angle": 0,
2258
+ "content": "- For the HumanEval+ coding benchmark, we utilized the evalplus toolkit."
2259
+ },
2260
+ {
2261
+ "type": "text",
2262
+ "bbox": [
2263
+ 0.217,
2264
+ 0.818,
2265
+ 0.826,
2266
+ 0.845
2267
+ ],
2268
+ "angle": 0,
2269
+ "content": "- For the MATH-500 mathematical reasoning benchmark, we used a customized version of the math-evaluation-harness toolkit."
2270
+ },
2271
+ {
2272
+ "type": "text",
2273
+ "bbox": [
2274
+ 0.217,
2275
+ 0.851,
2276
+ 0.825,
2277
+ 0.879
2278
+ ],
2279
+ "angle": 0,
2280
+ "content": "- For the MT-Bench conversational benchmark, evaluation was performed using the official LLM Judge open-source codebase."
2281
+ },
2282
+ {
2283
+ "type": "text",
2284
+ "bbox": [
2285
+ 0.217,
2286
+ 0.884,
2287
+ 0.825,
2288
+ 0.912
2289
+ ],
2290
+ "angle": 0,
2291
+ "content": "- For all other benchmarks assessing language understanding, reasoning, knowledge, and comprehension, we used the standard lm-evaluation-harness framework."
2292
+ },
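+ As an illustration, the lm-evaluation-harness benchmarks can be reproduced through its Python API (the v0.4+ `simple_evaluate` entry point); the task names and the Hugging Face repo id below are assumptions and may need adjusting:
+
+ ```python
+ import lm_eval  # lm-evaluation-harness
+
+ results = lm_eval.simple_evaluate(
+     model="hf",
+     model_args="pretrained=microsoft/bitnet-b1.58-2B-4T",  # assumed repo id
+     tasks=["arc_challenge", "hellaswag", "winogrande"],
+     num_fewshot=0,
+ )
+ print(results["results"])
+ ```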
2293
+ {
2294
+ "type": "list",
2295
+ "bbox": [
2296
+ 0.217,
2297
+ 0.799,
2298
+ 0.826,
2299
+ 0.912
2300
+ ],
2301
+ "angle": 0,
2302
+ "content": null
2303
+ },
2304
+ {
2305
+ "type": "page_number",
2306
+ "bbox": [
2307
+ 0.491,
2308
+ 0.936,
2309
+ 0.509,
2310
+ 0.948
2311
+ ],
2312
+ "angle": 0,
2313
+ "content": "13"
2314
+ }
2315
+ ],
2316
+ [
2317
+ {
2318
+ "type": "table",
2319
+ "bbox": [
2320
+ 0.364,
2321
+ 0.089,
2322
+ 0.634,
2323
+ 0.148
2324
+ ],
2325
+ "angle": 0,
2326
+ "content": "<table><tr><td>Bits</td><td>ADD Energy</td><td>MUL Energy</td></tr><tr><td>FP16</td><td>0.16</td><td>0.34</td></tr><tr><td>INT8</td><td>0.007</td><td>0.07</td></tr></table>"
2327
+ },
2328
+ {
2329
+ "type": "table_caption",
2330
+ "bbox": [
2331
+ 0.182,
2332
+ 0.153,
2333
+ 0.816,
2334
+ 0.169
2335
+ ],
2336
+ "angle": 0,
2337
+ "content": "Table 4: ADD and MUL energy consumption (in pJ) of different precision at \\(7\\mathrm{nm}\\) process nodes."
2338
+ },
2339
+ {
2340
+ "type": "text",
2341
+ "bbox": [
2342
+ 0.175,
2343
+ 0.194,
2344
+ 0.825,
2345
+ 0.222
2346
+ ],
2347
+ "angle": 0,
2348
+ "content": "Models were prompted using a chat format for generative tasks (e.g., GSM8K, IFEval, and MT-Bench), while default settings from the respective toolkits were used for other tasks."
2349
+ },
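+ A minimal sketch of the chat-format prompting mentioned above, using the standard transformers chat-template API; the model id and message are placeholders, and each toolkit's own defaults govern the actual prompts used in the evaluation:
+
+ ```python
+ from transformers import AutoTokenizer
+
+ tok = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-1.5B-Instruct")
+ messages = [{"role": "user", "content": "Solve: 12 * 7 - 5 = ?"}]  # placeholder
+ prompt = tok.apply_chat_template(messages, tokenize=False,
+                                  add_generation_prompt=True)
+ ```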
2350
+ {
2351
+ "type": "text",
2352
+ "bbox": [
2353
+ 0.175,
2354
+ 0.228,
2355
+ 0.825,
2356
+ 0.284
2357
+ ],
2358
+ "angle": 0,
2359
+ "content": "For energy consumption, we utilize the energy model in (Horowitz, 2014; Zhang et al., 2022) to estimate the arithmetic operations energy (AOE) of matrix multiplication. The sequence length is set as 512 tokens. We present the energy consumption for ADD and MUL operation at \\(7\\mathrm{nm}\\) process nodes in Table 4."
2360
+ },
2361
+ {
2362
+ "type": "text",
2363
+ "bbox": [
2364
+ 0.175,
2365
+ 0.29,
2366
+ 0.825,
2367
+ 0.36
2368
+ ],
2369
+ "angle": 0,
2370
+ "content": "To assess CPU decoding performance, latency measurements were conducted on a Surface Laptop Studio 2 system powered by a 13th Gen Intel Core i7-13800H processor. The benchmarking process utilized 8 CPU threads. Specifically, the BitNet b1.58 2B4T model was tested using its bitnet.cpp implementation, whereas other models were evaluated using the llama.cpp framework. For each model, we generated 128 tokens and report the average latency per token for this task."
2371
+ },
2372
+ {
2373
+ "type": "page_number",
2374
+ "bbox": [
2375
+ 0.492,
2376
+ 0.936,
2377
+ 0.508,
2378
+ 0.947
2379
+ ],
2380
+ "angle": 0,
2381
+ "content": "14"
2382
+ }
2383
+ ]
2384
+ ]
data/2025/2504_12xxx/2504.12285/2c3f7ef8-ab61-4b87-a7bf-c49da203744d_origin.pdf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d926b0f3a796c4f78416808392482b06374c328a9d086452c09bfa09c74cfb85
3
+ size 308962
data/2025/2504_12xxx/2504.12285/full.md ADDED
@@ -0,0 +1,298 @@
1
+ # Abstract
2
+
3
+ We introduce BitNet b1.58 2B4T, the first open-source, native 1-bit Large Language Model (LLM) at the 2-billion parameter scale. Trained on a corpus of 4 trillion tokens, the model has been rigorously evaluated across benchmarks covering language understanding, mathematical reasoning, coding proficiency, and conversational ability. Our results demonstrate that BitNet b1.58 2B4T achieves performance on par with leading open-weight, full-precision LLMs of similar size, while offering significant advantages in computational efficiency, including substantially reduced memory footprint, energy consumption, and decoding latency. To facilitate further research and adoption, the model weights are released via Hugging Face along with open-source inference implementations for both GPU and CPU architectures.
4
+
5
+ BitNet b1.58 2B4T (1.58-bit): bitnet-b1.58-2B-4T
6
+
7
+ The packed weight of BitNet b1.58 2B4T, used for inference only
8
+
9
+ BitNet b1.58 2B4T (bf16): bitnet-b1.58-2B-4T-bf16
10
+
11
+ The master weight of BitNet b1.58 2B4T, used for training only
12
+
13
+ BitNet b1.58 2B4T (gguf): bitnet-b1.58-2B-4T-gguf
14
+
15
+ The GGUF format of BitNet b1.58 2B4T, used for bitnet.cpp
16
+
17
+ BitNet b1.58 2B4T Code: bitnet.cpp Demo: aka.ms/bitnet-demo
18
+
19
+ ![](images/e9b0504f3305e06d140af96f6c0e0d1ce952c56b2f03e24d6adcb32b50b7eb16.jpg)
20
+ Figure 1: BitNet b1.58 2B4T advances the Pareto frontier defined by leading open-weight LLMs under 3B parameters in terms of performance versus memory, demonstrating superior efficiency.
21
+
22
+ # 1 Introduction
23
+
24
+ Open-source large language models (LLMs) have become pivotal in democratizing access to advanced AI capabilities, fostering innovation, and enabling research across diverse fields such as natural language processing, code generation, and vision computing (Dubey et al., 2024; Yang et al., 2024; Bai et al., 2025). Their public availability allows for widespread experimentation and adaptation. However, a significant barrier hinders their broader adoption: the substantial computational resources required for deployment and inference. State-of-the-art open LLMs typically require large memory footprints, consume considerable energy, and exhibit notable inference latency, rendering them impractical for many edge devices, resource-constrained environments, and real-time applications.
25
+
26
+ 1-bit LLMs, representing an extreme yet promising form of model quantization where weights and potentially activations are constrained to binary $\{-1, +1\}$ or ternary $\{-1, 0, +1\}$ values, offer a compelling solution to the efficiency challenges. By drastically reducing the memory required to store weights and enabling highly efficient bitwise computations, they have the potential to significantly lower deployment costs, reduce energy consumption, and accelerate inference speeds. While prior work has explored 1-bit models, existing open efforts often fall into two categories: 1) post-training quantization (PTQ) methods applied to pre-trained full-precision models, which can lead to significant performance degradation (Xu et al., 2024b; Team, 2024), or 2) native 1-bit models (trained from scratch with 1-bit weights) that have been developed at relatively smaller scales (e.g., OLMo-Bitnet-1B) and may not yet match the capabilities of larger, full-precision counterparts. This performance gap has limited the practical impact of 1-bit LLMs thus far.
27
+
28
+ To bridge this gap between efficiency and performance, we introduce BitNet b1.58 2B4T, the first open-source, native 1-bit LLM trained at scale. This model, comprising 2 billion parameters, was trained from scratch on a substantial dataset of 4 trillion tokens, leveraging architectural and training innovations specific to the 1-bit paradigm. The core contribution of this work is to demonstrate that a native 1-bit LLM, when trained effectively at scale, can achieve performance comparable to leading open-weight, full-precision models of similar size across a wide range of tasks.
29
+
30
+ This technical report details the development and evaluation of BitNet b1.58 2B4T. We describe the architecture and training methodology, and then present comprehensive evaluation results on standard benchmarks assessing language understanding, mathematical reasoning, coding proficiency, and multi-turn conversational abilities. Our findings confirm its strong performance relative to established full-precision baselines, coupled with significant advantages in efficiency. Finally, we announce the public release of the BitNet b1.58 2B4T model weights via Hugging Face and provide open-source inference code optimized for both GPU and CPU execution, aiming to facilitate further research and the practical deployment of highly efficient LLMs.
31
+
32
+ # 2 Architecture
33
+
34
+ The architecture of BitNet b1.58 2B4T is derived from the standard Transformer model (Vaswani et al., 2017), incorporating significant modifications based on the BitNet framework (Wang et al., 2023a; Ma et al., 2024). The model is trained entirely from scratch.
35
+
36
+ The core architectural innovation lies in replacing the standard full-precision linear layers (torch.nn.Linear) with custom BitLinear layers. This constitutes the foundation of the BitNet approach. Within these BitLinear layers:
37
+
38
+ - Weight Quantization: Model weights are quantized to 1.58 bits during the forward pass. This is achieved using an absolute mean (absmean) quantization scheme, which maps weights to ternary values $\{-1, 0, +1\}$ (see the sketch after this list). This drastically reduces the model size and enables efficient mathematical operations.
39
+ - Activation Quantization: Activations flowing through the linear projection are quantized to 8-bit integers. This employs an absolute maximum (absmax) quantization strategy, applied per-token.
40
+ - Normalization: We incorporate subln normalization (Wang et al., 2022) to further enhance training stability, which can be particularly beneficial in quantized training regimes.
41
+
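+ The two quantizers above can be sketched in a few lines of PyTorch. This is a minimal illustration, not the training implementation: the function names are ours, and training-time details such as the straight-through estimator are omitted.
+
+ ```python
+ import torch
+
+ def weight_quant_absmean(w: torch.Tensor):
+     # Absmean scheme: scale by the mean absolute value, then round
+     # and clip to the ternary set {-1, 0, +1}.
+     scale = w.abs().mean().clamp(min=1e-5)
+     w_q = (w / scale).round().clamp(-1, 1)
+     return w_q, scale
+
+ def act_quant_absmax(x: torch.Tensor):
+     # Per-token absmax scheme: scale each token vector so its largest
+     # magnitude maps onto the 8-bit integer range.
+     scale = x.abs().amax(dim=-1, keepdim=True).clamp(min=1e-5) / 127.0
+     x_q = (x / scale).round().clamp(-128, 127)
+     return x_q, scale
+ ```
+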
42
+ Beyond the BitLinear layers, several established LLM techniques are integrated to enhance performance and stability:
43
+
44
+ - Activation Function (FFN): Within the feed-forward network (FFN) sub-layers, instead of the commonly used SwiGLU activation (Shazeer, 2020), BitNet b1.58 2B4T employs squared ReLU $(\mathrm{ReLU}^2)$ . This choice is motivated by its potential to improve model sparsity and computational characteristics within the 1-bit context (Wang et al., 2024b,a).
45
+ - **Positional Embeddings:** Rotary Position Embeddings (RoPE) (Su et al., 2024) are used to inject positional information, a standard practice in modern high-performance LLMs.
46
+ - Bias Removal: Consistent with architectures like LLaMA, all bias terms are removed from the linear layers and normalization layers throughout the network, reducing parameter count and potentially simplifying quantization.
47
+
48
+ For tokenization, we adopt the tokenizer developed for LLaMA 3 (Dubey et al., 2024). This tokenizer implements a byte-level Byte-Pair Encoding (BPE) scheme with a vocabulary size of 128,256 tokens. This choice ensures robust handling of diverse text and code, and its widespread adoption facilitates straightforward integration with existing open-source tooling and ecosystems.
49
+
50
+ # 3 Training
51
+
52
+ The training process for BitNet b1.58 2B4T involved three distinct phases: large-scale pre-training followed by supervised fine-tuning (SFT) and direct preference optimization (DPO). While advanced techniques like Proximal Policy Optimization (PPO) or Group Relative Policy Optimization (GRPO) can further enhance capabilities such as mathematics and chain-of-thought reasoning (Schulman et al., 2017; Shao et al., 2024), the current version of BitNet b1.58 2B4T relies solely on pre-training, SFT, and DPO. The exploration of reinforcement learning methods remains a direction for future work.
53
+
54
+ # 3.1 Pre-training
55
+
56
+ The pre-training phase aimed to imbue the model with broad world knowledge and foundational language capabilities. We adapted general training strategies from established LLM practices (Dubey et al., 2024), with specific adjustments tailored for the 1-bit architecture.
57
+
58
+ # 3.1.1 Learning Rate Schedule
59
+
60
+ A two-stage learning rate schedule was employed.
61
+
62
+ 1. **Stage 1 (High Learning Rate):** The initial phase utilized a standard cosine decay schedule but commenced with a relatively high peak learning rate. This decision was informed by the observation that 1-bit models often exhibit greater training stability compared to their full-precision counterparts, allowing for more aggressive initial learning steps.
63
+ 2. **Stage 2 (Cooldown):** Approximately midway through the planned training token count, the learning rate was abruptly decayed and subsequently maintained via a cosine schedule with a significantly lower peak value. This "cooldown" phase allows the model to refine its representations on higher-quality data (see Section 3.1.3).
64
+
65
+ # 3.1.2 Weight Decay Schedule
66
+
67
+ Complementing the learning rate adjustments, a two-stage weight decay strategy was implemented.
68
+
69
+ 1. **Stage 1:** During the first training stage, weight decay followed a cosine schedule, reaching a peak value of 0.1. This regularization helps prevent overfitting during the initial high learning-rate phase.
70
+ 2. **Stage 2:** In the second stage, weight decay was effectively disabled (set to zero). This allows the model parameters to settle into finer-grained optima guided by the lower learning rate and curated data. Both two-stage schedules are sketched in code below.
71
+
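+ The two-stage schedules of Sections 3.1.1 and 3.1.2 can be summarized in the following sketch. The peak learning rates and the shape of the weight-decay ramp are our assumptions; the report only specifies the 0.1 weight-decay peak and the roughly halfway switch point.
+
+ ```python
+ import math
+
+ def cosine(t: float, peak: float, floor: float = 0.0) -> float:
+     # Standard cosine decay from `peak` to `floor` as t runs from 0 to 1.
+     return floor + 0.5 * (peak - floor) * (1 + math.cos(math.pi * t))
+
+ def lr_schedule(step: int, total: int, peak1: float = 1.5e-3, peak2: float = 1e-4) -> float:
+     mid = total // 2
+     if step < mid:  # Stage 1: cosine decay from a high (assumed) peak
+         return cosine(step / mid, peak1)
+     # Stage 2: abrupt drop, then cosine decay from a much lower peak
+     return cosine((step - mid) / (total - mid), peak2)
+
+ def wd_schedule(step: int, total: int, peak_wd: float = 0.1) -> float:
+     mid = total // 2
+     if step >= mid:  # Stage 2: weight decay disabled
+         return 0.0
+     # Stage 1: cosine schedule reaching the reported 0.1 peak (ramp shape assumed).
+     return peak_wd * 0.5 * (1 - math.cos(math.pi * step / mid))
+ ```
+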
72
+ # 3.1.3 Pre-training Data
73
+
74
+ The pre-training corpus comprised a mixture of publicly available text and code datasets, including large web crawls like DCLM (Li et al., 2024b) and educational web pages like FineWeb-EDU (Penedo et al., 2024). To enhance mathematical reasoning abilities, we also incorporated synthetically generated mathematical data. The data presentation strategy aligned with the two-stage training: the bulk of general web data was processed during Stage 1, while higher-quality curated datasets were emphasized during the Stage 2 cooldown phase, coinciding with the reduced learning rate.
75
+
76
+ # 3.2 Supervised Fine-tuning (SFT)
77
+
78
+ Following pre-training, the model underwent supervised fine-tuning (SFT) to enhance its instruction-following capabilities and improve its performance in conversational interaction formats.
79
+
80
+ # 3.2.1 SFT Data
81
+
82
+ The SFT phase utilized a diverse collection of publicly available instruction-following and conversational datasets. These included, but were not limited to, WildChat (Zhao et al., 2024), LMSYS-Chat1M (Zheng et al., 2024), WizardLM Evol-Instruct (Xu et al., 2024a), and SlimOrca (Lian et al., 2023). To further bolster specific capabilities, particularly in reasoning and complex instruction adherence, we supplemented these with synthetic datasets generated using methodologies like GLAN (Li et al., 2024a) and MathScale (Tang et al., 2024).
83
+
84
+ # 3.2.2 Chat Template
85
+
86
+ For conversational tasks during SFT and inference, the following chat template structure was employed:
87
+
88
+ ```txt
89
+ <|begin_of_text|>System: {system_message}<|eot_id|>
90
+ User: {user_message_1}<|eot_id|>
91
+ Assistant: {assistant_message_1}<|eot_id|>
92
+ User: {user_message_2}<|eot_id|>
93
+ Assistant: {assistant_message_2}<|eot_id|>...
94
+ ```
95
+
96
+ # 3.2.3 Optimization Details
97
+
98
+ Several optimization choices were key during SFT:
99
+
100
+ - Loss Aggregation: Instead of averaging the cross-entropy loss across tokens within a batch (mean reduction), we employed summation (see the sketch after this list). Empirically, we observed that summing the losses led to improved convergence and better final performance for this model.
101
+ - Hyperparameter Tuning: Careful tuning of the learning rate and the number of training epochs was performed. Consistent with our pre-training findings, the 1-bit model benefited from a relatively larger learning rate during SFT compared to typical full-precision model fine-tuning. Furthermore, achieving optimal convergence required extending the fine-tuning duration over a larger number of epochs than full-precision models of similar size.
102
+
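+ The summation-based aggregation amounts to a one-argument change in PyTorch; the helper below is a hypothetical sketch using the usual language-modeling shapes.
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def sft_loss(logits: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
+     # logits: (batch, seq, vocab); labels: (batch, seq) of token ids.
+     # reduction="sum" (instead of the default "mean") makes the gradient
+     # scale with the token count, which empirically converged better here.
+     return F.cross_entropy(
+         logits.reshape(-1, logits.size(-1)), labels.reshape(-1), reduction="sum"
+     )
+ ```
+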
103
+ # 3.3 Direct Preference Optimization (DPO)
104
+
105
+ To further align the model's behavior with human preferences regarding helpfulness and safety, we applied Direct Preference Optimization (DPO) (Rafailov et al., 2023) following the SFT phase. DPO offers an efficient alternative to traditional RLHF by directly optimizing the language model using preference data, thereby circumventing the need to train a separate reward model. This DPO stage served to refine the model's conversational prowess and overall alignment with desired interaction patterns in practical use cases.
106
+
107
+ # 3.3.1 Training Data
108
+
109
+ The preference dataset used for DPO training was constructed from a combination of publicly available resources recognized for capturing diverse human judgments on model outputs. Specifically, we utilized UltraFeedback (Cui et al., 2024) and MagPie (Xu et al., 2024c). The aggregation of these datasets provided a robust and multifaceted preference signal, guiding the model towards generating responses more aligned with human expectations.
112
+
113
+ # 3.3.2 Training Details
114
+
115
+ The DPO training phase was conducted for 2 epochs. We employed a learning rate of $2 \times 10^{-7}$ and set the DPO beta parameter, which controls the divergence from the reference policy, to 0.1. To enhance training efficiency during this phase, we integrated optimized kernels from the Liger Kernel library (Hsu et al., 2024). Qualitatively, our observations indicate that the DPO process effectively steered the model towards preferred response styles without inducing significant degradation in the core capabilities established during pre-training and SFT.
116
+
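+ For reference, the standard DPO objective being optimized can be written as a short sketch with the reported beta. Inputs are summed log-probabilities of the chosen and rejected responses under the policy and the frozen reference model; the function itself is our simplification, not the training code.
+
+ ```python
+ import torch.nn.functional as F
+
+ def dpo_loss(pi_chosen, pi_rejected, ref_chosen, ref_rejected, beta: float = 0.1):
+     # beta = 0.1 matches the divergence-control setting reported above.
+     margin = (pi_chosen - ref_chosen) - (pi_rejected - ref_rejected)
+     return -F.logsigmoid(beta * margin).mean()
+ ```
+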
117
+ # 4 Evaluation
118
+
119
+ We measure performance on a wide variety of benchmarks classified as follows:
120
+
121
+ - Language understanding and reasoning: ARC-Easy (Yadav et al., 2019), ARC-Challenge (Yadav et al., 2019), HellaSwag (Zellers et al., 2019), WinoGrande (Sakaguchi et al., 2020), PIQA (Bisk et al., 2019), OpenbookQA (Mihaylov et al., 2018), and CommonsenseQA (Talmor et al., 2019)
122
+ - World knowledge: TruthfulQA (Lin et al., 2022) and MMLU (Hendrycks et al., 2021a)
123
+ - Reading comprehension: TriviaQA (Joshi et al., 2017) and BoolQ (Clark et al., 2019)
124
+ - Math and code: GSM8K (Cobbe et al., 2021), MATH-500 (Hendrycks et al., 2021b) and HumanEval+ (Liu et al., 2023)
125
+ - Instruction following and conversation: IFEval (Zhou et al., 2023) and MT-bench (Zheng et al., 2023)
126
+
127
+ We compare BitNet b1.58 2B4T with leading open-weight full-precision LLMs of similar size, including LLaMA 3.2 1B (Dubey et al., 2024), Gemma-3 1B (Team et al., 2025), Qwen2.5 1.5B (Yang et al., 2024), SmolLM2 1.7B (Allal et al., 2025) and MiniCPM 2B (Hu et al., 2024). All models are instruction-tuned versions. We re-run all benchmarks with a public evaluation pipeline for a fair comparison. More evaluation details are available in the appendix. The main results are presented in Table 1.
128
+
129
+ # 4.1 Main Results
130
+
131
+ As shown in Table 1, BitNet b1.58 2B4T demonstrates remarkable resource efficiency. Its non-embedding memory footprint and estimated energy consumption (Horowitz, 2014; Zhang et al., 2022) during decoding are substantially lower compared to all the full-precision models evaluated, highlighting a significant advantage in operational cost and deployability on resource-constrained devices.
132
+
133
+ In terms of task performance, BitNet b1.58 2B4T proves highly competitive. It achieves the best results among the compared models on several benchmarks spanning reasoning, knowledge, and math capabilities. On other benchmarks, its performance is closely comparable to the top-performing full-precision models. While some full-precision models show slight advantages on specific tasks or the overall average, BitNet b1.58 2B4T delivers strong performance across the board. The results indicate that BitNet b1.58 2B4T achieves capabilities nearly on par with leading models in its size class while offering dramatically improved efficiency.
134
+
135
+ # 4.2 Comparison with Post-training Quantized Models
136
+
137
+ We further investigate the efficiency-performance trade-off by comparing BitNet b1.58 2B4T against post-training quantized (PTQ) versions of a leading competitor, Qwen2.5 1.5B, using standard INT4 methods (GPTQ and AWQ). The results are summarized in Table 2.
138
+
139
+ While INT4 quantization successfully reduces the memory footprint of the full-precision model, BitNet b1.58 2B4T achieves an even lower memory requirement due to its native 1-bit architecture.
140
+
141
+ <table><tr><td>Benchmark (Metric)</td><td>LLaMA 3.2 1B</td><td>Gemma-3 1B</td><td>Qwen2.5 1.5B</td><td>SmolLM2 1.7B</td><td>MiniCPM 2B</td><td>BitNet b1.58 2B</td></tr><tr><td>Memory (Non-emb)</td><td>2GB</td><td>1.4GB</td><td>2.6GB</td><td>3.2GB</td><td>4.8GB</td><td>0.4GB</td></tr><tr><td>Latency (CPU; TPOT)</td><td>48ms</td><td>41ms</td><td>65ms</td><td>67ms</td><td>124ms</td><td>29ms</td></tr><tr><td>Energy (Estimated)</td><td>0.258J</td><td>0.186J</td><td>0.347J</td><td>0.425J</td><td>0.649J</td><td>0.028J</td></tr><tr><td>Training Tokens (Pre-training)</td><td>9T (pruning &amp; distillation)</td><td>2T (distillation)</td><td>18T</td><td>11T</td><td>1.1T</td><td>4T</td></tr><tr><td>ARC-Challenge (0-shot; Acc,norm)</td><td>37.80</td><td>38.40</td><td>46.67</td><td>43.52</td><td>44.80</td><td>49.91</td></tr><tr><td>ARC-Easy (0-shot; Acc,norm)</td><td>63.17</td><td>63.13</td><td>76.01</td><td>62.92</td><td>72.14</td><td>74.79</td></tr><tr><td>OpenbookQA (0-shot; Acc,norm)</td><td>34.80</td><td>38.80</td><td>40.80</td><td>46.00</td><td>40.20</td><td>41.60</td></tr><tr><td>BoolQ (0-shot; Acc)</td><td>64.65</td><td>74.22</td><td>78.04</td><td>75.78</td><td>80.67</td><td>80.18</td></tr><tr><td>HellaSwag (0-shot; Acc,norm)</td><td>60.80</td><td>57.69</td><td>68.28</td><td>71.71</td><td>70.81</td><td>68.44</td></tr><tr><td>PIQA (0-shot; Acc,norm)</td><td>74.21</td><td>71.93</td><td>76.12</td><td>76.12</td><td>76.66</td><td>77.09</td></tr><tr><td>WinoGrande (0-shot; Acc)</td><td>59.51</td><td>58.48</td><td>62.83</td><td>68.98</td><td>61.80</td><td>71.90</td></tr><tr><td>CommonsenseQA (10-shot; Acc)</td><td>58.48</td><td>42.10</td><td>76.41</td><td>63.55</td><td>71.74</td><td>71.58</td></tr><tr><td>TruthfulQA (10-shot; MC2)</td><td>43.80</td><td>38.66</td><td>46.67</td><td>39.90</td><td>41.41</td><td>45.31</td></tr><tr><td>TriviaQA (5-shot; EM)</td><td>37.60</td><td>23.49</td><td>38.37</td><td>45.97</td><td>34.13</td><td>33.57</td></tr><tr><td>MMLU (5-shot; Acc)</td><td>45.58</td><td>39.91</td><td>60.25</td><td>49.24</td><td>51.82</td><td>53.17</td></tr><tr><td>HumanEval+ (0-shot; Pass@1)</td><td>31.10</td><td>37.20</td><td>50.60</td><td>28.00</td><td>43.90</td><td>38.40</td></tr><tr><td>GSM8K (4-shot; EM)</td><td>38.21</td><td>31.16</td><td>56.79</td><td>45.11</td><td>4.40</td><td>58.38</td></tr><tr><td>MATH-500 (0-shot; EM)</td><td>23.00</td><td>42.00</td><td>53.00</td><td>17.60</td><td>14.80</td><td>43.40</td></tr><tr><td>IFEval (0-shot; Instruct-Strict)</td><td>62.71</td><td>66.67</td><td>50.12</td><td>57.91</td><td>36.81</td><td>53.48</td></tr><tr><td>MT-bench (0-shot; Average)</td><td>5.43</td><td>6.40</td><td>6.12</td><td>5.50</td><td>6.57</td><td>5.85</td></tr><tr><td>Average</td><td>44.90</td><td>43.74</td><td>55.23</td><td>48.70</td><td>42.05</td><td>54.19</td></tr></table>
142
+
143
+ Table 1: Comparison of BitNet b1.58 2B4T with leading open-weight full-precision LLMs of similar size (1B-2B parameters) on efficiency metrics and performance across a wide range of benchmarks. All models compared are instruction-tuned versions.
144
+
145
+ More importantly, this superior memory efficiency does not compromise performance relative to the quantized models. Standard PTQ techniques lead to a noticeable degradation in performance compared to the original full-precision model. In contrast, BitNet b1.58 2B4T maintains stronger overall performance than the INT4 quantized versions of Qwen2.5-1.5B on the evaluated benchmarks. This comparison suggests that BitNet b1.58 2B4T represents a more favorable point on the efficiency-performance curve than applying conventional INT4 PTQ to existing architectures, offering better performance with lower resource usage.
146
+
147
+ <table><tr><td rowspan="2">Benchmark (Metric)</td><td colspan="3">Qwen2.5</td><td>BitNet b1.58</td></tr><tr><td>1.5B-bf16</td><td>1.5B-GPTQ-int4</td><td>1.5B-AWQ-int4</td><td>2B</td></tr><tr><td>Memory
148
+ (Non-emb)</td><td>2.6GB</td><td>0.7GB</td><td>0.7GB</td><td>0.4GB</td></tr><tr><td>Activation</td><td>bf16</td><td>bf16</td><td>bf16</td><td>int8</td></tr><tr><td>MMLU
149
+ (5-shot; Acc)</td><td>60.25</td><td>58.06</td><td>57.43</td><td>53.17</td></tr><tr><td>GSM8K
150
+ (4-shot; EM)</td><td>56.79</td><td>50.57</td><td>50.64</td><td>58.38</td></tr><tr><td>IFEval
151
+ (0-shot; Instruct-Strict)</td><td>50.12</td><td>47.84</td><td>45.44</td><td>53.48</td></tr><tr><td>Average</td><td>55.72</td><td>52.15</td><td>51.17</td><td>55.01</td></tr></table>
152
+
153
+ Table 2: Comparison of BitNet b1.58 (2B) against Qwen2.5 1.5B in its original bf16 precision and after INT4 post-training quantization (GPTQ and AWQ). All models shown are based on instruction-tuned checkpoints.
154
+
155
+ <table><tr><td>Benchmark (Metric)</td><td>Bonsai 0.5B</td><td>OLMo-Bitnet 1B</td><td>Falcon3-1.58bit 7B</td><td>Llama3-8B-1.58 8B</td><td>BitNet b1.58 2B</td></tr><tr><td>Native 1-bit</td><td>✓</td><td>✓</td><td>✘</td><td>✘</td><td>✓</td></tr><tr><td>ARC-Challenge (0-shot; Acc,norm)</td><td>33.19</td><td>26.54</td><td>37.80</td><td>43.69</td><td>49.91</td></tr><tr><td>ARC-Easy (0-shot; Acc,norm)</td><td>58.25</td><td>25.38</td><td>65.03</td><td>70.71</td><td>74.79</td></tr><tr><td>OpenbookQA (0-shot; Acc,norm)</td><td>33.60</td><td>28.20</td><td>38.20</td><td>37.20</td><td>41.60</td></tr><tr><td>BoolQ (0-shot; Acc)</td><td>58.44</td><td>52.48</td><td>72.14</td><td>68.38</td><td>80.18</td></tr><tr><td>HellaSwag (0-shot; Acc,norm)</td><td>48.01</td><td>25.88</td><td>59.46</td><td>68.56</td><td>68.44</td></tr><tr><td>PIQA (0-shot; Acc,norm)</td><td>70.02</td><td>50.49</td><td>72.36</td><td>75.30</td><td>77.09</td></tr><tr><td>WinoGrande (0-shot; Acc)</td><td>54.46</td><td>51.54</td><td>60.14</td><td>60.93</td><td>71.90</td></tr><tr><td>CommonsenseQA (10-shot; Acc)</td><td>18.43</td><td>19.49</td><td>67.08</td><td>28.50</td><td>71.58</td></tr><tr><td>TruthfulQA (10-shot; MC2)</td><td>40.65</td><td>49.05</td><td>43.29</td><td>39.13</td><td>45.31</td></tr><tr><td>TriviaQA (5-shot; EM)</td><td>10.84</td><td>0.00</td><td>0.00</td><td>19.82</td><td>33.57</td></tr><tr><td>MMLU (5-shot; Acc)</td><td>25.74</td><td>25.47</td><td>42.79</td><td>35.04</td><td>53.17</td></tr><tr><td>Average</td><td>41.06</td><td>32.22</td><td>50.76</td><td>49.75</td><td>60.68</td></tr></table>
156
+
157
+ Table 3: Performance comparison of BitNet b1.58 2B4T against other open-weight 1-bit models. This includes natively trained 1-bit models (Bonsai-0.5B, OLMo-Bitnet-1B) and larger models post-training quantized to 1.58-bit (Falcon3-1.58bit-7B, Llama3-8B-1.58).
158
+
159
+ # 4.3 Comparison with Open-weight 1-bit Models
160
+
161
+ Finally, we situate BitNet b1.58 2B4T within the landscape of other models designed for or quantized to near 1-bit precision. We compare it against natively trained 1-bit models of smaller scale and significantly larger models post-training quantized to 1.58-bit precision. The comparative results are presented in Table 3.
162
+
163
+ The evaluation clearly positions BitNet b1.58 2B4T as the leading model in this category. It demonstrates significantly stronger overall performance than all other compared 1-bit models, achieving the highest scores on the vast majority of benchmarks. Notably, BitNet b1.58 2B4T substantially outperforms not only the smaller, natively trained 1-bit models but also the much larger models (in terms of parameter count) that were quantized to 1-bit. This highlights the effectiveness of the native training approach employed by BitNet b1.58 2B4T, allowing it to set a new state-of-the-art performance level for models operating at this extreme level of quantization, even surpassing larger models subjected to post-training quantization.
166
+
167
+ # 5 Inference Implementation
168
+
169
+ Efficient inference is crucial for deploying Large Language Models, particularly for resource-constrained environments. The unique quantization scheme of BitNet b1.58 2B4T, employing 1.58-bit weights and 8-bit activations (W1.58A8), necessitates specialized implementations, as standard deep learning libraries often lack optimized kernels for such mixed-precision, low-bit formats. To address this, we developed and open-sourced dedicated inference libraries for both GPU and CPU platforms. The code is publicly available at https://aka.ms/bitnet.
170
+
171
+ # 5.1 GPU Inference
172
+
173
+ Current GPU architectures and their associated software libraries (e.g., cuBLAS, PyTorch kernels) are primarily optimized for operations involving standard data types like FP16, BF16, and INT8/INT4. Native, high-performance support for the specific W1.58A8 matrix multiplication required by BitNet b1.58 2B4T is generally unavailable. This limitation can hinder the realization of the theoretical efficiency gains offered by 1-bit models on existing hardware.
174
+
175
+ To enable efficient GPU inference, we developed a custom CUDA kernel specifically designed for the W1.58A8 matrix multiplication. Since ternary weights ($\{-1, 0, +1\}$, representing 1.58 bits) cannot be stored efficiently using standard data types, we pack multiple weight values into a single 8-bit integer ('int8') for storage in High Bandwidth Memory (HBM). Specifically, four ternary values are encoded into one 'int8' value. During computation, the CUDA kernel loads the packed 'int8' weights from HBM into the GPU's faster on-chip Shared Memory (SRAM). It then unpacks these values back into a representation suitable for efficient ternary computation (e.g., reconstructing the -1, 0, +1 values) immediately before performing the matrix multiplication with the 8-bit activations. This 'pack-store-load-unpack-compute' strategy minimizes memory bandwidth usage while leveraging custom compute instructions. Further implementation details and optimization strategies are elaborated in the Ladder framework (Wang et al., 2023b).
176
+
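+ The packing scheme can be illustrated with a short sketch. The actual CUDA kernel's bit layout may differ; note that two bits per value carries a slight overhead over the 1.58-bit ideal.
+
+ ```python
+ import torch
+
+ def pack_ternary(w_q: torch.Tensor) -> torch.Tensor:
+     # w_q holds ternary values {-1, 0, +1}; shift to {0, 1, 2} and pack
+     # four 2-bit fields into each byte for storage in HBM.
+     assert w_q.numel() % 4 == 0
+     u = (w_q + 1).to(torch.uint8).reshape(-1, 4)
+     return u[:, 0] | (u[:, 1] << 2) | (u[:, 2] << 4) | (u[:, 3] << 6)
+
+ def unpack_ternary(packed: torch.Tensor) -> torch.Tensor:
+     # Inverse step, performed on-chip just before the matmul.
+     vals = torch.stack([(packed >> s) & 3 for s in (0, 2, 4, 6)], dim=-1)
+     return vals.to(torch.int8).reshape(-1) - 1
+ ```
+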
177
+ While our custom kernel significantly improves performance compared to naive implementations, we note that current commodity GPU architectures are not optimally designed for 1-bit models. We believe that future hardware innovations, potentially incorporating dedicated logic for low-bit operations, will be essential to fully unlock the performance and energy efficiency potential of models like BitNet b1.58.
178
+
179
+ # 5.2 CPU Inference
180
+
181
+ To ensure broad accessibility and enable deployment on devices lacking powerful GPUs (e.g., edge devices, laptops, standard servers), we developed bitnet.cpp. This C++ library serves as an official reference implementation for CPU inference of 1-bit LLMs, including BitNet b1.58.
182
+
183
+ bitnet.cpp provides optimized kernels tailored for efficient execution on standard CPU architectures. The kernels are designed to operate efficiently with the model's specific quantization scheme, avoiding the overhead of generic quantization libraries or intricate low-level bit manipulation where possible. It processes the weight elements in a manner consistent with the BitNet b1.58 training methodology, ensuring numerical accuracy (lossless inference relative to the training procedure).
184
+
185
+ This approach delivers fast and accurate inference of 1.58-bit models directly on CPUs. More technical details and usage instructions can be found in the bitnet.cpp repository and associated technical report (Wang et al., 2025).
186
+
187
+ # 6 Conclusion
188
+
189
+ This technical report introduced BitNet b1.58 2B4T, a significant step towards highly efficient yet capable Large Language Models. As the first open-source, native 1-bit LLM trained at the 2-billion parameter scale on 4 trillion tokens, our work demonstrates the viability of extreme quantization directly within the training process.
190
+
191
+ Comprehensive evaluations across benchmarks assessing language understanding, reasoning, mathematics, coding, and dialogue revealed that BitNet b1.58 2B4T achieves performance comparable to state-of-the-art open-weight, full-precision models of similar size. Crucially, this performance parity is achieved with dramatically reduced computational requirements, offering substantial savings in memory footprint, energy consumption, and inference latency. To facilitate practical use and further research, we developed and released optimized inference implementations for both GPU (via custom CUDA kernels) and CPU (via the 'bitnet.cpp' library), alongside the model weights available on Hugging Face.
192
+
193
+ BitNet b1.58 2B4T represents a compelling proof-of-concept that challenges the necessity of full-precision weights for achieving high performance in LLMs at scale. It opens avenues for deploying powerful language models in resource-constrained environments where previous models were prohibitive, potentially democratizing access to advanced AI capabilities.
194
+
195
+ # 7 Future Directions
196
+
197
+ While BitNet b1.58 2B4T demonstrates promising results, several exciting research directions remain:
198
+
199
+ - Scaling Laws and Larger Models: Investigating the scaling properties of native 1-bit LLMs is crucial. Future work will explore training larger models (e.g., 7B, 13B parameters and beyond) and training on even larger datasets to understand if the performance parity with full-precision models holds.
200
+ - Hardware Co-Design and Optimization: The full potential of 1-bit models is likely hindered by current hardware limitations. Continued development of highly optimized kernels for existing hardware (GPUs, CPUs, NPUs) is needed. Furthermore, co-designing future hardware accelerators specifically optimized for 1-bit computations and data movement could unlock orders-of-magnitude improvements in speed and energy efficiency.
201
+ - Extended Sequence Length: Extending the maximum sequence length that BitNet b1.58 2B4T can process is crucial. This enhancement is vital for tasks demanding long-context understanding, such as summarizing lengthy documents or engaging in complex problem-solving, and is particularly critical for improving performance on long chain-of-thought reasoning tasks. Investigating efficient attention mechanisms suitable for low-bit models at longer sequence lengths will be key.
202
+ - Multilingual Capabilities: The current model is primarily trained on English-centric data. Extending the pre-training corpus and potentially adapting the architecture to effectively support multiple languages is a key direction for broader applicability.
203
+ - Multimodal Integration: Exploring the integration of 1-bit principles into multimodal architectures is another promising frontier. Developing efficient ways to process and fuse information from different modalities (e.g., text and images) within a low-bit framework could enable new applications.
204
+ - Theoretical Understanding: Delving deeper into the theoretical underpinnings of why 1-bit training at scale is effective remains an open area. Analyzing the learning dynamics, loss landscapes, and representational properties of these models could yield valuable insights for future development.
205
+
206
+ By pursuing these directions, we aim to further advance the capability and efficiency of 1-bit LLMs, paving the way for more sustainable and accessible artificial intelligence. The open-source release of BitNet b1.58 2B4T and its associated tools provides a foundation for the community to build upon these efforts.
207
+
208
+ # References
209
+
210
+ Allal, L. B., Lozhkov, A., Bakouch, E., Blázquez, G. M., Penedo, G., Tunstall, L., Marafioti, A., Kydlíček, H., Lajarín, A. P., Srivastav, V., Lochner, J., Fahlgren, C., Nguyen, X.-S., Fourrier, C., Burtenshaw, B., Larcher, H., Zhao, H., Zakka, C., Morlon, M., Raffel, C., von Werra, L., and Wolf, T. (2025). Smollm2: When smol goes big - data-centric training of a small language model. CoRR, abs/2502.02737.
211
+ Bai, S., Chen, K., Liu, X., Wang, J., Ge, W., Song, S., Dang, K., Wang, P., Wang, S., Tang, J., Zhong, H., Zhu, Y., Yang, M.-H., Li, Z., Wan, J., Wang, P., Ding, W., Fu, Z., Xu, Y., Ye, J., Zhang, X., Xie, T., Cheng, Z., Zhang, H., Yang, Z., Xu, H., and Lin, J. (2025). Qwen2.5-vl technical report. CoRR, abs/2502.13923.
212
+ Bisk, Y., Zellers, R., Bras, R. L., Gao, J., and Choi, Y. (2019). PIQA: reasoning about physical commonsense in natural language. CoRR, abs/1911.11641.
213
+ Clark, C., Lee, K., Chang, M.-W., Kwiatkowski, T., Collins, M., and Toutanova, K. (2019). Boolq: Exploring the surprising difficulty of natural yes/no questions. CoRR, abs/1905.10044.
214
+ Cobbe, K., Kosaraju, V., Bavarian, M., Chen, M., Jun, H., Kaiser, L., Plappert, M., Tworek, J., Hilton, J., Nakano, R., Hesse, C., and Schulman, J. (2021). Training verifiers to solve math word problems. CoRR, abs/2110.14168.
215
+ Cui, G., Yuan, L., Ding, N., Yao, G., He, B., Zhu, W., Ni, Y., Xie, G., Xie, R., Lin, Y., Liu, Z., and Sun, M. (2024). ULTRAFEEDBACK: boosting language models with scaled AI feedback. In ICML. OpenReview.net.
216
+ Dubey, A., Jauhri, A., Pandey, A., Kadian, A., Al-Dahle, A., Letman, A., Mathur, A., Schelten, A., Yang, A., Fan, A., Goyal, A., Hartshorn, A., Yang, A., Mitra, A., Sravankumar, A., Korenev, A., Hinsvark, A., Rao, A., Zhang, A., Rodriguez, A., Gregerson, A., Spataru, A., Rozière, B., Biron, B., Tang, B., Chern, B., Caucheteux, C., Nayak, C., Bi, C., Marra, C., McConnell, C., Keller, C., Touret, C., Wu, C., Wong, C., Ferrer, C. C., Nikolaidis, C., Allonsius, D., Song, D., Pintz, D., Livshits, D., Esiobu, D., Choudhary, D., Mahajan, D., Garcia-Olano, D., Perino, D., Hupkes, D., Lakomkin, E., AlBadawy, E., Lobanova, E., Dinan, E., Smith, E. M., Radenovic, F., Zhang, F., Synnaeve, G., Lee, G., Anderson, G. L., Nail, G., Mialon, G., Pang, G., Cucurell, G., Nguyen, H., Korevaar, H., Xu, H., Touvron, H., Zarov, I., Ibarra, I. A., Kloumann, I. M., Misra, I., Evtimov, I., Copet, J., Lee, J., Geffert, J., Vranes, J., Park, J., Mahadeokar, J., Shah, J., van der Linde, J., Billock, J., Hong, J., Lee, J., Fu, J., Chi, J., Huang, J., Liu, J., Wang, J., Yu, J., Bitton, J., Spisak, J., Park, J., Rocca, J., Johnstun, J., Saxe, J., Jia, J., Alwala, K. V., Upasani, K., Plawiak, K., Li, K., Heafield, K., Stone, K., and et al. (2024). The llama 3 herd of models. CoRR, abs/2407.21783.
217
+ Hendrycks, D., Burns, C., Basart, S., Zou, A., Mazeika, M., Song, D., and Steinhardt, J. (2021a). Measuring massive multitask language understanding. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021.
218
+ Hendrycks, D., Burns, C., Kadavath, S., Arora, A., Basart, S., Tang, E., Song, D., and Steinhardt, J. (2021b). Measuring mathematical problem solving with the MATH dataset. In Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks 1, NeurIPS Datasets and Benchmarks 2021, December 2021, virtual.
219
+ Horowitz, M. (2014). 1.1 computing's energy problem (and what we can do about it). In 2014 IEEE International Conference on Solid-State Circuits Conference, ISSCC 2014, Digest of Technical Papers, San Francisco, CA, USA, February 9-13, 2014, pages 10-14.
220
+ Hsu, P.-L., Dai, Y., Kothapalli, V., Song, Q., Tang, S., Zhu, S., Shimizu, S., Sahni, S., Ning, H., and Chen, Y. (2024). Liger kernel: Efficient triton kernels for LLM training. CoRR, abs/2410.10989.
221
+ Hu, S., Tu, Y., Han, X., He, C., Cui, G., Long, X., Zheng, Z., Fang, Y., Huang, Y., Zhao, W., Zhang, X., Thai, Z. L., Zhang, K., Wang, C., Yao, Y., Zhao, C., Zhou, J., Cai, J., Zhai, Z., Ding, N., Jia, C., Zeng, G., Li, D., Liu, Z., and Sun, M. (2024). Minicpm: Unveiling the potential of small language models with scalable training strategies. CoRR, abs/2404.06395.
222
+
223
+ Joshi, M., Choi, E., Weld, D. S., and Zettlemoyer, L. (2017). Triviaqa: A large scale distantly supervised challenge dataset for reading comprehension. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics, ACL 2017, Vancouver, Canada, July 30 - August 4, Volume 1: Long Papers, pages 1601-1611.
224
+ Li, H., Dong, Q., Tang, Z., Wang, C., Zhang, X., Huang, H., Huang, S., Huang, X., Huang, Z., Zhang, D., Gu, Y., Cheng, X., Wang, X., Chen, S.-Q., Dong, L., Lu, W., Sui, Z., Wang, B., Lam, W., and Wei, F. (2024a). Synthetic data (almost) from scratch: Generalized instruction tuning for language models. CoRR, abs/2402.13064.
225
+ Li, J., Fang, A., Smyrnis, G., Ivgi, M., Jordan, M., Gadre, S. Y., Bansal, H., Guha, E., Keh, S. S., Arora, K., Garg, S., Xin, R., Muennighoff, N., Heckel, R., Mercat, J., Chen, M. F., Gururangan, S., Wortsman, M., Albalak, A., Bitton, Y., Nezhurina, M., Abbas, A., Hsieh, C.-Y., Ghosh, D., Gardner, J., Kilian, M., Zhang, H., Shao, R., Pratt, S. M., Sanyal, S., Ilharco, G., Daras, G., Marathe, K., Gokaslan, A., Zhang, J., Chandu, K. R., Nguyen, T., Vasiljevic, I., Kakade, S. M., Song, S., Sanghavi, S., Faghri, F., Oh, S., Zettlemoyer, L., Lo, K., El-Nouby, A., Pouransari, H., Toshev, A., Wang, S., Groeneveld, D., Soldaini, L., Koh, P. W., Jitsev, J., Kollar, T., Dimakis, A., Carmon, Y., Dave, A., Schmidt, L., and Shankar, V. (2024b). Datacomp-lm: In search of the next generation of training sets for language models. In Globersons, A., Mackey, L., Belgrave, D., Fan, A., Paquet, U., Tomczak, J. M., and Zhang, C., editors, Advances in Neural Information Processing Systems 38: Annual Conference on Neural Information Processing Systems 2024, NeurIPS 2024, Vancouver, BC, Canada, December 10 - 15, 2024.
226
+ Lian, W., Wang, G., Goodson, B., Pentland, E., Cook, A., Vong, C., and "Teknium" (2023). Slimorca: An open dataset of gpt-4 augmented flan reasoning traces, with verification.
227
+ Lin, S., Hilton, J., and Evans, O. (2022). Truthfulqa: Measuring how models mimic human falsehoods. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), ACL 2022, Dublin, Ireland, May 22-27, 2022, pages 3214-3252.
228
+ Liu, J., Xia, C. S., Wang, Y., and Zhang, L. (2023). Is your code generated by chatgpt really correct? rigorous evaluation of large language models for code generation. Advances in Neural Information Processing Systems, 36:21558-21572.
229
+ Ma, S., Wang, H., Ma, L., Wang, L., Wang, W., Huang, S., Dong, L., Wang, R., Xue, J., and Wei, F. (2024). The era of 1-bit llms: All large language models are in 1.58 bits. CoRR, abs/2402.17764.
230
+ Mihaylov, T., Clark, P., Khot, T., and Sabharwal, A. (2018). Can a suit of armor conduct electricity? A new dataset for open book question answering. CoRR, abs/1809.02789.
231
+ Penedo, G., Kydlícek, H., Allal, L. B., Lozhkov, A., Mitchell, M., Raffel, C. A., von Werra, L., and Wolf, T. (2024). The fineweb datasets: Decanting the web for the finest text data at scale. In Globersons, A., Mackey, L., Belgrave, D., Fan, A., Paquet, U., Tomczak, J. M., and Zhang, C., editors, Advances in Neural Information Processing Systems 38: Annual Conference on Neural Information Processing Systems 2024, NeurIPS 2024, Vancouver, BC, Canada, December 10 - 15, 2024.
232
+ Rafailov, R., Sharma, A., Mitchell, E., Manning, C. D., Ermon, S., and Finn, C. (2023). Direct preference optimization: Your language model is secretly a reward model. In Oh, A., Naumann, T., Globerson, A., Saenko, K., Hardt, M., and Levine, S., editors, Advances in Neural Information Processing Systems 36.
233
+ Sakaguchi, K., Bras, R. L., Bhagavatula, C., and Choi, Y. (2020). WinoGrande: an adversarial winograd schema challenge at scale. In AAAI, pages 8732-8740.
234
+ Schulman, J., Wolski, F., Dhariwal, P., Radford, A., and Klimov, O. (2017). Proximal policy optimization algorithms. CoRR, abs/1707.06347.
235
+ Shao, Z., Wang, P., Zhu, Q., Xu, R., Song, J., Zhang, M., Li, Y. K., Wu, Y., and Guo, D. (2024). Deepseekmath: Pushing the limits of mathematical reasoning in open language models. CoRR, abs/2402.03300.
236
+ Shazeer, N. (2020). GLU variants improve transformer. CoRR, abs/2002.05202.
237
+
238
+ Su, J., Ahmed, M. H. M., Lu, Y., Pan, S., Bo, W., and Liu, Y. (2024). Roformer: Enhanced transformer with rotary position embedding. Neurocomputing, 568:127063.
239
+ Talmor, A., Herzig, J., Lourie, N., and Berant, J. (2019). Commonsenseqa: A question answering challenge targeting commonsense knowledge. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL-HLT 2019, Minneapolis, MN, USA, June 2-7, 2019, Volume 1 (Long and Short Papers), pages 4149-4158.
240
+ Tang, Z., Zhang, X., Wang, B., and Wei, F. (2024). Mathscale: Scaling instruction tuning for mathematical reasoning. In *Forty-first International Conference on Machine Learning*, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net.
241
+ Team, F.-L. (2024). The falcon 3 family of open models.
242
+ Team, G., Kamath, A., Ferret, J., Pathak, S., Vieillard, N., Merhej, R., Perrin, S., Matejovicova, T., Ramé, A., Rivière, M., et al. (2025). Gemma 3 technical report. arXiv preprint arXiv:2503.19786.
243
+ Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., Kaiser, L., and Polosukhin, I. (2017). Attention is all you need. In Guyon, I., von Luxburg, U., Bengio, S., Wallach, H. M., Fergus, R., Vishwanathan, S. V. N., and Garnett, R., editors, Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, pages 5998-6008.
244
+ Wang, H., Ma, S., Dong, L., Huang, S., Wang, H., Ma, L., Yang, F., Wang, R., Wu, Y., and Wei, F. (2023a). Bitnet: Scaling 1-bit transformers for large language models. CoRR, abs/2310.11453.
245
+ Wang, H., Ma, S., Huang, S., Dong, L., Wang, W., Peng, Z., Wu, Y., Bajaj, P., Singhal, S., Benhaim, A., Patra, B., Liu, Z., Chaudhary, V., Song, X., and Wei, F. (2022). Foundation transformers. CoRR.
246
+ Wang, H., Ma, S., Wang, R., and Wei, F. (2024a). Q-sparse: All large language models can be fully sparsely-activated. CoRR, abs/2407.10969.
247
+ Wang, H., Ma, S., and Wei, F. (2024b). Bitnet a4.8: 4-bit activations for 1-bit llms. CoRR, abs/2411.04965.
248
+ Wang, J., Zhou, H., Song, T., Cao, S., Xia, Y., Cao, T., Wei, J., Ma, S., Wang, H., and Wei, F. (2025). Bitnet.cpp: Efficient edge inference for ternary llms. CoRR, abs/2502.11880.
249
+ Wang, L., Ma, L., Cao, S., Zheng, N., Zhang, Q., Xue, J., Miao, Z., Cao, T., and Yang, Y. (2023b). Ladder: Efficient tensor compilation on customized data format. In OSDI.
250
+ Xu, C., Sun, Q., Zheng, K., Geng, X., Zhao, P., Feng, J., Tao, C., Lin, Q., and Jiang, D. (2024a). Wizardlm: Empowering large pre-trained language models to follow complex instructions. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net.
251
+ Xu, Y., Han, X., Yang, Z., Wang, S., Zhu, Q., Liu, Z., Liu, W., and Che, W. (2024b). Onebit: Towards extremely low-bit large language models. In Globersons, A., Mackey, L., Belgrave, D., Fan, A., Paquet, U., Tomczak, J. M., and Zhang, C., editors, Advances in Neural Information Processing Systems 38: Annual Conference on Neural Information Processing Systems 2024, NeurIPS 2024, Vancouver, BC, Canada, December 10 - 15, 2024.
252
+ Xu, Z., Jiang, F., Niu, L., Deng, Y., Poovendran, R., Choi, Y., and Lin, B. Y. (2024c). Magpie: Alignment data synthesis from scratch by prompting aligned lms with nothing. CoRR, abs/2406.08464.
253
+ Yadav, V., Bethard, S., and Surdeanu, M. (2019). Quick and (not so) dirty: Unsupervised selection of justification sentences for multi-hop question answering. In EMNLP-IJCNLP.
254
+ Yang, A., Yang, B., Zhang, B., Hui, B., Zheng, B., Yu, B., Li, C., Liu, D., Huang, F., Wei, H., Lin, H., Yang, J., Tu, J., Zhang, J., Yang, J., Yang, J., Zhou, J., Lin, J., Dang, K., Lu, K., Bao, K., Yang, K., Yu, L., Li, M., Xue, M., Zhang, P., Zhu, Q., Men, R., Lin, R., Li, T., Xia, T., Ren, X., Ren, X., Fan, Y., Su, Y., Zhang, Y., Wan, Y., Liu, Y., Cui, Z., Zhang, Z., and Qiu, Z. (2024). Qwen2.5 technical report. CoRR, abs/2412.15115.
255
+
256
+ Zellers, R., Holtzman, A., Bisk, Y., Farhadi, A., and Choi, Y. (2019). HellaSwag: can a machine really finish your sentence? In Proceedings of the 57th Conference of the Association for Computational Linguistics, pages 4791-4800.
257
+ Zhang, Y., Zhang, Z., and Lew, L. (2022). PokeBNN: A binary pursuit of lightweight accuracy. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12465-12475. IEEE.
258
+ Zhao, W., Ren, X., Hessel, J., Cardie, C., Choi, Y., and Deng, Y. (2024). Wildchat: 1m chatgpt interaction logs in the wild. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net.
259
+ Zheng, L., Chiang, W.-L., Sheng, Y., Li, T., Zhuang, S., Wu, Z., Zhuang, Y., Li, Z., Lin, Z., Xing, E. P., Gonzalez, J. E., Stoica, I., and Zhang, H. (2024). Lmsys-chat-1m: A large-scale real-world LLM conversation dataset. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net.
260
+ Zheng, L., Chiang, W.-L., Sheng, Y., Zhuang, S., Wu, Z., Zhuang, Y., Lin, Z., Li, Z., Li, D., Xing, E. P., Zhang, H., Gonzalez, J. E., and Stoica, I. (2023). Judging llm-as-a-judge with mt-bench and chatbot arena. In Advances in Neural Information Processing Systems 36.
261
+ Zhou, J., Lu, T., Mishra, S., Brahma, S., Basu, S., Luan, Y., Zhou, D., and Hou, L. (2023). Instruction-following evaluation for large language models. CoRR, abs/2311.07911.
262
+
263
+ # A Open-weight Baselines
264
+
265
+ We summarize the links to the open-weight LLMs evaluated in this work as below:
266
+
267
+ - LLaMA 3.2 1B: meta-llama/Llama-3.2-1B-Instruct
268
+ - Gemma-3 1B: google/gemma-3-1b-it
269
+ - Qwen2.5 0.5B: Qwen/Qwen2.5-0.5B-Instruct
270
+ - Qwen2.5 1.5B: Qwen/Qwen2.5-1.5B-Instruct
271
+ - Qwen2.5 3B: Qwen/Qwen2.5-3B-Instruct
272
+ - SmolLM2 1.7B: HuggingFaceTB/SmolLM2-1.7B-Instruct
273
+ - MiniCPM 2B: openbmb/MiniCPM-2B-dpo-bf16
274
+ - Qwen2.5 1.5B-GPTQ-int4: Qwen/Qwen2.5-1.5B-Instruct-GPTQ-Int4
275
+ - Qwen2.5 1.5B-AWQ-int4: Qwen/Qwen2.5-1.5B-Instruct-AWQ
276
+ - Bonsai 0.5B: deepgrove/Bonsai
277
+ - OLMo-Bitnet 1B: NousResearch/OLMo-Bitnet-1B
278
+ - Falcon3-1.58bit 7B: tiiuae/Falcon3-7B-Instruct-1.58bit
279
+ - Llama3-8B-1.58 8B: HF1BitLLM/Llama3-8B-1.58-100B-tokens
280
+
281
+ # B Evaluation Pipeline Details
282
+
283
+ To ensure standardized evaluation, we employed established toolkits for different benchmark categories. Specifically:
284
+
285
+ - For the HumanEval+ coding benchmark, we utilized the evalplus toolkit.
286
+ - For the MATH-500 mathematical reasoning benchmark, we used a customized version of the math-evaluation-harness toolkit.
287
+ - For the MT-Bench conversational benchmark, evaluation was performed using the official LLM Judge open-source codebase.
288
+ - For all other benchmarks assessing language understanding, reasoning, knowledge, and comprehension, we used the standard lm-evaluation-harness framework.
289
+
290
+ <table><tr><td>Bits</td><td>ADD Energy</td><td>MUL Energy</td></tr><tr><td>FP16</td><td>0.16</td><td>0.34</td></tr><tr><td>INT8</td><td>0.007</td><td>0.07</td></tr></table>
291
+
292
+ Table 4: ADD and MUL energy consumption (in pJ) for different precisions at $7\mathrm{nm}$ process nodes.
293
+
294
+ Models were prompted using a chat format for generative tasks (e.g., GSM8K, IFEval, and MT-Bench), while default settings from the respective toolkits were used for other tasks.
295
+
296
+ For energy consumption, we utilize the energy model from (Horowitz, 2014; Zhang et al., 2022) to estimate the arithmetic operations energy (AOE) of matrix multiplication. The sequence length is set to 512 tokens. We present the energy consumption of ADD and MUL operations at $7\mathrm{nm}$ process nodes in Table 4.
297
+
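+ As a back-of-the-envelope sketch, Table 4 feeds into the estimate as follows. Ternary weights turn multiplications into sign-controlled additions over int8 activations; the actual AOE accounting in the report may include terms ignored here.
+
+ ```python
+ # Energy per operation in pJ at 7nm, taken from Table 4.
+ E = {"fp16": {"add": 0.16, "mul": 0.34}, "int8": {"add": 0.007, "mul": 0.07}}
+
+ def matmul_energy_pj(m: int, n: int, p: int, dtype: str = "fp16") -> float:
+     # An (m x n) @ (n x p) matmul performs m*n*p multiply-accumulate steps.
+     return m * n * p * (E[dtype]["mul"] + E[dtype]["add"])
+
+ def ternary_matmul_energy_pj(m: int, n: int, p: int) -> float:
+     # With weights in {-1, 0, +1}, multiplications reduce to additions (or skips).
+     return m * n * p * E["int8"]["add"]
+ ```
+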
298
+ To assess CPU decoding performance, latency measurements were conducted on a Surface Laptop Studio 2 system powered by a 13th Gen Intel Core i7-13800H processor. The benchmarking process utilized 8 CPU threads. Specifically, the BitNet b1.58 2B4T model was tested using its bitnet.cpp implementation, whereas other models were evaluated using the llama.cpp framework. For each model, we generated 128 tokens and report the average latency per token for this task.
data/2025/2504_12xxx/2504.12285/images/571146886c535edf30d81d1772d84f416f8ac854969e5314285b8b400728c4d3.jpg ADDED

Git LFS Details

  • SHA256: 604d68ef59293e9bd79df3be45e945046ad6aae7555f6bbf19d5eb9edf9d48e7
  • Pointer size: 131 Bytes
  • Size of remote file: 174 kB
data/2025/2504_12xxx/2504.12285/images/7fdcbcc3b50ac408ac7c07af7c01e1a337e1a44a092a38fa5c43f53314bca52d.jpg ADDED

Git LFS Details

  • SHA256: 0152a4ae7d760410e36011b3b10910ae5bd65eac122341c9fa7ff0268a5c0567
  • Pointer size: 131 Bytes
  • Size of remote file: 102 kB
data/2025/2504_12xxx/2504.12285/images/c2ea347c586a5437a02e09c5396b1bc21f19fa3a3f5ae4fc75ee151f66b801d8.jpg ADDED

Git LFS Details

  • SHA256: 0411361340a9328567c2d9d4a0f10fe6274d9aa2313b61e717366e0e95571266
  • Pointer size: 130 Bytes
  • Size of remote file: 12.9 kB
data/2025/2504_12xxx/2504.12285/images/e9b0504f3305e06d140af96f6c0e0d1ce952c56b2f03e24d6adcb32b50b7eb16.jpg ADDED

Git LFS Details

  • SHA256: be1bf80aa43564ddac84a7386e32e63688e7871fc0a0dd47c8fbbc39de35aa3e
  • Pointer size: 130 Bytes
  • Size of remote file: 50.4 kB
data/2025/2504_12xxx/2504.12285/images/ef5eaeee5358d095388e3666899372dcd05f08f5db7a5f88b8a1fcf76af24244.jpg ADDED

Git LFS Details

  • SHA256: 6496f9cca7afd4690e82a22b63cc6e392b6e1dafcb37cefeac08f091616afaa7
  • Pointer size: 130 Bytes
  • Size of remote file: 56.4 kB
data/2025/2504_12xxx/2504.12285/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2504_12xxx/2504.12369/3a2c027d-7926-4954-8a0d-d286f9d6a3ea_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2504_12xxx/2504.12369/3a2c027d-7926-4954-8a0d-d286f9d6a3ea_model.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2504_12xxx/2504.12369/3a2c027d-7926-4954-8a0d-d286f9d6a3ea_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4b9c02e5159ae54c2c8b60b312b18b1999800a44d25455c6fae804aa2e705d5d
3
+ size 4597473
data/2025/2504_12xxx/2504.12369/full.md ADDED
@@ -0,0 +1,542 @@
1
+ # WORLDMEM: Long-term Consistent World Simulation with Memory
2
+
3
+ Zeqi Xiao $^{1}$ Yushi Lan $^{1}$ Yifan Zhou $^{1}$ Wenqi Ouyang $^{1}$ Shuai Yang $^{2}$ Yanhong Zeng $^{3}$ Xingang Pan $^{1}$
4
+
5
+ $^{1}$ S-Lab, Nanyang Technological University,
6
+
7
+ $^{2}$ Wangxuan Institute of Computer Technology, Peking University
8
+
9
+ $^{3}$ Shanghai AI Laboratory
10
+
11
+ {zeqi001, yushi001, yifan006, wenqi.ouyang, xingang.pan}@ntu.edu.sg
12
+
13
+ williamyang@pku.edu.cn, zengyh1900@gmail.com
14
+
15
+ # Abstract
16
+
17
+ World simulation has gained increasing popularity due to its ability to model virtual environments and predict the consequences of actions. However, the limited temporal context window often leads to failures in maintaining long-term consistency, particularly in preserving 3D spatial consistency. In this work, we present WORLDMEM, a framework that enhances scene generation with a memory bank consisting of memory units that store memory frames and states (e.g., poses and timestamps). By employing state-aware memory attention that effectively extracts relevant information from these memory frames based on their states, our method is capable of accurately reconstructing previously observed scenes, even under significant viewpoint or temporal gaps. Furthermore, by incorporating timestamps into the states, our framework not only models a static world but also captures its dynamic evolution over time, enabling both perception and interaction within the simulated world. Extensive experiments in both virtual and real scenarios validate the effectiveness of our approach. Project page at https://xizaoqu.github.io/worldmem.
18
+
19
+ # 1 Introduction
20
+
21
+ World simulation has gained significant attention for its ability to model environments and predict the outcomes of actions (Bar et al., 2024; Decart et al., 2024; Alonso et al., 2025; Feng et al., 2024; Parker-Holder et al., 2024; Valevski et al., 2024). Recent advances in video diffusion models have further propelled this field, enabling high-fidelity rollouts of potential future scenarios based on user actions, such as navigating through an environment or interacting with objects. These capabilities make world simulators particularly promising for applications in autonomous navigation (Feng et al., 2024; Bar et al., 2024) and as viable alternatives to traditional game engines (Decart et al., 2024; Parker-Holder et al., 2024).
22
+
23
+ Despite these advances, a fundamental challenge remains: the limited probing horizon. Due to computational and memory constraints, video generative models operate within a fixed context window and are unable to condition on the full sequence of past generations. Consequently, most existing methods simply discard previously generated content, leading to a critical issue of world inconsistency, which is also revealed in Wang et al. (2025). As illustrated in Figure 1(a), when the camera moves away and returns, the regenerated content diverges from the earlier scene, violating the coherence expected in a consistent world.
24
+
25
+ A natural solution is to maintain an external memory that stores and retrieves relevant historical information outside the generative loop. While intuitive, formulating such a memory mechanism is
26
+
27
+ ![](images/2cedaf771a3bc9c255e1950c8a7a8826919dba3fb6d4f8b211d37dc47c3d69f4.jpg)
28
+ Figure 1: WORLDMEM enables long-term consistent world generation with an integrated memory mechanism. (a) Previous world generation methods typically face the problem of inconsistent world due to limited temporal context window size. (b) WORLDMEM empowers the agent to explore diverse and consistent worlds with an expansive action space, e.g., crafting environments by placing objects like pumpkin light or freely roaming around. Most importantly, after exploring for a while and glancing back, we find the objects we placed are still there, with the inspiring sight of the light melting the surrounding snow, testifying to the passage of time. Red and green boxes indicate scenes that should be consistent.
29
+
30
+ non-trivial. A direct approach might involve explicit 3D scene reconstruction to preserve geometry and detail. However, 3D representations are inflexible in dynamic and evolving environments and are prone to loss of detail, especially for large, unbounded scenes (Wu et al., 2025a).
31
+
32
+ Instead, we argue that geometry-free representations offer a more flexible solution. These representations, however, pose their own challenges – particularly in balancing detail retention with memory scalability. For example, implicit approaches like storing abstract features via LoRA modules (Hong et al., 2024) offer compactness but lose visual fidelity and spatial specificity. Some recent works represent visual scenes as discrete tokens encoding fine-grained visual information (Sajjadi et al., 2022; Jiang et al., 2025), but they are limited by a fixed token capacity and struggle to capture the complexity of diverse and evolving environments. To address this issue, we observe that for generating the immediate future, only a small subset of historical content is typically relevant. Based on this, we propose a token-level memory bank that stores all previously generated latent tokens, and retrieves a targeted subset for each generation step based on relevance.
33
+
34
+ Conditioning on the retrieved memory requires spatial-temporal reasoning. In contrast to prior work where memory aids local temporal smoothness (Zheng et al., 2024a) or semantic coherence (Wu et al., 2025b; Rahman et al., 2023), long-term world simulation demands reasoning over large spatiotemporal gaps, e.g., memory and query may differ in viewpoint and time, while exact scenes must be retained in detail. To facilitate this reasoning, we propose augmenting each memory unit with explicit state cues, including spatial location, viewpoint, and timestamp. These cues serve as anchors for reasoning and are embedded as part of the query-key attention mechanism. Through this state-aware attention, our model can effectively relate the current frame to past observations, facilitating accurate and coherent generation. Importantly, such a design leverages standard attention architectures, enabling it to scale naturally with modern hardware and model capacity.
35
+
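+ A minimal single-head sketch of this state-aware attention is given below. It is our simplification: state cues are embedded additively into absolute queries and keys, whereas the method uses a relative embedding design, and the projection names are hypothetical.
+
+ ```python
+ import torch.nn.functional as F
+
+ def state_aware_attention(x, mem, x_state, mem_state, proj):
+     # x: (Lq, d) latent tokens being generated; mem: (Lk, d) memory tokens.
+     # x_state / mem_state: pose and timestamp cues for queries / memory.
+     # proj: dict of torch.nn.Linear layers {"q", "k", "v", "s"} (our naming).
+     q = proj["q"](x) + proj["s"](x_state)      # state cues enrich the queries...
+     k = proj["k"](mem) + proj["s"](mem_state)  # ...and the memory keys
+     v = proj["v"](mem)
+     attn = F.softmax(q @ k.t() / q.size(-1) ** 0.5, dim=-1)
+     return attn @ v
+ ```
+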
36
+ Motivated by this idea, we build our approach, WORLDMEM, on top of the Conditional Diffusion Transformer (CDiT) (Peebles and Xie, 2023) and the Diffusion Forcing (DF) paradigm (Chen et al., 2025), which autoregressively generates first-person viewpoints conditioned on external action signals. As discussed above, at the core of WORLDMEM is a memory mechanism composed of a memory bank and memory attention. To ensure efficient and relevant memory retrieval from the bank, we introduce a confidence-based selection strategy that scores memory units based on field-of-view
37
+
38
+ (FOV) overlap and temporal proximity. In the memory attention, the latent tokens being generated act as queries, attending to the memory tokens (as keys and values) to incorporate relevant historical context. To ensure robust correspondence across varying viewpoints and time gaps, we enrich both queries and keys with state-aware embeddings. A relative embedding design is introduced to ease the learning of spatial and temporal relationships. This pipeline enables precise, scalable reasoning over long-range memory, ensuring consistency in dynamic and evolving world simulations.
39
+
40
+ We evaluate WORLDMEM on a customized Minecraft benchmark (Fan et al., 2022) and on RealEstate10K (Zhou et al., 2018). The Minecraft benchmark includes diverse terrains (e.g., plains, savannas, and deserts) and various action modalities (movement, viewpoint control, and event triggers), making it a well-suited environment for validating our approach. Extensive experiments show that WORLDMEM significantly improves 3D spatial consistency, enabling robust viewpoint reasoning and high-fidelity scene generation, as shown in Figure 1(b). Furthermore, in dynamic environments, WORLDMEM accurately tracks and follows evolving events and environment changes, demonstrating its ability to both perceive and interact with the generated world. We hope our promising results and scalable designs will inspire future research on memory-based world simulation.
41
+
42
+ # 2 Related Work
43
+
44
+ Video diffusion model. With the rapid advancement of diffusion models (Song et al., 2020; Peebles and Xie, 2023; Chen et al., 2025), video generation has made significant strides (Wang et al., 2023a,b; Chen et al., 2023; Guo et al., 2023; OpenAI, 2024; Jin et al., 2024; Yin et al., 2024). The field has evolved from traditional U-Net-based architectures (Wang et al., 2023a; Chen et al., 2023; Guo et al., 2023) to Transformer-based frameworks (OpenAI, 2024; Ma et al., 2024; Zheng et al., 2024b), enabling video diffusion models to generate highly realistic and temporally coherent videos. Recently, autoregressive video generation (Chen et al., 2025; Kim et al., 2024; Henschel et al., 2024) has emerged as a promising approach to extend video length, theoretically indefinitely. Notably, Diffusion Forcing (Chen et al., 2025) introduces a per-frame noise-level denoising paradigm. Unlike the full-sequence paradigm, which applies a uniform noise level across all frames, per-frame noise-level denoising offers a more flexible approach, enabling autoregressive generation.
45
+
46
+ Interactive world simulation. World simulation aims to model an environment by predicting the next state given the current state and action. This concept has been extensively explored in the construction of world models (Ha and Schmidhuber, 2018b) for agent learning (Ha and Schmidhuber, 2018a; Hafner et al., 2019, 2020; Hu et al., 2023; Beattie et al., 2016; Yang et al., 2023). With advances in video generation, high-quality world simulation with robust control has become feasible, leading to numerous works focusing on interactive world simulation (Bar et al., 2024; Decart et al., 2024; Alonso et al., 2025; Feng et al., 2024; Parker-Holder et al., 2024; Valevski et al., 2024; Yu et al., 2025c,a,b). These approaches enable agents to navigate generated environments and interact with them based on external commands.
47
+
48
+ However, due to context window limitations, such methods discard previously generated content, leading to inconsistencies in the simulated world, particularly in maintaining 3D spatial coherence.
49
+
50
+ Consistent world simulation. Ensuring the consistency of a generated world is crucial for effective world simulation (Wang et al., 2025). Existing approaches can be broadly categorized into two types: geometric-based and geometric-free. The geometric-based methods explicitly reconstruct the generated world into a 3D/4D representation (Liu et al., 2024; Gao et al., 2024; Wang and Agapito, 2024; Ren et al., 2025; Yu et al., 2024b,a; Liang et al., 2024). While this strategy can reliably maintain consistency, it imposes strict constraints on flexibility: once the world is reconstructed, modifying or interacting with it becomes challenging. Geometric-free methods focus on implicit learning. Methods like Alonso et al. (2025); Valevski et al. (2024) ensure consistency by overfitting to predefined scenarios (e.g., specific CS:GO or DOOM maps), limiting scalability. StreamingT2V (Henschel et al., 2024) maintains long-term consistency by conditioning on both global and local visual contexts from previous frames, while SlowFast-VGen (Hong et al., 2024) progressively trains LoRA (Hu et al., 2022) modules for memory recall. However, these methods rely on abstract representations, making accurate scene reconstruction challenging. In contrast, our approach retrieves information from previously generated frames and their states, ensuring world consistency without overfitting to specific scenarios.
51
+
52
+ ![](images/17f283519eda9a5331b73da78c30e9f49bf3b0344d40c5194698866ef6a8043e.jpg)
53
+
54
+ ![](images/710060b8d65f17b785353128df68a37c04d5ccfe3c20236be522f6805024dbe3.jpg)
55
+
56
+ ![](images/a94869f851f3a9f0a5887da9940203db5f53e43246aa8bb56a01ceb394a21328.jpg)
57
+ (c) State Embedding
58
+
59
+ ![](images/0eaba4c9b0918d5cb17309e5aac57ca03240e9a5a335a15ff79f2279e7e8be2c.jpg)
60
+ (b) Input Difference
61
+ (d) Memory Block
62
+ Figure 2: Comprehensive overview of WORLDMEM. The framework comprises a conditional diffusion transformer integrated with memory blocks, with a dedicated memory bank storing memory units from previously generated content. By retrieving these memory units from the memory bank and incorporating their information through the memory blocks to guide generation, our approach ensures long-term consistency in world simulation.
63
+
64
+ # 3 WORLDMEM
65
+
66
+ This section details the methodology of WORLDMEM. Sec. 3.1 introduces the relevant preliminaries, while Sec. 3.2 describes the interactive world simulator serving as our baseline. Secs. 3.3 and 3.4 present the core of our proposed memory mechanism.
67
+
68
+ # 3.1 Preliminary
69
+
70
+ Video diffusion models. Video diffusion models generate video sequences by iteratively denoising Gaussian noise through a learned reverse process:
71
+
72
+ $$
73
+ p_{\theta}\left(\mathbf{x}_t^{k-1} \mid \mathbf{x}_t^{k}\right) = \mathcal{N}\left(\mathbf{x}_t^{k-1}; \mu_{\theta}\left(\mathbf{x}_t^{k}, k\right), \sigma_k^2 \mathbf{I}\right), \tag{1}
74
+ $$
75
+
76
+ where all frames $(\mathbf{x}_t^k)_{1\leq t\leq T}$ share the same noise level $k$ and $T$ is the context window length. This full-sequence approach enables global guidance but lacks flexibility in sequence length and autoregressive generation.
77
+
78
+ Autoregressive video generation. Autoregressive video generation aims to extend videos over the long term by predicting frames sequentially (Kondratyuk et al., 2024; Wu et al., 2023). While various methods exist for autoregressive generation, Diffusion Forcing (DF) (Chen et al., 2025) provides a neat and effective approach to achieve this. Specifically, DF introduces per-frame noise levels $k_{t}$ :
79
+
80
+ $$
81
+ p_{\theta}\left(\mathbf{x}_t^{k_t - 1} \mid \mathbf{x}_t^{k_t}\right) = \mathcal{N}\left(\mathbf{x}_t^{k_t - 1}; \mu_{\theta}\left(\mathbf{x}_t^{k_t}, k_t\right), \sigma_{k_t}^2 \mathbf{I}\right). \tag{2}
82
+ $$
83
+
84
+ Unlike full-sequence diffusion, DF generates video flexibly and stably beyond the training horizon. Autoregressive generation is a special case when only the last one or a few frames are noisy. With autoregressive video generation, long-term interactive world simulation becomes feasible.
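+ To make the per-frame noise levels concrete, the sketch below shows how they might be assigned for DF-style training and for autoregressive inference. This is a minimal illustration under our own naming, not the released implementation; the values $k_{\min} = 15$ and $k_{\max} = 1000$ are taken from the experimental details in Sec. 4.
+
+ ```python
+ import torch
+
+ def training_noise_levels(num_frames: int, k_min: int = 15, k_max: int = 1000) -> torch.Tensor:
+     # DF training: each frame gets an independently sampled noise level.
+     return torch.randint(k_min, k_max + 1, (num_frames,))
+
+ def autoregressive_noise_levels(num_context: int, num_new: int,
+                                 k_min: int = 15, k_max: int = 1000) -> torch.Tensor:
+     # Inference: context frames stay nearly clean (k_min), while the frames
+     # being generated start from pure noise (k_max) and are denoised step by step.
+     return torch.cat([torch.full((num_context,), k_min),
+                       torch.full((num_new,), k_max)])
+
+ print(training_noise_levels(8))           # e.g. tensor([412,  37, ...])
+ print(autoregressive_noise_levels(8, 1))  # tensor([15, 15, ..., 15, 1000])
+ ```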
85
+
86
+ # 3.2 Interactive World Simulation
87
+
88
+ Before introducing the memory mechanism, we first present our interactive world simulator, which models long video sequences using an auto-regressive conditional diffusion transformer. Interaction is achieved by embedding external control signals, primarily actions, into the model through dedicated conditioning modules (Parker-Holder et al., 2024; Decart et al., 2024; Yu et al., 2025c).
89
+
90
+ Following prior work (Decart et al., 2024), we adopt a conditional Diffusion Transformer (DiT) (Peebles and Xie, 2023) architecture for video generation, and Diffusion Forcing (DF) (Chen et al., 2025) for autoregressive prediction.
91
+
92
+ As shown in Figure 2(a), our model consists of multiple DiT blocks with spatial and temporal modules for spatiotemporal reasoning. The temporal module applies causal attention to ensure that each frame only attends to preceding frames.
93
+
94
+ Actions are injected by first projecting them into the embedding space using a multi-layer perceptron (MLP). The resulting action embeddings are added to the denoising timestep embeddings and injected into the temporal blocks using Adaptive Layer Normalization (AdaLN) (Xu et al., 2019), following the paradigm of Bar et al. (2024); Decart et al. (2024). In our Minecraft experiments, the action space contains 25 dimensions, including movements, view adjustments, and event triggers. We also apply timestep embeddings to the spatial blocks in the same manner, although this is omitted from the figure for clarity. Standard architectural components such as residual connections, multi-head attention, and feedforward networks are also not shown.
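+ As a concrete illustration of this conditioning path, the sketch below embeds an action vector with an MLP, sums it with the timestep embedding, and uses the result to regress the scale and shift of an AdaLN-style modulation. Module names, hidden sizes, and the layer layout are our assumptions; only the 25-dimensional action space comes from the paper.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class ActionAdaLN(nn.Module):
+     def __init__(self, action_dim: int = 25, dim: int = 512):
+         super().__init__()
+         self.action_mlp = nn.Sequential(
+             nn.Linear(action_dim, dim), nn.SiLU(), nn.Linear(dim, dim))
+         self.norm = nn.LayerNorm(dim, elementwise_affine=False)
+         # Regress per-channel scale (gamma) and shift (beta) from the condition.
+         self.to_scale_shift = nn.Linear(dim, 2 * dim)
+
+     def forward(self, x, action, t_emb):
+         # x: (B, L, dim) tokens; action: (B, 25); t_emb: (B, dim)
+         cond = self.action_mlp(action) + t_emb
+         scale, shift = self.to_scale_shift(cond).chunk(2, dim=-1)
+         return self.norm(x) * (1 + scale.unsqueeze(1)) + shift.unsqueeze(1)
+
+ block = ActionAdaLN()
+ out = block(torch.randn(2, 64, 512), torch.randn(2, 25), torch.randn(2, 512))
+ print(out.shape)  # torch.Size([2, 64, 512])
+ ```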
95
+
96
+ The combination of conditional DiT and DF provides a strong baseline for long-term interactive video generation. However, due to the computational cost of video synthesis, the temporal context window remains limited. As a result, content outside this window is forgotten, which leads to inconsistencies during long-term generation (Decart et al., 2024).
97
+
98
+ # 3.3 Memory Representation and Retrieval
99
+
100
+ To address the limited context window of video generative models, we introduce a memory mechanism that enables the model to retain and retrieve information beyond the current generation window. This mechanism maintains a memory bank composed of historical frames and their associated state information: $\{(\mathbf{x}_i^m,\mathbf{p}_i,t_i)\}_{i = 1}^N$, where $\mathbf{x}_i^m$ denotes a memory frame, $\mathbf{p}_i\in \mathbb{R}^5$ (x, y, z, pitch, yaw) is its pose, and $t_i$ is the timestamp. Each tuple is referred to as a memory unit. We store each memory frame $\mathbf{x}_i^m$ at the token level: compressed by the visual encoder, yet retaining enough detail for reconstruction. The corresponding states $\{(\mathbf{p},t)\}$ play a critical role not only in memory retrieval but also in enabling state-aware memory conditioning.
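+ A minimal sketch of such a memory bank is given below: each unit stores token-level latents together with a 5D pose and a timestamp. The class names are ours; the latent token shape [16, 18, 32] is borrowed from the memory-usage analysis in the supplementary material.
+
+ ```python
+ from dataclasses import dataclass, field
+ import torch
+
+ @dataclass
+ class MemoryUnit:
+     tokens: torch.Tensor   # latent frame tokens from the visual encoder
+     pose: torch.Tensor     # (x, y, z, pitch, yaw), shape (5,)
+     timestamp: float
+
+ @dataclass
+ class MemoryBank:
+     units: list = field(default_factory=list)
+
+     def add(self, tokens, pose, timestamp):
+         self.units.append(MemoryUnit(tokens, pose, timestamp))
+
+ bank = MemoryBank()
+ bank.add(torch.randn(16, 18, 32), torch.tensor([0., 64., 0., 0., 90.]), 0.0)
+ print(len(bank.units))  # 1
+ ```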
101
+
102
+ # Algorithm 1: Memory Retrieval Algorithm
103
+
104
+ Input: Memory bank of $N$ historical states $\{(\mathbf{x}_i^m,\mathbf{p}_i,t_i)\}_{i = 1}^N;$
105
+
106
+ Current state $(\mathbf{x}_c,\mathbf{p}_c,t_c)$ ; memory condition length $L_{M}$
107
+
108
+ Similarity threshold $tr$ ; weights $w_{o}$ , $w_{t}$ .
109
+
110
+ Output: A list of selected state indices $S$
111
+
112
+ Compute Confidence Score:
113
+
114
+ Compute FOV overlap ratio $\mathbf{o}$ via Monte Carlo sampling.
115
+
116
+ Compute time difference $\mathbf{d} = \mathrm{Concat}\big(\{|t_i - t_c|\}_{i = 1}^N\big)$.
117
+
118
+ Compute confidence $\alpha = \mathbf{o}\cdot w_{o} - \mathbf{d}\cdot w_{t}$
119
+
120
+ Selection with Similarity Filtering:
121
+
122
+ Initialize $S = \varnothing$
123
+
124
+ for $m = 1$ to $L_{M}$ do
125
+
126
+ Select $i^{*}$ with highest $\alpha_{i^{*}}$
127
+
128
+ Append $i^{*}$ to $S$
129
+
130
+ Remove all $j$ where similarity $(i^{*},j) > tr$
131
+
132
+ return $S$
133
+
134
+ Memory Retrieval. Since the number of memory frames available for conditioning is limited, an efficient strategy is required to sample memory units from the memory bank. We adopt a greedy matching algorithm based on frame-pair similarity, where similarity is defined using the field-of-view (FOV) overlap ratio and timestamp differences as confidence measures. Algorithm 1 presents our approach to memory retrieval. Although simple, this strategy proves effective in retrieving relevant information for conditioning. Moreover, the model's reasoning over memory helps maintain performance even when the retrieved content is imperfect.
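+ The sketch below re-implements Algorithm 1 in plain NumPy. The per-unit FOV overlap vector and the pairwise similarity matrix are assumed to be computed elsewhere (e.g., by the Monte Carlo routine in the supplementary material); treating pairwise similarity as pairwise FOV overlap is our reading, since the paper does not pin down that choice. The hyperparameter values follow Sec. 4.
+
+ ```python
+ import numpy as np
+
+ def retrieve_memory(overlap, timestamps, t_c, pair_sim,
+                     L_M=8, tr=0.9, w_o=1.0, w_t=None):
+     # overlap:    (N,) FOV overlap of each memory unit with the current view
+     # timestamps: (N,) timestamps of the memory units; t_c: current timestamp
+     # pair_sim:   (N, N) pairwise similarity between memory units
+     if w_t is None:
+         w_t = 0.2 / max(t_c, 1e-6)          # schedule used in the paper
+     d = np.abs(timestamps - t_c)            # time differences
+     alpha = overlap * w_o - d * w_t         # confidence score
+     alive = np.ones(len(alpha), dtype=bool)
+     selected = []
+     for _ in range(L_M):
+         if not alive.any():
+             break
+         i_star = int(np.argmax(np.where(alive, alpha, -np.inf)))
+         selected.append(i_star)
+         alive &= pair_sim[i_star] <= tr     # drop near-duplicates of i_star
+         alive[i_star] = False
+     return selected
+
+ rng = np.random.default_rng(0)
+ print(retrieve_memory(rng.random(20), np.arange(20.0), t_c=25.0,
+                       pair_sim=rng.random((20, 20))))
+ ```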
135
+
136
+ # 3.4 State-aware Memory Condition
137
+
138
+ After retrieving necessary memory units, unlike prior methods that use memory mainly for temporal smoothness (Zheng et al., 2024a) or semantic guidance (Wu et al., 2025b; Rahman et al., 2023), our goal is to explicitly reconstruct previously seen visual content – even under significant viewpoint or scene changes. This requires the model to perform spatiotemporal reasoning to extract relevant information from memory, which we model using cross-attention (Vaswani et al., 2017). Since relying solely on visual tokens can be ambiguous, we incorporate the corresponding states as cues to enable state-aware attention.
139
+
140
+ State Embedding. State embedding provides essential spatial and temporal context for memory retrieval. To encode spatial information, we adopt Plücker embedding (Sitzmann et al., 2021) to convert 5D poses $\mathbf{p} \in \mathbb{R}^5$ into dense positional features $\mathrm{PE}(\mathbf{p}) \in \mathbb{R}^{h \times w \times 6}$, following (He et al., 2024; Gao et al., 2024). Temporal context is captured via a lightweight MLP over sinusoidally embedded (SE) timestamps.
141
+
142
+ ![](images/767f4bcd7f8825e3ca7df0605b4a362e6098d0785328a13ad2ac10801d30be44.jpg)
143
+ Figure 3: Qualitative results. We showcase WORLDMEM's capabilities through two sets of examples. Top: A comparison with Ground Truth (GT). WORLDMEM accurately models diverse dynamics (e.g., rain) by conditioning on 600 past frames, ensuring temporal consistency. Bottom: Interaction with the world. Objects like hay in the desert or wheat in the plains persist over time, with wheat visibly growing. For the best experience, see the supplementary videos.
144
+
145
+ The final embedding is (Figure 2(c)):
146
+
147
+ $$
148
+ \mathbf{E} = G_p(\mathrm{PE}(\mathbf{p})) + G_t(\mathrm{SE}(t)), \tag{3}
149
+ $$
150
+
151
+ where $G_{p}$ and $G_{t}$ are MLPs mapping pose and time into a shared space.
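+ One possible instantiation of Eq. (3) is sketched below: a sinusoidal timestamp embedding and a per-pixel Plücker map (computed separately; see the supplementary sketch) are projected into a shared space by two small MLPs. Layer widths and activations are our assumptions.
+
+ ```python
+ import math
+ import torch
+ import torch.nn as nn
+
+ class StateEmbedding(nn.Module):
+     def __init__(self, dim: int = 512):
+         super().__init__()
+         self.dim = dim
+         self.G_p = nn.Sequential(nn.Linear(6, dim), nn.SiLU(), nn.Linear(dim, dim))
+         self.G_t = nn.Sequential(nn.Linear(dim, dim), nn.SiLU(), nn.Linear(dim, dim))
+
+     def sinusoidal(self, t: torch.Tensor) -> torch.Tensor:
+         half = self.dim // 2
+         freqs = torch.exp(-math.log(10000.0) * torch.arange(half) / half)
+         ang = t[:, None] * freqs[None, :]
+         return torch.cat([ang.sin(), ang.cos()], dim=-1)
+
+     def forward(self, plucker, t):
+         # plucker: (h, w, 6) Plücker map PE(p); t: (1,) timestamp
+         pose_feat = self.G_p(plucker)               # G_p(PE(p)): (h, w, dim)
+         time_feat = self.G_t(self.sinusoidal(t))    # G_t(SE(t)): (1, dim)
+         return pose_feat + time_feat[0]             # broadcast over h, w
+
+ E = StateEmbedding()(torch.randn(18, 32, 6), torch.tensor([42.0]))
+ print(E.shape)  # torch.Size([18, 32, 512])
+ ```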
152
+
153
+ State-aware Memory Attention. To support reconstruction under viewpoint and temporal shifts, we introduce a state-aware attention mechanism that incorporates spatial-temporal cues into memory retrieval. By conditioning attention on both visual features and state information, the model achieves more accurate reasoning between input and memory.
154
+
155
+ Let $\mathbf{X}_q\in \mathbb{R}^{l_q\times d}$ denote the flattened feature map of input frames (queries), and $\mathbf{X}_k\in \mathbb{R}^{l_k\times d}$ the concatenated memory features (keys and values). We first enrich both with their corresponding state embeddings $\mathbf{E}_q$ and $\mathbf{E}_k$ :
156
+
157
+ $$
158
+ \tilde{\mathbf{X}}_q = \mathbf{X}_q + \mathbf{E}_q, \quad \tilde{\mathbf{X}}_k = \mathbf{X}_k + \mathbf{E}_k. \tag{4}
159
+ $$
160
+
161
+ Cross-attention is then applied to retrieve relevant memory content, producing the updated features $\mathbf{X}^{\prime}$:
162
+
163
+ $$
164
+ \mathbf{X}^{\prime} = \operatorname{CrossAttn}\left(Q = p_q(\tilde{\mathbf{X}}_q),\; K = p_k(\tilde{\mathbf{X}}_k),\; V = p_v(\mathbf{X}_k)\right), \tag{5}
165
+ $$
166
+
167
+ where $p_q, p_k$ , and $p_v$ are learnable projections.
168
+
169
+ To simplify the reasoning space, we adopt a relative state formulation. For each query frame, the state is set to a zero reference (e.g., the pose is reset to the identity and the timestamp to zero), while the states of key frames are normalized to relative values. This design, illustrated in Figure 2(d), improves alignment under viewpoint changes and simplifies the learning objective.
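+ The sketch below wires Eqs. (4)-(5) together: queries and keys are enriched with state embeddings while values remain purely visual, and `nn.MultiheadAttention` supplies the learnable projections $p_q$, $p_k$, $p_v$. Under the relative formulation, $\mathbf{E}_q$ would be computed at the zero reference and $\mathbf{E}_k$ at states expressed relative to the query.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class StateAwareMemoryAttention(nn.Module):
+     def __init__(self, dim: int = 512, heads: int = 8):
+         super().__init__()
+         self.attn = nn.MultiheadAttention(dim, heads, batch_first=True)
+
+     def forward(self, x_q, x_k, e_q, e_k):
+         # x_q: (B, l_q, d) input-frame tokens; x_k: (B, l_k, d) memory tokens
+         # e_q, e_k: state embeddings matching x_q and x_k
+         q = x_q + e_q          # Eq. (4): enrich queries with state
+         k = x_k + e_k          # Eq. (4): enrich keys with state
+         out, _ = self.attn(query=q, key=k, value=x_k)  # Eq. (5)
+         return out
+
+ attn = StateAwareMemoryAttention()
+ out = attn(torch.randn(2, 144, 512), torch.randn(2, 8 * 144, 512),
+            torch.randn(2, 144, 512), torch.randn(2, 8 * 144, 512))
+ print(out.shape)  # torch.Size([2, 144, 512])
+ ```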
170
+
171
+ ![](images/55568ec2d9052a84d2f43f5fd983fa65c403765847b1eb321dd4a5371fac8f43.jpg)
172
+ Figure 4: Within context window evaluation. The motion sequence involves turning right and returning to the original position, showing self-contained consistency.
173
+
174
+ Table 1: Evaluation on Minecraft
175
+
176
+ <table><tr><td colspan="4">Within context window</td></tr><tr><td>Methods</td><td>PSNR ↑</td><td>LPIPS ↓</td><td>rFID ↓</td></tr><tr><td>Full Seq.</td><td>14.35</td><td>0.0691</td><td>13.87</td></tr><tr><td>DF</td><td>20.56</td><td>0.0094</td><td>13.88</td></tr><tr><td>Ours</td><td>21.01</td><td>0.0072</td><td>13.73</td></tr><tr><td colspan="4">Beyond context window</td></tr><tr><td>Methods</td><td>PSNR ↑</td><td>LPIPS ↓</td><td>rFID ↓</td></tr><tr><td>Full Seq.</td><td>/</td><td>/</td><td>/</td></tr><tr><td>DF</td><td>18.04</td><td>0.4376</td><td>51.28</td></tr><tr><td>Ours</td><td>19.32</td><td>0.1429</td><td>15.37</td></tr></table>
177
+
178
+ ![](images/d1e6c276910048364854297556611dd2a45a1eea429cab3c84376a57362243d5.jpg)
179
+ Figure 5: Beyond context window evaluation. Diffusion Forcing suffers from inconsistency over time, while ours maintains quality and recovers past scenes.
180
+
181
+ Table 2: Ablation on embedding designs
182
+
183
+ <table><tr><td>Pose type</td><td>Embed. type</td><td>PSNR ↑</td><td>LPIPS ↓</td><td>rFID ↓</td></tr><tr><td>Sparse</td><td>Absolute</td><td>14.67</td><td>0.2887</td><td>39.23</td></tr><tr><td>Dense</td><td>Absolute</td><td>17.63</td><td>0.1830</td><td>29.34</td></tr><tr><td>Dense</td><td>Relative</td><td>19.32</td><td>0.1429</td><td>15.37</td></tr></table>
184
+
185
+ Table 3: Ablation on memory retrieval strategy
186
+
187
+ <table><tr><td>Strategy</td><td>PSNR ↑</td><td>LPIPS ↓</td><td>rFID ↓</td></tr><tr><td>Random</td><td>12.32</td><td>0.3224</td><td>47.35</td></tr><tr><td>+ Confidence Filter</td><td>17.12</td><td>0.1863</td><td>24.33</td></tr><tr><td>+ Similarity Filter</td><td>19.32</td><td>0.1429</td><td>15.37</td></tr></table>
188
+
189
+ Incorporating memory into the pipeline. We incorporate memory frames into the pipeline by treating them as clean inputs during both training and inference. As shown in Figure 2(a-b), during training, memory frames are assigned the lowest noise level $k_{\mathrm{min}}$, while context window frames receive independently sampled noise levels from the range $[k_{\mathrm{min}}, k_{\mathrm{max}}]$. During inference, both memory and context frames are assigned $k_{\mathrm{min}}$, while the frames currently being generated are assigned $k_{\mathrm{max}}$.
190
+
191
+ To restrict memory influence only to memory blocks, we apply a temporal attention mask:
192
+
193
+ $$
194
+ A_{\mathrm{mask}}(i, j) = \begin{cases} 1, & i \leq L_M \text{ and } j = i \\ 1, & i > L_M \text{ and } j \leq i \\ 0, & \text{otherwise} \end{cases} \tag{6}
195
+ $$
196
+
197
+ where $L_{M}$ is the number of memory frames that are appended before frames within the context window. This guarantees causal attention while preventing memory units from affecting each other.
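+ A direct, 0-indexed transcription of Eq. (6) might look as follows, where a value of 1 marks an allowed attention edge.
+
+ ```python
+ import torch
+
+ def temporal_attention_mask(L_M: int, T: int) -> torch.Tensor:
+     # Sequence layout: L_M memory frames followed by T context-window frames.
+     n = L_M + T
+     i = torch.arange(n)[:, None]
+     j = torch.arange(n)[None, :]
+     mem_self = (i < L_M) & (j == i)   # memory rows attend only to themselves
+     causal = (i >= L_M) & (j <= i)    # context rows attend causally
+     return mem_self | causal
+
+ print(temporal_attention_mask(2, 3).int())
+ ```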
198
+
199
+ # 4 Experiments
200
+
201
+ Datasets. We use MineDojo (Fan et al., 2022) to create diverse training and evaluation datasets in Minecraft, configuring diverse environments (e.g., plains, savannas, ice plains, and deserts), agent actions, and interactions. For real-world scenes, we utilize RealEstate10K (Zhou et al., 2018) with camera pose annotations to evaluate long-term world consistency.
202
+
203
+ Metrics. For quantitative evaluation, we employ reconstruction metrics, where the method of obtaining ground truth (GT) varies by specific settings. We then assess the consistency and quality of the generated videos using PSNR, LPIPS (Zhang et al., 2018), and reconstruction FID (rFID) (Heusel et al., 2017), which collectively measure pixel-level fidelity, perceptual similarity, and overall realism.
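+ For reference, the per-frame metrics can be computed as sketched below with the `lpips` package; the AlexNet backbone is our choice, as the paper does not specify one. rFID additionally requires an Inception feature extractor (e.g., the pytorch-fid package) and is omitted here.
+
+ ```python
+ import torch
+ import lpips  # pip install lpips
+
+ def psnr(pred: torch.Tensor, gt: torch.Tensor) -> torch.Tensor:
+     # pred, gt in [0, 1], shape (N, 3, H, W)
+     mse = ((pred - gt) ** 2).mean(dim=(1, 2, 3))
+     return 10.0 * torch.log10(1.0 / mse)
+
+ lpips_fn = lpips.LPIPS(net='alex')  # expects inputs scaled to [-1, 1]
+
+ pred, gt = torch.rand(4, 3, 64, 64), torch.rand(4, 3, 64, 64)
+ print(psnr(pred, gt).mean().item())
+ print(lpips_fn(pred * 2 - 1, gt * 2 - 1).mean().item())
+ ```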
204
+
205
+ Experimental details. For our experiments on Minecraft (Fan et al., 2022), we utilize Oasis (Decart et al., 2024) as the base model. Our model is trained using the Adam optimizer with a fixed learning rate of $2 \times 10^{-5}$.
206
+
207
+ ![](images/80be7710b7aac22f2f910ef78e2582ba42b65a4d9eacce9bebbb6f7e2b7ed9dd.jpg)
208
+ Figure 6: Results on RealEstate (Zhou et al., 2018). We visualize loop closure consistency over a full camera rotation. The visual similarity between the first and last frames serves as a qualitative indicator of 3D spatial consistency.
209
+
210
+ ![](images/ddc2586f3345e661f33248b6cc0087c4703090e0712fed894a0128507250e4b5.jpg)
211
+
212
+ Table 4: Evaluation on RealEstate10K
213
+
214
+ <table><tr><td>Methods</td><td>PSNR ↑</td><td>LPIPS ↓</td><td>rFID ↓</td></tr><tr><td>CameraCtrl (He et al., 2024)</td><td>13.19</td><td>0.3328</td><td>133.81</td></tr><tr><td>TrajAttn (Xiao et al., 2024)</td><td>14.22</td><td>0.3698</td><td>128.36</td></tr><tr><td>Viewcrafter (Yu et al., 2024c)</td><td>21.72</td><td>0.1729</td><td>58.43</td></tr><tr><td>DFoT (Song et al., 2025)</td><td>16.42</td><td>0.2933</td><td>110.34</td></tr><tr><td>Ours</td><td>23.34</td><td>0.1672</td><td>43.14</td></tr></table>
215
+
216
+ Training is conducted at a resolution of $640 \times 360$, where frames are first encoded into a latent space via a VAE at a resolution of $32 \times 18$, then further patchified to $16 \times 9$. Our training dataset comprises approximately 12K long videos, each containing 1500 frames, generated from Fan et al. (2022). During training, we employ an 8-frame temporal context window alongside an 8-frame memory window. The model is trained for approximately 500K steps using 4 GPUs, with a batch size of 4 per GPU. For the hyperparameters specified in Algorithm 1, we set the similarity threshold $tr$ to 0.9, $w_{o}$ to 1, and $w_{t}$ to $0.2 / t_{c}$. For the noise levels described in Sec. 3.4, we set $k_{\min}$ to 15 and $k_{\max}$ to 1000.
217
+
218
+ For our experiments on RealEstate10K (Zhou et al., 2018), we adopt DFoT (Song et al., 2025) as the base model. The RealEstate10K dataset provides a training set of approximately 65K short video clips. Training is conducted at a resolution of $256 \times 256$, with frames patchified to $128 \times 128$. The model is trained for approximately 50K steps using 4 GPUs, with a batch size of 8 per GPU.
219
+
220
+ # 4.1 Results on Generation Benchmark
221
+
222
+ Comparisons on Minecraft Benchmark. We compare our approach with a standard full-sequence (Full Seq.) training method (He et al., 2024; Wang et al., 2024) and Diffusion Forcing (DF) (Chen et al., 2025). The key differences are as follows: the full-sequence conditional diffusion transformer (Peebles and Xie, 2023) applies a uniform noise level across all frames during training and inference, DF introduces per-frame noise levels, and our method additionally incorporates a memory mechanism. To assess both short-term and long-term world consistency, we conduct evaluations within and beyond the context window. We evaluate both settings on 300 test videos. In the following experiments, the agent's poses are generated by the game simulator as ground truth. However, in real-world scenarios, only the action input is available, and the pose is not directly observable. In such cases, the next-frame pose can be predicted based on the previous scenes, past states, and the upcoming action. We explore this design choice in the supplementary material.
223
+
224
+ Within context window. For this experiment, all methods use a context window of 16, while our approach additionally maintains a memory window of 8. We test on customized motion scenarios (e.g., turn left, then turn right or move forward, then backward) to assess self-contained consistency, where the ground truth consists of previously generated frames at the same positions. As shown in Table 1 and Figure 4, the full-sequence baseline suffers from inconsistencies even within its own context window. DF improves consistency by enabling greater information exchange among generated frames. Our memory-based approach achieves the best performance, demonstrating the effectiveness of integrating a dedicated memory mechanism.
225
+
226
+ Table 5: Ablation on sampling strategy for training
227
+
228
+ <table><tr><td>Sampling strategy</td><td>PSNR ↑</td><td>LPIPS ↓</td><td>rFID ↓</td></tr><tr><td>Small-range</td><td>13.23</td><td>0.3786</td><td>46.55</td></tr><tr><td>Large-range</td><td>15.11</td><td>0.3855</td><td>42.96</td></tr><tr><td>Progressive</td><td>19.32</td><td>0.1429</td><td>15.37</td></tr></table>
229
+
230
+ Beyond context window. In this setting, all methods use a context window of 8 and generate 100 future frames; our method further employs a memory window of 8 while initializing a 600-frame memory bank. We compute the reconstruction error using the subsequent 100 ground truth frames after 600 frames. Full-sequence methods cannot roll out that long, so we exclude them. DF exhibits poor PSNR and LPIPS scores, indicating severe inconsistency with the ground truth beyond the context window. Additionally, its high rFID indicates notable quality degradation. In contrast, our memory-augmented approach consistently outperforms others across all metrics, demonstrating superior long-term consistency and quality preservation. Figure 5 further substantiates these findings.
231
+
232
+ Figure 3 showcases WORLDMEM's capabilities. The top section demonstrates its ability to operate in a free action space across diverse environments. Given a 600-frame memory bank, our model generates 100 future frames while preserving the ground truth's actions and poses, ensuring strong world consistency. The bottom section highlights dynamic environment interaction. By using timestamps as embeddings, the model remembers environmental changes and captures natural event evolution, such as plant growth over time.
233
+
234
+ Comparisons on Real Scenarios. We compare our method with prior works (He et al., 2024; Xiao et al., 2024; Yu et al., 2024c; Song et al., 2025) on the RealEstate10K dataset (Zhou et al., 2018). We design 5 evaluation trajectories, each starting and ending at the same pose, across 100 scenes. The trajectory lengths range from 37 to 60 frames – exceeding the training lengths of all baselines (maximum 25 frames).
235
+
236
+ CameraCtrl (He et al., 2024), TrajAttn (Xiao et al., 2024), and DFoT (Song et al., 2025) discard past frames and suffer from inconsistency. Viewcrafter (Yu et al., 2024c) incorporates explicit 3D reconstruction, yielding better results, but is constrained by errors in post-processing such as reconstruction and rendering. As shown in Table 4 and Figure 6, our approach achieves superior performance across all metrics. However, the RealEstate dataset inherently limits the full potential of our method, as it consists of short, non-interactive clips with limited temporal complexity. We leave evaluation under more challenging and interactive real-world scenarios for future work.
237
+
238
+ # 4.2 Ablation
239
+
240
+ **Embedding designs.** The design of embeddings within the memory block is crucial for cross-frame relationship modeling. We evaluate three strategies (Table 2): (1) sparse pose embedding with absolute encoding, (2) dense pose embedding with absolute encoding, and (3) dense pose embedding with relative encoding. Results show that dense pose embeddings (Plücker embedding) significantly enhance all metrics, emphasizing the benefits of richer pose representations. Switching from absolute to relative encoding further improves performance, particularly in LPIPS and rFID, by facilitating relationship reasoning and information retrieval. As illustrated in Figure 7, absolute embeddings accumulate errors over time, while relative embeddings maintain stability even beyond 300 frames.
241
+
242
+ Sampling strategy for training. We compare different sampling strategies during training in the Minecraft benchmark. Small-range sampling restricts memory conditioning to frames within $2\mathrm{m}$ in the Minecraft world, while large-range sampling extends this range to $8\mathrm{m}$ . Progressive sampling, on the other hand, begins with small-range samples for initial training steps and then gradually expands to large-range samples.
243
+
244
+ As shown in Table 5, both small-range and large-range sampling struggle with consistency and quality, whereas progressive sampling significantly improves all metrics. This suggests that gradually increasing difficulty during training helps the model learn to reason and effectively query information from memory blocks.
245
+
246
+ Time condition. We ablate the effectiveness of the timestamp condition (for both embedding and retrieval) in Table 6. We curate 100 video samples featuring placing events and evaluate whether future generations align with event progression.
247
+
248
+ ![](images/c891de3c6e4cfb733593f322c26a2b2245c57e9ed2ba6ef2a8161dce2e9e97c1.jpg)
249
+ Figure 7: Long-term Generation Comparison. This figure presents the PSNR of different ablation methods compared to the ground truth over a 300-frame sequence. The results show that our method without memory blocks or using random memory retrieval exhibits immediate inconsistencies with the ground truth. Additionally, the model lacking relative embeddings begins to degrade significantly beyond 100 frames. In contrast, our full method maintains strong consistency even beyond 300 frames.
250
+
251
+ ![](images/6d83deba8dc1fb557d72f7e206ad8763aaf1db95ce734260003174404ea4cd47.jpg)
252
+ Figure 8: Results w/o and w/ time condition. Without timestamps, the model fails to differentiate memory units from the same location at different times, causing errors. With time conditioning, it aligns with the updated world state, ensuring consistency.
253
+
254
+ Table 6: Ablation on time condition
255
+
256
+ <table><tr><td>Time condition</td><td>PSNR ↑</td><td>LPIPS ↓</td><td>rFID ↓</td></tr><tr><td>w/o</td><td>17.17</td><td>0.1989</td><td>23.89</td></tr><tr><td>w/</td><td>19.12</td><td>0.1613</td><td>16.53</td></tr></table>
257
+
258
+ As shown in the table, incorporating the time condition significantly improves PSNR and LPIPS, indicating that adding temporal information helps the model faithfully reproduce event changes in world simulation. Since events like plant growth are inherently unpredictable, we do not conduct quantitative evaluations on such cases but instead provide qualitative illustrations in Figure 8.
259
+
260
+ Memory retrieval strategy. We analyze memory retrieval strategies in Table 3. Random sampling from the memory bank leads to poor performance and severe quality degradation, as evidenced by a sharp increase in rFID and rapid divergence from the ground truth (Figure 7). Confidence-based filtering significantly enhances consistency and generation quality. Additionally, we refine retrieval by filtering out redundant memory units based on similarity, further improving all evaluation metrics and demonstrating the effectiveness of our approach.
261
+
262
+ # 5 Limitations and Future works
263
+
264
+ Despite the effectiveness of our approach, certain issues warrant further exploration. First, we cannot guarantee that we can always retrieve all necessary information from the memory bank. In some corner cases (e.g., when views are blocked by obstacles), relying solely on view overlap may be insufficient. Second, our current interaction with the environment lacks diversity and realism. In future work, we plan to extend our models to real-world scenarios with more realistic and varied interactions. Lastly, our memory design still entails linearly increasing memory usage, which may impose limitations when handling extremely long sequences.
265
+
266
+ # 6 Conclusion
267
+
268
+ In conclusion, WORLDMEM tackles the longstanding challenge of maintaining long-term consistency in world simulation by employing a memory bank of past frames and associated states. Its memory attention mechanism enables accurate reconstruction of previously observed scenes, even under large viewpoint changes or temporal gaps, and effectively models dynamic changes over time. Extensive experiments in both virtual and real settings confirm WORLDMEM's capacity for robust, immersive world simulation. We hope our work will encourage further research on the design and applications of memory-based world simulators.
269
+
270
+ Acknowledgements. This research is supported by the National Research Foundation, Singapore, under its NRF Fellowship Award <NRF-NRFF16-2024-0003>. This research is also supported by NTU SUG-NAP, as well as cash and in-kind funding from NTU S-Lab and industry partner(s).
271
+
272
+ # References
273
+
274
+ Eloi Alonso, Adam Jelley, Vincent Micheli, Anssi Kanervisto, Amos J Storkey, Tim Pearce, and François Fleuret. Diffusion for world modeling: Visual details matter in atari. Advances in Neural Information Processing Systems, 37:58757-58791, 2025.
275
+ Amir Bar, Gaoyue Zhou, Danny Tran, Trevor Darrell, and Yann LeCun. Navigation world models, 2024.
276
+ Charles Beattie, Joel Z Leibo, Denis Teplyashin, Tom Ward, Marcus Wainwright, Heinrich Küttler, Andrew Lefrancq, Simon Green, Víctor Valdés, Amir Sadik, et al. Deepmind lab. arXiv preprint arXiv:1612.03801, 2016.
277
+ Boyuan Chen, Diego Martí Monsó, Yilun Du, Max Simchowitz, Russ Tedrake, and Vincent Sitzmann. Diffusion forcing: Next-token prediction meets full-sequence diffusion. Advances in Neural Information Processing Systems, 37:24081-24125, 2025.
278
+ Haoxin Chen, Menghan Xia, Yingqing He, Yong Zhang, Xiaodong Cun, Shaoshu Yang, Jinbo Xing, Yaofang Liu, Qifeng Chen, Xintao Wang, et al. Videocrafter1: Open diffusion models for high-quality video generation. arXiv preprint arXiv:2310.19512, 2023.
279
+ Decart, Julian Quevedo, Quinn McIntyre, Spruce Campbell, Xinlei Chen, and Robert Wachen. Oasis: A universe in a transformer. 2024. Project website.
280
+ Linxi Fan, Guanzhi Wang, Yunfan Jiang, Ajay Mandlekar, Yuncong Yang, Haoyi Zhu, Andrew Tang, De-An Huang, Yuke Zhu, and Anima Anandkumar. Minedojo: Building open-ended embodied agents with internet-scale knowledge. Advances in Neural Information Processing Systems, 35:18343-18362, 2022.
281
+ Ruili Feng, Han Zhang, Zhantao Yang, Jie Xiao, Zhilei Shu, Zhiheng Liu, Andy Zheng, Yukun Huang, Yu Liu, and Hongyang Zhang. The matrix: Infinite-horizon world generation with real-time moving control. arXiv preprint arXiv:2412.03568, 2024.
282
+ Ruiqi Gao, Aleksander Holynski, Philipp Henzler, Arthur Brussee, Ricardo Martin-Brualla, Pratul Srinivasan, Jonathan T Barron, and Ben Poole. Cat3d: Create anything in 3d with multi-view diffusion models. arXiv preprint arXiv:2405.10314, 2024.
283
+ Yuwei Guo, Ceyuan Yang, Anyi Rao, Zhengyang Liang, Yaohui Wang, Yu Qiao, Maneesh Agrawala, Dahua Lin, and Bo Dai. Animatediff: Animate your personalized text-to-image diffusion models without specific tuning. arXiv preprint arXiv:2307.04725, 2023.
284
+ David Ha and Jürgen Schmidhuber. Recurrent world models facilitate policy evolution. Advances in neural information processing systems, 31, 2018a.
285
+ David Ha and Jürgen Schmidhuber. World models. arXiv preprint arXiv:1803.10122, 2018b.
286
+ Danijar Hafner, Timothy Lillicrap, Jimmy Ba, and Mohammad Norouzi. Dream to control: Learning behaviors by latent imagination. arXiv preprint arXiv:1912.01603, 2019.
287
+ Danijar Hafner, Timothy Lillicrap, Mohammad Norouzi, and Jimmy Ba. Mastering atari with discrete world models. arXiv preprint arXiv:2010.02193, 2020.
288
+ Hao He, Yinghao Xu, Yuwei Guo, Gordon Wetzstein, Bo Dai, Hongsheng Li, and Ceyuan Yang. Cameractrl: Enabling camera control for text-to-video generation. arXiv preprint arXiv:2404.02101, 2024.
289
+ Roberto Henschel, Levon Khachatryan, Daniil Hayrapetyan, Hayk Poghosyan, Vahram Tadevosyan, Zhangyang Wang, Shant Navasardyan, and Humphrey Shi. Streamingt2v: Consistent, dynamic, and extendable long video generation from text. arXiv preprint arXiv:2403.14773, 2024.
290
+ Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017.
291
+ Yining Hong, Beide Liu, Maxine Wu, Yuanhao Zhai, Kai-Wei Chang, Linjie Li, Kevin Lin, Chung-Ching Lin, Jianfeng Wang, Zhengyuan Yang, Ying Nian Wu, and Lijuan Wang. Slowfast-vgen: Slow-fast learning for action-driven long video generation. arXiv preprint arXiv:2410.23277, 2024.
292
+ Anthony Hu, Lloyd Russell, Hudson Yeo, Zak Murez, George Fedoseev, Alex Kendall, Jamie Shotton, and Gianluca Corrado. Gaia-1: A generative world model for autonomous driving. arXiv preprint arXiv:2309.17080, 2023.
293
+
294
+ Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, Weizhu Chen, et al. Lora: Low-rank adaptation of large language models. *ICLR*, 1(2):3, 2022.
295
+ Hanwen Jiang, Hao Tan, Peng Wang, Haian Jin, Yue Zhao, Sai Bi, Kai Zhang, Fujun Luan, Kalyan Sunkavalli, Qixing Huang, et al. Rayzer: A self-supervised large view synthesis model. arXiv preprint arXiv:2505.00702, 2025.
296
+ Yang Jin, Zhicheng Sun, Ningyuan Li, Kun Xu, Hao Jiang, Nan Zhuang, Quzhe Huang, Yang Song, Yadong Mu, and Zhouchen Lin. Pyramidal flow matching for efficient video generative modeling. arXiv preprint arXiv:2410.05954, 2024.
297
+ Jihwan Kim, Junoh Kang, Jinyoung Choi, and Bohyung Han. FIFO-diffusion: Generating infinite videos from text without training. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024.
298
+ Dan Kondratyuk, Lijun Yu, Xiuye Gu, José Lezama, Jonathan Huang, Grant Schindler, Rachel Hornung, Vighnesh Birodkar, Jimmy Yan, Ming-Chang Chiu, Krishna Somandepalli, Hassan Akbari, Yair Alon, Yong Cheng, Josh Dillon, Agrim Gupta, Meera Hahn, Anja Hauth, David Hendon, Alonso Martinez, David Minnen, Mikhail Sirotenko, Kihyuk Sohn, Xuan Yang, Hartwig Adam, Ming-Hsuan Yang, Irfan Essa, Huisheng Wang, David A. Ross, Bryan Seybold, and Lu Jiang. Videopoet: A large language model for zero-shot video generation, 2024.
299
+ Hanwen Liang, Junli Cao, Vidit Goel, Guocheng Qian, Sergei Korolev, Demetri Terzopoulos, Konstantinos N Plataniotis, Sergey Tulyakov, and Jian Ren. Wonderland: Navigating 3d scenes from a single image. arXiv preprint arXiv:2412.12091, 2024.
300
+ Fangfu Liu, Wenqiang Sun, Hanyang Wang, Yikai Wang, Haowen Sun, Junliang Ye, Jun Zhang, and Yueqi Duan. Reconx: Reconstruct any scene from sparse views with video diffusion model. arXiv preprint arXiv:2408.16767, 2024.
301
+ Xin Ma, Yaohui Wang, Gengyun Jia, Xinyuan Chen, Ziwei Liu, Yuan-Fang Li, Cunjian Chen, and Yu Qiao. Latte: Latent diffusion transformer for video generation. arXiv preprint arXiv:2401.03048, 2024.
302
+ OpenAI. Video generation models as world simulators. https://openai.com/research/video-generation-models-as-world-simulators, 2024.
303
+ Jack Parker-Holder, Philip Ball, Jake Bruce, Vibhavari Dasagi, Kristian Holsheimer, Christos Kaplanis, Alexandre Moufarek, Guy Scully, Jeremy Shar, Jimmy Shi, Stephen Spencer, Jessica Yung, Michael Dennis, Sultan Kenjeyev, Shangbang Long, Vlad Mnih, Harris Chan, Maxime Gazeau, Bonnie Li, Fabio Pardo, Luyu Wang, Lei Zhang, Frederic Besse, Tim Harley, Anna Mitenkova, Jane Wang, Jeff Clune, Demis Hassabis, Raia Hadsell, Adrian Bolton, Satinder Singh, and Tim Rocktäschel. Genie 2: A large-scale foundation world model. 2024.
304
+ William Peebles and Saining Xie. Scalable diffusion models with transformers. In Proceedings of the IEEE/CVF international conference on computer vision, pages 4195-4205, 2023.
305
+ Tanzila Rahman, Hsin-Ying Lee, Jian Ren, Sergey Tulyakov, Shweta Mahajan, and Leonid Sigal. Make-a-story: Visual memory conditioned consistent story generation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2493-2502, 2023.
306
+ Xuanchi Ren, Tianchang Shen, Jiahui Huang, Huan Ling, Yifan Lu, Merlin Nimier-David, Thomas Müller, Alexander Keller, Sanja Fidler, and Jun Gao. Gen3c: 3d-informed world-consistent video generation with precise camera control. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2025.
307
+ Mehdi SM Sajjadi, Henning Meyer, Etienne Pot, Urs Bergmann, Klaus Greff, Noha Radwan, Suhani Vora, Mario Lucic, Daniel Duckworth, Alexey Dosovitskiy, et al. Scene representation transformer: Geometry-free novel view synthesis through set-latent scene representations. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6229-6238, 2022.
308
+ Vincent Sitzmann, Semon Rezchikov, Bill Freeman, Josh Tenenbaum, and Fredo Durand. Light field networks: Neural scene representations with single-evaluation rendering. Advances in Neural Information Processing Systems, 34:19313-19325, 2021.
309
+ Kiwhan Song, Boyuan Chen, Max Simchowitz, Yilun Du, Russ Tedrake, and Vincent Sitzmann. History-guided video diffusion. arXiv preprint arXiv:2502.06764, 2025.
310
+
311
+ Yang Song, Jascha Sohl-Dickstein, Diederik P Kingma, Abhishek Kumar, Stefano Ermon, and Ben Poole. Score-based generative modeling through stochastic differential equations. arXiv preprint arXiv:2011.13456, 2020.
312
+ Dani Valevski, Yaniv Leviathan, Moab Arar, and Shlomi Fruchter. Diffusion models are real-time game engines. arXiv preprint arXiv:2408.14837, 2024.
313
+ Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need, 2017.
314
+ Hengyi Wang and Lourdes Agapito. 3d reconstruction with spatial memory. arXiv preprint arXiv:2408.16061, 2024.
315
+ Jiuniu Wang, Hangjie Yuan, Dayou Chen, Yingya Zhang, Xiang Wang, and Shiwei Zhang. Modelscope text-to-video technical report. arXiv preprint arXiv:2308.06571, 2023a.
316
+ Jing Wang, Fengzhuo Zhang, Xiaoli Li, Vincent YF Tan, Tianyu Pang, Chao Du, Aixin Sun, and Zhuoran Yang. Error analyses of auto-regressive video diffusion models: A unified framework. arXiv preprint arXiv:2503.10704, 2025.
317
+ Yaohui Wang, Xinyuan Chen, Xin Ma, Shangchen Zhou, Ziqi Huang, Yi Wang, Ceyuan Yang, Yinan He, Jiashuo Yu, Peiqing Yang, et al. Lavie: High-quality video generation with cascaded latent diffusion models. arXiv preprint arXiv:2309.15103, 2023b.
318
+ Zhouxia Wang, Ziyang Yuan, Xintao Wang, Yaowei Li, Tianshui Chen, Menghan Xia, Ping Luo, and Ying Shan. Motionctrl: A unified and flexible motion controller for video generation. In ACM SIGGRAPH 2024 Conference Papers, pages 1-11, 2024.
319
+ Sibo Wu, Congrong Xu, Binbin Huang, Andreas Geiger, and Anpei Chen. Genfusion: Closing the loop between reconstruction and generation via videos. arXiv preprint arXiv:2503.21219, 2025a.
320
+ Tong Wu, Zhihao Fan, Xiao Liu, Yeyun Gong, Yelong Shen, Jian Jiao, Hai-Tao Zheng, Juntao Li, Zhongyu Wei, Jian Guo, Nan Duan, and Weizhu Chen. Ar-diffusion: Auto-regressive diffusion model for text generation, 2023.
321
+ Xindi Wu, Uriel Singer, Zhaojiang Lin, Andrea Madotto, Xide Xia, Yifan Xu, Paul Crook, Xin Luna Dong, and Seungwhan Moon. Corgi: Cached memory guided video generation. In 2025 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV), pages 4585-4594. IEEE, 2025b.
322
+ Zeqi Xiao, Wenqi Ouyang, Yifan Zhou, Shuai Yang, Lei Yang, Jianlou Si, and Xingang Pan. Trajectory attention for fine-grained video motion control. arXiv preprint arXiv:2411.19324, 2024.
323
+ Jingjing Xu, Xu Sun, Zhiyuan Zhang, Guangxiang Zhao, and Junyang Lin. Understanding and improving layer normalization. Advances in neural information processing systems, 32, 2019.
324
+ Mengjiao Yang, Yilun Du, Kamyar Ghasemipour, Jonathan Tompson, Dale Schuurmans, and Pieter Abbeel. Learning interactive real-world simulators. arXiv preprint arXiv:2310.06114, 1(2):6, 2023.
325
+ Tianwei Yin, Qiang Zhang, Richard Zhang, William T Freeman, Fredo Durand, Eli Shechtman, and Xun Huang. From slow bidirectional to fast causal video generators. arXiv preprint arXiv:2412.07772, 2024.
326
+ Hong-Xing Yu, Haoyi Duan, Charles Herrmann, William T Freeman, and Jiajun Wu. Wonderworld: Interactive 3d scene generation from a single image. arXiv preprint arXiv:2406.09394, 2024a.
327
+ Hong-Xing Yu, Haoyi Duan, Junhwa Hur, Kyle Sargent, Michael Rubinstein, William T Freeman, Forrester Cole, Deqing Sun, Noah Snavely, Jiajun Wu, et al. Wonderjourney: Going from anywhere to everywhere. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6658-6667, 2024b.
328
+ Jiwen Yu, Yiran Qin, Haoxuan Che, Quande Liu, Xintao Wang, Pengfei Wan, Di Zhang, Kun Gai, Hao Chen, and Xihui Liu. A survey of interactive generative video. arXiv preprint arXiv:2504.21853, 2025a.
329
+ Jiwen Yu, Yiran Qin, Haoxuan Che, Quande Liu, Xintao Wang, Pengfei Wan, Di Zhang, and Xihui Liu. Position: Interactive generative video as next-generation game engine. arXiv preprint arXiv:2503.17359, 2025b.
330
+ Jiwen Yu, Yiran Qin, Xintao Wang, Pengfei Wan, Di Zhang, and Xihui Liu. Gamefactory: Creating new games with generative interactive videos. arXiv preprint arXiv:2501.08325, 2025c.
331
+
332
+ Wangbo Yu, Jinbo Xing, Li Yuan, Wenbo Hu, Xiaoyu Li, Zhipeng Huang, Xiangjun Gao, Tien-Tsin Wong, Ying Shan, and Yonghong Tian. Viewcrafter: Taming video diffusion models for high-fidelity novel view synthesis. arXiv preprint arXiv:2409.02048, 2024c.
333
+ Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 586-595, 2018.
334
+ Longtao Zheng, Yifan Zhang, Hanzhong Guo, Jiachun Pan, Zhenxiong Tan, Jiahao Lu, Chuanxin Tang, Bo An, and Shuicheng Yan. Memo: Memory-guided diffusion for expressive talking video generation. arXiv preprint arXiv:2412.04448, 2024a.
335
+ Zangwei Zheng, Xiangyu Peng, Tianji Yang, Chenhui Shen, Shenggui Li, Hongxin Liu, Yukun Zhou, Tianyi Li, and Yang You. Open-sora: Democratizing efficient video production for all, 2024b.
336
+ Tinghui Zhou, Richard Tucker, John Flynn, Graham Fyffe, and Noah Snavely. Stereo magnification: Learning view synthesis using multiplane images. In SIGGRAPH, 2018.
337
+
338
+ # 7 Supplementary Materials
339
+
340
+ # 7.1 Details and Experiments
341
+
342
+ **Embedding designs.** We present the detailed designs of embeddings for timesteps, actions, poses, and timestamps in Figure 10, where $F, C, H, W, A$ denote the frame number, channel count, height, width, and action count, respectively.
343
+
344
+ The input pose is parameterized by position $(x,z,y)$ and orientation (pitch $\theta$ and yaw $\phi$ ). The extrinsic matrix $\mathbf{T} \in \mathbb{R}^{4 \times 4}$ is formed as:
345
+
346
+ $$
347
+ \mathbf{T} = \begin{bmatrix} \mathbf{R}_c & \mathbf{c} \\ \mathbf{0}^T & 1 \end{bmatrix}, \tag{7}
348
+ $$
349
+
350
+ where $\mathbf{c} = (x,z,y)^T$ and $\mathbf{R}_c = \mathbf{R}_y(\phi)\mathbf{R}_x(\theta)$.
351
+
352
+ To encode camera pose, we adopt the Plücker embedding. Given a pixel $(u,v)$ with normalized camera coordinates:
353
+
354
+ $$
355
+ \boldsymbol {\pi} _ {u v} = \mathbf {K} ^ {- 1} [ u, v, 1 ] ^ {T}, \tag {8}
356
+ $$
357
+
358
+ its world direction is:
359
+
360
+ $$
361
+ \mathbf {d} _ {u v} = \mathbf {R} _ {c} \boldsymbol {\pi} _ {u v} + \mathbf {c}. \tag {9}
362
+ $$
363
+
364
+ The Plücker embedding is:
365
+
366
+ $$
367
+ \mathbf {l} _ {u v} = \left(\mathbf {c} \times \mathbf {d} _ {u v}, \mathbf {d} _ {u v}\right) \in \mathbb {R} ^ {6}. \tag {10}
368
+ $$
369
+
370
+ For a frame of size $H \times W$ , the full embedding is:
371
+
372
+ $$
373
+ \mathbf {L} _ {i} \in \mathbb {R} ^ {H \times W \times 6}. \tag {11}
374
+ $$
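+ Putting Eqs. (8)-(11) together, a NumPy sketch of the per-pixel Plücker map might read as follows. The intrinsics are illustrative, and normalizing the direction is a common convention that the derivation above does not state explicitly.
+
+ ```python
+ import numpy as np
+
+ def plucker_embedding(K, R_c, c, H, W):
+     u, v = np.meshgrid(np.arange(W), np.arange(H))              # pixel grid
+     pix = np.stack([u, v, np.ones_like(u)], -1).reshape(-1, 3).T
+     rays = np.linalg.inv(K) @ pix                               # Eq. (8)
+     d = (R_c @ rays).T + c                                      # Eq. (9)
+     d = d / np.linalg.norm(d, axis=-1, keepdims=True)           # (assumed)
+     moment = np.cross(c, d)                                     # c x d
+     return np.concatenate([moment, d], -1).reshape(H, W, 6)     # Eqs. (10)-(11)
+
+ K = np.array([[300., 0., 320.], [0., 300., 180.], [0., 0., 1.]])
+ L = plucker_embedding(K, np.eye(3), np.array([0., 64., 0.]), H=18, W=32)
+ print(L.shape)  # (18, 32, 6)
+ ```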
375
+
376
+ Memory context length. We evaluate how different memory context lengths affect performance in the Minecraft benchmark. Table 7 shows that increasing the context length from 1 to 8 steadily boosts PSNR, lowers LPIPS, and reduces rFID. However, extending the length to 16 deteriorates results, indicating that excessive memory frames may introduce noise or reduce retrieval precision. A context length of 8 provides the best trade-off, yielding the highest PSNR and the lowest LPIPS and rFID.
377
+
378
+ Pose prediction. For interactive play, ground truth poses are not accessible. To address this, we designed a lightweight pose prediction module that estimates the pose of the next frame. As illustrated in Figure 9, the predictor takes the previous image, the previous pose, and the upcoming action as inputs and outputs the predicted next pose. This module enables the system to operate using actions alone, eliminating the need for ground truth poses during inference. In Table 8, we compare the performance of using predicted poses versus ground truth poses. While using ground truth poses yields better results across all metrics, the performance drop with predicted poses is acceptable. This is because our method does not rely heavily on precise pose predictions – new frames are generated based on these predictions – and the ground truth poses generated by the Minecraft simulator also contain a certain degree of randomness.
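+ An illustrative stand-in for such a predictor is sketched below; the MLP fusion and the delta-pose parameterization are our assumptions, as the actual architecture is the one shown in Figure 9.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class PosePredictor(nn.Module):
+     def __init__(self, feat_dim=512, action_dim=25, pose_dim=5, hidden=256):
+         super().__init__()
+         self.net = nn.Sequential(
+             nn.Linear(feat_dim + pose_dim + action_dim, hidden), nn.SiLU(),
+             nn.Linear(hidden, hidden), nn.SiLU(),
+             nn.Linear(hidden, pose_dim))
+
+     def forward(self, prev_feat, prev_pose, action):
+         # Regress a pose delta and add it to the previous pose (an assumption
+         # that keeps the regression target small; the paper does not say).
+         delta = self.net(torch.cat([prev_feat, prev_pose, action], dim=-1))
+         return prev_pose + delta
+
+ pred = PosePredictor()
+ next_pose = pred(torch.randn(2, 512), torch.randn(2, 5), torch.randn(2, 25))
+ print(next_pose.shape)  # torch.Size([2, 5])
+ ```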
379
+
380
+ Table 7: Ablation on length of memory context length
381
+
382
+ <table><tr><td>Length</td><td>PSNR ↑</td><td>LPIPS ↓</td><td>rFID ↓</td></tr><tr><td>1</td><td>16.18</td><td>0.1899</td><td>20.47</td></tr><tr><td>4</td><td>18.68</td><td>0.1568</td><td>16.54</td></tr><tr><td>8</td><td>19.32</td><td>0.1429</td><td>15.37</td></tr><tr><td>16</td><td>17.14</td><td>0.1687</td><td>18.33</td></tr></table>
383
+
384
+ Table 8: Comparison between using predicted poses and ground truth poses
385
+
386
+ <table><tr><td>Pose Type</td><td>PSNR ↑</td><td>LPIPS ↓</td><td>rFID ↓</td></tr><tr><td>Ground truth</td><td>19.32</td><td>0.1429</td><td>15.37</td></tr><tr><td>Predicted</td><td>17.13</td><td>0.1786</td><td>20.36</td></tr></table>
387
+
388
+ ![](images/43ccd54139ef24f20c1aefc610fed777c3dd8ace9ca8755f9903a916ced4749f.jpg)
389
+ Figure 9: Structure of pose predictor.
390
+
391
+ ![](images/33a6d94605ecfe71ff82a31473937beb82fd235cd731a85bcb700378ff2ddd3a.jpg)
392
+ (a) Timestep embedding
393
+ (b) Action embedding
394
+
395
+ ![](images/87369e340a68364c85e6e43c777c9d3474916f9a9513dff25ee3cb2472787016.jpg)
396
+ (c) Pose embedding
397
+ (d) Timestamp embedding
398
+ Figure 10: Illustration of different embeddings.
399
+
400
+ # 7.2 Memory Usage and Scalability Analysis
401
+
402
+ To assess the scalability and practical feasibility of our method, we provide detailed quantitative analysis covering memory usage, generation duration, training cost, and inference efficiency.
403
+
404
+ Memory Usage of the Memory Bank. The memory bank is lightweight: storing 600 frames of visual memory tokens with shape [600, 16, 18, 32] in float32 takes approximately 21 MB.
405
+
406
+ Retrieval Latency. Below we report the average retrieval time (for 8 memory frames) as a function of memory bank size:
407
+
408
+ ![](images/54085ce17ba039df16122eec09ce0693f531d932564155b51c6ddc1fd60662ac.jpg)
409
+ Figure 11: Two-view FOV overlapping visualization.
410
+
411
+ <table><tr><td>Number of Memory Candidates</td><td>Retrieval Time (s)</td></tr><tr><td>10</td><td>0.04</td></tr><tr><td>100</td><td>0.06</td></tr><tr><td>600</td><td>0.10</td></tr><tr><td>1000</td><td>0.16</td></tr></table>
412
+
413
+ The generation cost (20 denoising steps) is $\sim 0.9$ s per frame. Retrieval time accounts for only $10 - 20\%$ of total inference time even with 1000 candidates.
414
+
415
+ Comparison with Baseline. We compare our method with a baseline model (without memory) under consistent settings: 8 context frames, 8 memory frames, 20 denoising steps, and no acceleration techniques, on a single H200 GPU.
416
+
417
+ <table><tr><td rowspan="2">Method</td><td colspan="2">Training</td><td colspan="2">Inference</td></tr><tr><td>Mem. Usage</td><td>Speed (it/s)</td><td>Mem. Usage</td><td>Speed (it/s)</td></tr><tr><td>w/o Memory</td><td>33 GB</td><td>3.19</td><td>9 GB</td><td>1.03</td></tr><tr><td>with Memory</td><td>51 GB</td><td>1.76</td><td>11 GB</td><td>0.89</td></tr></table>
418
+
419
+ Adding memory introduces moderate training overhead. During inference, the impact is minimal: only a small increase in memory usage and a slight decrease in speed.
420
+
421
+ Inference Optimization. With modern acceleration techniques (e.g., timestep distillation, early exit, sparse attention), inference speed can reach $\sim 10$ FPS, making our method practical for deployment.
422
+
423
+ FOV Overlapping Computation. We present the details of the Monte Carlo-based FOV overlap computation in Alg. 2, and the two-view overlap sampling in Figure 11.
424
+
425
+ # 7.3 Visualizations
426
+
427
+ In this section, we provide more visualization of different aspects to facilitate understanding.
428
+
429
+ Minecraft Training Examples. We present a diverse set of training environments that include various terrain types, action spaces, and weather conditions, as shown in Figure 12. These variations help enhance the model's adaptability and robustness in different scenarios.
430
+
431
+ Trajectory Examples in Minecraft. Figure 13 illustrates trajectory examples in the x-z space over 100 frames. The agent's movement exhibits a random action pattern, ensuring diverse learning objectives and a broad range of sampled experiences.
432
+
433
+ Pose Distribution. We collect and visualize 800 samples within a sampling range of 8 m, as shown in Figure 14. The random pattern ensures a diverse distribution of sampled poses in space, which is beneficial for learning the reasoning process within the memory blocks.
434
+
435
+ # Algorithm 2: Monte Carlo-based FOV Overlap Computation (notation disjoint from the main text)
436
+
437
+ # Input:
438
+
439
+ - $Q_{\mathrm{ref}} \in \mathbb{R}^{F \times 5}$ : reference poses from memory bank (x,y,z,pitch,yaw), $F$ is the number of stored poses.
440
+ - $Q_{\mathrm{tgt}} \in \mathbb{R}^5$ : pose of the current (target) frame.
441
+ - $M$ : number of 3D sample points (default 10,000).
442
+ - $R$ : radius of the sampling sphere (default $30\mathrm{m}$ ).
443
+ - $\phi_h$ , $\phi_v$ : horizontal/vertical field-of-view angles (in degrees).
444
+
445
+ # Output:
446
+
447
+ - $\rho \in \mathbb{R}^F$ : overlapping ratios between each reference pose and the target pose.
448
+
449
+ # begin
450
+
451
+ # $\Delta$ Step 1: Random Sampling in a Sphere
452
+
453
+ Generate $M$ points $\mathbf{q}$ uniformly in a 3D sphere of radius $R$ :
454
+
455
+ $$
456
+ \mathbf{q} \leftarrow \mathrm{PointSampling}(M, R).
457
+ $$
458
+
459
+ # $\Delta$ Step 2: Translate Points to $Q_{\mathrm{tgt}}$ as Center
460
+
461
+ Let $Q_{\mathrm{tgt}}(x,y,z)$ be the 3D coordinates of the current camera pose. Shift all sampled points:
462
+
463
+ $$
464
+ \mathbf {q} \leftarrow \mathbf {q} + Q _ {\mathrm {t g t}} (x, y, z).
465
+ $$
466
+
467
+ # $\Delta$ Step 3: FOV Checks
468
+
469
+ Compute a boolean matrix $\mathbf{v}_{\mathrm{ref}} \in \{0,1\}^{F \times M}$ , where each entry indicates if a point in $\mathbf{q}$ lies in the FOV of a reference pose:
470
+
471
+ $$
472
+ \mathbf{v}_{\mathrm{ref}} \leftarrow \mathrm{IsInsideFOV}\big(\mathbf{q}, Q_{\mathrm{ref}}, \phi_h, \phi_v\big).
473
+ $$
474
+
475
+ Similarly, compute a boolean vector $\mathbf{v}_{\mathrm{tgt}} \in \{0,1\}^{M}$ for the target pose:
476
+
477
+ $$
478
+ \mathbf{v}_{\mathrm{tgt}} \leftarrow \mathrm{IsInsideFOV}\big(\mathbf{q}, Q_{\mathrm{tgt}}, \phi_h, \phi_v\big).
479
+ $$
480
+
481
+ # $\Delta$ Step 4: Overlapping Ratio Computation
482
+
483
+ Obtain the final overlapping ratio vector $\pmb {\rho}\in \mathbb{R}^{F}$ by combining $\mathbf{v}_{\mathrm{ref}}$ and $\mathbf{v}_{\mathrm{tgt}}$ . For instance,
484
+
485
+ $$
486
+ \boldsymbol {\rho} [ i ] = \frac {1}{M} \sum_ {j = 1} ^ {M} \left(\mathbf {v} _ {\mathrm {r e f}} [ i, j ] \cdot \mathbf {v} _ {\mathrm {t g t}} [ j ]\right),
487
+ $$
488
+
489
+ to measure the fraction of sampled points that are visible in both the $i$ -th reference pose and the target pose.
490
+
491
+ Return $\rho$
492
+
493
+ # end
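+ A compact NumPy rendering of Algorithm 2 is sketched below. The FOV angles are placeholders rather than values from the paper, and the yaw/pitch conventions are our assumptions.
+
+ ```python
+ import numpy as np
+
+ def fov_overlap(ref_poses, tgt_pose, M=10_000, R=30.0, fov_h=105.0, fov_v=75.0):
+     # Poses are (x, y, z, pitch, yaw) with angles in degrees.
+     rng = np.random.default_rng(0)
+     # Steps 1-2: sample M points uniformly in a sphere of radius R around the target.
+     pts = rng.normal(size=(M, 3))
+     pts *= R * rng.random((M, 1)) ** (1 / 3) / np.linalg.norm(pts, axis=1, keepdims=True)
+     pts += tgt_pose[:3]
+
+     def inside(pose):
+         # Step 3: does each point fall inside this camera's angular FOV?
+         d = pts - pose[:3]
+         yaw = np.degrees(np.arctan2(d[:, 0], d[:, 2]))
+         pitch = np.degrees(np.arctan2(d[:, 1], np.linalg.norm(d[:, [0, 2]], axis=1)))
+         dyaw = (yaw - pose[4] + 180.0) % 360.0 - 180.0
+         return (np.abs(dyaw) <= fov_h / 2) & (np.abs(pitch - pose[3]) <= fov_v / 2)
+
+     v_tgt = inside(tgt_pose)
+     # Step 4: fraction of points visible from both cameras.
+     return np.array([(inside(p) & v_tgt).mean() for p in ref_poses])
+
+ refs = np.array([[0., 64., 0., 0., 0.], [0., 64., 0., 0., 180.]])
+ print(fov_overlap(refs, np.array([0., 64., 0., 0., 10.])))  # high vs ~0 overlap
+ ```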
494
+
495
+ More Qualitative Results. For additional qualitative examples, we recommend consulting the attached web page, which offers enhanced visualizations.
496
+
497
+ ![](images/7260ec179d4330f3a596be59f60ebb624909fec6a3bdbace805bf1f660641908.jpg)
498
+
499
+ ![](images/9e7db2f2124ac20efa2edca5ec9e00b0b6d99dcd36451b763e6533c66aad42f2.jpg)
500
+
501
+ ![](images/9e73712af021410a3cc6091b7f271192d0761ef5e0bbac919792a0ed2bc5e942.jpg)
502
+
503
+ ![](images/b4112e672e093bd908e0d8b47f478e0720181eb791bfc4170bf71a494e2cad04.jpg)
504
+
505
+ ![](images/82b66f7e4a39cf80e04885bbb128c8ee9424241e8ae642b1dd992428acd71103.jpg)
506
+
507
+ ![](images/b2fb6711c620d4ecd7ae763026c5f9b183d24737649a7f6c09253b2753625d9a.jpg)
508
+
509
+ ![](images/9208587645d921b10c68d516401d5b030a2fcaee02b18f07e376e379a667fee0.jpg)
510
+
511
+ ![](images/1554543ce78d3b089e0c1e1756fb0bc850201acde2f34528a5b9ac40bfc5306d.jpg)
512
+
513
+ ![](images/13ddfe8d2bb188aff88dfc8e860cf494addddb6a22e5b2f076a89b7033c0e483.jpg)
514
+
515
+ ![](images/e45872d86a46e6fd48f3db8a029253a71ef4ec766b935af977cd99b1b0592ac2.jpg)
516
+
517
+ ![](images/6570f481c382fe021ed13888148b604c0b603bb4d891f79cec1de0d0a488be05.jpg)
518
+
519
+ ![](images/c559b904ce54ed8b960ef64d960db2b2626c58caca3d6c512ff9ee87f3d438a7.jpg)
520
+
521
+ ![](images/e7a0d754531749230bbd4a9a05832bffb34d64393251380876fd18cad297f056.jpg)
522
+
523
+ ![](images/e21770768b7f5933c8a6579cb05bb882d2db0cd3a528b963e76b28e85fc3f88e.jpg)
524
+
525
+ ![](images/3dae4109068741e9358f878bbc3dc4031d853cea0cc5c6c8a1494fcebea62548.jpg)
526
+
527
+ ![](images/56325b6b2dbc279e3cdfcac989057aba84971aad1af70291e761a6af0e60f513.jpg)
528
+
529
+ ![](images/d885c7d407ce44ff8633d17891065c18fb770ac9a2dbdb8904c89b0abb280874.jpg)
530
+ Figure 12: Training Examples. Our training environments encompass diverse terrains, action spaces, and weather conditions, providing a comprehensive setting for learning.
531
+
532
+ ![](images/9759da0bae6bff88da79c18c7517e84bdbc403c95500c5810822ec675e10eb60.jpg)
533
+
534
+ ![](images/ef99e53069789de1c72b40dec9daf83482c8b1d58b900b04b7b673a7536cdbeb.jpg)
535
+
536
+ ![](images/b7b3bfbd5ce5428451351fb16ec099c6e94af83843c985d33a1808711993472e.jpg)
537
+
538
+ ![](images/f69aaeecb41bae0b361ddaf6325948bcdd310d6207adc846da6a8605dba8f003.jpg)
539
+ Figure 13: Visualization of Trajectory Examples in the X-Z Space. The axis scales represent distances within the Minecraft environment.
540
+
541
+ ![](images/57ab4223a27e3897885abbdfe8c890272849a9e1349679df129a8d7cc0014606.jpg)
542
+ Figure 14: Visualization of Relative Pose Distribution for Training in X-Z Space. Red dots indicate positions, while yellow arrows represent directions.
data/2025/2504_12xxx/2504.12369/images/051a668f07afe27adca49a42fba69f683663d42d43addef7ff5276c78d55d7e8.jpg ADDED

Git LFS Details

  • SHA256: 1a0c2c12db978c7fe6259daf1fb06fb0b7fe21d55f9197cd26ac6f3d11d1ff60
  • Pointer size: 129 Bytes
  • Size of remote file: 8.86 kB
data/2025/2504_12xxx/2504.12369/images/0eaba4c9b0918d5cb17309e5aac57ca03240e9a5a335a15ff79f2279e7e8be2c.jpg ADDED

Git LFS Details

  • SHA256: 50e567e8cacd4fbd30cb79a6714d48bb410c685b85e6a75b8d1cedc387228763
  • Pointer size: 130 Bytes
  • Size of remote file: 31.8 kB
data/2025/2504_12xxx/2504.12369/images/128fabee19e5abaad9587da0de6cd970dc5cc8944b3b1196aad6e7166dc04fe7.jpg ADDED

Git LFS Details

  • SHA256: 5f9d4125cf80216d443167862bbade98c02d37a71d89ca93e697a24e5c281f2d
  • Pointer size: 129 Bytes
  • Size of remote file: 6.21 kB
data/2025/2504_12xxx/2504.12369/images/137374daba42cc29ee3827d4a155e71a28fcefdc5271bea03e2c5223a0b3ef72.jpg ADDED

Git LFS Details

  • SHA256: 3ae4a1ebe05ce4a35864f10b3ba43eaed93a41f8cf834f0aafefd1de4b1fa7d2
  • Pointer size: 129 Bytes
  • Size of remote file: 3.04 kB
data/2025/2504_12xxx/2504.12369/images/13ddfe8d2bb188aff88dfc8e860cf494addddb6a22e5b2f076a89b7033c0e483.jpg ADDED

Git LFS Details

  • SHA256: f33cbd87baa12cbaf0c508a823fb9fc3da9e436438bbfe06e013403610ccb4a6
  • Pointer size: 129 Bytes
  • Size of remote file: 8.7 kB
data/2025/2504_12xxx/2504.12369/images/1554543ce78d3b089e0c1e1756fb0bc850201acde2f34528a5b9ac40bfc5306d.jpg ADDED

Git LFS Details

  • SHA256: 7693c1438099cf1195537b933db6b138f6a4323bffdcf7228d8b6bba9aad8f7e
  • Pointer size: 129 Bytes
  • Size of remote file: 7.7 kB
data/2025/2504_12xxx/2504.12369/images/17f283519eda9a5331b73da78c30e9f49bf3b0344d40c5194698866ef6a8043e.jpg ADDED

Git LFS Details

  • SHA256: a897aa5a34c11dc1490a16b46518663829277a81239528203f63dc3769a9cc61
  • Pointer size: 130 Bytes
  • Size of remote file: 29.6 kB
data/2025/2504_12xxx/2504.12369/images/2cedaf771a3bc9c255e1950c8a7a8826919dba3fb6d4f8b211d37dc47c3d69f4.jpg ADDED

Git LFS Details

  • SHA256: 3dc276530879c82f290c3919bc5ba3255f197267e1c5e556096e8ecf93e7af15
  • Pointer size: 131 Bytes
  • Size of remote file: 101 kB
data/2025/2504_12xxx/2504.12369/images/30d17f42dbaa4ca8d8b12815ea604146efbf347f4ae14367a292fdb24ea2af4b.jpg ADDED

Git LFS Details

  • SHA256: ccc0636b1421424a750e8e51ca7d7f6fb51a2d1b4cd9f625129c4e26cb8d7eeb
  • Pointer size: 130 Bytes
  • Size of remote file: 14.5 kB
data/2025/2504_12xxx/2504.12369/images/311186cc1d836831fefdca576808fd26c822e109d2ddfda303da4ad7c48f137b.jpg ADDED

Git LFS Details

  • SHA256: 9137de795e46f0723457386843821292b594d0d948f8783697b531d31f7a8ba9
  • Pointer size: 130 Bytes
  • Size of remote file: 16.9 kB
data/2025/2504_12xxx/2504.12369/images/33a6d94605ecfe71ff82a31473937beb82fd235cd731a85bcb700378ff2ddd3a.jpg ADDED

Git LFS Details

  • SHA256: 6d4beaca4b34ba16af96423f8a2f335a70d3fd5de4fb6c20ab4b1b2ef17c79e0
  • Pointer size: 130 Bytes
  • Size of remote file: 19 kB
data/2025/2504_12xxx/2504.12369/images/3549a7c6280ea8f465ef040bf31d25e38f052420b7d8d952435c0d665c12cb43.jpg ADDED

Git LFS Details

  • SHA256: a7d0c2bb16c291f89d977f7c5d7cd5a9e554700b2ec538087e4eda9550104c5c
  • Pointer size: 129 Bytes
  • Size of remote file: 7.63 kB
data/2025/2504_12xxx/2504.12369/images/3dae4109068741e9358f878bbc3dc4031d853cea0cc5c6c8a1494fcebea62548.jpg ADDED

Git LFS Details

  • SHA256: ec51c7196a374f0dd3e6300c70c0e5c2ec4f1a263d0969bd1de449a2959e2b64
  • Pointer size: 129 Bytes
  • Size of remote file: 6.82 kB
data/2025/2504_12xxx/2504.12369/images/43ccd54139ef24f20c1aefc610fed777c3dd8ace9ca8755f9903a916ced4749f.jpg ADDED

Git LFS Details

  • SHA256: 0e84bc4662f46f6e6117d8f51ebad162364aa9b24ea397a5d53d11760dbf3f82
  • Pointer size: 130 Bytes
  • Size of remote file: 15.1 kB
data/2025/2504_12xxx/2504.12369/images/4cd14cfef9f9a857f1658afa482563f9f9aae5ff0fde6e994ca27c3ce2daf2f1.jpg ADDED

Git LFS Details

  • SHA256: 03d6a781be4120784a21a88111427afef66deba3a5d8a2eda4ea38c8d4cb6b5f
  • Pointer size: 129 Bytes
  • Size of remote file: 5.67 kB
data/2025/2504_12xxx/2504.12369/images/5019318d9bedd2a41eeab53f93ae9a8dc4075660cc387a33e5ac9e4fd4af8336.jpg ADDED

Git LFS Details

  • SHA256: 2e5ed5daadc5f4236edc97a7e9b9e66b482a926419275b922321b21addea5863
  • Pointer size: 129 Bytes
  • Size of remote file: 3.02 kB
data/2025/2504_12xxx/2504.12369/images/54085ce17ba039df16122eec09ce0693f531d932564155b51c6ddc1fd60662ac.jpg ADDED

Git LFS Details

  • SHA256: 8c6c8b332507437475cbf3728510516ab5a50ab2054ffe0e0085d842daedd8bf
  • Pointer size: 130 Bytes
  • Size of remote file: 38.1 kB
data/2025/2504_12xxx/2504.12369/images/55568ec2d9052a84d2f43f5fd983fa65c403765847b1eb321dd4a5371fac8f43.jpg ADDED

Git LFS Details

  • SHA256: fa0c3b9bb417311acd5c45d37778f5ab4eb154ed8b951400a0eefd90f553774c
  • Pointer size: 130 Bytes
  • Size of remote file: 26.6 kB
data/2025/2504_12xxx/2504.12369/images/56325b6b2dbc279e3cdfcac989057aba84971aad1af70291e761a6af0e60f513.jpg ADDED

Git LFS Details

  • SHA256: 09282f2a556fa76c518e3ccb1767af81b5d9881432e8f7daa9fd04b145e212c5
  • Pointer size: 129 Bytes
  • Size of remote file: 6.7 kB
data/2025/2504_12xxx/2504.12369/images/57ab4223a27e3897885abbdfe8c890272849a9e1349679df129a8d7cc0014606.jpg ADDED

Git LFS Details

  • SHA256: ab25509ad271d317dff35776a5b4903af8d69690659790a05756d13843041f4c
  • Pointer size: 130 Bytes
  • Size of remote file: 84.7 kB
data/2025/2504_12xxx/2504.12369/images/5be7f40ca0170cf0931349adf148b2063a4d2a61d782d7c6200d3dc6a412a8d7.jpg ADDED

Git LFS Details

  • SHA256: 5aef0cf9e74a006f4c2b02479cd4b683cbc49aac31d6b709d049f4ed0ca8c942
  • Pointer size: 130 Bytes
  • Size of remote file: 19.3 kB
data/2025/2504_12xxx/2504.12369/images/6570f481c382fe021ed13888148b604c0b603bb4d891f79cec1de0d0a488be05.jpg ADDED

Git LFS Details

  • SHA256: 9b37bbcf881b1a690fc4cc69c699aa4d2fbfdec3324a4f2066c9b14729909e77
  • Pointer size: 129 Bytes
  • Size of remote file: 7.64 kB
data/2025/2504_12xxx/2504.12369/images/6d83deba8dc1fb557d72f7e206ad8763aaf1db95ce734260003174404ea4cd47.jpg ADDED

Git LFS Details

  • SHA256: 35318cb395cea1ec0b7f5d8d9c5f77993cd58c9a87b3271472f565e10f4299c5
  • Pointer size: 130 Bytes
  • Size of remote file: 18.8 kB
data/2025/2504_12xxx/2504.12369/images/710060b8d65f17b785353128df68a37c04d5ccfe3c20236be522f6805024dbe3.jpg ADDED

Git LFS Details

  • SHA256: cec7f0cc68b6a1ea3ed79b8606ec2375c3e600fd0dfcdb7e4b07ae7a9f056b7a
  • Pointer size: 129 Bytes
  • Size of remote file: 5.51 kB
data/2025/2504_12xxx/2504.12369/images/7260ec179d4330f3a596be59f60ebb624909fec6a3bdbace805bf1f660641908.jpg ADDED

Git LFS Details

  • SHA256: f7ba67d86d7c9c2af55b110dd0cc63b71e7faa2b26bee6fda1abf8517aa41fb5
  • Pointer size: 129 Bytes
  • Size of remote file: 6.14 kB
data/2025/2504_12xxx/2504.12369/images/767f4bcd7f8825e3ca7df0605b4a362e6098d0785328a13ad2ac10801d30be44.jpg ADDED

Git LFS Details

  • SHA256: fd7a7b36af64f8faff66ba4e75949109059ecc80b5490eaff3db19c6f2c92a8b
  • Pointer size: 131 Bytes
  • Size of remote file: 152 kB
data/2025/2504_12xxx/2504.12369/images/7e60e4ac83e5a8851f5a56840f7cc4b18e041198f40aa3ce69ed935d26ae78dc.jpg ADDED

Git LFS Details

  • SHA256: daa5dd967c555c0f9798a9637b657cb7486070c1ed776c3beb9cdc7dc1bd68ed
  • Pointer size: 129 Bytes
  • Size of remote file: 3.44 kB
data/2025/2504_12xxx/2504.12369/images/80be7710b7aac22f2f910ef78e2582ba42b65a4d9eacce9bebbb6f7e2b7ed9dd.jpg ADDED

Git LFS Details

  • SHA256: 5c36da15a87e3b0a5ce7ede66fbdabd1cca5c81ccca8b568bb3dd4a4ee0ac8aa
  • Pointer size: 130 Bytes
  • Size of remote file: 33 kB
data/2025/2504_12xxx/2504.12369/images/82b66f7e4a39cf80e04885bbb128c8ee9424241e8ae642b1dd992428acd71103.jpg ADDED

Git LFS Details

  • SHA256: 64f846f47614c7dcf0c08f455255a01c8349d592d3b25b697eb7880e2475883c
  • Pointer size: 129 Bytes
  • Size of remote file: 6.91 kB
data/2025/2504_12xxx/2504.12369/images/8316f3b67686e102155ac6518c8ee82688e923ff0be476e4fe46781c9090d2df.jpg ADDED

Git LFS Details

  • SHA256: c72ef81a53de86528c9f2ce01feec458dcfeea52ec8836e5e99594357f4a429d
  • Pointer size: 129 Bytes
  • Size of remote file: 6.76 kB
data/2025/2504_12xxx/2504.12369/images/87369e340a68364c85e6e43c777c9d3474916f9a9513dff25ee3cb2472787016.jpg ADDED

Git LFS Details

  • SHA256: daaa04c012557f94a745174bdc1a2a35090b80cbc357686cb2e990b2ec86b140
  • Pointer size: 130 Bytes
  • Size of remote file: 23.3 kB
data/2025/2504_12xxx/2504.12369/images/9208587645d921b10c68d516401d5b030a2fcaee02b18f07e376e379a667fee0.jpg ADDED

Git LFS Details

  • SHA256: ef8a272891c02ec4a72d5a3460fd3fca46f9e919d89f319f1d2918d4fcf94aa3
  • Pointer size: 129 Bytes
  • Size of remote file: 8.31 kB
data/2025/2504_12xxx/2504.12369/images/9491f18c075d1aadae6c839aeb0789004bda41f81162625333e60688614b9348.jpg ADDED

Git LFS Details

  • SHA256: efd21122884a9d6aa043be1403f3e12e1833caa2f2f0a452364465d5857e1e2e
  • Pointer size: 130 Bytes
  • Size of remote file: 25 kB
data/2025/2504_12xxx/2504.12369/images/9759da0bae6bff88da79c18c7517e84bdbc403c95500c5810822ec675e10eb60.jpg ADDED

Git LFS Details

  • SHA256: 58f92b66cdaa19ed6fb7a4ebf2863f6886d0c2f40d1841f66a9365d3bd6b6da4
  • Pointer size: 129 Bytes
  • Size of remote file: 8.21 kB
data/2025/2504_12xxx/2504.12369/images/9e63e34502d19e1a9660587fe1b448ca4cbb22eb6399eea90032701c88909b36.jpg ADDED

Git LFS Details

  • SHA256: dd2e4cf000120019903bebc56bc040875a1411eca88ffe73518c06ca031c0a1c
  • Pointer size: 130 Bytes
  • Size of remote file: 21.9 kB