erata committed
Commit 24a71c4 · verified · 1 parent: f08015c

Upload folder using huggingface_hub

Files changed (40)
  1. .gitattributes +1 -0
  2. README.md +104 -3
  3. added_tokens.json +28 -0
  4. chat_template.jinja +89 -0
  5. global_step440/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt +3 -0
  6. global_step440/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt +3 -0
  7. global_step440/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt +3 -0
  8. global_step440/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt +3 -0
  9. global_step440/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt +3 -0
  10. global_step440/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt +3 -0
  11. global_step440/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt +3 -0
  12. global_step440/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt +3 -0
  13. global_step440/zero_pp_rank_0_mp_rank_00_model_states.pt +3 -0
  14. global_step440/zero_pp_rank_1_mp_rank_00_model_states.pt +3 -0
  15. global_step440/zero_pp_rank_2_mp_rank_00_model_states.pt +3 -0
  16. global_step440/zero_pp_rank_3_mp_rank_00_model_states.pt +3 -0
  17. global_step440/zero_pp_rank_4_mp_rank_00_model_states.pt +3 -0
  18. global_step440/zero_pp_rank_5_mp_rank_00_model_states.pt +3 -0
  19. global_step440/zero_pp_rank_6_mp_rank_00_model_states.pt +3 -0
  20. global_step440/zero_pp_rank_7_mp_rank_00_model_states.pt +3 -0
  21. latest +1 -0
  22. merges.txt +0 -0
  23. pytorch_model.bin +3 -0
  24. rng_state_0.pth +3 -0
  25. rng_state_1.pth +3 -0
  26. rng_state_2.pth +3 -0
  27. rng_state_3.pth +3 -0
  28. rng_state_4.pth +3 -0
  29. rng_state_5.pth +3 -0
  30. rng_state_6.pth +3 -0
  31. rng_state_7.pth +3 -0
  32. scheduler.pt +3 -0
  33. sft_qwen_var_classifier.py +725 -0
  34. special_tokens_map.json +31 -0
  35. tokenizer.json +3 -0
  36. tokenizer_config.json +239 -0
  37. trainer_state.json +1134 -0
  38. training_args.bin +3 -0
  39. vocab.json +0 -0
  40. zero_to_fp32.py +760 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,3 +1,104 @@
1
- ---
2
- license: mit
3
- ---
1
+ ---
2
+ license: apache-2.0
3
+ base_model: Qwen/Qwen3-4B
4
+ tags:
5
+ - SAT
6
+ - combinatorial-optimization
7
+ - classification
8
+ - cube-and-conquer
9
+ language:
10
+ - en
11
+ pipeline_tag: text-classification
12
+ ---
13
+
14
+ # Qwen3-4B-SAT-VarSelector
15
+
16
+ A Qwen3-4B model fine-tuned for **SAT branching variable selection** in Cube-and-Conquer (CnC) solvers.
17
+
18
+ ## Model Description
19
+
20
+ This model predicts which variable to branch/cube on next, given a SAT CNF formula state. Instead of generating text, it outputs a **classification over variable IDs** (1-500).
21
+
22
+ ### Architecture
23
+
24
+ - **Base**: `Qwen/Qwen3-4B` (causal language model)
25
+ - **Head**: LayerNorm → Linear(hidden_size, 501)
26
+ - **Pooling**: Last non-pad token hidden state
27
+ - **Masking**: Invalid variables (those absent from the CNF) are masked to -10000 (i.e., -1e4) before softmax; see the sketch below
28
+
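+ A minimal sketch of the pooling, normalization, and masking logic (illustrative only: dummy tensors stand in for the backbone, and 2560 is Qwen3-4B's hidden size; the full implementation lives in `sft_qwen_var_classifier.py`):
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ hidden_size, max_vars = 2560, 500            # Qwen3-4B hidden size; 501 output logits
+ head_ln = nn.LayerNorm(hidden_size)          # normalizes backbone hidden states
+ head = nn.Linear(hidden_size, max_vars + 1)  # index 0 unused (DIMACS vars are 1-indexed)
+
+ # h stands in for the backbone's last hidden layer: [batch, seq_len, hidden]
+ h = torch.randn(2, 16, hidden_size)
+ attention_mask = torch.ones(2, 16, dtype=torch.long)
+ last_idx = attention_mask.sum(dim=1) - 1                  # last non-pad position
+ pooled = h[torch.arange(h.size(0)), last_idx]             # [batch, hidden]
+ logits = head(head_ln(pooled))                            # [batch, 501]
+
+ # variables absent from the CNF get -1e4 so softmax gives them ~0 probability
+ valid_mask = torch.zeros(2, max_vars + 1, dtype=torch.bool)
+ valid_mask[:, 1:101] = True                               # pretend vars 1..100 appear
+ logits = logits.masked_fill(~valid_mask, -1e4)
+ ```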
29
+ ### Training
30
+
31
+ - **Dataset**: 3,898 training / 434 validation samples
32
+ - **Task**: Predict expert-selected branching variable
33
+ - **Best validation accuracy**: 16.36% (roughly 16× the ~1% random baseline)
34
+ - **Training**: 8 epochs, 8×H100 GPUs, DeepSpeed ZeRO-3
35
+
36
+ ## Usage
37
+
38
+ ```python
39
+ import torch
40
+ from transformers import AutoTokenizer
41
+ from sft_qwen_var_classifier import QwenVarClassifier, cnf_valid_mask
42
+
43
+ # Load model
44
+ model = QwenVarClassifier("Qwen/Qwen3-4B", max_vars=500)
45
+ state_dict = torch.load("pytorch_model.bin", map_location="cpu")
46
+ model.load_state_dict(state_dict, strict=False)
47
+ model = model.to("cuda", dtype=torch.bfloat16)
48
+ model.eval()
49
+
50
+ # Load tokenizer
51
+ tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-4B")
52
+
53
+ # Prepare CNF input
54
+ cnf_text = """p cnf 100 250
55
+ 1 -2 3 0
56
+ -1 2 -4 0
57
+ ...
58
+ """
59
+
60
+ # Tokenize
61
+ inputs = tokenizer(cnf_text, return_tensors="pt", truncation=True, max_length=8192)
62
+ inputs = {k: v.to("cuda") for k, v in inputs.items()}
63
+
64
+ # Get valid variable mask
65
+ valid_mask = torch.tensor([cnf_valid_mask(cnf_text, max_vars=500)], dtype=torch.bool, device="cuda")
66
+
67
+ # Predict
68
+ with torch.no_grad():
69
+ outputs = model(**inputs)
70
+ logits = outputs["logits"]
71
+ logits = logits.masked_fill(~valid_mask, -1e4)
72
+ predicted_var = logits.argmax(dim=-1).item()
73
+
74
+ print(f"Predicted branching variable: {predicted_var}")
75
+ ```
76
+
77
+ ## Files
78
+
79
+ - `pytorch_model.bin` - Model weights (8GB, bfloat16)
80
+ - `sft_qwen_var_classifier.py` - Model class definition (required for loading)
81
+ - `inference_demo.py` - Example inference script
82
+
83
+ ## Metrics
84
+
85
+ | Metric | Value |
86
+ |--------|-------|
87
+ | Validation Accuracy | 16.36% |
88
+ | Validation Loss | 3.87 |
89
+ | Random Baseline | ~1% |
90
+ | Improvement | 16x |
91
+
92
+ ## Limitations
93
+
94
+ - Maximum 500 variables
95
+ - Maximum 8192 tokens for CNF input
96
+ - Trained on specific CNF distribution (may not generalize to all SAT instances)
97
+
98
+ ## Citation
99
+
100
+ If you use this model, please cite the Transformer-CnC paper.
101
+
102
+ ## License
103
+
104
+ Apache 2.0
added_tokens.json ADDED
@@ -0,0 +1,28 @@
1
+ {
2
+ "</think>": 151668,
3
+ "</tool_call>": 151658,
4
+ "</tool_response>": 151666,
5
+ "<think>": 151667,
6
+ "<tool_call>": 151657,
7
+ "<tool_response>": 151665,
8
+ "<|box_end|>": 151649,
9
+ "<|box_start|>": 151648,
10
+ "<|endoftext|>": 151643,
11
+ "<|file_sep|>": 151664,
12
+ "<|fim_middle|>": 151660,
13
+ "<|fim_pad|>": 151662,
14
+ "<|fim_prefix|>": 151659,
15
+ "<|fim_suffix|>": 151661,
16
+ "<|im_end|>": 151645,
17
+ "<|im_start|>": 151644,
18
+ "<|image_pad|>": 151655,
19
+ "<|object_ref_end|>": 151647,
20
+ "<|object_ref_start|>": 151646,
21
+ "<|quad_end|>": 151651,
22
+ "<|quad_start|>": 151650,
23
+ "<|repo_name|>": 151663,
24
+ "<|video_pad|>": 151656,
25
+ "<|vision_end|>": 151653,
26
+ "<|vision_pad|>": 151654,
27
+ "<|vision_start|>": 151652
28
+ }
chat_template.jinja ADDED
@@ -0,0 +1,89 @@
1
+ {%- if tools %}
2
+ {{- '<|im_start|>system\n' }}
3
+ {%- if messages[0].role == 'system' %}
4
+ {{- messages[0].content + '\n\n' }}
5
+ {%- endif %}
6
+ {{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
7
+ {%- for tool in tools %}
8
+ {{- "\n" }}
9
+ {{- tool | tojson }}
10
+ {%- endfor %}
11
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
12
+ {%- else %}
13
+ {%- if messages[0].role == 'system' %}
14
+ {{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
15
+ {%- endif %}
16
+ {%- endif %}
17
+ {%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
18
+ {%- for message in messages[::-1] %}
19
+ {%- set index = (messages|length - 1) - loop.index0 %}
20
+ {%- if ns.multi_step_tool and message.role == "user" and message.content is string and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}
21
+ {%- set ns.multi_step_tool = false %}
22
+ {%- set ns.last_query_index = index %}
23
+ {%- endif %}
24
+ {%- endfor %}
25
+ {%- for message in messages %}
26
+ {%- if message.content is string %}
27
+ {%- set content = message.content %}
28
+ {%- else %}
29
+ {%- set content = '' %}
30
+ {%- endif %}
31
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
32
+ {{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}
33
+ {%- elif message.role == "assistant" %}
34
+ {%- set reasoning_content = '' %}
35
+ {%- if message.reasoning_content is string %}
36
+ {%- set reasoning_content = message.reasoning_content %}
37
+ {%- else %}
38
+ {%- if '</think>' in content %}
39
+ {%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
40
+ {%- set content = content.split('</think>')[-1].lstrip('\n') %}
41
+ {%- endif %}
42
+ {%- endif %}
43
+ {%- if loop.index0 > ns.last_query_index %}
44
+ {%- if loop.last or (not loop.last and reasoning_content) %}
45
+ {{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content.strip('\n') + '\n</think>\n\n' + content.lstrip('\n') }}
46
+ {%- else %}
47
+ {{- '<|im_start|>' + message.role + '\n' + content }}
48
+ {%- endif %}
49
+ {%- else %}
50
+ {{- '<|im_start|>' + message.role + '\n' + content }}
51
+ {%- endif %}
52
+ {%- if message.tool_calls %}
53
+ {%- for tool_call in message.tool_calls %}
54
+ {%- if (loop.first and content) or (not loop.first) %}
55
+ {{- '\n' }}
56
+ {%- endif %}
57
+ {%- if tool_call.function %}
58
+ {%- set tool_call = tool_call.function %}
59
+ {%- endif %}
60
+ {{- '<tool_call>\n{"name": "' }}
61
+ {{- tool_call.name }}
62
+ {{- '", "arguments": ' }}
63
+ {%- if tool_call.arguments is string %}
64
+ {{- tool_call.arguments }}
65
+ {%- else %}
66
+ {{- tool_call.arguments | tojson }}
67
+ {%- endif %}
68
+ {{- '}\n</tool_call>' }}
69
+ {%- endfor %}
70
+ {%- endif %}
71
+ {{- '<|im_end|>\n' }}
72
+ {%- elif message.role == "tool" %}
73
+ {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
74
+ {{- '<|im_start|>user' }}
75
+ {%- endif %}
76
+ {{- '\n<tool_response>\n' }}
77
+ {{- content }}
78
+ {{- '\n</tool_response>' }}
79
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
80
+ {{- '<|im_end|>\n' }}
81
+ {%- endif %}
82
+ {%- endif %}
83
+ {%- endfor %}
84
+ {%- if add_generation_prompt %}
85
+ {{- '<|im_start|>assistant\n' }}
86
+ {%- if enable_thinking is defined and enable_thinking is false %}
87
+ {{- '<think>\n\n</think>\n\n' }}
88
+ {%- endif %}
89
+ {%- endif %}
global_step440/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:02c35b3103bdd6d1e454a19691d8eaa8fe9d5760adf760caf0c7b111bebc046f
3
+ size 6035639921
global_step440/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:08ecf013ea2d99840f2ac2171762ff5d925629b1a54a72d6f69a6e1976a48ac6
3
+ size 6035639921
global_step440/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:df72b0c2e5a1057f7656aab1e15b633f6ba4fd14e8b1bfe49947308053835ffb
3
+ size 6035639921
global_step440/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:10681e851bf7b379ebc47c322a89f47598eb8b4b88bfbf061ed84349370c960f
3
+ size 6035639921
global_step440/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:900494879db0429dcef1ac17e5c620440428b4f02e657d69ef2d49dc283a7ef7
3
+ size 6035639921
global_step440/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3f295bc241d107b583819043ddb9ac92ec6ae2cf3181677a8c8ec9c1d951ba03
3
+ size 6035639921
global_step440/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f01e8b4d005cb73765ada0b2c05ef68385a047935475e525894c6f535fb7bfe8
3
+ size 6035639921
global_step440/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1317e2cd5882643804b4609614c2d4498e5b2952a88608e30bb6e5a80d2c1c07
3
+ size 6035639921
global_step440/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:353e7e3d1d067b622a0246db3dc5f49165cb23b76abfb91cfacaed1a1c554c70
3
+ size 216003
global_step440/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:259e52087ae56a56eaf06fa1e56f514f93ab8feb6ea5501f126a6ad131fc87e6
3
+ size 215939
global_step440/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2e6511c46b51ed3707d94cca1421492c1e3f429051ad1937152778bb2c9c8d54
3
+ size 215939
global_step440/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3f587dc756d715f1c9ef01ae8b14fce137d79f6103d6dfaeca0bb3004d258ad5
3
+ size 215939
global_step440/zero_pp_rank_4_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4b47f5abc3eb74ac6a7d891247057ba5c3e10fbb5ba6e33905a2c9daceb9bf3f
3
+ size 215939
global_step440/zero_pp_rank_5_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a63250d254adccb7e34c9f03fc987e82578ebca9586766e21aaee83d4fae2689
3
+ size 215939
global_step440/zero_pp_rank_6_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:68f54dd9c3a04f587e0fa420203c3b1a7aa3fd58839d27d6082dfc2e9a7ec383
3
+ size 215939
global_step440/zero_pp_rank_7_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8a88bfe0b75e6f50a1cb2e2c757056131e89e191b2e54331588a4014b3298624
3
+ size 215939
latest ADDED
@@ -0,0 +1 @@
1
+ global_step440
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:14b8fd8115b08b3d8f4b6e333aa99d9219f5365f3f970cfc32f58b1cde5c7c7d
3
+ size 8047648831
rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:70dd215f2dedfe314a275fb9922dc22825e4717fbfa21a1ac2f8837ee16ab463
3
+ size 16325
rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:01d7f1e4db12d81b9ac0026bf825bfdb68b1db3135931d28a9256f05de7050ae
3
+ size 16389
rng_state_2.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0777fd711f6f1b05b996bfb41222f015463b3ed82432a1e781f5a4b11a26937a
3
+ size 16389
rng_state_3.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e201dddd4d619d8e3734ca455c05c061c56e3bfd3320e4ca9607cef8ad6b1f51
3
+ size 16389
rng_state_4.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:35ffb6be9bee206835f91c2266acdd2c6ac9881b4026daa72effcef42384cd61
3
+ size 16389
rng_state_5.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0770b44a4181b6aeb2c78ce37d4999a5ac30553071c785adf92b4fd6cc4a0287
3
+ size 16389
rng_state_6.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1accf85226d654e72ddbebdbbefba0312f691ea7469ee992bad394b19263f0f3
3
+ size 16389
rng_state_7.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:908bb48ae0a483f79172706ce8cf83001c72a85f19559eda6e3a3de1735b2713
3
+ size 16389
scheduler.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f30ae256d6e721d0dbc9ced102d1429df94d78eb3cd40ff6b52c19bf28eef7da
3
+ size 1465
sft_qwen_var_classifier.py ADDED
@@ -0,0 +1,725 @@
1
+ """
2
+ Qwen Variable Classifier for SAT Cube-and-Conquer
3
+
4
+ This script trains a transformer-based policy to select the next branching variable
5
+ for SAT (Boolean Satisfiability) solving using the Cube-and-Conquer approach.
6
+
7
+ == Problem Overview ==
8
+ In Cube-and-Conquer SAT solving, we split a hard SAT problem into subproblems ("cubes")
9
+ by choosing variables to branch on. The quality of variable selection significantly
10
+ affects solving performance. This model learns to predict good branching variables
11
+ from expert demonstrations.
12
+
13
+ == Architecture ==
14
+ - Backbone: Qwen3-4B (pretrained causal language model)
15
+ - Head: LayerNorm + Linear classifier over variable IDs (1 to max_vars)
16
+ - The model reads a CNF formula as text and outputs logits for each possible variable
17
+
18
+ == Training Approach ==
19
+ - Supervised Fine-Tuning (SFT) on expert variable choices
20
+ - Masked classification: only variables appearing in the CNF are valid choices
21
+ - Loss: Cross-entropy with invalid variable logits masked to -infinity
22
+
23
+ == Data Format ==
24
+ JSONL with fields:
25
+ - "cnf": DIMACS-format CNF text (e.g., "p cnf 100 200\n1 -2 3 0\n...")
26
+ - "label": integer variable ID to branch on (1 to max_vars)
27
+ """
28
+
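+ # One training record in the JSONL format described above (hypothetical values,
+ # shown for illustration only):
+ # {"cnf": "p cnf 3 2\n1 -2 0\n2 3 0\n", "label": 2}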
29
+ import os
30
+ import argparse
31
+ from dataclasses import dataclass
32
+ from typing import Any, Dict, List
33
+ import numpy as np
34
+ import torch
35
+ import torch.nn as nn
36
+ import torch.nn.functional as F
37
+ from datasets import load_dataset
38
+ from transformers import (
39
+ AutoConfig,
40
+ AutoTokenizer,
41
+ AutoModelForCausalLM,
42
+ TrainingArguments,
43
+ Trainer,
44
+ set_seed,
45
+ )
46
+
47
+
48
+ # =============================================================================
49
+ # DEBUG FLAG: Set to True to enable verbose debug output, False to disable
50
+ # Can also be controlled via environment variable: DEBUG_TRAINING=1
51
+ # =============================================================================
52
+ DEBUG_TRAINING = os.environ.get("DEBUG_TRAINING", "0") == "1"
53
+
54
+
55
+ # =============================================================================
56
+ # CNF PARSING: Extract valid variables from DIMACS CNF text
57
+ # =============================================================================
58
+
59
+ def cnf_valid_mask(cnf_text: str, max_vars: int) -> List[int]:
60
+ """
61
+ Build a binary mask indicating which variable IDs appear in the CNF.
62
+
63
+ This is crucial for masked classification:
64
+ - A variable that doesn't appear in the (simplified) CNF cannot be branched on
65
+ - By masking invalid variables, we ensure the model only learns over valid choices
66
+
67
+ Args:
68
+ cnf_text: DIMACS-format CNF string. Format example:
69
+ p cnf 100 200 # header: 100 variables, 200 clauses
70
+ 1 -2 3 0 # clause: (x1 OR NOT x2 OR x3)
71
+ -1 4 0 # clause: (NOT x1 OR x4)
72
+ ...
73
+ max_vars: Maximum variable ID supported (typically 500)
74
+
75
+ Returns:
76
+ List of length (max_vars + 1) where:
77
+ - mask[0] = 0 (unused, variables are 1-indexed)
78
+ - mask[v] = 1 if variable v appears in any clause
79
+ - mask[v] = 0 if variable v does not appear
80
+
81
+ Note: We skip the header line "p cnf ..." to avoid capturing the clause count
82
+ as a valid variable (which was a bug in the original regex-based approach).
83
+ """
84
+ mask = [0] * (max_vars + 1)
85
+
86
+ for line in cnf_text.split('\n'):
87
+ line = line.strip()
88
+
89
+ # Skip empty lines, comment lines (start with 'c'), and header line (starts with 'p')
90
+ # The header "p cnf <num_vars> <num_clauses>" would incorrectly add num_clauses as a variable
91
+ if not line or line.startswith('c') or line.startswith('p'):
92
+ continue
93
+
94
+ # Parse clause: space-separated integers ending with 0
95
+ # Each integer is a literal: positive = variable, negative = negated variable
96
+ # Example: "1 -2 3 0" means (x1 OR NOT x2 OR x3)
97
+ for tok in line.split():
98
+ try:
99
+ lit = int(tok)
100
+ v = abs(lit) # Variable ID is absolute value of literal
101
+ if 1 <= v <= max_vars:
102
+ mask[v] = 1
103
+ except ValueError:
104
+ continue # Skip non-integer tokens (shouldn't happen in valid DIMACS)
105
+
106
+ # Fallback: if no variables found (e.g., truncated/malformed input), allow all
107
+ # This prevents the model from having zero valid outputs
108
+ if sum(mask) == 0:
109
+ for v in range(1, max_vars + 1):
110
+ mask[v] = 1
111
+
112
+ return mask
113
+
114
+
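+ # Quick sanity check of cnf_valid_mask (illustrative, not part of training):
+ # >>> cnf_valid_mask("p cnf 4 2\n1 -2 0\n3 0\n", max_vars=4)
+ # [0, 1, 1, 1, 0]   # variables 1, 2, 3 appear; 4 does not; index 0 is unused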
115
+ # =============================================================================
116
+ # MODEL: Qwen backbone with classification head for variable selection
117
+ # =============================================================================
118
+
119
+ class QwenVarClassifier(nn.Module):
120
+ """
121
+ Transformer-based variable classifier for SAT branching.
122
+
123
+ Architecture:
124
+ Input (CNF text)
125
+ → Tokenize
126
+ → Qwen3-4B backbone (fine-tuned end-to-end with a small LR)
127
+ → Extract last token's hidden state (sequence pooling)
128
+ → LayerNorm (stabilizes hidden state magnitude)
129
+ → Linear head (hidden_dim → num_classes)
130
+ → Logits for each variable ID
131
+
132
+ Why this architecture?
133
+ 1. Pretrained LLM backbone understands text structure and can learn CNF patterns
134
+ 2. Last-token pooling: the final token has attended to the entire input
135
+ 3. LayerNorm: Qwen's hidden states have large magnitudes; normalizing prevents
136
+ exploding gradients when combined with randomly-initialized head
137
+ 4. Single linear head: simple, interpretable, efficient
138
+ """
139
+
140
+ def __init__(self, base_model_name: str, max_vars: int):
141
+ """
142
+ Initialize the classifier.
143
+
144
+ Args:
145
+ base_model_name: HuggingFace model ID (e.g., "Qwen/Qwen3-4B")
146
+ max_vars: Maximum variable ID to classify (e.g., 500)
147
+ Output dimension will be max_vars + 1 (index 0 unused)
148
+ """
149
+ super().__init__()
150
+ self.max_vars = max_vars
151
+
152
+ # Load Qwen configuration and enable hidden state output
153
+ cfg = AutoConfig.from_pretrained(base_model_name)
154
+ cfg.output_hidden_states = True # We need hidden states, not just logits
155
+
156
+ # Load pretrained Qwen model
157
+ # Using bfloat16 for memory efficiency on modern GPUs (H100, A100)
158
+ self.backbone = AutoModelForCausalLM.from_pretrained(
159
+ base_model_name,
160
+ config=cfg,
161
+ torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,
162
+ )
163
+
164
+ hidden = self.backbone.config.hidden_size # e.g., 2560 for Qwen3-4B
165
+
166
+ # LayerNorm to normalize hidden states before classification
167
+ # This is critical for stable training:
168
+ # - Qwen's hidden states can have large magnitude (std >> 1)
169
+ # - Randomly initialized linear head expects normalized inputs
170
+ # - Without LayerNorm, initial logits can be huge → high loss → exploding gradients
171
+ self.head_ln = nn.LayerNorm(hidden)
172
+
173
+ # Classification head: maps hidden state to variable logits
174
+ # Output shape: [batch, max_vars + 1]
175
+ # Index 0 is unused (variables are 1-indexed in DIMACS)
176
+ self.head = nn.Linear(hidden, max_vars + 1)
177
+
178
+ # Initialize head with standard small weights
179
+ # LayerNorm ensures the input has unit variance, so this init is appropriate
180
+ nn.init.normal_(self.head.weight, std=0.02)
181
+ nn.init.zeros_(self.head.bias)
182
+
183
+ # Expose backbone config for DeepSpeed compatibility
184
+ # DeepSpeed checks model.config.hidden_size for auto-configuration
185
+ self.config = self.backbone.config
186
+
187
+ def forward(self, input_ids, attention_mask, **kwargs):
188
+ """
189
+ Forward pass: CNF tokens → variable logits.
190
+
191
+ Args:
192
+ input_ids: [batch, seq_len] token IDs from tokenizer
193
+ attention_mask: [batch, seq_len] binary mask (1 = real token, 0 = padding)
194
+ **kwargs: ignored (allows passing 'labels' without error during eval)
195
+
196
+ Returns:
197
+ dict with "logits": [batch, max_vars + 1] raw classification logits
198
+ """
199
+ # Run through Qwen backbone
200
+ out = self.backbone(
201
+ input_ids=input_ids,
202
+ attention_mask=attention_mask,
203
+ output_hidden_states=True, # Need hidden states, not LM logits
204
+ use_cache=False, # Disable KV cache (not needed for training)
205
+ )
206
+
207
+ # Get hidden states from the last transformer layer
208
+ # Shape: [batch, seq_len, hidden_dim]
209
+ h = out.hidden_states[-1]
210
+
211
+ # Pool by taking the last non-padding token's hidden state
212
+ # This is the standard approach for causal LMs (like using [CLS] for BERT)
213
+ #
214
+ # Why last token?
215
+ # - In causal attention, each token only sees previous tokens
216
+ # - The last token has attended to the entire input sequence
217
+ # - It's a natural "summary" of the input
218
+ #
219
+ # Compute index of last real token: sum of attention mask minus 1
220
+ last_idx = attention_mask.sum(dim=1) - 1 # [batch]
221
+ last_idx = last_idx.clamp(min=0) # Safety: ensure non-negative
222
+
223
+ # Gather hidden state at the last token position for each batch element
224
+ b = torch.arange(h.size(0), device=h.device)
225
+ pooled = h[b, last_idx] # [batch, hidden_dim]
226
+
227
+ # DEBUG: Check hidden state stats
228
+ if DEBUG_TRAINING:
229
+ if not hasattr(self, '_debug_count'):
230
+ self._debug_count = 0
231
+ if self._debug_count < 3:
232
+ print(f"[DEBUG {self._debug_count}] pooled dtype={pooled.dtype}, mean={pooled.float().mean():.2f}, std={pooled.float().std():.2f}")
233
+ self._debug_count += 1
234
+
235
+ # Normalize hidden states for stable classification
236
+ pooled = self.head_ln(pooled)
237
+
238
+ # DEBUG: Check after LayerNorm
239
+ if DEBUG_TRAINING and hasattr(self, '_debug_count') and self._debug_count <= 3:
240
+ print(f"[DEBUG] after LN: dtype={pooled.dtype}, mean={pooled.float().mean():.4f}, std={pooled.float().std():.4f}")
241
+
242
+ # Project to variable logits
243
+ logits = self.head(pooled) # [batch, max_vars + 1]
244
+
245
+ # DEBUG: Check logits
246
+ if DEBUG_TRAINING and hasattr(self, '_debug_count') and self._debug_count <= 3:
247
+ print(f"[DEBUG] logits: dtype={logits.dtype}, mean={logits.float().mean():.2f}, std={logits.float().std():.2f}, min={logits.float().min():.2f}, max={logits.float().max():.2f}")
248
+
249
+ return {"logits": logits}
250
+
251
+
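+ # Standalone illustration of the last-token pooling used in forward() above
+ # (dummy tensors, no model needed):
+ # h = torch.randn(2, 5, 8); attn = torch.tensor([[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]])
+ # last_idx = attn.sum(dim=1) - 1          # -> tensor([2, 4])
+ # pooled = h[torch.arange(2), last_idx]   # rows at positions 2 and 4, shape [2, 8]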
252
+ # =============================================================================
253
+ # DATA COLLATOR: Batch preparation with padding and mask handling
254
+ # =============================================================================
255
+
256
+ @dataclass
257
+ class Collator:
258
+ """
259
+ Custom data collator for variable classification.
260
+
261
+ Responsibilities:
262
+ 1. Pad variable-length token sequences to the same length within a batch
263
+ 2. Stack labels and valid_mask tensors
264
+ 3. Create proper attention masks for padded sequences
265
+
266
+ Why custom collator?
267
+ - We have custom fields (valid_mask) that need special handling
268
+ - Standard HF collators don't know about our mask format
269
+ """
270
+ tokenizer: Any # Tokenizer for padding configuration
271
+
272
+ def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, torch.Tensor]:
273
+ """
274
+ Collate a list of examples into a batch.
275
+
276
+ Args:
277
+ features: List of dicts, each with:
278
+ - input_ids: List[int] - token IDs
279
+ - attention_mask: List[int] - attention mask
280
+ - label: int - target variable ID
281
+ - valid_mask: List[int] - binary mask of valid variables
282
+
283
+ Returns:
284
+ Dict with batched tensors:
285
+ - input_ids: [batch, max_seq_len]
286
+ - attention_mask: [batch, max_seq_len]
287
+ - labels: [batch]
288
+ - valid_mask: [batch, max_vars + 1]
289
+ """
290
+ # Convert to tensors
291
+ input_ids = [torch.tensor(f["input_ids"], dtype=torch.long) for f in features]
292
+ attention_mask = [torch.tensor(f["attention_mask"], dtype=torch.long) for f in features]
293
+ labels = torch.tensor([f["label"] for f in features], dtype=torch.long)
294
+ valid_mask = torch.tensor([f["valid_mask"] for f in features], dtype=torch.bool)
295
+
296
+ # Pad sequences to same length within batch
297
+ # Using pad_sequence pads shorter sequences with padding_value
298
+ input_ids = torch.nn.utils.rnn.pad_sequence(
299
+ input_ids,
300
+ batch_first=True,
301
+ padding_value=self.tokenizer.pad_token_id
302
+ )
303
+ attention_mask = torch.nn.utils.rnn.pad_sequence(
304
+ attention_mask,
305
+ batch_first=True,
306
+ padding_value=0 # Padding positions get 0 attention
307
+ )
308
+
309
+ return {
310
+ "input_ids": input_ids,
311
+ "attention_mask": attention_mask,
312
+ "labels": labels,
313
+ "valid_mask": valid_mask,
314
+ }
315
+
316
+
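+ # Illustrative collation of two toy examples (hypothetical pad_token_id=0):
+ # f1 = {"input_ids": [5, 6], "attention_mask": [1, 1], "label": 1, "valid_mask": [0, 1, 1]}
+ # f2 = {"input_ids": [7, 8, 9], "attention_mask": [1, 1, 1], "label": 2, "valid_mask": [0, 1, 1]}
+ # Collator(tok)([f1, f2])["input_ids"]  # -> tensor([[5, 6, 0], [7, 8, 9]]), padded to length 3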
317
+ # =============================================================================
318
+ # TRAINER: Custom loss computation with variable masking
319
+ # =============================================================================
320
+
321
+ class MaskedVarTrainer(Trainer):
322
+ """
323
+ Custom HuggingFace Trainer with masked cross-entropy loss.
324
+
325
+ The key modification: before computing cross-entropy, we mask out logits
326
+ for invalid variables (those not appearing in the CNF). This ensures:
327
+ 1. The model cannot predict invalid variables
328
+ 2. No gradient flows to invalid variable logits
329
+ 3. Training focuses only on distinguishing valid choices
330
+
331
+ NOTE on displayed metrics:
332
+ - 'loss' shown by Trainer is summed across GPUs (loss × world_size)
333
+ We add 'true_loss' which is the actual per-sample loss
334
+ - 'grad_norm' is the L2 norm across ALL ~4B parameters BEFORE clipping
335
+ Values of 100-200 are normal for large models; it gets clipped to max_grad_norm
336
+ """
337
+
338
+ def __init__(self, *args, max_vars: int, **kwargs):
339
+ """
340
+ Args:
341
+ max_vars: Maximum variable ID (for sanity checking labels)
342
+ *args, **kwargs: Passed to parent Trainer
343
+ """
344
+ super().__init__(*args, **kwargs)
345
+ self.max_vars = max_vars
346
+ self._accumulated_loss = 0.0
347
+ self._loss_count = 0
348
+
349
+ def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=None):
350
+ """
351
+ Compute masked cross-entropy loss for variable classification.
352
+
353
+ Algorithm:
354
+ 1. Extract labels and valid_mask from inputs
355
+ 2. Forward pass to get logits
356
+ 3. Set logits for invalid variables to -inf (or -1e4 for bf16 stability)
357
+ 4. Compute cross-entropy loss
358
+
359
+ Args:
360
+ model: The QwenVarClassifier
361
+ inputs: Dict with input_ids, attention_mask, labels, valid_mask
362
+ return_outputs: If True, return (loss, outputs) tuple
363
+ num_items_in_batch: Unused (for API compatibility)
364
+
365
+ Returns:
366
+ loss: Scalar loss value, or (loss, outputs) tuple if return_outputs=True
367
+ """
368
+ # Get labels and mask (don't pop - prediction_loop needs labels for compute_metrics)
369
+ labels = inputs.get("labels") # [batch]
370
+ valid_mask = inputs.get("valid_mask") # [batch, max_vars + 1] boolean
371
+
372
+ # Remove from inputs for model.forward (which doesn't expect them)
373
+ model_inputs = {k: v for k, v in inputs.items() if k not in ["labels", "valid_mask"]}
374
+
375
+ # Forward pass
376
+ outputs = model(**model_inputs)
377
+ logits = outputs["logits"] # [batch, max_vars + 1]
378
+
379
+ # DEBUG: Check if label is in valid_mask
380
+ if DEBUG_TRAINING:
381
+ if not hasattr(self, '_loss_debug_count'):
382
+ self._loss_debug_count = 0
383
+ if self._loss_debug_count < 5:
384
+ for i, (lbl, vmask) in enumerate(zip(labels, valid_mask)):
385
+ label_in_mask = vmask[lbl].item()
386
+ valid_count = vmask.sum().item()
387
+ logit_at_label = logits[i, lbl].item()
388
+ print(f"[LOSS DEBUG {self._loss_debug_count}] label={lbl.item()}, in_mask={label_in_mask}, valid_vars={valid_count}, logit_at_label={logit_at_label:.2f}")
389
+ self._loss_debug_count += 1
390
+
391
+ # Mask invalid variables by setting their logits to a large negative value
392
+ # After softmax, these will have probability ≈ 0
393
+ #
394
+ # Why -1e4 instead of -inf or -1e9?
395
+ # - bfloat16 has limited dynamic range
396
+ # - -1e9 can cause NaN issues when computing softmax/cross-entropy
397
+ # - -1e4 is small enough to give ~0 probability while staying numerically stable
398
+ logits = logits.masked_fill(~valid_mask.to(logits.device), -1e4)
399
+
400
+ # Sanity check: labels must be valid variable IDs (1 to max_vars)
401
+ # This catches data bugs early
402
+ if torch.any(labels <= 0) or torch.any(labels > self.max_vars):
403
+ bad = labels[(labels <= 0) | (labels > self.max_vars)].detach().cpu().tolist()
404
+ raise ValueError(f"Out-of-range labels detected (showing up to 20): {bad[:20]}")
405
+
406
+ # DEBUG: Check logit at label after masking
407
+ if DEBUG_TRAINING and hasattr(self, '_loss_debug_count') and self._loss_debug_count <= 5:
408
+ for i, lbl in enumerate(labels):
409
+ masked_logit = logits[i, lbl].item()
410
+ print(f"[LOSS DEBUG] after mask: logit_at_label={masked_logit:.2f}")
411
+
412
+ # Standard cross-entropy loss
413
+ # PyTorch's cross_entropy expects logits, not probabilities
414
+ loss = F.cross_entropy(logits, labels.to(logits.device))
415
+
416
+ # Track true loss for accurate logging
417
+ self._accumulated_loss += loss.item()
418
+ self._loss_count += 1
419
+
420
+ # DEBUG: Print loss
421
+ if DEBUG_TRAINING and hasattr(self, '_loss_debug_count') and self._loss_debug_count <= 5:
422
+ print(f"[LOSS DEBUG] loss={loss.item():.2f}")
423
+
424
+ # Return masked logits in outputs (so compute_metrics gets properly masked predictions)
425
+ masked_outputs = {"logits": logits}
426
+ return (loss, masked_outputs) if return_outputs else loss
427
+
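+ # Masked cross-entropy in isolation (dummy numbers): with logits [0.5, 2.0, 1.0] and
+ # only variables 1 and 2 valid, masking pushes index 0 to -1e4, so softmax mass
+ # falls entirely on {1, 2}:
+ # logits = torch.tensor([[0.5, 2.0, 1.0]]); valid = torch.tensor([[False, True, True]])
+ # F.cross_entropy(logits.masked_fill(~valid, -1e4), torch.tensor([2]))  # ≈ 1.31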
428
+ def prediction_step(self, model, inputs, prediction_loss_only, ignore_keys=None):
429
+ """
430
+ Override prediction_step to properly return loss and logits for evaluation.
431
+
432
+ The default HF Trainer prediction_step doesn't work well with custom compute_loss,
433
+ so we implement our own that properly computes masked loss and returns logits.
434
+ """
435
+ model.eval()
436
+
437
+ with torch.no_grad():
438
+ # Get labels and mask
439
+ labels = inputs.get("labels")
440
+ valid_mask = inputs.get("valid_mask")
441
+
442
+ # Forward pass
443
+ model_inputs = {k: v for k, v in inputs.items() if k not in ["labels", "valid_mask"]}
444
+ outputs = model(**model_inputs)
445
+ logits = outputs["logits"]
446
+
447
+ # Mask invalid variables
448
+ logits = logits.masked_fill(~valid_mask.to(logits.device), -1e4)
449
+
450
+ # Compute loss
451
+ loss = F.cross_entropy(logits, labels.to(logits.device))
452
+
453
+ # Return (loss, logits, labels) - this is what compute_metrics expects
454
+ return (loss, logits.detach(), labels.detach())
455
+
456
+ def log(self, logs: Dict[str, float], start_time: float = None) -> None:
457
+ """
458
+ Override log to add true_loss and ensure eval metrics are logged to W&B.
459
+
460
+ The default 'loss' in HF Trainer is summed across GPUs in DDP/DeepSpeed.
461
+ We track the actual per-sample loss and report it as 'true_loss'.
462
+ """
463
+ if self._loss_count > 0:
464
+ # Calculate true average loss on this device
465
+ true_loss = self._accumulated_loss / self._loss_count
466
+ logs["true_loss"] = round(true_loss, 4)
467
+
468
+ # Reset for next logging interval
469
+ self._accumulated_loss = 0.0
470
+ self._loss_count = 0
471
+
472
+ # Let HF Trainer handle W&B logging - it manages step ordering correctly
473
+ super().log(logs, start_time)
474
+
475
+
476
+ def compute_metrics(eval_pred):
477
+ """
478
+ Compute accuracy for evaluation.
479
+
480
+ Args:
481
+ eval_pred: (logits, labels) from Trainer's prediction_loop
482
+ - logits: [num_samples, max_vars + 1] (already masked with -1e4 for invalid vars)
483
+ - labels: [num_samples]
484
+
485
+ Returns:
486
+ Dict with "accuracy" (Trainer will prefix with "eval_")
487
+
488
+ Note: eval_loss is computed automatically by Trainer from prediction_step's loss.
489
+ We don't need to compute it here.
490
+
491
+ Since invalid variables have logits ≈ -1e4, argmax will naturally avoid them.
492
+ """
493
+ logits, labels = eval_pred
494
+
495
+ # Accuracy: argmax prediction vs true label
496
+ preds = np.argmax(logits, axis=-1)
497
+ accuracy = float((preds == labels).mean())
498
+
499
+ return {"accuracy": accuracy}
500
+
501
+
502
+ def get_wandb_report_to():
503
+ """
504
+ Determine if this process should log to W&B.
505
+
506
+ Only the main process (rank 0) should log to W&B to avoid creating multiple runs.
507
+ Other ranks should not log to any external service.
508
+
509
+ Returns:
510
+ ["wandb"] for rank 0, [] for other ranks
511
+ """
512
+ local_rank = int(os.environ.get("LOCAL_RANK", 0))
513
+
514
+ if local_rank == 0:
515
+ return ["wandb"]
516
+ else:
517
+ return []
518
+
519
+
520
+ # =============================================================================
521
+ # MAIN: Training pipeline
522
+ # =============================================================================
523
+
524
+ def main():
525
+ """
526
+ Main training function.
527
+
528
+ Pipeline:
529
+ 1. Parse command line arguments
530
+ 2. Load tokenizer and datasets
531
+ 3. Preprocess: tokenize CNF text, compute valid masks
532
+ 4. Initialize model with pretrained backbone + new classification head
533
+ 5. Configure training (optimizer, scheduler, logging, etc.)
534
+ 6. Train and evaluate
535
+ """
536
+ ap = argparse.ArgumentParser(
537
+ description="Train a Qwen-based variable classifier for SAT branching"
538
+ )
539
+
540
+ # Model and data arguments
541
+ ap.add_argument("--model_name", type=str, default="Qwen/Qwen3-4B",
542
+ help="HuggingFace model ID for the backbone")
543
+ ap.add_argument("--train_jsonl", type=str, required=True,
544
+ help="Path to training data (JSONL with 'cnf' and 'label' fields)")
545
+ ap.add_argument("--valid_jsonl", type=str, required=True,
546
+ help="Path to validation data (same format)")
547
+ ap.add_argument("--output_dir", type=str, default="./out_qwen_var_sft",
548
+ help="Directory for checkpoints and logs")
549
+ ap.add_argument("--max_vars", type=int, default=500,
550
+ help="Maximum variable ID (determines output dimension)")
551
+ ap.add_argument("--max_length", type=int, default=8192,
552
+ help="Maximum sequence length in tokens (truncates longer CNFs)")
553
+ ap.add_argument("--seed", type=int, default=0,
554
+ help="Random seed for reproducibility")
555
+
556
+ # Training hyperparameters
557
+ ap.add_argument("--per_device_train_batch_size", type=int, default=1,
558
+ help="Batch size per GPU for training")
559
+ ap.add_argument("--per_device_eval_batch_size", type=int, default=1,
560
+ help="Batch size per GPU for evaluation")
561
+ ap.add_argument("--gradient_accumulation_steps", type=int, default=8,
562
+ help="Accumulate gradients over this many steps (effective batch = this * batch_size * num_gpus)")
563
+ ap.add_argument("--learning_rate", type=float, default=5e-6,
564
+ help="Peak learning rate (after warmup). Lower than typical fine-tuning due to classification head")
565
+ ap.add_argument("--num_train_epochs", type=float, default=3.0,
566
+ help="Total training epochs")
567
+ ap.add_argument("--warmup_ratio", type=float, default=0.03,
568
+ help="Fraction of training steps for learning rate warmup")
569
+ ap.add_argument("--weight_decay", type=float, default=0.0,
570
+ help="Weight decay (L2 regularization)")
571
+ ap.add_argument("--logging_steps", type=int, default=10,
572
+ help="Log training metrics every N steps")
573
+ ap.add_argument("--eval_steps", type=int, default=200,
574
+ help="Evaluate every N steps")
575
+ ap.add_argument("--save_steps", type=int, default=200,
576
+ help="Save checkpoint every N steps")
577
+ ap.add_argument("--report_to", type=str, default="wandb",
578
+ choices=["wandb", "tensorboard", "none"],
579
+ help="Logging backend")
580
+ ap.add_argument("--deepspeed", type=str, default=None,
581
+ help="Path to DeepSpeed config JSON for distributed training")
582
+
583
+ args = ap.parse_args()
584
+
585
+ # Set random seeds for reproducibility
586
+ set_seed(args.seed)
587
+
588
+ # Load tokenizer
589
+ # Qwen uses a byte-level BPE tokenizer
590
+ tok = AutoTokenizer.from_pretrained(args.model_name, use_fast=True)
591
+ if tok.pad_token is None:
592
+ # Qwen doesn't have a dedicated pad token; use eos as pad
593
+ tok.pad_token = tok.eos_token
594
+
595
+ # Load datasets from JSONL files
596
+ ds = load_dataset(
597
+ "json",
598
+ data_files={"train": args.train_jsonl, "validation": args.valid_jsonl},
599
+ )
600
+
601
+ def preprocess(ex):
602
+ """
603
+ Preprocess a single example.
604
+
605
+ Steps:
606
+ 1. Tokenize the CNF text
607
+ 2. Compute valid variable mask
608
+ 3. Return features for training
609
+
610
+ Args:
611
+ ex: Dict with 'cnf' (str) and 'label' (int)
612
+
613
+ Returns:
614
+ Dict with input_ids, attention_mask, label, valid_mask
615
+ """
616
+ cnf = ex["cnf"]
617
+ label = int(ex["label"])
618
+
619
+ # Tokenize CNF text
620
+ # No special prompt/instruction - the model learns to interpret raw CNF
621
+ enc = tok(
622
+ cnf,
623
+ truncation=True,
624
+ max_length=args.max_length,
625
+ padding=False # We handle padding in the collator
626
+ )
627
+
628
+ return {
629
+ "input_ids": enc["input_ids"],
630
+ "attention_mask": enc["attention_mask"],
631
+ "label": label,
632
+ "valid_mask": cnf_valid_mask(cnf, args.max_vars),
633
+ }
634
+
635
+ # Apply preprocessing to all examples
636
+ # remove_columns drops original fields (cnf, label) since we've extracted what we need
637
+ ds = ds.map(preprocess, remove_columns=ds["train"].column_names)
638
+
639
+ # Initialize model
640
+ model = QwenVarClassifier(args.model_name, max_vars=args.max_vars)
641
+
642
+ # Enable gradient checkpointing to save memory on long sequences
643
+ # This trades compute for memory by recomputing activations during backward pass
644
+ model.backbone.gradient_checkpointing_enable()
645
+
646
+ # Configure W&B logging (only rank 0 logs to avoid duplicate runs)
647
+ report_to = get_wandb_report_to()
648
+
649
+ # Configure training
650
+ training_args = TrainingArguments(
651
+ output_dir=args.output_dir,
652
+ overwrite_output_dir=True,
653
+
654
+ # Precision settings for modern GPUs
655
+ bf16=True, # Use bfloat16 for training (good for H100/A100)
656
+ tf32=True, # Enable TF32 for faster matmuls on Ampere+
657
+
658
+ # Batch configuration
659
+ per_device_train_batch_size=args.per_device_train_batch_size,
660
+ per_device_eval_batch_size=args.per_device_eval_batch_size,
661
+ gradient_accumulation_steps=args.gradient_accumulation_steps,
662
+
663
+ # Optimizer settings
664
+ learning_rate=args.learning_rate,
665
+ warmup_ratio=args.warmup_ratio,
666
+ num_train_epochs=args.num_train_epochs,
667
+ weight_decay=args.weight_decay,
668
+
669
+ # Gradient clipping for training stability
670
+ # Clips gradient norm to this value if it exceeds it
671
+ # This prevents exploding gradients from destabilizing training
672
+ max_grad_norm=1.0,
673
+
674
+ # Logging and evaluation
675
+ logging_steps=args.logging_steps,
676
+ eval_strategy="steps",
677
+ eval_steps=args.eval_steps,
678
+
679
+ # Checkpointing - keep best checkpoints based on validation accuracy
680
+ save_strategy="steps",
681
+ save_steps=args.save_steps,
682
+ save_total_limit=3, # Keep at most 3 checkpoints on disk (the best one is always retained)
683
+ load_best_model_at_end=True, # Load best checkpoint at end of training
684
+ metric_for_best_model="eval_accuracy", # Use validation accuracy to determine best
685
+ greater_is_better=True, # Higher accuracy is better
686
+
687
+ # Logging backend
688
+ report_to=report_to,
689
+ run_name=os.environ.get("WANDB_RUN_NAME", "qwen-var-sft") if args.report_to == "wandb" else None,
690
+ logging_dir=os.path.join(args.output_dir, "logs"),
691
+
692
+ # Important: don't remove valid_mask column (we need it in compute_loss)
693
+ remove_unused_columns=False,
694
+
695
+ # DDP settings (for multi-GPU)
696
+ ddp_find_unused_parameters=False,
697
+
698
+ # DeepSpeed for efficient distributed training
699
+ deepspeed=args.deepspeed,
700
+
701
+ # Use pickle format for saving (safetensors has issues with some weight tying configs)
702
+ save_safetensors=False,
703
+ )
704
+
705
+ # Create trainer with custom loss computation
706
+ trainer = MaskedVarTrainer(
707
+ model=model,
708
+ args=training_args,
709
+ train_dataset=ds["train"],
710
+ eval_dataset=ds["validation"],
711
+ tokenizer=tok,
712
+ data_collator=Collator(tok),
713
+ compute_metrics=compute_metrics,
714
+ max_vars=args.max_vars,
715
+ )
716
+
717
+ # Train!
718
+ trainer.train()
719
+
720
+ # Final evaluation
721
+ trainer.evaluate()
722
+
723
+
724
+ if __name__ == "__main__":
725
+ main()
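+
+ # Example multi-GPU launch (hypothetical paths and DeepSpeed config; adjust to your setup):
+ # deepspeed --num_gpus 8 sft_qwen_var_classifier.py \
+ #     --train_jsonl data/train.jsonl --valid_jsonl data/valid.jsonl \
+ #     --deepspeed ds_zero3.json --num_train_epochs 8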
special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|im_start|>",
4
+ "<|im_end|>",
5
+ "<|object_ref_start|>",
6
+ "<|object_ref_end|>",
7
+ "<|box_start|>",
8
+ "<|box_end|>",
9
+ "<|quad_start|>",
10
+ "<|quad_end|>",
11
+ "<|vision_start|>",
12
+ "<|vision_end|>",
13
+ "<|vision_pad|>",
14
+ "<|image_pad|>",
15
+ "<|video_pad|>"
16
+ ],
17
+ "eos_token": {
18
+ "content": "<|im_end|>",
19
+ "lstrip": false,
20
+ "normalized": false,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ },
24
+ "pad_token": {
25
+ "content": "<|endoftext|>",
26
+ "lstrip": false,
27
+ "normalized": false,
28
+ "rstrip": false,
29
+ "single_word": false
30
+ }
31
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b7a6a993d40b42d517297bb247ff66679e5bc9dd7a5143be0620faf210b42861
3
+ size 11422753
tokenizer_config.json ADDED
@@ -0,0 +1,239 @@
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "added_tokens_decoder": {
5
+ "151643": {
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "151644": {
14
+ "content": "<|im_start|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "151645": {
22
+ "content": "<|im_end|>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "151646": {
30
+ "content": "<|object_ref_start|>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ },
37
+ "151647": {
38
+ "content": "<|object_ref_end|>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false,
43
+ "special": true
44
+ },
45
+ "151648": {
46
+ "content": "<|box_start|>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false,
51
+ "special": true
52
+ },
53
+ "151649": {
54
+ "content": "<|box_end|>",
55
+ "lstrip": false,
56
+ "normalized": false,
57
+ "rstrip": false,
58
+ "single_word": false,
59
+ "special": true
60
+ },
61
+ "151650": {
62
+ "content": "<|quad_start|>",
63
+ "lstrip": false,
64
+ "normalized": false,
65
+ "rstrip": false,
66
+ "single_word": false,
67
+ "special": true
68
+ },
69
+ "151651": {
70
+ "content": "<|quad_end|>",
71
+ "lstrip": false,
72
+ "normalized": false,
73
+ "rstrip": false,
74
+ "single_word": false,
75
+ "special": true
76
+ },
77
+ "151652": {
78
+ "content": "<|vision_start|>",
79
+ "lstrip": false,
80
+ "normalized": false,
81
+ "rstrip": false,
82
+ "single_word": false,
83
+ "special": true
84
+ },
85
+ "151653": {
86
+ "content": "<|vision_end|>",
87
+ "lstrip": false,
88
+ "normalized": false,
89
+ "rstrip": false,
90
+ "single_word": false,
91
+ "special": true
92
+ },
93
+ "151654": {
94
+ "content": "<|vision_pad|>",
95
+ "lstrip": false,
96
+ "normalized": false,
97
+ "rstrip": false,
98
+ "single_word": false,
99
+ "special": true
100
+ },
101
+ "151655": {
102
+ "content": "<|image_pad|>",
103
+ "lstrip": false,
104
+ "normalized": false,
105
+ "rstrip": false,
106
+ "single_word": false,
107
+ "special": true
108
+ },
109
+ "151656": {
110
+ "content": "<|video_pad|>",
111
+ "lstrip": false,
112
+ "normalized": false,
113
+ "rstrip": false,
114
+ "single_word": false,
115
+ "special": true
116
+ },
117
+ "151657": {
118
+ "content": "<tool_call>",
119
+ "lstrip": false,
120
+ "normalized": false,
121
+ "rstrip": false,
122
+ "single_word": false,
123
+ "special": false
124
+ },
125
+ "151658": {
126
+ "content": "</tool_call>",
127
+ "lstrip": false,
128
+ "normalized": false,
129
+ "rstrip": false,
130
+ "single_word": false,
131
+ "special": false
132
+ },
133
+ "151659": {
134
+ "content": "<|fim_prefix|>",
135
+ "lstrip": false,
136
+ "normalized": false,
137
+ "rstrip": false,
138
+ "single_word": false,
139
+ "special": false
140
+ },
141
+ "151660": {
142
+ "content": "<|fim_middle|>",
143
+ "lstrip": false,
144
+ "normalized": false,
145
+ "rstrip": false,
146
+ "single_word": false,
147
+ "special": false
148
+ },
149
+ "151661": {
150
+ "content": "<|fim_suffix|>",
151
+ "lstrip": false,
152
+ "normalized": false,
153
+ "rstrip": false,
154
+ "single_word": false,
155
+ "special": false
156
+ },
157
+ "151662": {
158
+ "content": "<|fim_pad|>",
159
+ "lstrip": false,
160
+ "normalized": false,
161
+ "rstrip": false,
162
+ "single_word": false,
163
+ "special": false
164
+ },
165
+ "151663": {
166
+ "content": "<|repo_name|>",
167
+ "lstrip": false,
168
+ "normalized": false,
169
+ "rstrip": false,
170
+ "single_word": false,
171
+ "special": false
172
+ },
173
+ "151664": {
174
+ "content": "<|file_sep|>",
175
+ "lstrip": false,
176
+ "normalized": false,
177
+ "rstrip": false,
178
+ "single_word": false,
179
+ "special": false
180
+ },
181
+ "151665": {
182
+ "content": "<tool_response>",
183
+ "lstrip": false,
184
+ "normalized": false,
185
+ "rstrip": false,
186
+ "single_word": false,
187
+ "special": false
188
+ },
189
+ "151666": {
190
+ "content": "</tool_response>",
191
+ "lstrip": false,
192
+ "normalized": false,
193
+ "rstrip": false,
194
+ "single_word": false,
195
+ "special": false
196
+ },
197
+ "151667": {
198
+ "content": "<think>",
199
+ "lstrip": false,
200
+ "normalized": false,
201
+ "rstrip": false,
202
+ "single_word": false,
203
+ "special": false
204
+ },
205
+ "151668": {
206
+ "content": "</think>",
207
+ "lstrip": false,
208
+ "normalized": false,
209
+ "rstrip": false,
210
+ "single_word": false,
211
+ "special": false
212
+ }
213
+ },
214
+ "additional_special_tokens": [
215
+ "<|im_start|>",
216
+ "<|im_end|>",
217
+ "<|object_ref_start|>",
218
+ "<|object_ref_end|>",
219
+ "<|box_start|>",
220
+ "<|box_end|>",
221
+ "<|quad_start|>",
222
+ "<|quad_end|>",
223
+ "<|vision_start|>",
224
+ "<|vision_end|>",
225
+ "<|vision_pad|>",
226
+ "<|image_pad|>",
227
+ "<|video_pad|>"
228
+ ],
229
+ "bos_token": null,
230
+ "clean_up_tokenization_spaces": false,
231
+ "eos_token": "<|im_end|>",
232
+ "errors": "replace",
233
+ "extra_special_tokens": {},
234
+ "model_max_length": 131072,
235
+ "pad_token": "<|endoftext|>",
236
+ "split_special_tokens": false,
237
+ "tokenizer_class": "Qwen2Tokenizer",
238
+ "unk_token": null
239
+ }
trainer_state.json ADDED
@@ -0,0 +1,1134 @@
1
+ {
2
+ "best_global_step": 440,
3
+ "best_metric": 0.16359447004608296,
4
+ "best_model_checkpoint": "out_qwen_var_sft/checkpoint-440",
5
+ "epoch": 7.213114754098361,
6
+ "eval_steps": 10,
7
+ "global_step": 440,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.08196721311475409,
14
+ "grad_norm": 2001.4421126814825,
15
+ "learning_rate": 4.0816326530612243e-07,
16
+ "loss": 42.6172,
17
+ "step": 5,
18
+ "true_loss": 5.0281
19
+ },
20
+ {
21
+ "epoch": 0.16393442622950818,
22
+ "grad_norm": 158.37906608693382,
23
+ "learning_rate": 9.183673469387756e-07,
24
+ "loss": 43.1594,
25
+ "step": 10,
26
+ "true_loss": 5.1953
27
+ },
28
+ {
29
+ "epoch": 0.16393442622950818,
30
+ "eval_accuracy": 0.01152073732718894,
31
+ "eval_loss": 5.286218166351318,
32
+ "eval_runtime": 15.0885,
33
+ "eval_samples_per_second": 28.764,
34
+ "eval_steps_per_second": 3.645,
35
+ "step": 10
36
+ },
37
+ {
38
+ "epoch": 0.2459016393442623,
39
+ "grad_norm": 203.23818895534905,
40
+ "learning_rate": 1.4285714285714286e-06,
41
+ "loss": 42.6562,
42
+ "step": 15,
43
+ "true_loss": 5.2555
44
+ },
45
+ {
46
+ "epoch": 0.32786885245901637,
47
+ "grad_norm": 176.6457829157802,
48
+ "learning_rate": 1.938775510204082e-06,
49
+ "loss": 42.1543,
50
+ "step": 20,
51
+ "true_loss": 5.1922
52
+ },
53
+ {
54
+ "epoch": 0.32786885245901637,
55
+ "eval_accuracy": 0.01152073732718894,
56
+ "eval_loss": 5.156322002410889,
57
+ "eval_runtime": 14.9518,
58
+ "eval_samples_per_second": 29.027,
59
+ "eval_steps_per_second": 3.678,
60
+ "step": 20
61
+ },
62
+ {
63
+ "epoch": 0.4098360655737705,
64
+ "grad_norm": 157.3725914328133,
65
+ "learning_rate": 2.4489795918367347e-06,
66
+ "loss": 41.0754,
67
+ "step": 25,
68
+ "true_loss": 4.9437
69
+ },
70
+ {
71
+ "epoch": 0.4918032786885246,
72
+ "grad_norm": 177.5762016006674,
73
+ "learning_rate": 2.959183673469388e-06,
74
+ "loss": 38.793,
75
+ "step": 30,
76
+ "true_loss": 5.1707
77
+ },
78
+ {
79
+ "epoch": 0.4918032786885246,
80
+ "eval_accuracy": 0.02304147465437788,
81
+ "eval_loss": 4.853038787841797,
82
+ "eval_runtime": 14.9796,
83
+ "eval_samples_per_second": 28.973,
84
+ "eval_steps_per_second": 3.672,
85
+ "step": 30
86
+ },
87
+ {
88
+ "epoch": 0.5737704918032787,
89
+ "grad_norm": 224.10088573018953,
90
+ "learning_rate": 3.469387755102041e-06,
91
+ "loss": 38.6121,
92
+ "step": 35,
93
+ "true_loss": 4.7551
94
+ },
95
+ {
96
+ "epoch": 0.6557377049180327,
97
+ "grad_norm": 302.8526334743389,
98
+ "learning_rate": 3.979591836734694e-06,
99
+ "loss": 37.8027,
100
+ "step": 40,
101
+ "true_loss": 4.9977
102
+ },
103
+ {
104
+ "epoch": 0.6557377049180327,
105
+ "eval_accuracy": 0.06221198156682028,
106
+ "eval_loss": 4.661470413208008,
107
+ "eval_runtime": 15.006,
108
+ "eval_samples_per_second": 28.922,
109
+ "eval_steps_per_second": 3.665,
110
+ "step": 40
111
+ },
112
+ {
113
+ "epoch": 0.7377049180327869,
114
+ "grad_norm": 177.15837283975523,
115
+ "learning_rate": 4.489795918367348e-06,
116
+ "loss": 36.4664,
117
+ "step": 45,
118
+ "true_loss": 4.5766
119
+ },
120
+ {
121
+ "epoch": 0.819672131147541,
122
+ "grad_norm": 190.75273796127124,
123
+ "learning_rate": 5e-06,
124
+ "loss": 36.8304,
125
+ "step": 50,
126
+ "true_loss": 4.7605
127
+ },
128
+ {
129
+ "epoch": 0.819672131147541,
130
+ "eval_accuracy": 0.052995391705069124,
131
+ "eval_loss": 4.522649765014648,
132
+ "eval_runtime": 14.8422,
133
+ "eval_samples_per_second": 29.241,
134
+ "eval_steps_per_second": 3.706,
135
+ "step": 50
136
+ },
137
+ {
138
+ "epoch": 0.9016393442622951,
139
+ "grad_norm": 185.07218548511187,
140
+ "learning_rate": 4.943052391799545e-06,
141
+ "loss": 36.585,
142
+ "step": 55,
143
+ "true_loss": 4.5461
144
+ },
145
+ {
146
+ "epoch": 0.9836065573770492,
147
+ "grad_norm": 169.35214138176565,
148
+ "learning_rate": 4.886104783599089e-06,
149
+ "loss": 35.3022,
150
+ "step": 60,
151
+ "true_loss": 4.3983
152
+ },
153
+ {
154
+ "epoch": 0.9836065573770492,
155
+ "eval_accuracy": 0.06221198156682028,
156
+ "eval_loss": 4.3924431800842285,
157
+ "eval_runtime": 14.9823,
158
+ "eval_samples_per_second": 28.967,
159
+ "eval_steps_per_second": 3.671,
160
+ "step": 60
161
+ },
162
+ {
163
+ "epoch": 1.0655737704918034,
164
+ "grad_norm": 186.19471703324027,
165
+ "learning_rate": 4.829157175398634e-06,
166
+ "loss": 34.9871,
167
+ "step": 65,
168
+ "true_loss": 4.534
169
+ },
170
+ {
171
+ "epoch": 1.1475409836065573,
172
+ "grad_norm": 178.50065430802542,
173
+ "learning_rate": 4.772209567198178e-06,
174
+ "loss": 34.8587,
175
+ "step": 70,
176
+ "true_loss": 4.0799
177
+ },
178
+ {
179
+ "epoch": 1.1475409836065573,
180
+ "eval_accuracy": 0.06682027649769585,
181
+ "eval_loss": 4.316955089569092,
182
+ "eval_runtime": 14.9903,
183
+ "eval_samples_per_second": 28.952,
184
+ "eval_steps_per_second": 3.669,
185
+ "step": 70
186
+ },
187
+ {
188
+ "epoch": 1.2295081967213115,
189
+ "grad_norm": 256.1945567078422,
190
+ "learning_rate": 4.7152619589977225e-06,
191
+ "loss": 33.4347,
192
+ "step": 75,
193
+ "true_loss": 3.7832
194
+ },
195
+ {
196
+ "epoch": 1.3114754098360657,
197
+ "grad_norm": 178.23899873402075,
198
+ "learning_rate": 4.658314350797267e-06,
199
+ "loss": 33.5913,
200
+ "step": 80,
201
+ "true_loss": 4.3762
202
+ },
203
+ {
204
+ "epoch": 1.3114754098360657,
205
+ "eval_accuracy": 0.08064516129032258,
206
+ "eval_loss": 4.25508975982666,
207
+ "eval_runtime": 14.9497,
208
+ "eval_samples_per_second": 29.031,
209
+ "eval_steps_per_second": 3.679,
210
+ "step": 80
211
+ },
212
+ {
213
+ "epoch": 1.3934426229508197,
214
+ "grad_norm": 176.89010340519602,
215
+ "learning_rate": 4.601366742596811e-06,
216
+ "loss": 33.1532,
217
+ "step": 85,
218
+ "true_loss": 4.1607
219
+ },
220
+ {
221
+ "epoch": 1.4754098360655736,
222
+ "grad_norm": 146.21731787991052,
223
+ "learning_rate": 4.544419134396356e-06,
224
+ "loss": 33.0214,
225
+ "step": 90,
226
+ "true_loss": 4.3359
227
+ },
228
+ {
229
+ "epoch": 1.4754098360655736,
230
+ "eval_accuracy": 0.0967741935483871,
231
+ "eval_loss": 4.17578125,
232
+ "eval_runtime": 14.9139,
233
+ "eval_samples_per_second": 29.1,
234
+ "eval_steps_per_second": 3.688,
235
+ "step": 90
236
+ },
237
+ {
238
+ "epoch": 1.5573770491803278,
239
+ "grad_norm": 163.96782239209782,
240
+ "learning_rate": 4.4874715261959e-06,
241
+ "loss": 31.8735,
242
+ "step": 95,
243
+ "true_loss": 4.1047
244
+ },
245
+ {
246
+ "epoch": 1.639344262295082,
247
+ "grad_norm": 172.0759692604596,
248
+ "learning_rate": 4.4305239179954446e-06,
249
+ "loss": 33.1337,
250
+ "step": 100,
251
+ "true_loss": 4.4055
252
+ },
253
+ {
254
+ "epoch": 1.639344262295082,
255
+ "eval_accuracy": 0.09907834101382489,
256
+ "eval_loss": 4.103317737579346,
257
+ "eval_runtime": 15.1423,
258
+ "eval_samples_per_second": 28.661,
259
+ "eval_steps_per_second": 3.632,
260
+ "step": 100
261
+ },
262
+ {
263
+ "epoch": 1.721311475409836,
264
+ "grad_norm": 163.50986481723115,
265
+ "learning_rate": 4.373576309794989e-06,
266
+ "loss": 30.4056,
267
+ "step": 105,
268
+ "true_loss": 3.5892
269
+ },
270
+ {
271
+ "epoch": 1.8032786885245902,
272
+ "grad_norm": 174.28722424027927,
273
+ "learning_rate": 4.316628701594533e-06,
274
+ "loss": 32.4443,
275
+ "step": 110,
276
+ "true_loss": 3.8227
277
+ },
278
+ {
279
+ "epoch": 1.8032786885245902,
280
+ "eval_accuracy": 0.11059907834101383,
281
+ "eval_loss": 4.104156970977783,
282
+ "eval_runtime": 14.9928,
283
+ "eval_samples_per_second": 28.947,
284
+ "eval_steps_per_second": 3.668,
285
+ "step": 110
286
+ },
287
+ {
288
+ "epoch": 1.8852459016393444,
289
+ "grad_norm": 599.8963091372194,
290
+ "learning_rate": 4.259681093394078e-06,
291
+ "loss": 32.7365,
292
+ "step": 115,
293
+ "true_loss": 4.3132
294
+ },
295
+ {
296
+ "epoch": 1.9672131147540983,
297
+ "grad_norm": 176.15071401035615,
298
+ "learning_rate": 4.202733485193622e-06,
299
+ "loss": 30.8238,
300
+ "step": 120,
301
+ "true_loss": 3.9128
302
+ },
303
+ {
304
+ "epoch": 1.9672131147540983,
305
+ "eval_accuracy": 0.09216589861751152,
306
+ "eval_loss": 4.044179439544678,
307
+ "eval_runtime": 14.9286,
308
+ "eval_samples_per_second": 29.072,
309
+ "eval_steps_per_second": 3.684,
310
+ "step": 120
311
+ },
312
+ {
313
+ "epoch": 2.0491803278688523,
314
+ "grad_norm": 193.228785849168,
315
+ "learning_rate": 4.145785876993167e-06,
316
+ "loss": 31.3295,
317
+ "step": 125,
318
+ "true_loss": 4.0655
319
+ },
320
+ {
321
+ "epoch": 2.1311475409836067,
322
+ "grad_norm": 167.07209609143695,
323
+ "learning_rate": 4.088838268792711e-06,
324
+ "loss": 29.0552,
325
+ "step": 130,
326
+ "true_loss": 3.727
327
+ },
328
+ {
329
+ "epoch": 2.1311475409836067,
330
+ "eval_accuracy": 0.12211981566820276,
331
+ "eval_loss": 4.029242992401123,
332
+ "eval_runtime": 15.3137,
333
+ "eval_samples_per_second": 28.341,
334
+ "eval_steps_per_second": 3.592,
335
+ "step": 130
336
+ },
337
+ {
338
+ "epoch": 2.2131147540983607,
339
+ "grad_norm": 208.65171046196372,
340
+ "learning_rate": 4.0318906605922555e-06,
341
+ "loss": 28.6111,
342
+ "step": 135,
343
+ "true_loss": 3.5737
344
+ },
345
+ {
346
+ "epoch": 2.2950819672131146,
347
+ "grad_norm": 198.8811989422508,
348
+ "learning_rate": 3.9749430523918e-06,
349
+ "loss": 28.6129,
350
+ "step": 140,
351
+ "true_loss": 3.4636
352
+ },
353
+ {
354
+ "epoch": 2.2950819672131146,
355
+ "eval_accuracy": 0.1152073732718894,
356
+ "eval_loss": 4.056672096252441,
357
+ "eval_runtime": 14.9575,
358
+ "eval_samples_per_second": 29.016,
359
+ "eval_steps_per_second": 3.677,
360
+ "step": 140
361
+ },
362
+ {
363
+ "epoch": 2.3770491803278686,
364
+ "grad_norm": 199.51861074084005,
365
+ "learning_rate": 3.917995444191344e-06,
366
+ "loss": 28.8025,
367
+ "step": 145,
368
+ "true_loss": 3.5059
369
+ },
370
+ {
371
+ "epoch": 2.459016393442623,
372
+ "grad_norm": 193.67355565121787,
373
+ "learning_rate": 3.861047835990889e-06,
374
+ "loss": 28.5974,
375
+ "step": 150,
376
+ "true_loss": 2.9942
377
+ },
378
+ {
379
+ "epoch": 2.459016393442623,
380
+ "eval_accuracy": 0.0967741935483871,
381
+ "eval_loss": 4.023684978485107,
382
+ "eval_runtime": 15.0321,
383
+ "eval_samples_per_second": 28.872,
384
+ "eval_steps_per_second": 3.659,
385
+ "step": 150
386
+ },
387
+ {
388
+ "epoch": 2.540983606557377,
389
+ "grad_norm": 205.5841428370514,
390
+ "learning_rate": 3.804100227790433e-06,
391
+ "loss": 28.4969,
392
+ "step": 155,
393
+ "true_loss": 3.5412
394
+ },
395
+ {
396
+ "epoch": 2.6229508196721314,
397
+ "grad_norm": 182.43691547474708,
398
+ "learning_rate": 3.7471526195899776e-06,
399
+ "loss": 29.1582,
400
+ "step": 160,
401
+ "true_loss": 3.5827
402
+ },
403
+ {
404
+ "epoch": 2.6229508196721314,
405
+ "eval_accuracy": 0.10368663594470046,
406
+ "eval_loss": 4.006450176239014,
407
+ "eval_runtime": 15.0347,
408
+ "eval_samples_per_second": 28.867,
409
+ "eval_steps_per_second": 3.658,
410
+ "step": 160
411
+ },
412
+ {
413
+ "epoch": 2.7049180327868854,
414
+ "grad_norm": 209.23223838323548,
415
+ "learning_rate": 3.690205011389522e-06,
416
+ "loss": 28.4554,
417
+ "step": 165,
418
+ "true_loss": 3.8515
419
+ },
420
+ {
421
+ "epoch": 2.7868852459016393,
422
+ "grad_norm": 181.70496603318674,
423
+ "learning_rate": 3.6332574031890664e-06,
424
+ "loss": 28.0033,
425
+ "step": 170,
426
+ "true_loss": 3.8595
427
+ },
428
+ {
429
+ "epoch": 2.7868852459016393,
430
+ "eval_accuracy": 0.1336405529953917,
431
+ "eval_loss": 3.9438364505767822,
432
+ "eval_runtime": 15.1184,
433
+ "eval_samples_per_second": 28.707,
434
+ "eval_steps_per_second": 3.638,
435
+ "step": 170
436
+ },
437
+ {
438
+ "epoch": 2.8688524590163933,
439
+ "grad_norm": 189.43537999115608,
440
+ "learning_rate": 3.5763097949886104e-06,
441
+ "loss": 27.3218,
442
+ "step": 175,
443
+ "true_loss": 3.4404
444
+ },
445
+ {
446
+ "epoch": 2.9508196721311473,
447
+ "grad_norm": 203.83162067201448,
448
+ "learning_rate": 3.519362186788155e-06,
449
+ "loss": 28.3321,
450
+ "step": 180,
451
+ "true_loss": 3.1337
452
+ },
453
+ {
454
+ "epoch": 2.9508196721311473,
455
+ "eval_accuracy": 0.10829493087557604,
456
+ "eval_loss": 3.9445700645446777,
457
+ "eval_runtime": 15.1698,
458
+ "eval_samples_per_second": 28.61,
459
+ "eval_steps_per_second": 3.626,
460
+ "step": 180
461
+ },
462
+ {
463
+ "epoch": 3.0327868852459017,
464
+ "grad_norm": 188.7733943154734,
465
+ "learning_rate": 3.4624145785876997e-06,
466
+ "loss": 27.27,
467
+ "step": 185,
468
+ "true_loss": 3.334
469
+ },
470
+ {
471
+ "epoch": 3.1147540983606556,
472
+ "grad_norm": 299.860020893839,
473
+ "learning_rate": 3.405466970387244e-06,
474
+ "loss": 23.8397,
475
+ "step": 190,
476
+ "true_loss": 2.6139
477
+ },
478
+ {
479
+ "epoch": 3.1147540983606556,
480
+ "eval_accuracy": 0.11059907834101383,
481
+ "eval_loss": 3.9798836708068848,
482
+ "eval_runtime": 15.4586,
483
+ "eval_samples_per_second": 28.075,
484
+ "eval_steps_per_second": 3.558,
485
+ "step": 190
486
+ },
487
+ {
488
+ "epoch": 3.19672131147541,
489
+ "grad_norm": 261.2265032273993,
490
+ "learning_rate": 3.3485193621867885e-06,
491
+ "loss": 24.7364,
492
+ "step": 195,
493
+ "true_loss": 3.0539
494
+ },
495
+ {
496
+ "epoch": 3.278688524590164,
497
+ "grad_norm": 244.13814588236283,
498
+ "learning_rate": 3.291571753986333e-06,
499
+ "loss": 25.6726,
500
+ "step": 200,
501
+ "true_loss": 3.0328
502
+ },
503
+ {
504
+ "epoch": 3.278688524590164,
505
+ "eval_accuracy": 0.11981566820276497,
506
+ "eval_loss": 3.931368350982666,
507
+ "eval_runtime": 14.9343,
508
+ "eval_samples_per_second": 29.061,
509
+ "eval_steps_per_second": 3.683,
510
+ "step": 200
511
+ },
512
+ {
513
+ "epoch": 3.360655737704918,
514
+ "grad_norm": 223.86848695043577,
515
+ "learning_rate": 3.2346241457858773e-06,
516
+ "loss": 24.6349,
517
+ "step": 205,
518
+ "true_loss": 2.9529
519
+ },
520
+ {
521
+ "epoch": 3.442622950819672,
522
+ "grad_norm": 239.830671087524,
523
+ "learning_rate": 3.1776765375854217e-06,
524
+ "loss": 24.5938,
525
+ "step": 210,
526
+ "true_loss": 3.3097
527
+ },
528
+ {
529
+ "epoch": 3.442622950819672,
530
+ "eval_accuracy": 0.1175115207373272,
531
+ "eval_loss": 3.9209704399108887,
532
+ "eval_runtime": 14.9774,
533
+ "eval_samples_per_second": 28.977,
534
+ "eval_steps_per_second": 3.672,
535
+ "step": 210
536
+ },
537
+ {
538
+ "epoch": 3.5245901639344264,
539
+ "grad_norm": 188.8971324623384,
540
+ "learning_rate": 3.120728929384966e-06,
541
+ "loss": 24.3125,
542
+ "step": 215,
543
+ "true_loss": 2.8892
544
+ },
545
+ {
546
+ "epoch": 3.6065573770491803,
547
+ "grad_norm": 211.46305164676468,
548
+ "learning_rate": 3.0637813211845106e-06,
549
+ "loss": 24.9786,
550
+ "step": 220,
551
+ "true_loss": 3.4745
552
+ },
553
+ {
554
+ "epoch": 3.6065573770491803,
555
+ "eval_accuracy": 0.11290322580645161,
556
+ "eval_loss": 3.892094373703003,
557
+ "eval_runtime": 15.0091,
558
+ "eval_samples_per_second": 28.916,
559
+ "eval_steps_per_second": 3.664,
560
+ "step": 220
561
+ },
562
+ {
563
+ "epoch": 3.6885245901639343,
564
+ "grad_norm": 209.57415125751984,
565
+ "learning_rate": 3.0068337129840546e-06,
566
+ "loss": 24.4776,
567
+ "step": 225,
568
+ "true_loss": 3.0013
569
+ },
570
+ {
571
+ "epoch": 3.7704918032786887,
572
+ "grad_norm": 219.2212947656317,
573
+ "learning_rate": 2.949886104783599e-06,
574
+ "loss": 25.0714,
575
+ "step": 230,
576
+ "true_loss": 3.3653
577
+ },
578
+ {
579
+ "epoch": 3.7704918032786887,
580
+ "eval_accuracy": 0.12211981566820276,
581
+ "eval_loss": 3.88911509513855,
582
+ "eval_runtime": 15.0058,
583
+ "eval_samples_per_second": 28.922,
584
+ "eval_steps_per_second": 3.665,
585
+ "step": 230
586
+ },
587
+ {
588
+ "epoch": 3.8524590163934427,
589
+ "grad_norm": 215.0136859889706,
590
+ "learning_rate": 2.892938496583144e-06,
591
+ "loss": 25.0681,
592
+ "step": 235,
593
+ "true_loss": 2.7688
594
+ },
595
+ {
596
+ "epoch": 3.9344262295081966,
597
+ "grad_norm": 212.6143565856701,
598
+ "learning_rate": 2.8359908883826882e-06,
599
+ "loss": 24.524,
600
+ "step": 240,
601
+ "true_loss": 3.0097
602
+ },
603
+ {
604
+ "epoch": 3.9344262295081966,
605
+ "eval_accuracy": 0.10599078341013825,
606
+ "eval_loss": 3.901205062866211,
607
+ "eval_runtime": 14.9742,
608
+ "eval_samples_per_second": 28.983,
609
+ "eval_steps_per_second": 3.673,
610
+ "step": 240
611
+ },
612
+ {
613
+ "epoch": 4.016393442622951,
614
+ "grad_norm": 203.1242362392191,
615
+ "learning_rate": 2.7790432801822326e-06,
616
+ "loss": 22.3977,
617
+ "step": 245,
618
+ "true_loss": 2.4811
619
+ },
620
+ {
621
+ "epoch": 4.098360655737705,
622
+ "grad_norm": 219.93955042504686,
623
+ "learning_rate": 2.722095671981777e-06,
624
+ "loss": 20.6891,
625
+ "step": 250,
626
+ "true_loss": 2.6231
627
+ },
628
+ {
629
+ "epoch": 4.098360655737705,
630
+ "eval_accuracy": 0.12903225806451613,
631
+ "eval_loss": 3.918625831604004,
632
+ "eval_runtime": 15.0247,
633
+ "eval_samples_per_second": 28.886,
634
+ "eval_steps_per_second": 3.661,
635
+ "step": 250
636
+ },
637
+ {
638
+ "epoch": 4.180327868852459,
639
+ "grad_norm": 269.83943945736723,
640
+ "learning_rate": 2.6651480637813215e-06,
641
+ "loss": 19.9576,
642
+ "step": 255,
643
+ "true_loss": 2.4069
644
+ },
645
+ {
646
+ "epoch": 4.262295081967213,
647
+ "grad_norm": 285.3088807670382,
648
+ "learning_rate": 2.608200455580866e-06,
649
+ "loss": 19.9079,
650
+ "step": 260,
651
+ "true_loss": 2.5739
652
+ },
653
+ {
654
+ "epoch": 4.262295081967213,
655
+ "eval_accuracy": 0.12672811059907835,
656
+ "eval_loss": 3.955899477005005,
657
+ "eval_runtime": 14.9722,
658
+ "eval_samples_per_second": 28.987,
659
+ "eval_steps_per_second": 3.673,
660
+ "step": 260
661
+ },
662
+ {
663
+ "epoch": 4.344262295081967,
664
+ "grad_norm": 235.65294642592144,
665
+ "learning_rate": 2.5512528473804103e-06,
666
+ "loss": 19.9913,
667
+ "step": 265,
668
+ "true_loss": 2.6661
669
+ },
670
+ {
671
+ "epoch": 4.426229508196721,
672
+ "grad_norm": 243.25746669728042,
673
+ "learning_rate": 2.4943052391799547e-06,
674
+ "loss": 21.5919,
675
+ "step": 270,
676
+ "true_loss": 2.931
677
+ },
678
+ {
679
+ "epoch": 4.426229508196721,
680
+ "eval_accuracy": 0.12672811059907835,
681
+ "eval_loss": 3.8867075443267822,
682
+ "eval_runtime": 14.9423,
683
+ "eval_samples_per_second": 29.045,
684
+ "eval_steps_per_second": 3.681,
685
+ "step": 270
686
+ },
687
+ {
688
+ "epoch": 4.508196721311475,
689
+ "grad_norm": 268.3143937900749,
690
+ "learning_rate": 2.437357630979499e-06,
691
+ "loss": 21.3721,
692
+ "step": 275,
693
+ "true_loss": 2.6628
694
+ },
695
+ {
696
+ "epoch": 4.590163934426229,
697
+ "grad_norm": 233.6702831549346,
698
+ "learning_rate": 2.3804100227790436e-06,
699
+ "loss": 21.651,
700
+ "step": 280,
701
+ "true_loss": 2.3591
702
+ },
703
+ {
704
+ "epoch": 4.590163934426229,
705
+ "eval_accuracy": 0.1359447004608295,
706
+ "eval_loss": 3.855499029159546,
707
+ "eval_runtime": 15.1421,
708
+ "eval_samples_per_second": 28.662,
709
+ "eval_steps_per_second": 3.632,
710
+ "step": 280
711
+ },
712
+ {
713
+ "epoch": 4.672131147540983,
714
+ "grad_norm": 219.0257023920148,
715
+ "learning_rate": 2.323462414578588e-06,
716
+ "loss": 21.5054,
717
+ "step": 285,
718
+ "true_loss": 2.5704
719
+ },
720
+ {
721
+ "epoch": 4.754098360655737,
722
+ "grad_norm": 287.08072005658994,
723
+ "learning_rate": 2.2665148063781324e-06,
724
+ "loss": 20.7173,
725
+ "step": 290,
726
+ "true_loss": 3.0756
727
+ },
728
+ {
729
+ "epoch": 4.754098360655737,
730
+ "eval_accuracy": 0.14055299539170507,
731
+ "eval_loss": 3.8604798316955566,
732
+ "eval_runtime": 15.1432,
733
+ "eval_samples_per_second": 28.66,
734
+ "eval_steps_per_second": 3.632,
735
+ "step": 290
736
+ },
737
+ {
738
+ "epoch": 4.836065573770492,
739
+ "grad_norm": 264.8569129204395,
740
+ "learning_rate": 2.209567198177677e-06,
741
+ "loss": 19.9181,
742
+ "step": 295,
743
+ "true_loss": 2.2299
744
+ },
745
+ {
746
+ "epoch": 4.918032786885246,
747
+ "grad_norm": 239.4181479000276,
748
+ "learning_rate": 2.1526195899772212e-06,
749
+ "loss": 22.0119,
750
+ "step": 300,
751
+ "true_loss": 3.0984
752
+ },
753
+ {
754
+ "epoch": 4.918032786885246,
755
+ "eval_accuracy": 0.1313364055299539,
756
+ "eval_loss": 3.848721742630005,
757
+ "eval_runtime": 15.1331,
758
+ "eval_samples_per_second": 28.679,
759
+ "eval_steps_per_second": 3.634,
760
+ "step": 300
761
+ },
762
+ {
763
+ "epoch": 5.0,
764
+ "grad_norm": 216.31247846676033,
765
+ "learning_rate": 2.0956719817767656e-06,
766
+ "loss": 20.7322,
767
+ "step": 305,
768
+ "true_loss": 2.2975
769
+ },
770
+ {
771
+ "epoch": 5.081967213114754,
772
+ "grad_norm": 266.0474875386491,
773
+ "learning_rate": 2.03872437357631e-06,
774
+ "loss": 17.2354,
775
+ "step": 310,
776
+ "true_loss": 2.0884
777
+ },
778
+ {
779
+ "epoch": 5.081967213114754,
780
+ "eval_accuracy": 0.1382488479262673,
781
+ "eval_loss": 3.835157871246338,
782
+ "eval_runtime": 15.1115,
783
+ "eval_samples_per_second": 28.72,
784
+ "eval_steps_per_second": 3.64,
785
+ "step": 310
786
+ },
787
+ {
788
+ "epoch": 5.163934426229508,
789
+ "grad_norm": 258.5339989822872,
790
+ "learning_rate": 1.9817767653758545e-06,
791
+ "loss": 17.442,
792
+ "step": 315,
793
+ "true_loss": 2.1051
794
+ },
795
+ {
796
+ "epoch": 5.245901639344262,
797
+ "grad_norm": 308.2469254205616,
798
+ "learning_rate": 1.924829157175399e-06,
799
+ "loss": 17.0414,
800
+ "step": 320,
801
+ "true_loss": 2.5328
802
+ },
803
+ {
804
+ "epoch": 5.245901639344262,
805
+ "eval_accuracy": 0.15207373271889402,
806
+ "eval_loss": 3.899200201034546,
807
+ "eval_runtime": 15.7217,
808
+ "eval_samples_per_second": 27.605,
809
+ "eval_steps_per_second": 3.498,
810
+ "step": 320
811
+ },
812
+ {
813
+ "epoch": 5.327868852459017,
814
+ "grad_norm": 297.7046229142417,
815
+ "learning_rate": 1.8678815489749433e-06,
816
+ "loss": 16.4162,
817
+ "step": 325,
818
+ "true_loss": 2.2139
819
+ },
820
+ {
821
+ "epoch": 5.409836065573771,
822
+ "grad_norm": 262.17760383527263,
823
+ "learning_rate": 1.8109339407744877e-06,
824
+ "loss": 17.9956,
825
+ "step": 330,
826
+ "true_loss": 2.3699
827
+ },
828
+ {
829
+ "epoch": 5.409836065573771,
830
+ "eval_accuracy": 0.15668202764976957,
831
+ "eval_loss": 3.869380235671997,
832
+ "eval_runtime": 14.9258,
833
+ "eval_samples_per_second": 29.077,
834
+ "eval_steps_per_second": 3.685,
835
+ "step": 330
836
+ },
837
+ {
838
+ "epoch": 5.491803278688525,
839
+ "grad_norm": 267.855545172113,
840
+ "learning_rate": 1.753986332574032e-06,
841
+ "loss": 18.2262,
842
+ "step": 335,
843
+ "true_loss": 2.1083
844
+ },
845
+ {
846
+ "epoch": 5.573770491803279,
847
+ "grad_norm": 263.032871409478,
848
+ "learning_rate": 1.6970387243735763e-06,
849
+ "loss": 17.7507,
850
+ "step": 340,
851
+ "true_loss": 2.0572
852
+ },
853
+ {
854
+ "epoch": 5.573770491803279,
855
+ "eval_accuracy": 0.14516129032258066,
856
+ "eval_loss": 3.860694646835327,
857
+ "eval_runtime": 14.9606,
858
+ "eval_samples_per_second": 29.01,
859
+ "eval_steps_per_second": 3.676,
860
+ "step": 340
861
+ },
862
+ {
863
+ "epoch": 5.655737704918033,
864
+ "grad_norm": 251.13017134627248,
865
+ "learning_rate": 1.640091116173121e-06,
866
+ "loss": 18.9093,
867
+ "step": 345,
868
+ "true_loss": 2.552
869
+ },
870
+ {
871
+ "epoch": 5.737704918032787,
872
+ "grad_norm": 283.50724134889464,
873
+ "learning_rate": 1.5831435079726654e-06,
874
+ "loss": 17.0755,
875
+ "step": 350,
876
+ "true_loss": 2.621
877
+ },
878
+ {
879
+ "epoch": 5.737704918032787,
880
+ "eval_accuracy": 0.15668202764976957,
881
+ "eval_loss": 3.821519613265991,
882
+ "eval_runtime": 15.0984,
883
+ "eval_samples_per_second": 28.745,
884
+ "eval_steps_per_second": 3.643,
885
+ "step": 350
886
+ },
887
+ {
888
+ "epoch": 5.8196721311475414,
889
+ "grad_norm": 751.7678054126261,
890
+ "learning_rate": 1.5261958997722096e-06,
891
+ "loss": 18.1668,
892
+ "step": 355,
893
+ "true_loss": 1.9259
894
+ },
895
+ {
896
+ "epoch": 5.901639344262295,
897
+ "grad_norm": 318.53273858625954,
898
+ "learning_rate": 1.469248291571754e-06,
899
+ "loss": 17.0525,
900
+ "step": 360,
901
+ "true_loss": 2.2937
902
+ },
903
+ {
904
+ "epoch": 5.901639344262295,
905
+ "eval_accuracy": 0.1497695852534562,
906
+ "eval_loss": 3.812807083129883,
907
+ "eval_runtime": 15.0494,
908
+ "eval_samples_per_second": 28.838,
909
+ "eval_steps_per_second": 3.655,
910
+ "step": 360
911
+ },
912
+ {
913
+ "epoch": 5.983606557377049,
914
+ "grad_norm": 259.86569786349736,
915
+ "learning_rate": 1.4123006833712984e-06,
916
+ "loss": 16.5851,
917
+ "step": 365,
918
+ "true_loss": 1.926
919
+ },
920
+ {
921
+ "epoch": 6.065573770491803,
922
+ "grad_norm": 236.16491616521617,
923
+ "learning_rate": 1.355353075170843e-06,
924
+ "loss": 14.0549,
925
+ "step": 370,
926
+ "true_loss": 1.6638
927
+ },
928
+ {
929
+ "epoch": 6.065573770491803,
930
+ "eval_accuracy": 0.14516129032258066,
931
+ "eval_loss": 3.8400564193725586,
932
+ "eval_runtime": 15.0501,
933
+ "eval_samples_per_second": 28.837,
934
+ "eval_steps_per_second": 3.654,
935
+ "step": 370
936
+ },
937
+ {
938
+ "epoch": 6.147540983606557,
939
+ "grad_norm": 267.4679343061016,
940
+ "learning_rate": 1.2984054669703875e-06,
941
+ "loss": 13.1733,
942
+ "step": 375,
943
+ "true_loss": 1.4947
944
+ },
945
+ {
946
+ "epoch": 6.229508196721311,
947
+ "grad_norm": 303.4138473047156,
948
+ "learning_rate": 1.2414578587699317e-06,
949
+ "loss": 14.2548,
950
+ "step": 380,
951
+ "true_loss": 1.8772
952
+ },
953
+ {
954
+ "epoch": 6.229508196721311,
955
+ "eval_accuracy": 0.14516129032258066,
956
+ "eval_loss": 3.897352933883667,
957
+ "eval_runtime": 15.0678,
958
+ "eval_samples_per_second": 28.803,
959
+ "eval_steps_per_second": 3.65,
960
+ "step": 380
961
+ },
962
+ {
963
+ "epoch": 6.311475409836065,
964
+ "grad_norm": 304.09080880251156,
965
+ "learning_rate": 1.1845102505694763e-06,
966
+ "loss": 14.1155,
967
+ "step": 385,
968
+ "true_loss": 1.4
969
+ },
970
+ {
971
+ "epoch": 6.39344262295082,
972
+ "grad_norm": 328.52917098962524,
973
+ "learning_rate": 1.1275626423690205e-06,
974
+ "loss": 14.0041,
975
+ "step": 390,
976
+ "true_loss": 1.5224
977
+ },
978
+ {
979
+ "epoch": 6.39344262295082,
980
+ "eval_accuracy": 0.1313364055299539,
981
+ "eval_loss": 3.8768956661224365,
982
+ "eval_runtime": 14.9485,
983
+ "eval_samples_per_second": 29.033,
984
+ "eval_steps_per_second": 3.679,
985
+ "step": 390
986
+ },
987
+ {
988
+ "epoch": 6.475409836065574,
989
+ "grad_norm": 374.87828963261063,
990
+ "learning_rate": 1.0706150341685651e-06,
991
+ "loss": 13.4204,
992
+ "step": 395,
993
+ "true_loss": 1.7657
994
+ },
995
+ {
996
+ "epoch": 6.557377049180328,
997
+ "grad_norm": 290.7307487199588,
998
+ "learning_rate": 1.0136674259681093e-06,
999
+ "loss": 14.282,
1000
+ "step": 400,
1001
+ "true_loss": 1.8787
1002
+ },
1003
+ {
1004
+ "epoch": 6.557377049180328,
1005
+ "eval_accuracy": 0.14285714285714285,
1006
+ "eval_loss": 3.83672833442688,
1007
+ "eval_runtime": 15.0933,
1008
+ "eval_samples_per_second": 28.755,
1009
+ "eval_steps_per_second": 3.644,
1010
+ "step": 400
1011
+ },
1012
+ {
1013
+ "epoch": 6.639344262295082,
1014
+ "grad_norm": 293.9907784197036,
1015
+ "learning_rate": 9.567198177676538e-07,
1016
+ "loss": 13.0058,
1017
+ "step": 405,
1018
+ "true_loss": 1.9678
1019
+ },
1020
+ {
1021
+ "epoch": 6.721311475409836,
1022
+ "grad_norm": 329.0206706011657,
1023
+ "learning_rate": 8.997722095671982e-07,
1024
+ "loss": 15.0129,
1025
+ "step": 410,
1026
+ "true_loss": 1.8787
1027
+ },
1028
+ {
1029
+ "epoch": 6.721311475409836,
1030
+ "eval_accuracy": 0.15668202764976957,
1031
+ "eval_loss": 3.854370594024658,
1032
+ "eval_runtime": 15.0162,
1033
+ "eval_samples_per_second": 28.902,
1034
+ "eval_steps_per_second": 3.663,
1035
+ "step": 410
1036
+ },
1037
+ {
1038
+ "epoch": 6.80327868852459,
1039
+ "grad_norm": 435.30888223467264,
1040
+ "learning_rate": 8.428246013667427e-07,
1041
+ "loss": 14.3213,
1042
+ "step": 415,
1043
+ "true_loss": 2.0839
1044
+ },
1045
+ {
1046
+ "epoch": 6.885245901639344,
1047
+ "grad_norm": 283.4882127418948,
1048
+ "learning_rate": 7.858769931662871e-07,
1049
+ "loss": 13.6361,
1050
+ "step": 420,
1051
+ "true_loss": 1.5802
1052
+ },
1053
+ {
1054
+ "epoch": 6.885245901639344,
1055
+ "eval_accuracy": 0.1497695852534562,
1056
+ "eval_loss": 3.839586019515991,
1057
+ "eval_runtime": 14.9583,
1058
+ "eval_samples_per_second": 29.014,
1059
+ "eval_steps_per_second": 3.677,
1060
+ "step": 420
1061
+ },
1062
+ {
1063
+ "epoch": 6.967213114754099,
1064
+ "grad_norm": 353.6661268712156,
1065
+ "learning_rate": 7.289293849658314e-07,
1066
+ "loss": 15.2028,
1067
+ "step": 425,
1068
+ "true_loss": 1.7352
1069
+ },
1070
+ {
1071
+ "epoch": 7.049180327868853,
1072
+ "grad_norm": 273.2552710608739,
1073
+ "learning_rate": 6.71981776765376e-07,
1074
+ "loss": 12.2664,
1075
+ "step": 430,
1076
+ "true_loss": 1.4752
1077
+ },
1078
+ {
1079
+ "epoch": 7.049180327868853,
1080
+ "eval_accuracy": 0.14746543778801843,
1081
+ "eval_loss": 3.839120388031006,
1082
+ "eval_runtime": 15.1557,
1083
+ "eval_samples_per_second": 28.636,
1084
+ "eval_steps_per_second": 3.629,
1085
+ "step": 430
1086
+ },
1087
+ {
1088
+ "epoch": 7.131147540983607,
1089
+ "grad_norm": 278.2724305429665,
1090
+ "learning_rate": 6.150341685649204e-07,
1091
+ "loss": 10.7664,
1092
+ "step": 435,
1093
+ "true_loss": 1.3734
1094
+ },
1095
+ {
1096
+ "epoch": 7.213114754098361,
1097
+ "grad_norm": 316.474377641746,
1098
+ "learning_rate": 5.580865603644648e-07,
1099
+ "loss": 10.5371,
1100
+ "step": 440,
1101
+ "true_loss": 1.6166
1102
+ },
1103
+ {
1104
+ "epoch": 7.213114754098361,
1105
+ "eval_accuracy": 0.16359447004608296,
1106
+ "eval_loss": 3.866461753845215,
1107
+ "eval_runtime": 14.9602,
1108
+ "eval_samples_per_second": 29.01,
1109
+ "eval_steps_per_second": 3.676,
1110
+ "step": 440
1111
+ }
1112
+ ],
1113
+ "logging_steps": 5,
1114
+ "max_steps": 488,
1115
+ "num_input_tokens_seen": 0,
1116
+ "num_train_epochs": 8,
1117
+ "save_steps": 10,
1118
+ "stateful_callbacks": {
1119
+ "TrainerControl": {
1120
+ "args": {
1121
+ "should_epoch_stop": false,
1122
+ "should_evaluate": false,
1123
+ "should_log": false,
1124
+ "should_save": true,
1125
+ "should_training_stop": false
1126
+ },
1127
+ "attributes": {}
1128
+ }
1129
+ },
1130
+ "total_flos": 0.0,
1131
+ "train_batch_size": 1,
1132
+ "trial_name": null,
1133
+ "trial_params": null
1134
+ }
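The log above shows `eval_accuracy` climbing from roughly 1% at step 10 to 16.4% at the best checkpoint (step 440), while `true_loss` falls from ~5.0 to ~1.6. A minimal sketch for extracting the eval curve from this file (standard library only):

```python
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Evaluation entries carry "eval_accuracy"; training entries carry "loss"/"true_loss".
evals = [e for e in state["log_history"] if "eval_accuracy" in e]
for e in evals:
    print(f'step {e["step"]:4d}  acc {e["eval_accuracy"]:.4f}  loss {e["eval_loss"]:.4f}')
```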
training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fd25a69663dfac6f6868d122593f03b846718e390a0c72360a1c186d8e0bb2af
3
+ size 7249
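`training_args.bin` is stored as a Git LFS pointer, so only the sha256 digest and the 7249-byte size appear in the diff. Once fetched, it is a pickled `TrainingArguments` object; a hedged inspection sketch (needs compatible `torch`/`transformers` versions, and `weights_only=False` because the file is a pickle, so only load checkpoints you trust):

```python
import torch

# TrainingArguments is serialized via pickle, not as plain tensors.
args = torch.load("training_args.bin", weights_only=False)
print(args)
```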
vocab.json ADDED
The diff for this file is too large to render.
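Since the vocabulary diff isn't rendered, a quick local sanity check of its size (standard library only; an illustrative check, not part of the upload itself):

```python
import json

with open("vocab.json", encoding="utf-8") as f:
    vocab = json.load(f)  # maps BPE token string -> token id

print(len(vocab))  # number of vocabulary entries
```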
zero_to_fp32.py ADDED
@@ -0,0 +1,760 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright (c) Microsoft Corporation.
4
+ # SPDX-License-Identifier: Apache-2.0
5
+
6
+ # DeepSpeed Team
7
+
8
+ # This script extracts fp32 consolidated weights from ZeRO stage 1, 2 and 3 DeepSpeed checkpoints. It gets
9
+ # copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
10
+ # the future. Once extracted, the weights don't require DeepSpeed and can be used in any
11
+ # application.
12
+ #
13
+ # example:
14
+ # python zero_to_fp32.py . output_dir/
15
+ # or
16
+ # python zero_to_fp32.py . output_dir/ --safe_serialization
17
+
18
+ import argparse
19
+ import torch
20
+ import glob
21
+ import math
22
+ import os
23
+ import re
24
+ import gc
25
+ import json
26
+ import numpy as np
27
+ from tqdm import tqdm
28
+ from collections import OrderedDict
29
+ from dataclasses import dataclass
30
+
31
+ # While this script doesn't use DeepSpeed to recover the data, the checkpoints are pickled
33
+ # with DeepSpeed data structures, so DeepSpeed has to be available in the current python environment.
33
+ from deepspeed.utils import logger
34
+ from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
35
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
36
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
37
+
38
+
39
+ @dataclass
40
+ class zero_model_state:
41
+ buffers: dict()
42
+ param_shapes: dict()
43
+ shared_params: list
44
+ ds_version: int
45
+ frozen_param_shapes: dict()
46
+ frozen_param_fragments: dict()
47
+
48
+
49
+ debug = 0
50
+
51
+ # load to cpu
52
+ device = torch.device('cpu')
53
+
54
+
55
+ def atoi(text):
56
+ return int(text) if text.isdigit() else text
57
+
58
+
59
+ def natural_keys(text):
60
+ '''
61
+ alist.sort(key=natural_keys) sorts in human order
62
+ http://nedbatchelder.com/blog/200712/human_sorting.html
63
+ (See Toothy's implementation in the comments)
64
+ '''
65
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
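+ # Illustrative: sorted(["rank10", "rank2"], key=natural_keys) -> ["rank2", "rank10"],
+ # whereas a plain lexicographic sort would put "rank10" first.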
66
+
67
+
68
+ def get_model_state_file(checkpoint_dir, zero_stage):
69
+ if not os.path.isdir(checkpoint_dir):
70
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
71
+
72
+ # there should be only one file
73
+ if zero_stage <= 2:
74
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
75
+ elif zero_stage == 3:
76
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
77
+
78
+ if not os.path.exists(file):
79
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
80
+
81
+ return file
82
+
83
+
84
+ def get_checkpoint_files(checkpoint_dir, glob_pattern):
85
+ # XXX: need to test that this simple glob rule works for multi-node setup too
86
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
87
+
88
+ if len(ckpt_files) == 0:
89
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
90
+
91
+ return ckpt_files
92
+
93
+
94
+ def get_optim_files(checkpoint_dir):
95
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
96
+
97
+
98
+ def get_model_state_files(checkpoint_dir):
99
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
100
+
101
+
102
+ def parse_model_states(files):
103
+ zero_model_states = []
104
+ for file in files:
105
+ state_dict = torch.load(file, map_location=device, weights_only=False)
106
+
107
+ if BUFFER_NAMES not in state_dict:
108
+ raise ValueError(f"{file} is not a model state checkpoint")
109
+ buffer_names = state_dict[BUFFER_NAMES]
110
+ if debug:
111
+ print("Found buffers:", buffer_names)
112
+
113
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
114
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
115
+ param_shapes = state_dict[PARAM_SHAPES]
116
+
117
+ # collect parameters that are included in param_shapes
118
+ param_names = []
119
+ for s in param_shapes:
120
+ for name in s.keys():
121
+ param_names.append(name)
122
+
123
+ # update with frozen parameters
124
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
125
+ if frozen_param_shapes is not None:
126
+ if debug:
127
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
128
+ param_names += list(frozen_param_shapes.keys())
129
+
130
+ # handle shared params
131
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
132
+
133
+ ds_version = state_dict.get(DS_VERSION, None)
134
+
135
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
136
+
137
+ z_model_state = zero_model_state(buffers=buffers,
138
+ param_shapes=param_shapes,
139
+ shared_params=shared_params,
140
+ ds_version=ds_version,
141
+ frozen_param_shapes=frozen_param_shapes,
142
+ frozen_param_fragments=frozen_param_fragments)
143
+ zero_model_states.append(z_model_state)
144
+
145
+ return zero_model_states
146
+
147
+
148
+ def parse_optim_states(files, ds_checkpoint_dir):
149
+ total_files = len(files)
150
+ state_dicts = []
151
+ for f in tqdm(files, desc='Loading checkpoint shards'):
152
+ state_dict = torch.load(f, map_location=device, mmap=True, weights_only=False)
153
+ # immediately discard the two potentially huge optimizer states, as we only care about the fp32 master weights
154
+ # and also handle the case where it was already removed by another helper script
155
+ state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
156
+ state_dicts.append(state_dict)
157
+
158
+ if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
159
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
160
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
161
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
162
+
163
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
164
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
165
+ # use the max of the partition_count to get the dp world_size.
166
+
167
+ if type(world_size) is list:
168
+ world_size = max(world_size)
169
+
170
+ if world_size != total_files:
171
+ raise ValueError(
172
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
173
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
174
+ )
175
+
176
+ # the groups are named differently in each stage
177
+ if zero_stage <= 2:
178
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
179
+ elif zero_stage == 3:
180
+ fp32_groups_key = FP32_FLAT_GROUPS
181
+ else:
182
+ raise ValueError(f"unknown zero stage {zero_stage}")
183
+
184
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
185
+ return zero_stage, world_size, fp32_flat_groups
186
+
187
+
188
+ def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
189
+ """
190
+ Returns fp32 state_dict reconstructed from ds checkpoint
191
+
192
+ Args:
193
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
194
+
195
+ """
196
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
197
+
198
+ optim_files = get_optim_files(ds_checkpoint_dir)
199
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
200
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
201
+
202
+ model_files = get_model_state_files(ds_checkpoint_dir)
203
+
204
+ zero_model_states = parse_model_states(model_files)
205
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
206
+
207
+ if zero_stage <= 2:
208
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
209
+ exclude_frozen_parameters)
210
+ elif zero_stage == 3:
211
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
212
+ exclude_frozen_parameters)
213
+
214
+
215
+ def _zero2_merge_frozen_params(state_dict, zero_model_states):
216
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
217
+ return
218
+
219
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
220
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
221
+
222
+ if debug:
223
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
224
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
225
+
226
+ wanted_params = len(frozen_param_shapes)
227
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
228
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
229
+ print(f'Frozen params: Have {avail_numel} numels to process.')
230
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
231
+
232
+ total_params = 0
233
+ total_numel = 0
234
+ for name, shape in frozen_param_shapes.items():
235
+ total_params += 1
236
+ unpartitioned_numel = shape.numel()
237
+ total_numel += unpartitioned_numel
238
+
239
+ state_dict[name] = frozen_param_fragments[name]
240
+
241
+ if debug:
242
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
243
+
244
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
245
+
246
+
247
+ def _has_callable(obj, fn):
248
+ attr = getattr(obj, fn, None)
249
+ return callable(attr)
250
+
251
+
252
+ def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
253
+ param_shapes = zero_model_states[0].param_shapes
254
+
255
+ # Reconstruction protocol:
256
+ #
257
+ # XXX: document this
258
+
259
+ if debug:
260
+ for i in range(world_size):
261
+ for j in range(len(fp32_flat_groups[0])):
262
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
263
+
264
+ # XXX: memory usage doubles here (zero2)
265
+ num_param_groups = len(fp32_flat_groups[0])
266
+ merged_single_partition_of_fp32_groups = []
267
+ for i in range(num_param_groups):
268
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
269
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
270
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
271
+ avail_numel = sum(
272
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
273
+
274
+ if debug:
275
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
276
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
277
+ # not asserting if there is a mismatch due to possible padding
278
+ print(f"Have {avail_numel} numels to process.")
279
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
280
+
281
+ # params
282
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
283
+ # out-of-core computing solution
284
+ total_numel = 0
285
+ total_params = 0
286
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
287
+ offset = 0
288
+ avail_numel = full_single_fp32_vector.numel()
289
+ for name, shape in shapes.items():
290
+
291
+ unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
292
+ total_numel += unpartitioned_numel
293
+ total_params += 1
294
+
295
+ if debug:
296
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
297
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
298
+ offset += unpartitioned_numel
299
+
300
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
301
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
302
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
303
+ # live optimizer object, so we are checking that the numbers are within the right range
304
+ align_to = 2 * world_size
305
+
306
+ def zero2_align(x):
307
+ return align_to * math.ceil(x / align_to)
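+ # Illustrative: this checkpoint has 8 ranks, so align_to == 16 and zero2_align(100) == 112.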
308
+
309
+ if debug:
310
+ print(f"original offset={offset}, avail_numel={avail_numel}")
311
+
312
+ offset = zero2_align(offset)
313
+ avail_numel = zero2_align(avail_numel)
314
+
315
+ if debug:
316
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
317
+
318
+ # Sanity check
319
+ if offset != avail_numel:
320
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
321
+
322
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
323
+
324
+
325
+ def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
326
+ exclude_frozen_parameters):
327
+ state_dict = OrderedDict()
328
+
329
+ # buffers
330
+ buffers = zero_model_states[0].buffers
331
+ state_dict.update(buffers)
332
+ if debug:
333
+ print(f"added {len(buffers)} buffers")
334
+
335
+ if not exclude_frozen_parameters:
336
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
337
+
338
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
339
+
340
+ # recover shared parameters
341
+ for pair in zero_model_states[0].shared_params:
342
+ if pair[1] in state_dict:
343
+ state_dict[pair[0]] = state_dict[pair[1]]
344
+
345
+ return state_dict
346
+
347
+
348
+ def zero3_partitioned_param_info(unpartitioned_numel, world_size):
349
+ remainder = unpartitioned_numel % world_size
350
+ padding_numel = (world_size - remainder) if remainder else 0
351
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
352
+ return partitioned_numel, padding_numel
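+ # Illustrative: zero3_partitioned_param_info(10, world_size=4) == (3, 2) --
+ # each of the 4 ranks holds ceil(10/4) = 3 elements, giving 12 slots for
+ # the 10 real elements plus 2 padding elements.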
353
+
354
+
355
+ def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
356
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
357
+ return
358
+
359
+ if debug:
360
+ for i in range(world_size):
361
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
362
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
363
+
364
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
365
+ wanted_params = len(frozen_param_shapes)
366
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
367
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
368
+ print(f'Frozen params: Have {avail_numel} numels to process.')
369
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
370
+
371
+ total_params = 0
372
+ total_numel = 0
373
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
374
+ total_params += 1
375
+ unpartitioned_numel = shape.numel()
376
+ total_numel += unpartitioned_numel
377
+
378
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
379
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
380
+
381
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
382
+
383
+ if debug:
384
+ print(
385
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
386
+ )
387
+
388
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
389
+
390
+
391
+ class GatheredTensor:
392
+ """
393
+ A pseudo tensor that collects partitioned weights.
394
+ It is more memory efficient when there are multiple groups.
395
+ """
396
+
397
+ def __init__(self, flat_groups, flat_groups_offset, offset, partitioned_numel, shape):
398
+ self.flat_groups = flat_groups
399
+ self.flat_groups_offset = flat_groups_offset
400
+ self.offset = offset
401
+ self.partitioned_numel = partitioned_numel
402
+ self.shape = shape
403
+ self.dtype = self.flat_groups[0][0].dtype
404
+
405
+ def contiguous(self):
406
+ """
407
+ Merge partitioned weights from flat_groups into a single tensor.
408
+ """
409
+ end_idx = self.offset + self.partitioned_numel
410
+ world_size = len(self.flat_groups)
411
+ pad_flat_param_chunks = []
412
+
413
+ for rank_i in range(world_size):
414
+ # for each rank, we need to collect weights from related group/groups
415
+ flat_groups_at_rank_i = self.flat_groups[rank_i]
416
+ start_group_id = None
417
+ end_group_id = None
418
+ for group_id in range(len(self.flat_groups_offset)):
419
+ if self.flat_groups_offset[group_id] <= self.offset < self.flat_groups_offset[group_id + 1]:
420
+ start_group_id = group_id
421
+ if self.flat_groups_offset[group_id] < end_idx <= self.flat_groups_offset[group_id + 1]:
422
+ end_group_id = group_id
423
+ break
424
+ # collect weights from related group/groups
425
+ for group_id in range(start_group_id, end_group_id + 1):
426
+ flat_tensor = flat_groups_at_rank_i[group_id]
427
+ start_offset = self.offset - self.flat_groups_offset[group_id]
428
+ end_offset = min(end_idx, self.flat_groups_offset[group_id + 1]) - self.flat_groups_offset[group_id]
429
+ pad_flat_param_chunks.append(flat_tensor[start_offset:end_offset])
430
+
431
+ # collect weights from all ranks
432
+ pad_flat_param = torch.cat(pad_flat_param_chunks, dim=0)
433
+ param = pad_flat_param[:self.shape.numel()].view(self.shape).contiguous()
434
+ return param
435
+
436
+
437
+ def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
438
+ param_shapes = zero_model_states[0].param_shapes
439
+ avail_numel = sum([flat_group.numel() for flat_group in fp32_flat_groups[0]]) * world_size
440
+
441
+ # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
442
+ # param, re-consolidating each param, while dealing with padding if any
443
+
444
+ # merge list of dicts, preserving order
445
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
446
+
447
+ if debug:
448
+ for i in range(world_size):
449
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
450
+
451
+ wanted_params = len(param_shapes)
452
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
453
+ # not asserting if there is a mismatch due to possible padding
454
+ avail_numel = fp32_flat_groups[0].numel() * world_size
455
+ print(f"Trainable params: Have {avail_numel} numels to process.")
456
+ print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
457
+
458
+ # params
459
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
460
+ # out-of-core computing solution
461
+ offset = 0
462
+ total_numel = 0
463
+ total_params = 0
464
+ flat_groups_offset = [0] + list(np.cumsum([flat_tensor.numel() for flat_tensor in fp32_flat_groups[0]]))
465
+ for name, shape in tqdm(param_shapes.items(), desc='Gathering sharded weights'):
466
+ unpartitioned_numel = shape.numel()
467
+ total_numel += unpartitioned_numel
468
+ total_params += 1
469
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
470
+
471
+ if debug:
472
+ print(
473
+ f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
474
+ )
475
+
476
+ # memory efficient tensor
477
+ tensor = GatheredTensor(fp32_flat_groups, flat_groups_offset, offset, partitioned_numel, shape)
478
+ state_dict[name] = tensor
479
+ offset += partitioned_numel
480
+
481
+ offset *= world_size
482
+
483
+ # Sanity check
484
+ if offset != avail_numel:
485
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
486
+
487
+ print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
488
+
489
+
490
+ def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
491
+ exclude_frozen_parameters):
492
+ state_dict = OrderedDict()
493
+
494
+ # buffers
495
+ buffers = zero_model_states[0].buffers
496
+ state_dict.update(buffers)
497
+ if debug:
498
+ print(f"added {len(buffers)} buffers")
499
+
500
+ if not exclude_frozen_parameters:
501
+ _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
502
+
503
+ _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
504
+
505
+ # recover shared parameters
506
+ for pair in zero_model_states[0].shared_params:
507
+ if pair[1] in state_dict:
508
+ state_dict[pair[0]] = state_dict[pair[1]]
509
+
510
+ return state_dict
511
+
512
+
513
+ def to_torch_tensor(state_dict, return_empty_tensor=False):
514
+ """
515
+ Convert state_dict of GatheredTensor to torch tensor
516
+ """
517
+ torch_state_dict = {}
518
+ converted_tensors = {}
519
+ for name, tensor in state_dict.items():
520
+ tensor_id = id(tensor)
521
+ if tensor_id in converted_tensors: # shared tensors
522
+ shared_tensor = torch_state_dict[converted_tensors[tensor_id]]
523
+ torch_state_dict[name] = shared_tensor
524
+ else:
525
+ converted_tensors[tensor_id] = name
526
+ if return_empty_tensor:
527
+ torch_state_dict[name] = torch.empty(tensor.shape, dtype=tensor.dtype)
528
+ else:
529
+ torch_state_dict[name] = tensor.contiguous()
530
+ return torch_state_dict
531
+
532
+
533
+ def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
534
+ tag=None,
535
+ exclude_frozen_parameters=False,
536
+ lazy_mode=False):
537
+ """
538
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
539
+ ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
540
+ via a model hub.
541
+
542
+ Args:
543
+ - ``checkpoint_dir``: path to the desired checkpoint folder
544
+ - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will attempt to load the tag from the 'latest' file, e.g., ``global_step14``
545
+ - ``exclude_frozen_parameters``: exclude frozen parameters
546
+ - ``lazy_mode``: get state_dict in lazy mode. It returns a dict of pseudo tensors instead of torch tensors, which is more memory efficient.
548
+ Convert a pseudo tensor to a torch tensor with ``.contiguous()``
548
+
549
+ Returns:
550
+ - pytorch ``state_dict``
551
+
552
+ A typical usage might be ::
553
+
554
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
555
+ # do the training and checkpoint saving
556
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
557
+ model = model.cpu() # move to cpu
558
+ model.load_state_dict(state_dict)
559
+ # submit to model hub or save the model to share with others
560
+
561
+ In this example the ``model`` will no longer be usable in the deepspeed context of the same
562
+ application. i.e. you will need to re-initialize the deepspeed engine, since
563
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
564
+
565
+ If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
566
+
567
+ Note: the above usage may not work if your application doesn't have sufficient free CPU memory.
568
+ You may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
569
+ the checkpoint. Or you can load state_dict in lazy mode ::
570
+
571
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
572
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, lazy_mode=True) # not on cpu
573
+ for name, lazy_tensor in state_dict.items():
574
+ tensor = lazy_tensor.contiguous() # to cpu
575
+ print(name, tensor)
576
+ # del tensor to release memory if it is no longer in use
577
+ """
578
+ if tag is None:
579
+ latest_path = os.path.join(checkpoint_dir, 'latest')
580
+ if os.path.isfile(latest_path):
581
+ with open(latest_path, 'r') as fd:
582
+ tag = fd.read().strip()
583
+ else:
584
+ raise ValueError(f"Unable to find 'latest' file at {latest_path}")
585
+
586
+ ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
587
+
588
+ if not os.path.isdir(ds_checkpoint_dir):
589
+ raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
590
+
591
+ state_dict = _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
592
+ if lazy_mode:
593
+ return state_dict
594
+ else:
595
+ return to_torch_tensor(state_dict)
596
+
597
+
598
+ def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
599
+ output_dir,
600
+ max_shard_size="5GB",
601
+ safe_serialization=False,
602
+ tag=None,
603
+ exclude_frozen_parameters=False):
+     """
+     Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+     loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+     Args:
+         - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+         - ``output_dir``: directory for the pytorch fp32 state_dict output files
+         - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
+         - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
+         - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+         - ``exclude_frozen_parameters``: exclude frozen parameters
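+
+     A typical usage might be (a minimal sketch; the paths are illustrative) ::
+
+         from deepspeed.utils.zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict
+         # writes consolidated fp32 shard file(s) to output_dir, plus an index file when sharded
+         convert_zero_checkpoint_to_fp32_state_dict('path/checkpoint-12', 'path/checkpoint-12-output',
+                                                    safe_serialization=True)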
+     """
+
+     # Dependency pre-check
+     if safe_serialization:
+         try:
+             from safetensors.torch import save_file
+         except ImportError:
+             print('If you want to use `safe_serialization`, please `pip install safetensors`')
+             raise
+     if max_shard_size is not None:
+         try:
+             from huggingface_hub import split_torch_state_dict_into_shards
+         except ImportError:
+             print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
+             raise
+
+     # Convert zero checkpoint to state_dict
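+     # (lazy_mode=True returns pseudo tensors that are materialized shard-by-shard below,
+     # which keeps peak CPU memory low)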
+     state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
+                                                           tag,
+                                                           exclude_frozen_parameters,
+                                                           lazy_mode=True)
+
+
+     # Shard the model if it is too big.
+     weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
+     if max_shard_size is not None:
+         filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
+         # a memory-efficient approach for sharding
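+         # (assumption: the shard splitter only needs tensor shapes/dtypes to plan shards,
+         # so empty tensors stand in for the real weights)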
+         empty_state_dict = to_torch_tensor(state_dict, return_empty_tensor=True)
+         state_dict_split = split_torch_state_dict_into_shards(empty_state_dict,
+                                                               filename_pattern=filename_pattern,
+                                                               max_shard_size=max_shard_size)
+     else:
+         from collections import namedtuple
+         StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
+         state_dict_split = StateDictSplit(is_sharded=False,
+                                           filename_to_tensors={weights_name: list(state_dict.keys())})
+
+     # Save the model by shard
+     os.makedirs(output_dir, exist_ok=True)
+     filename_to_tensors = state_dict_split.filename_to_tensors.items()
+     for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
+         shard_state_dict = {tensor_name: state_dict[tensor_name] for tensor_name in tensors}
+         shard_state_dict = to_torch_tensor(shard_state_dict)  # materialize only this shard's tensors
+         output_path = os.path.join(output_dir, shard_file)
+         if safe_serialization:
+             save_file(shard_state_dict, output_path, metadata={"format": "pt"})
+         else:
+             torch.save(shard_state_dict, output_path)
+         # release the memory of the current shard
+         for tensor_name in list(shard_state_dict.keys()):
+             del state_dict[tensor_name]
+             del shard_state_dict[tensor_name]
+         del shard_state_dict
+         gc.collect()
+
+     # Save index if sharded
+     if state_dict_split.is_sharded:
+         index = {
+             "metadata": state_dict_split.metadata,
+             "weight_map": state_dict_split.tensor_to_filename,
+         }
+         save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
+         save_index_file = os.path.join(output_dir, save_index_file)
+         with open(save_index_file, "w", encoding="utf-8") as f:
+             content = json.dumps(index, indent=2, sort_keys=True) + "\n"
+             f.write(content)
+
+
+ def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+     """
+     1. Put the provided model to cpu
+     2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+     3. Load it into the provided model
+
+     Args:
+         - ``model``: the model object to update
+         - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+         - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+     Returns:
+         - ``model``: the modified model
+
+     Make sure you have plenty of CPU memory available before you call this function. If you don't
+     have enough, use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
+     conveniently placed for you in the checkpoint folder.
+
+     A typical usage might be ::
+
+         from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+         model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+         # submit to model hub or save the model to share with others
+
+     Note that once this is run, the ``model`` will no longer be usable in the deepspeed context
+     of the same application, i.e. you will need to re-initialize the deepspeed engine, since
+     ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+     """
+     logger.info("Extracting fp32 weights")
+     state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+     logger.info("Overwriting model with fp32 weights")
+     model = model.cpu()
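+     # strict=False tolerates keys missing from the consolidated state_dict
+     # (for example when frozen parameters were excluded during conversion)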
+     model.load_state_dict(state_dict, strict=False)
+
+     return model
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+     parser.add_argument("checkpoint_dir",
+                         type=str,
+                         help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+     parser.add_argument("output_dir",
+                         type=str,
+                         help="directory for the pytorch fp32 state_dict output files "
+                         "(e.g. path/checkpoint-12-output/)")
+     parser.add_argument(
+         "--max_shard_size",
+         type=str,
+         default="5GB",
+         help="The maximum size for a checkpoint before being sharded. Each checkpoint shard will then be "
+         "smaller than this size. If expressed as a string, it needs to be digits followed by a unit (like `5MB`). "
+         "We default to 5GB so that models can run easily on free-tier google colab instances "
+         "without CPU OOM issues.")
+     parser.add_argument(
+         "--safe_serialization",
+         default=False,
+         action='store_true',
+         help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).")
+     parser.add_argument("-t",
+                         "--tag",
+                         type=str,
+                         default=None,
+                         help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
+     parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
+     parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+     args = parser.parse_args()
+
+     debug = args.debug
+
+     convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
+                                                args.output_dir,
+                                                max_shard_size=args.max_shard_size,
+                                                safe_serialization=args.safe_serialization,
+                                                tag=args.tag,
+                                                exclude_frozen_parameters=args.exclude_frozen_parameters)
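+
+ # Example invocation (an illustrative sketch; adjust the paths to your checkpoint):
+ #   python zero_to_fp32.py path/checkpoint-12 path/checkpoint-12-output --safe_serialization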