farpluto committed
Commit 92c8f07 · verified · 1 Parent(s): 43a1adf

Upload 124M GPT with symbolic reasoning distillation

README.md ADDED
@@ -0,0 +1,20 @@
+ ---
+ license: apache-2.0
+ language: [en]
+ tags: [text-generation, gpt2, knowledge-distillation, symbolic-reasoning, chain-of-thought, from-scratch]
+ datasets: [HuggingFaceFW/fineweb-edu, openai/gsm8k]
+ pipeline_tag: text-generation
+ ---
+ # 124M GPT with Symbolic Reasoning Distillation
+
+ Trained **from scratch** on mixed data with **dual-alpha distillation**:
+
+ | Stream | Dataset | Alpha | Purpose |
+ |--------|---------|-------|---------|
+ | General | FineWeb-Edu | 0.2 | Language modeling, light teacher guidance |
+ | **Reasoning** | **GSM8K chain-of-thought** | **0.8** | **Heavy distillation: teacher guides step-by-step math reasoning** |
+
+ - **Teacher**: SmolLM-135M-Instruct (frozen)
+ - **Time**: ~75 min on 1x A100
+ - **Tokens**: 327,680,000 (0 reasoning / 20,000 general batches)
+ - **Best loss**: 186.6474
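
The repo does not include the training loss itself; a minimal sketch of what the dual-alpha objective described in the README could look like (hypothetical `distillation_loss` helper; the temperature value is an assumption, and since the student shares the teacher's 49152-token vocabulary, the two logit tensors align directly):

```python
# Hypothetical sketch of the dual-alpha objective: blend hard-label
# cross-entropy with temperature-softened KL against the frozen teacher.
# alpha = 0.2 on FineWeb-Edu batches, 0.8 on GSM8K chain-of-thought batches.
import torch.nn.functional as F

def distillation_loss(student_logits, teacher_logits, labels, alpha, temperature=2.0):
    # Standard next-token cross-entropy against ground-truth labels.
    ce = F.cross_entropy(
        student_logits.view(-1, student_logits.size(-1)),
        labels.view(-1),
        ignore_index=-100,
    )
    # KL divergence between temperature-softened student and teacher
    # distributions; the T^2 factor keeps gradient scale comparable.
    kl = F.kl_div(
        F.log_softmax(student_logits / temperature, dim=-1),
        F.softmax(teacher_logits / temperature, dim=-1),
        reduction="batchmean",
    ) * temperature ** 2
    # alpha weights the teacher signal per stream.
    return alpha * kl + (1.0 - alpha) * ce
```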
chat_template.jinja ADDED
@@ -0,0 +1,4 @@
+ {% for message in messages %}{{'<|im_start|>' + message['role'] + '
+ ' + message['content'] + '<|im_end|>' + '
+ '}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant
+ ' }}{% endif %}
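
This is the standard ChatML format. A quick usage check with transformers (the repo id below is a placeholder; expected output shown as comments):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("farpluto/gpt-124m-symbolic-distill")  # placeholder repo id

messages = [{"role": "user", "content": "What is 7 * 8?"}]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# <|im_start|>user
# What is 7 * 8?<|im_end|>
# <|im_start|>assistant
```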
config.json ADDED
@@ -0,0 +1,34 @@
+ {
+ "activation_function": "gelu_new",
+ "add_cross_attention": false,
+ "architectures": [
+ "GPT2LMHeadModel"
+ ],
+ "attn_pdrop": 0.0,
+ "bos_token_id": 1,
+ "dtype": "float32",
+ "embd_pdrop": 0.0,
+ "eos_token_id": 2,
+ "initializer_range": 0.02,
+ "layer_norm_epsilon": 1e-05,
+ "model_type": "gpt2",
+ "n_embd": 768,
+ "n_head": 12,
+ "n_inner": null,
+ "n_layer": 12,
+ "n_positions": 512,
+ "pad_token_id": null,
+ "reorder_and_upcast_attn": false,
+ "resid_pdrop": 0.0,
+ "scale_attn_by_inverse_layer_idx": false,
+ "scale_attn_weights": true,
+ "summary_activation": null,
+ "summary_first_dropout": 0.1,
+ "summary_proj_to_labels": true,
+ "summary_type": "cls_index",
+ "summary_use_proj": true,
+ "tie_word_embeddings": true,
+ "transformers_version": "5.0.0",
+ "use_cache": true,
+ "vocab_size": 49152
+ }
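
The config is a standard GPT-2-small skeleton (12 layers, 12 heads, 768-dim embeddings) but with a 512-token context and the 49152-token SmolLM vocabulary in place of GPT-2's 1024/50257. A sketch to rebuild the architecture and confirm the parameter count:

```python
from transformers import GPT2Config, GPT2LMHeadModel

config = GPT2Config(
    vocab_size=49152, n_positions=512, n_embd=768, n_layer=12, n_head=12,
    bos_token_id=1, eos_token_id=2,
    attn_pdrop=0.0, embd_pdrop=0.0, resid_pdrop=0.0,
)
model = GPT2LMHeadModel(config)
# Tied input/output embeddings are counted once: ~123.2M parameters,
# i.e. the "124M" of the model name.
print(sum(p.numel() for p in model.parameters()))
```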
generation_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "_from_model_config": true,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "output_attentions": false,
+ "output_hidden_states": false,
+ "transformers_version": "5.0.0",
+ "use_cache": true
+ }
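
No sampling parameters are set, so generation falls back to model defaults (greedy decoding, bos=1, eos=2). A hedged end-to-end example (placeholder repo id again):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "farpluto/gpt-124m-symbolic-distill"  # placeholder repo id
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(repo)

inputs = tokenizer("Question: Tom has 3 boxes of 12 pencils. How many pencils?\nAnswer:", return_tensors="pt")
with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```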
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:80740b3aefcec895c40c43103c41873c4b57f8cf76dbe85beac6487400bcfb78
+ size 492806784
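
Sanity check on the pointer above: 492,806,784 bytes at 4 bytes per float32 weight is roughly 123.2M parameters, consistent with the ~124M model described in the README.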
raw_model.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5168c69e9f8a5df951d0e6900b3a30627680de7abe822a4ee2547d7c72029019
+ size 492414052
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "add_prefix_space": false,
+ "backend": "tokenizers",
+ "bos_token": "<|im_start|>",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|im_end|>",
+ "extra_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>"
+ ],
+ "is_local": false,
+ "model_max_length": 1000000,
+ "pad_token": "<|im_end|>",
+ "tokenizer_class": "TokenizersBackend",
+ "unk_token": "<|endoftext|>",
+ "vocab_size": 49152
+ }
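
Note that the ChatML markers double as the special tokens here, with padding reusing `<|im_end|>`. A quick check (placeholder repo id):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("farpluto/gpt-124m-symbolic-distill")  # placeholder repo id
print(tokenizer.bos_token, tokenizer.eos_token, tokenizer.pad_token)
# <|im_start|> <|im_end|> <|im_end|>
```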
training_config.json ADDED
@@ -0,0 +1,13 @@
+ {
+ "teacher": "HuggingFaceTB/SmolLM-135M-Instruct",
+ "general_data": "HuggingFaceFW/fineweb-edu/sample-10BT",
+ "reasoning_data": "openai/gsm8k",
+ "alpha_general": 0.2,
+ "alpha_reasoning": 0.8,
+ "reasoning_ratio": 0.25,
+ "steps": 5000,
+ "tokens": 327680000,
+ "best_loss": 186.64740371704102,
+ "reasoning_batches": 0,
+ "general_batches": 20000
+ }
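
With `reasoning_ratio: 0.25` the intent appears to be roughly one GSM8K batch in four, though the recorded counts (`reasoning_batches: 0`) show the reasoning stream never fired in this run. The token arithmetic: 327,680,000 tokens over 20,000 batches is 16,384 tokens per batch, i.e. 32 sequences at the 512-token context; the 4:1 ratio of batches to `steps` possibly reflects gradient accumulation. A minimal sketch of the intended sampler (hypothetical `next_batch` helper):

```python
import random

def next_batch(general_iter, reasoning_iter, reasoning_ratio=0.25):
    # Pick the stream for this step; each stream carries its own alpha.
    if random.random() < reasoning_ratio:
        return next(reasoning_iter), 0.8  # GSM8K: heavy teacher guidance
    return next(general_iter), 0.2        # FineWeb-Edu: light guidance
```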