Charlie81 committed
Commit 799b295 · 2 parents: d4a6b93 5aaf9d9

Merge branch 'main' of https://huggingface.co/Charlie81/LoRE
checkpoints/checkpoint-21/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+ "architectures": [
+ "MyOlmoeForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "clip_qkv": null,
+ "eos_token_id": 50279,
+ "hidden_act": "silu",
+ "hidden_size": 2048,
+ "initializer_range": 0.02,
+ "intermediate_size": 1024,
+ "max_position_embeddings": 4096,
+ "max_small_expert_count": 64,
+ "model_type": "olmoe",
+ "norm_topk_prob": false,
+ "num_attention_heads": 16,
+ "num_experts": 64,
+ "num_experts_per_tok": 2,
+ "num_hidden_layers": 16,
+ "num_key_value_heads": 16,
+ "num_small_experts": 0,
+ "output_router_logits": false,
+ "pad_token_id": 1,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 10000.0,
+ "router_aux_loss_coef": 0.01,
+ "small_expert_count": 64,
+ "small_expert_intermediate_ratio": 16,
+ "small_expert_intermediate_size": 0,
+ "small_expert_sparsity_coef": 0.1,
+ "small_expert_strategy": "constant",
+ "tie_word_embeddings": false,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.53.1",
+ "use_cache": true,
+ "vocab_size": 50304
+ }
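The config above declares a 64-expert OLMoE-style mixture-of-experts model (2 experts routed per token, 16 layers, hidden size 2048) plus custom small-expert fields. A minimal sketch of inspecting it with transformers, assuming the checkpoint directory has been pulled locally: model_type "olmoe" resolves to the stock OlmoeConfig, and the nonstandard small-expert keys survive as plain attributes; actually instantiating MyOlmoeForCausalLM would need the repo's own modeling code.

from transformers import AutoConfig

# model_type "olmoe" maps to transformers' built-in OlmoeConfig
cfg = AutoConfig.from_pretrained("checkpoints/checkpoint-21")
print(cfg.num_experts, cfg.num_experts_per_tok)     # 64 2
# Custom LoRE keys are retained as extra attributes on the config:
print(getattr(cfg, "small_expert_strategy", None))  # "constant"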
checkpoints/checkpoint-21/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+ "_from_model_config": true,
+ "eos_token_id": 50279,
+ "pad_token_id": 1,
+ "transformers_version": "4.53.1"
+ }
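These saved generation defaults can be read back directly; a small sketch, assuming the same local checkout:

from transformers import GenerationConfig

gen = GenerationConfig.from_pretrained("checkpoints/checkpoint-21")
print(gen.eos_token_id, gen.pad_token_id)  # 50279 1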
checkpoints/checkpoint-21/model-00001-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9a1b9651bad1a045a178e22cf198d5070c94d4374f6331086e38801fe8d88ca3
+ size 4997482624
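The three-line stubs above are Git LFS pointer files: the real shard is fetched with git lfs pull, and the oid is the SHA-256 of the blob. A minimal sketch of verifying a downloaded shard against its pointer (the path is illustrative):

import hashlib

def sha256_of(path, chunk=1 << 20):
    # Stream the file so multi-GB shards don't need to fit in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk):
            h.update(block)
    return h.hexdigest()

shard = "checkpoints/checkpoint-21/model-00001-of-00003.safetensors"
assert sha256_of(shard) == "9a1b9651bad1a045a178e22cf198d5070c94d4374f6331086e38801fe8d88ca3"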
checkpoints/checkpoint-21/model-00002-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:72ab84fe9b48e6271dc2bfe0eb95fd06e858a7ab827ff54c8ccd46ca0d28ae12
+ size 4999439616
checkpoints/checkpoint-21/model-00003-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:feaf170d726130b27a9b4631d84d0d1cd1ddedc6cb2e23a808f058ef0a7cb6af
+ size 3892418912
checkpoints/checkpoint-21/model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoints/checkpoint-21/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6edd3bec65a60cdf2a0818662c2eb24c666b8362f01d273395d740f2985e70e1
+ size 101356346
checkpoints/checkpoint-21/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2b66e3cc7c452b707ddac5caf0aa17618afb9bc1a0333600a22c4afb353f3165
+ size 14244
checkpoints/checkpoint-21/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5638497377bddef6f6aaa8c2376c812451579d27fc6d2f46023c4e4a5b884ade
+ size 1064
checkpoints/checkpoint-21/trainer_state.json ADDED
@@ -0,0 +1,48 @@
+ {
+ "best_global_step": null,
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.001030188193307456,
+ "eval_steps": 500,
+ "global_step": 21,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.0004905658063368838,
+ "grad_norm": 109.5,
+ "learning_rate": 7.500000000000001e-05,
+ "loss": 89.2631,
+ "step": 10
+ },
+ {
+ "epoch": 0.0009811316126737675,
+ "grad_norm": 189.0,
+ "learning_rate": 3.0153689607045845e-06,
+ "loss": 64.0054,
+ "step": 20
+ }
+ ],
+ "logging_steps": 10,
+ "max_steps": 21,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 1,
+ "save_steps": 2000,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 5.649340538290176e+16,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+ }
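trainer_state.json records the run's loss curve in log_history (loss falls from 89.2631 at step 10 to 64.0054 at step 20 over this 21-step run). A small sketch of pulling those numbers out, assuming the local checkout:

import json

with open("checkpoints/checkpoint-21/trainer_state.json") as f:
    state = json.load(f)

# Each logged entry carries the step, loss, and learning rate:
for entry in state["log_history"]:
    print(entry["step"], entry["loss"], entry["learning_rate"])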
checkpoints/checkpoint-21/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3ce7ae413f0631f142db7509afe4ffe786400454c38e283669197f678a70ca79
+ size 5304
checkpoints/checkpoint-22/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+ "architectures": [
+ "MyOlmoeForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "clip_qkv": null,
+ "eos_token_id": 50279,
+ "hidden_act": "silu",
+ "hidden_size": 2048,
+ "initializer_range": 0.02,
+ "intermediate_size": 1024,
+ "max_position_embeddings": 4096,
+ "max_small_expert_count": 64,
+ "model_type": "olmoe",
+ "norm_topk_prob": false,
+ "num_attention_heads": 16,
+ "num_experts": 64,
+ "num_experts_per_tok": 2,
+ "num_hidden_layers": 16,
+ "num_key_value_heads": 16,
+ "num_small_experts": 0,
+ "output_router_logits": false,
+ "pad_token_id": 1,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 10000.0,
+ "router_aux_loss_coef": 0.01,
+ "small_expert_count": 64,
+ "small_expert_intermediate_ratio": 16,
+ "small_expert_intermediate_size": 0,
+ "small_expert_sparsity_coef": 0.1,
+ "small_expert_strategy": "constant",
+ "tie_word_embeddings": false,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.53.1",
+ "use_cache": true,
+ "vocab_size": 50304
+ }
checkpoints/checkpoint-22/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+ "_from_model_config": true,
+ "eos_token_id": 50279,
+ "pad_token_id": 1,
+ "transformers_version": "4.53.1"
+ }
checkpoints/checkpoint-22/model-00001-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9a1b9651bad1a045a178e22cf198d5070c94d4374f6331086e38801fe8d88ca3
+ size 4997482624
checkpoints/checkpoint-22/model-00002-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:72ab84fe9b48e6271dc2bfe0eb95fd06e858a7ab827ff54c8ccd46ca0d28ae12
+ size 4999439616
checkpoints/checkpoint-22/model-00003-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:feaf170d726130b27a9b4631d84d0d1cd1ddedc6cb2e23a808f058ef0a7cb6af
+ size 3892418912
checkpoints/checkpoint-22/model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoints/checkpoint-22/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1d5f704458736a5e920d76d2161498155adb8d57fe6813654a715c0b994dc41f
+ size 101356346
checkpoints/checkpoint-22/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2b66e3cc7c452b707ddac5caf0aa17618afb9bc1a0333600a22c4afb353f3165
+ size 14244
checkpoints/checkpoint-22/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f57272cb87e2608835989af1f900831f7cb1d5dd0791977cc10a2cf1a0007e45
+ size 1064
checkpoints/checkpoint-22/trainer_state.json ADDED
@@ -0,0 +1,48 @@
+ {
+ "best_global_step": null,
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.0010792447739411443,
+ "eval_steps": 500,
+ "global_step": 22,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.0004905658063368838,
+ "grad_norm": 109.5,
+ "learning_rate": 7.500000000000001e-05,
+ "loss": 89.2631,
+ "step": 10
+ },
+ {
+ "epoch": 0.0009811316126737675,
+ "grad_norm": 189.0,
+ "learning_rate": 3.0153689607045845e-06,
+ "loss": 64.0054,
+ "step": 20
+ }
+ ],
+ "logging_steps": 10,
+ "max_steps": 21,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 1,
+ "save_steps": 2000,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 5.918356754399232e+16,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+ }
checkpoints/checkpoint-22/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3ce7ae413f0631f142db7509afe4ffe786400454c38e283669197f678a70ca79
+ size 5304
final_model/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+ "architectures": [
+ "MyOlmoeForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "clip_qkv": null,
+ "eos_token_id": 50279,
+ "hidden_act": "silu",
+ "hidden_size": 2048,
+ "initializer_range": 0.02,
+ "intermediate_size": 1024,
+ "max_position_embeddings": 4096,
+ "max_small_expert_count": 64,
+ "model_type": "olmoe",
+ "norm_topk_prob": false,
+ "num_attention_heads": 16,
+ "num_experts": 64,
+ "num_experts_per_tok": 2,
+ "num_hidden_layers": 16,
+ "num_key_value_heads": 16,
+ "num_small_experts": 0,
+ "output_router_logits": false,
+ "pad_token_id": 1,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 10000.0,
+ "router_aux_loss_coef": 0.01,
+ "small_expert_count": 64,
+ "small_expert_intermediate_ratio": 16,
+ "small_expert_intermediate_size": 0,
+ "small_expert_sparsity_coef": 0.1,
+ "small_expert_strategy": "constant",
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.53.1",
+ "use_cache": true,
+ "vocab_size": 50304
+ }
final_model/small_experts_and_gates.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:65d2b340a7a0573d8b8ec06ce435f04e5da418f2fefa8c4a3cbc4a3dc548fd32
+ size 50670946
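small_experts_and_gates.bin sits outside the sharded safetensors weights, so it presumably carries the LoRE-specific small-expert and gate parameters separately. A sketch of peeking at it, assuming it is an ordinary torch state dict; how these tensors attach to the model is defined by the repo's custom MyOlmoeForCausalLM code, not by stock transformers:

import torch

# weights_only=True avoids executing arbitrary pickled code on load
extra = torch.load("final_model/small_experts_and_gates.bin",
                   map_location="cpu", weights_only=True)
for name, tensor in list(extra.items())[:5]:
    print(name, tuple(tensor.shape))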
logs/events.out.tfevents.1752339720.97cc4a26e6e3.3754.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b4739d38270ac1fedc65627fa89a0f6639cd4b11cd8c2f6b406bd4b13fe6b585
- size 5713
+ oid sha256:e58561b2530f5523f881c3b5961920a5f2f272bec4ec35673f4d0dcc0eb3fda4
+ size 285
logs/events.out.tfevents.1752340447.97cc4a26e6e3.8782.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0c1c7489eb4ddc7051e46a4f86a34212595f9fbbd162375de3fa0a572aebdf2f
+ size 5299