duoduoyeah committed (verified) · Commit 965babe · 1 Parent(s): ddb3577

Add files using upload-large-folder tool

pdlm_depth4_bs4_pr1_ratio40_causal/base_checkpoints/d4/meta_000416.json ADDED
@@ -0,0 +1,57 @@
+ {
+   "step": 416,
+   "val_bpb": 1.0457755234725363,
+   "model_config": {
+     "sequence_len": 1024,
+     "pure_vocab_size": 4096,
+     "all_vocab_size": 4917,
+     "n_layer": 4,
+     "n_head": 2,
+     "n_kv_head": 2,
+     "n_embd": 256,
+     "prefix_pure_tokens": 1,
+     "mask_token_id": 4096
+   },
+   "user_config": {
+     "run": "pdlm_depth4_bs4_pr1_ratio40_causal",
+     "device_type": "",
+     "depth": 4,
+     "max_seq_len": 1024,
+     "block_size": 4,
+     "prefix_pure_tokens": 1,
+     "is_causal": true,
+     "noise_total_steps": 16,
+     "num_iterations": -1,
+     "target_flops": -1.0,
+     "target_param_data_ratio": 40,
+     "device_batch_size": 64,
+     "total_batch_size": 524288,
+     "embedding_lr": 0.2,
+     "unembedding_lr": 0.004,
+     "weight_decay": 0.0,
+     "matrix_lr": 0.02,
+     "grad_clip": 1.0,
+     "warmup_ratio": 0.0,
+     "warmdown_ratio": 0.2,
+     "final_lr_frac": 0.0,
+     "resume_from_step": -1,
+     "eval_every": 99999,
+     "eval_tokens": 10485760,
+     "core_metric_every": -1,
+     "core_metric_max_per_task": 500,
+     "sample_every": 2000,
+     "save_every": -1,
+     "model_tag": ""
+   },
+   "device_batch_size": 64,
+   "max_seq_len": 1024,
+   "dataloader_state_dict": {
+     "pq_idx": 3,
+     "rg_idx": 129
+   },
+   "loop_state": {
+     "min_val_bpb": 1.0457755234725363,
+     "smooth_train_loss": 1.788696900282726,
+     "total_training_time": 310.5307412147522
+   }
+ }
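
The meta file is plain JSON, so the checkpoint can be inspected without loading the model. A minimal sketch (standard library only; the path is the one added in this commit) that reads it and re-derives the token count from the step and batch size:

```python
import json

META = "pdlm_depth4_bs4_pr1_ratio40_causal/base_checkpoints/d4/meta_000416.json"

with open(META) as f:
    meta = json.load(f)

print(meta["step"], meta["val_bpb"])  # 416 1.0457755234725363

# Tokens seen = optimizer steps x tokens per step (total_batch_size).
tokens_seen = meta["step"] * meta["user_config"]["total_batch_size"]
print(tokens_seen)  # 218103808, matching the training report below
```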
pdlm_depth4_bs4_pr1_ratio40_causal/base_checkpoints/d4/model_000416.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9d909d5fc8c0cd173523c8dd3415c2f7fbd6435feb26af0d0b7eac623d4125a3
+ size 19305717
pdlm_depth4_bs4_pr1_ratio40_causal/base_checkpoints/d4/optim_000416_rank0.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:45a7bd54f7ad72f32b7805f76e30529f5cc71976992623c7d3e8d08a93e599a7
+ size 26017109
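
Both `.pt` entries above are Git LFS pointer files, not the tensors themselves: three lines giving the spec version, the SHA-256 of the actual blob, and its size in bytes. A sketch of how one might check a fetched blob against its pointer (the format follows the git-lfs spec referenced in the files; the paths at the bottom are illustrative, not from this repo):

```python
import hashlib
import os

def parse_lfs_pointer(path):
    """Read a Git LFS pointer file into a {key: value} dict."""
    with open(path) as f:
        return dict(line.strip().split(" ", 1) for line in f if line.strip())

def verify_blob(pointer_path, blob_path):
    """Check a downloaded blob's size and SHA-256 against its LFS pointer."""
    ptr = parse_lfs_pointer(pointer_path)
    assert os.path.getsize(blob_path) == int(ptr["size"]), "size mismatch"
    h = hashlib.sha256()
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    assert "sha256:" + h.hexdigest() == ptr["oid"], "sha256 mismatch"

# Illustrative usage; after `git lfs pull` the working-tree file is the blob:
# verify_blob("model_000416.pt.pointer", "model_000416.pt")
```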
pdlm_depth4_bs4_pr1_ratio40_causal/report/base-model-training.md ADDED
@@ -0,0 +1,49 @@
+ ## Base model training
+ timestamp: 2025-12-29 17:44:37
+
+ - run: pdlm_depth4_bs4_pr1_ratio40_causal
+ - device_type:
+ - depth: 4
+ - max_seq_len: 1024
+ - block_size: 4
+ - prefix_pure_tokens: 1
+ - is_causal: True
+ - noise_total_steps: 16
+ - num_iterations: -1
+ - target_flops: -1.0000
+ - target_param_data_ratio: 40
+ - device_batch_size: 64
+ - total_batch_size: 524,288
+ - embedding_lr: 0.2000
+ - unembedding_lr: 0.0040
+ - weight_decay: 0.0000
+ - matrix_lr: 0.0200
+ - grad_clip: 1.0000
+ - warmup_ratio: 0.0000
+ - warmdown_ratio: 0.2000
+ - final_lr_frac: 0.0000
+ - resume_from_step: -1
+ - eval_every: 99,999
+ - eval_tokens: 10,485,760
+ - core_metric_every: -1
+ - core_metric_max_per_task: 500
+ - sample_every: 2000
+ - save_every: -1
+ - model_tag:
+ - Number of parameters: 5,453,056
+ - Number of FLOPs per token: 3.774874e+07
+ - Calculated number of iterations: 416
+ - Number of training tokens: 218,103,808
+ - Tokens : Params ratio: 39.9966
+ - DDP world size: 1
+ - warmup_ratio: 0.0000
+ - warmdown_ratio: 0.2000
+ - final_lr_frac: 0.0000
+ - Minimum validation bpb: 1.0458
+ - Final validation bpb: 1.0458
+ - CORE metric estimate: None
+ - MFU %: 2.62%
+ - Total training flops: 8.233143e+15
+ - Total training time: 5.18m
+ - Peak memory usage: 6905.37MiB
+
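
The report's derived numbers are internally consistent and can be reproduced from the listed values. A quick arithmetic check (all constants copied from the report; the floor when computing the iteration count is an assumption about the training script, but it reproduces the reported 416):

```python
params = 5_453_056            # Number of parameters
flops_per_token = 3.774874e7  # Number of FLOPs per token
total_batch_size = 524_288    # tokens per optimizer step
target_ratio = 40             # target_param_data_ratio

iterations = int(target_ratio * params / total_batch_size)  # 416
tokens = iterations * total_batch_size                      # 218_103_808
print(tokens / params)           # ~39.9966, the reported Tokens : Params ratio
print(flops_per_token * tokens)  # ~8.233e+15, the reported total training flops
```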
pdlm_depth4_bs4_pr1_ratio40_causal/tokenizer/token_bytes.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1c8b99c0d5a1b87b87118e840f69510440302023cd514b241614fb562373d7ce
+ size 17961
pdlm_depth4_bs4_pr1_ratio40_causal/tokenizer/token_maps.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e018730860c8e77e8cbba4b57ac9c7ba6798b5926dce925743959b932a099964
+ size 1850237
pdlm_depth4_bs4_pr1_ratio40_causal/tokenizer/tokenizer.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4f874c4250ec76e2e8c4f97e91c55cfdf74d9f8eedaae14cd22db36bb718ee19
+ size 61662
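
The tokenizer ships as three artifacts whose layout the commit does not document. A hypothetical loading sketch, assuming the `.pt` files are torch-serialized and `tokenizer.pkl` is a plain pickle (the paths are from this commit; everything about the objects' contents is an unverified assumption):

```python
import pickle
import torch

TOK_DIR = "pdlm_depth4_bs4_pr1_ratio40_causal/tokenizer"

# Assumed: torch-saved tensors/tables backing the 4096-token pure vocab.
token_bytes = torch.load(f"{TOK_DIR}/token_bytes.pt", map_location="cpu")
token_maps = torch.load(f"{TOK_DIR}/token_maps.pt", map_location="cpu")

# Unpickling requires the tokenizer's defining module to be importable.
with open(f"{TOK_DIR}/tokenizer.pkl", "rb") as f:
    tokenizer = pickle.load(f)
```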