loubnabnl (HF Staff) committed
Commit 3db9585 · verified · 1 parent: fee6955

Create throughput_debugging/nn1_nanosets_32k_steps.yaml

throughput_debugging/nn1_nanosets_32k_steps.yaml ADDED
@@ -0,0 +1,119 @@
+{
+  "nanotron_config": {
+    "value": {
+      "checkpoints": {
+        "checkpoint_interval": 2000,
+        "checkpoints_path": "/scratch/loubna/checkpoints/smollm3-ablations-3p56G-3B-nanotron-old-nn1-fwedu-seed-6-32k",
+        "checkpoints_path_is_shared_file_system": false,
+        "resume_checkpoint_path": null,
+        "save_initial_state": false
+      },
+      "data_stages": [
+        {
+          "data": {
+            "dataset": {
+              "dataset_folder": [
+                "/fsx/loubna/datasets/llama_tokenized/fineweb-edu/merged"
+              ]
+            },
+            "num_loading_workers": 1,
+            "seed": 6
+          },
+          "name": "stable",
+          "start_training_step": 1
+        }
+      ],
+      "general": {
+        "benchmark_csv_path": null,
+        "consumed_train_samples": null,
+        "ignore_sanity_checks": true,
+        "project": "smollm3-training",
+        "run": "3B-nanotron-old-nn1-fwedu-32k",
+        "seed": 6,
+        "step": null
+      },
+      "lighteval": null,
+      "logging": {
+        "iteration_step_info_interval": 1,
+        "log_level": "info",
+        "log_level_replica": "info"
+      },
+      "model": {
+        "ddp_bucket_cap_mb": 128,
+        "dtype": "bfloat16",
+        "init_method": {
+          "std": 0.02
+        },
+        "make_vocab_size_divisible_by": 1,
+        "model_config": {
+          "bos_token_id": 128000,
+          "eos_token_id": 128001,
+          "hidden_act": "silu",
+          "hidden_size": 2048,
+          "initializer_range": 0.02,
+          "intermediate_size": 11008,
+          "is_llama_config": true,
+          "max_position_embeddings": 4096,
+          "num_attention_heads": 16,
+          "num_hidden_layers": 36,
+          "num_key_value_heads": 4,
+          "pad_token_id": null,
+          "pretraining_tp": 2,
+          "rms_norm_eps": 0.000001,
+          "rope_scaling": null,
+          "rope_theta": 10000,
+          "tie_word_embeddings": true,
+          "use_cache": true,
+          "vocab_size": 128256
+        }
+      },
+      "optimizer": {
+        "accumulate_grad_in_fp32": true,
+        "clip_grad": 1,
+        "learning_rate_scheduler": {
+          "learning_rate": 0.0002,
+          "lr_decay_starting_step": 26000,
+          "lr_decay_steps": 6000,
+          "lr_decay_style": "linear",
+          "lr_warmup_steps": 2000,
+          "lr_warmup_style": "linear",
+          "min_decay_lr": 0
+        },
+        "optimizer_factory": {
+          "adam_beta1": 0.9,
+          "adam_beta2": 0.95,
+          "adam_eps": 1e-8,
+          "name": "adamW",
+          "torch_adam_is_fused": true
+        },
+        "weight_decay": 0.01,
+        "zero_stage": 0
+      },
+      "parallelism": {
+        "dp": 4,
+        "expert_parallel_size": 1,
+        "pp": 1,
+        "pp_engine": "1f1b",
+        "recompute_layer": false,
+        "tp": 2,
+        "tp_linear_async_communication": true,
+        "tp_mode": "REDUCE_SCATTER"
+      },
+      "profiler": null,
+      "tokenizer": {
+        "tokenizer_max_length": 4096,
+        "tokenizer_name_or_path": "meta-llama/Llama-3.2-1B",
+        "tokenizer_revision": null
+      },
+      "tokens": {
+        "batch_accumulation_per_replica": 1,
+        "limit_test_batches": 0,
+        "limit_val_batches": 0,
+        "micro_batch_size": 3,
+        "sequence_length": 4096,
+        "train_steps": 32000,
+        "val_check_interval": 100
+      }
+    }
+  }
+}
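
For quick sanity-checking after checkout, a minimal Python sketch (not part of the commit): it loads the file added above and derives the effective batch size and token budget, assuming the path from this commit and the usual nanotron convention that the global batch size is dp * micro_batch_size * batch_accumulation_per_replica. The JSON-style content parses with a standard YAML loader.

# Minimal sketch: inspect the committed config and derive batch/token figures.
# Path and the global-batch convention are assumptions, not stated in the diff.
import yaml  # pip install pyyaml

with open("throughput_debugging/nn1_nanosets_32k_steps.yaml") as f:
    cfg = yaml.safe_load(f)["nanotron_config"]["value"]

tokens = cfg["tokens"]
dp = cfg["parallelism"]["dp"]

# Sequences processed per optimizer step across all data-parallel replicas.
global_batch = dp * tokens["micro_batch_size"] * tokens["batch_accumulation_per_replica"]

# Tokens per step and over the full run.
tokens_per_step = global_batch * tokens["sequence_length"]
total_tokens = tokens_per_step * tokens["train_steps"]

print(f"global batch size: {global_batch} sequences")   # 4 * 3 * 1 = 12
print(f"tokens per step:   {tokens_per_step:,}")         # 12 * 4096 = 49,152
print(f"total tokens:      {total_tokens:,}")            # ~1.57B over 32,000 steps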