{
"step": 2500,
"val_bpb": 1.3736497678831758,
"model_config": {
"sequence_len": 2048,
"vocab_size": 265,
"n_layer": 1,
"n_head": 16,
"n_kv_head": 16,
"n_embd": 1024,
"adaptive_decay": false,
"rope": true,
"repetition_count": 1,
"mamba_layers": [],
"recurrent_vocab_layers": [],
"gauss_layers": [],
"attention_time_decay": false,
"rva_viz": false
},
"user_config": {
"run": "dummy",
"device_type": "",
"depth": 1,
"repetition_count": 1,
"recurrent_vocab_layers": [],
"mamba_layers": [],
"gauss_layers": [],
"attention_time_decay": false,
"adaptive_decay": false,
"rope": true,
"init_from_tag": "",
"rva_viz": false,
"resume_from_tag": "",
"resume_from_step": -1,
"resume_from_hub": true,
"resume_hub_repo_id": "ASzecsenyi/VQLM",
"resume_hub_subdir": "",
"resume_hub_repo_type": "model",
"num_iterations": -1,
"target_flops": -1.0,
"target_param_data_ratio": 20,
"num_epochs": -1,
"data": "tinystories_data",
"vocab_size": 265,
"max_seq_len": 2048,
"device_batch_size": 4,
"total_batch_size": -1,
"embedding_lr": 0.2,
"unembedding_lr": 0.004,
"weight_decay": 0.0,
"matrix_lr": 0.02,
"base_lr": 1.0,
"grad_clip": 1.0,
"warmup_ratio": 0.05,
"warmdown_ratio": 0.2,
"final_lr_frac": 0.0,
"eval_every": 2500,
"eval_tokens": 10485760,
"core_metric_every": 2000,
"core_metric_max_per_task": 500,
"sample_every": 2500,
"checkpoint_every": -1,
"max_checkpoints": 3,
"push_checkpoints_to_hub": true,
"hf_repo_id": "ASzecsenyi/VQLM",
"hf_repo_type": "model",
"model_tag": "gpt-ts-d1"
},
"device_batch_size": 4,
"max_seq_len": 2048,
"num_iterations": 32045,
"warmdown_ratio": 0.2,
"max_checkpoints": 3,
"push_checkpoints_to_hub": true,
"hf_repo_id": "ASzecsenyi/VQLM",
"hf_repo_type": "model"
}