aditeyabaral committed on
Commit
73129af
·
verified ·
1 Parent(s): 3405bfa

Best model at Step 76565

Browse files
Files changed (3) hide show
  1. model.safetensors +1 -1
  2. trainer_state.pt +1 -1
  3. training_config.json +1 -1
model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:c7883c9489e7d0f16fad0a02c5fa3b476f1cb15c061cc68a5f49c1f8f4d776b4
3
  size 105233332
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0fe340a3651cc9b5c6441a64322e3d402d54b4bf9c687c63f6097020936ba09f
3
  size 105233332
trainer_state.pt CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:bbc93eefeded0944e15cb3f516d4a67bcf76dee5e27e562c03cdc01067cba1a0
3
  size 210474182
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:84fb2d19e8e339b09e3246a7a72c0ed83e31369bbae751d2aa85d7ff4ab9ba94
3
  size 210474182
training_config.json CHANGED
@@ -47,7 +47,7 @@
47
  "hf_api": "<huggingface_hub.hf_api.HfApi object at 0x15220b251150>",
48
  "wandb_writer": "<wandb.sdk.wandb_run.Run object at 0x15220b299940>",
49
  "wandb_table": "<wandb.sdk.data_types.table.Table object at 0x15220b29a900>",
50
- "optimizer": "AdamW (\nParameter Group 0\n amsgrad: False\n betas: (0.9, 0.999)\n capturable: False\n decoupled_weight_decay: True\n differentiable: False\n eps: 1e-08\n foreach: None\n fused: None\n initial_lr: 0.0003\n lr: 1.5289315058583208e-05\n maximize: False\n weight_decay: 0.05\n)",
51
  "scheduler": "<torch.optim.lr_scheduler.SequentialLR object at 0x15220b40e7b0>",
52
  "optimizer_class": "adamw",
53
  "scheduler_class": "cosine",
 
47
  "hf_api": "<huggingface_hub.hf_api.HfApi object at 0x15220b251150>",
48
  "wandb_writer": "<wandb.sdk.wandb_run.Run object at 0x15220b299940>",
49
  "wandb_table": "<wandb.sdk.data_types.table.Table object at 0x15220b29a900>",
50
+ "optimizer": "AdamW (\nParameter Group 0\n amsgrad: False\n betas: (0.9, 0.999)\n capturable: False\n decoupled_weight_decay: True\n differentiable: False\n eps: 1e-08\n foreach: None\n fused: None\n initial_lr: 0.0003\n lr: 7.350326937918619e-06\n maximize: False\n weight_decay: 0.05\n)",
51
  "scheduler": "<torch.optim.lr_scheduler.SequentialLR object at 0x15220b40e7b0>",
52
  "optimizer_class": "adamw",
53
  "scheduler_class": "cosine",