ry1023 committed on
Commit
d862d0b
·
verified ·
1 Parent(s): b6d32f7

Training Step 2205

Browse files
Files changed (3) hide show
  1. model.safetensors +1 -1
  2. trainer_state.pt +1 -1
  3. training_config.json +1 -1
model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:7850ea4511281d90084291442eebc7bb9e8e4aea15c14c70042f1d41ac36a623
3
  size 97225416
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b6310bd7162db4cf924dcb0ffb33a78234cf55f248782e5be86fa7b861acc59a
3
  size 97225416
trainer_state.pt CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:48c6d5ae73b82bdbbdb815faf0a21be896a7efd92cc3e46a400f6e1d8194b855
3
  size 186100579
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:84e4b76ef9c53a220d195c28f6f4b2b446db9336ec2d14054d5417f1c65effb1
3
  size 186100579
training_config.json CHANGED
@@ -66,7 +66,7 @@
66
  "hf_api": "<huggingface_hub.hf_api.HfApi object at 0x14d8adf96f00>",
67
  "wandb_writer": "<wandb.sdk.wandb_run.Run object at 0x14d8ae47d280>",
68
  "wandb_table": null,
69
- "optimizer": "AdamW (\nParameter Group 0\n amsgrad: False\n betas: (0.9, 0.999)\n capturable: False\n decoupled_weight_decay: True\n differentiable: False\n eps: 1e-08\n foreach: None\n fused: None\n initial_lr: 6.25e-05\n lr: 1.373440000000002e-06\n maximize: False\n weight_decay: 0.04000129827171861\n)",
70
  "lr_scheduler": "<torch.optim.lr_scheduler.SequentialLR object at 0x14d8ad275340>",
71
  "wd_scheduler": "<trainers.schedulers.weight_decay.WeightDecayScheduler object at 0x14d8ae8119a0>",
72
  "momentum_scheduler": "<trainers.schedulers.momentum.MomentumScheduler object at 0x14d8ae811940>",
 
66
  "hf_api": "<huggingface_hub.hf_api.HfApi object at 0x14d8adf96f00>",
67
  "wandb_writer": "<wandb.sdk.wandb_run.Run object at 0x14d8ae47d280>",
68
  "wandb_table": null,
69
+ "optimizer": "AdamW (\nParameter Group 0\n amsgrad: False\n betas: (0.9, 0.999)\n capturable: False\n decoupled_weight_decay: True\n differentiable: False\n eps: 1e-08\n foreach: None\n fused: None\n initial_lr: 6.25e-05\n lr: 1.4981800000000044e-06\n maximize: False\n weight_decay: 0.04000176735857941\n)",
70
  "lr_scheduler": "<torch.optim.lr_scheduler.SequentialLR object at 0x14d8ad275340>",
71
  "wd_scheduler": "<trainers.schedulers.weight_decay.WeightDecayScheduler object at 0x14d8ae8119a0>",
72
  "momentum_scheduler": "<trainers.schedulers.momentum.MomentumScheduler object at 0x14d8ae811940>",