ry1023 committed on
Commit
4f51cd1
·
verified ·
1 Parent(s): a3f99e5

Training Step 1575

Browse files
Files changed (3) hide show
  1. model.safetensors +1 -1
  2. trainer_state.pt +1 -1
  3. training_config.json +1 -1
model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:a7a8d2cd2111c68f0d3015eaa0ee10f23aab39a1ef3b6fb39129b90a993ad6d6
3
  size 68876952
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6a75a722e01f465d2e298c8efda602d5297826f41cbb39bfa5bcc7fd0ac6c693
3
  size 68876952
trainer_state.pt CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:f44bfe39ab439a3176dbc1f7d28d90c2fb9c91f3fa7512683977be2246064664
3
  size 129403747
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a77bc531063247f6c9e3f36ca2939a1f9a27e1216b96003c6f526ebe1c88efad
3
  size 129403747
training_config.json CHANGED
@@ -66,7 +66,7 @@
66
  "hf_api": "<huggingface_hub.hf_api.HfApi object at 0x14e5868dd3a0>",
67
  "wandb_writer": "<wandb.sdk.wandb_run.Run object at 0x14e57f3612e0>",
68
  "wandb_table": null,
69
- "optimizer": "AdamW (\nParameter Group 0\n amsgrad: False\n betas: (0.9, 0.999)\n capturable: False\n decoupled_weight_decay: True\n differentiable: False\n eps: 1e-08\n foreach: None\n fused: None\n initial_lr: 0.000125\n lr: 3.245967741935487e-06\n maximize: False\n weight_decay: 0.0400023071100874\n)",
70
  "lr_scheduler": "<torch.optim.lr_scheduler.SequentialLR object at 0x14e586a1dca0>",
71
  "wd_scheduler": "<trainers.schedulers.weight_decay.WeightDecayScheduler object at 0x14e57e241820>",
72
  "momentum_scheduler": "<trainers.schedulers.momentum.MomentumScheduler object at 0x14e57e241850>",
 
66
  "hf_api": "<huggingface_hub.hf_api.HfApi object at 0x14e5868dd3a0>",
67
  "wandb_writer": "<wandb.sdk.wandb_run.Run object at 0x14e57f3612e0>",
68
  "wandb_table": null,
69
+ "optimizer": "AdamW (\nParameter Group 0\n amsgrad: False\n betas: (0.9, 0.999)\n capturable: False\n decoupled_weight_decay: True\n differentiable: False\n eps: 1e-08\n foreach: None\n fused: None\n initial_lr: 0.000125\n lr: 3.7449596774193655e-06\n maximize: False\n weight_decay: 0.040003606000574454\n)",
70
  "lr_scheduler": "<torch.optim.lr_scheduler.SequentialLR object at 0x14e586a1dca0>",
71
  "wd_scheduler": "<trainers.schedulers.weight_decay.WeightDecayScheduler object at 0x14e57e241820>",
72
  "momentum_scheduler": "<trainers.schedulers.momentum.MomentumScheduler object at 0x14e57e241850>",