rootxhacker committed on
Commit
3bec02c
·
verified ·
1 Parent(s): fe7ed82

Training in progress, step 4000, checkpoint

Browse files
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:11a062f0d5247b9505627696a295b4feca04c6c45dd688fcbc2c07d7828e414c
3
  size 36730224
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b445c42b37f9090e63ec9e650d67af120bdec1120aa2b99d075b10217d2f4041
3
  size 36730224
last-checkpoint/ar_diffusion_info.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:1ce273710431703c604c9d640c23434ce3f2b038664d7b3df1e4a13e933764d0
3
  size 1736
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4ae940e2a8fce62cfe8e7b2911818f4f043d9298216ee2524750b9c2bb4195bb
3
  size 1736
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:eb347b9f0c8f6a0d9a0e6b08949ae99d2459f1f662e3a7dd16f39b8ba0ddf69c
3
  size 73588346
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dacaa8970d573f7be27d82942d32e193ea6f961af3a7a55fdd87f418730a6ce8
3
  size 73588346
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:3cf21f101c997b0643b73ca8f669be23f0abb9e5a2c6fedb07e17298d9cdd268
3
  size 14244
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1698c9cb87c2e75a65bbfdd5b5bac56ca042377422d15083ff3eeeb34095ca85
3
  size 14244
last-checkpoint/scaler.pt CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:028b7d42ce0dbafb2a0126ad830dd1957166a4ca85043c6644487cae8315bfe9
3
  size 988
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1b33c54ce26acce0407557fdf2626c004064a12b74672d7c82d6602cfe737b3d
3
  size 988
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:d16b13e1d35412f9085034fa2481674486c7b966c61cae81848c36587b827f3d
3
  size 1064
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cd43e4afdd546fe9e425b45803ea4071bedf22fd593f1e2dc9ea450f413acbe3
3
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,10 +1,10 @@
1
  {
2
- "best_global_step": 3500,
3
- "best_metric": 1.5354665517807007,
4
- "best_model_checkpoint": "./ar-diffusion-checkpoints-progressive-attention/checkpoint-3500",
5
- "epoch": 0.26921006076455656,
6
  "eval_steps": 250,
7
- "global_step": 3500,
8
  "is_hyper_param_search": false,
9
  "is_local_process_zero": true,
10
  "is_world_process_zero": true,
@@ -610,6 +610,92 @@
610
  "eval_samples_per_second": 59.184,
611
  "eval_steps_per_second": 14.796,
612
  "step": 3500
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
613
  }
614
  ],
615
  "logging_steps": 50,
 
1
  {
2
+ "best_global_step": 4000,
3
+ "best_metric": 1.5196877717971802,
4
+ "best_model_checkpoint": "./ar-diffusion-checkpoints-progressive-attention/checkpoint-4000",
5
+ "epoch": 0.30766864087377893,
6
  "eval_steps": 250,
7
+ "global_step": 4000,
8
  "is_hyper_param_search": false,
9
  "is_local_process_zero": true,
10
  "is_world_process_zero": true,
 
610
  "eval_samples_per_second": 59.184,
611
  "eval_steps_per_second": 14.796,
612
  "step": 3500
613
+ },
614
+ {
615
+ "epoch": 0.2730559187754788,
616
+ "grad_norm": 1.5297306776046753,
617
+ "learning_rate": 0.00018418305067137626,
618
+ "loss": 1.5385,
619
+ "step": 3550
620
+ },
621
+ {
622
+ "epoch": 0.27690177678640104,
623
+ "grad_norm": 1.2498586177825928,
624
+ "learning_rate": 0.00018392333064955977,
625
+ "loss": 1.5591,
626
+ "step": 3600
627
+ },
628
+ {
629
+ "epoch": 0.2807476347973233,
630
+ "grad_norm": 1.5092873573303223,
631
+ "learning_rate": 0.0001836636106277433,
632
+ "loss": 1.554,
633
+ "step": 3650
634
+ },
635
+ {
636
+ "epoch": 0.28459349280824553,
637
+ "grad_norm": 1.4669116735458374,
638
+ "learning_rate": 0.00018340389060592682,
639
+ "loss": 1.5062,
640
+ "step": 3700
641
+ },
642
+ {
643
+ "epoch": 0.2884393508191678,
644
+ "grad_norm": 1.5152875185012817,
645
+ "learning_rate": 0.00018314417058411033,
646
+ "loss": 1.5277,
647
+ "step": 3750
648
+ },
649
+ {
650
+ "epoch": 0.2884393508191678,
651
+ "eval_loss": 1.5374407768249512,
652
+ "eval_runtime": 16.9404,
653
+ "eval_samples_per_second": 59.03,
654
+ "eval_steps_per_second": 14.758,
655
+ "step": 3750
656
+ },
657
+ {
658
+ "epoch": 0.29228520883009,
659
+ "grad_norm": 2.025120735168457,
660
+ "learning_rate": 0.00018288445056229387,
661
+ "loss": 1.534,
662
+ "step": 3800
663
+ },
664
+ {
665
+ "epoch": 0.2961310668410122,
666
+ "grad_norm": 1.34319269657135,
667
+ "learning_rate": 0.00018262473054047736,
668
+ "loss": 1.4707,
669
+ "step": 3850
670
+ },
671
+ {
672
+ "epoch": 0.29997692485193445,
673
+ "grad_norm": 1.9989622831344604,
674
+ "learning_rate": 0.00018236501051866087,
675
+ "loss": 1.567,
676
+ "step": 3900
677
+ },
678
+ {
679
+ "epoch": 0.3038227828628567,
680
+ "grad_norm": 1.2157036066055298,
681
+ "learning_rate": 0.0001821052904968444,
682
+ "loss": 1.507,
683
+ "step": 3950
684
+ },
685
+ {
686
+ "epoch": 0.30766864087377893,
687
+ "grad_norm": 0.9292582869529724,
688
+ "learning_rate": 0.00018184557047502793,
689
+ "loss": 1.4934,
690
+ "step": 4000
691
+ },
692
+ {
693
+ "epoch": 0.30766864087377893,
694
+ "eval_loss": 1.5196877717971802,
695
+ "eval_runtime": 16.7517,
696
+ "eval_samples_per_second": 59.695,
697
+ "eval_steps_per_second": 14.924,
698
+ "step": 4000
699
  }
700
  ],
701
  "logging_steps": 50,