Token Classification
Safetensors
English
deberta-v2
shawnrushefsky committed on
Commit
3ef26d6
·
1 Parent(s): 7577c21

checkpoint

Browse files
{checkpoint-5404 β†’ checkpoint-6562}/added_tokens.json RENAMED
File without changes
{checkpoint-5404 β†’ checkpoint-6562}/config.json RENAMED
File without changes
{checkpoint-5404 β†’ checkpoint-6562}/special_tokens_map.json RENAMED
File without changes
{checkpoint-5404 β†’ checkpoint-6562}/tokenizer.json RENAMED
File without changes
{checkpoint-5404 β†’ checkpoint-6562}/tokenizer_config.json RENAMED
File without changes
{checkpoint-5404 β†’ checkpoint-6562}/trainer_state.json RENAMED
@@ -2,9 +2,9 @@
2
  "best_global_step": null,
3
  "best_metric": null,
4
  "best_model_checkpoint": null,
5
- "epoch": 0.7017270484352681,
6
  "eval_steps": 500,
7
- "global_step": 5404,
8
  "is_hyper_param_search": false,
9
  "is_local_process_zero": true,
10
  "is_world_process_zero": true,
@@ -764,6 +764,167 @@
764
  "learning_rate": 1.742191816866202e-05,
765
  "loss": 0.1963,
766
  "step": 5400
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
767
  }
768
  ],
769
  "logging_steps": 50,
@@ -783,7 +944,7 @@
783
  "attributes": {}
784
  }
785
  },
786
- "total_flos": 4.519165203568394e+17,
787
  "train_batch_size": 40,
788
  "trial_name": null,
789
  "trial_params": null
 
2
  "best_global_step": null,
3
  "best_metric": null,
4
  "best_model_checkpoint": null,
5
+ "epoch": 0.8520971302428256,
6
  "eval_steps": 500,
7
+ "global_step": 6562,
8
  "is_hyper_param_search": false,
9
  "is_local_process_zero": true,
10
  "is_world_process_zero": true,
 
764
  "learning_rate": 1.742191816866202e-05,
765
  "loss": 0.1963,
766
  "step": 5400
767
+ },
768
+ {
769
+ "epoch": 0.7077002986625114,
770
+ "grad_norm": 0.8812252879142761,
771
+ "learning_rate": 1.7395605841336668e-05,
772
+ "loss": 0.196,
773
+ "step": 5450
774
+ },
775
+ {
776
+ "epoch": 0.7141929619529931,
777
+ "grad_norm": 0.6928241848945618,
778
+ "learning_rate": 1.7369293514011316e-05,
779
+ "loss": 0.2011,
780
+ "step": 5500
781
+ },
782
+ {
783
+ "epoch": 0.7206856252434749,
784
+ "grad_norm": 0.6892450451850891,
785
+ "learning_rate": 1.7342981186685965e-05,
786
+ "loss": 0.1949,
787
+ "step": 5550
788
+ },
789
+ {
790
+ "epoch": 0.7271782885339566,
791
+ "grad_norm": 0.4782065749168396,
792
+ "learning_rate": 1.7316668859360613e-05,
793
+ "loss": 0.194,
794
+ "step": 5600
795
+ },
796
+ {
797
+ "epoch": 0.7336709518244384,
798
+ "grad_norm": 0.6438505053520203,
799
+ "learning_rate": 1.729035653203526e-05,
800
+ "loss": 0.1967,
801
+ "step": 5650
802
+ },
803
+ {
804
+ "epoch": 0.7401636151149201,
805
+ "grad_norm": 0.5797818899154663,
806
+ "learning_rate": 1.726404420470991e-05,
807
+ "loss": 0.2185,
808
+ "step": 5700
809
+ },
810
+ {
811
+ "epoch": 0.7466562784054019,
812
+ "grad_norm": 0.6884586811065674,
813
+ "learning_rate": 1.7237731877384554e-05,
814
+ "loss": 0.1977,
815
+ "step": 5750
816
+ },
817
+ {
818
+ "epoch": 0.7531489416958836,
819
+ "grad_norm": 0.648883581161499,
820
+ "learning_rate": 1.7211419550059206e-05,
821
+ "loss": 0.1964,
822
+ "step": 5800
823
+ },
824
+ {
825
+ "epoch": 0.7596416049863655,
826
+ "grad_norm": 0.6440086960792542,
827
+ "learning_rate": 1.718510722273385e-05,
828
+ "loss": 0.2014,
829
+ "step": 5850
830
+ },
831
+ {
832
+ "epoch": 0.7661342682768472,
833
+ "grad_norm": 0.5619300007820129,
834
+ "learning_rate": 1.71587948954085e-05,
835
+ "loss": 0.1909,
836
+ "step": 5900
837
+ },
838
+ {
839
+ "epoch": 0.7726269315673289,
840
+ "grad_norm": 0.6859204769134521,
841
+ "learning_rate": 1.7132482568083147e-05,
842
+ "loss": 0.2049,
843
+ "step": 5950
844
+ },
845
+ {
846
+ "epoch": 0.7791195948578107,
847
+ "grad_norm": 0.6132592558860779,
848
+ "learning_rate": 1.7106170240757796e-05,
849
+ "loss": 0.2,
850
+ "step": 6000
851
+ },
852
+ {
853
+ "epoch": 0.7856122581482924,
854
+ "grad_norm": 0.7050901055335999,
855
+ "learning_rate": 1.7079857913432444e-05,
856
+ "loss": 0.189,
857
+ "step": 6050
858
+ },
859
+ {
860
+ "epoch": 0.7921049214387742,
861
+ "grad_norm": 0.6752614974975586,
862
+ "learning_rate": 1.7053545586107092e-05,
863
+ "loss": 0.2248,
864
+ "step": 6100
865
+ },
866
+ {
867
+ "epoch": 0.7985975847292559,
868
+ "grad_norm": 0.7186923623085022,
869
+ "learning_rate": 1.702723325878174e-05,
870
+ "loss": 0.1903,
871
+ "step": 6150
872
+ },
873
+ {
874
+ "epoch": 0.8050902480197377,
875
+ "grad_norm": 0.5991400480270386,
876
+ "learning_rate": 1.700092093145639e-05,
877
+ "loss": 0.197,
878
+ "step": 6200
879
+ },
880
+ {
881
+ "epoch": 0.8115829113102194,
882
+ "grad_norm": 0.9522245526313782,
883
+ "learning_rate": 1.6974608604131037e-05,
884
+ "loss": 0.1962,
885
+ "step": 6250
886
+ },
887
+ {
888
+ "epoch": 0.8180755746007012,
889
+ "grad_norm": 0.8645381927490234,
890
+ "learning_rate": 1.6948296276805686e-05,
891
+ "loss": 0.1901,
892
+ "step": 6300
893
+ },
894
+ {
895
+ "epoch": 0.8245682378911829,
896
+ "grad_norm": 0.5243034958839417,
897
+ "learning_rate": 1.6921983949480334e-05,
898
+ "loss": 0.194,
899
+ "step": 6350
900
+ },
901
+ {
902
+ "epoch": 0.8310609011816648,
903
+ "grad_norm": 0.5842151641845703,
904
+ "learning_rate": 1.6895671622154982e-05,
905
+ "loss": 0.1917,
906
+ "step": 6400
907
+ },
908
+ {
909
+ "epoch": 0.8375535644721465,
910
+ "grad_norm": 0.6111485362052917,
911
+ "learning_rate": 1.6869359294829627e-05,
912
+ "loss": 0.2042,
913
+ "step": 6450
914
+ },
915
+ {
916
+ "epoch": 0.8440462277626283,
917
+ "grad_norm": 0.6515288949012756,
918
+ "learning_rate": 1.684304696750428e-05,
919
+ "loss": 0.2032,
920
+ "step": 6500
921
+ },
922
+ {
923
+ "epoch": 0.85053889105311,
924
+ "grad_norm": 0.7596396207809448,
925
+ "learning_rate": 1.6816734640178924e-05,
926
+ "loss": 0.193,
927
+ "step": 6550
928
  }
929
  ],
930
  "logging_steps": 50,
 
944
  "attributes": {}
945
  }
946
  },
947
+ "total_flos": 5.487557747765412e+17,
948
  "train_batch_size": 40,
949
  "trial_name": null,
950
  "trial_params": null