Token Classification
Safetensors
English
deberta-v2
shawnrushefsky committed on
Commit
d18158b
·
1 Parent(s): 3ef26d6

checkpoint

Browse files
{checkpoint-5790 → checkpoint-6948}/added_tokens.json RENAMED
File without changes
{checkpoint-5790 → checkpoint-6948}/config.json RENAMED
File without changes
{checkpoint-5790 → checkpoint-6948}/special_tokens_map.json RENAMED
File without changes
{checkpoint-5790 → checkpoint-6948}/tokenizer.json RENAMED
File without changes
{checkpoint-5790 → checkpoint-6948}/tokenizer_config.json RENAMED
File without changes
{checkpoint-5790 → checkpoint-6948}/trainer_state.json RENAMED
@@ -2,9 +2,9 @@
2
  "best_global_step": null,
3
  "best_metric": null,
4
  "best_model_checkpoint": null,
5
- "epoch": 0.7518504090377873,
6
  "eval_steps": 500,
7
- "global_step": 5790,
8
  "is_hyper_param_search": false,
9
  "is_local_process_zero": true,
10
  "is_world_process_zero": true,
@@ -813,6 +813,167 @@
813
  "learning_rate": 1.7237731877384554e-05,
814
  "loss": 0.1977,
815
  "step": 5750
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
816
  }
817
  ],
818
  "logging_steps": 50,
@@ -832,7 +993,7 @@
832
  "attributes": {}
833
  }
834
  },
835
- "total_flos": 4.8419627183007334e+17,
836
  "train_batch_size": 40,
837
  "trial_name": null,
838
  "trial_params": null
 
2
  "best_global_step": null,
3
  "best_metric": null,
4
  "best_model_checkpoint": null,
5
+ "epoch": 0.9022204908453447,
6
  "eval_steps": 500,
7
+ "global_step": 6948,
8
  "is_hyper_param_search": false,
9
  "is_local_process_zero": true,
10
  "is_world_process_zero": true,
 
813
  "learning_rate": 1.7237731877384554e-05,
814
  "loss": 0.1977,
815
  "step": 5750
816
+ },
817
+ {
818
+ "epoch": 0.7531489416958836,
819
+ "grad_norm": 0.648883581161499,
820
+ "learning_rate": 1.7211419550059206e-05,
821
+ "loss": 0.1964,
822
+ "step": 5800
823
+ },
824
+ {
825
+ "epoch": 0.7596416049863655,
826
+ "grad_norm": 0.6440086960792542,
827
+ "learning_rate": 1.718510722273385e-05,
828
+ "loss": 0.2014,
829
+ "step": 5850
830
+ },
831
+ {
832
+ "epoch": 0.7661342682768472,
833
+ "grad_norm": 0.5619300007820129,
834
+ "learning_rate": 1.71587948954085e-05,
835
+ "loss": 0.1909,
836
+ "step": 5900
837
+ },
838
+ {
839
+ "epoch": 0.7726269315673289,
840
+ "grad_norm": 0.6859204769134521,
841
+ "learning_rate": 1.7132482568083147e-05,
842
+ "loss": 0.2049,
843
+ "step": 5950
844
+ },
845
+ {
846
+ "epoch": 0.7791195948578107,
847
+ "grad_norm": 0.6132592558860779,
848
+ "learning_rate": 1.7106170240757796e-05,
849
+ "loss": 0.2,
850
+ "step": 6000
851
+ },
852
+ {
853
+ "epoch": 0.7856122581482924,
854
+ "grad_norm": 0.7050901055335999,
855
+ "learning_rate": 1.7079857913432444e-05,
856
+ "loss": 0.189,
857
+ "step": 6050
858
+ },
859
+ {
860
+ "epoch": 0.7921049214387742,
861
+ "grad_norm": 0.6752614974975586,
862
+ "learning_rate": 1.7053545586107092e-05,
863
+ "loss": 0.2248,
864
+ "step": 6100
865
+ },
866
+ {
867
+ "epoch": 0.7985975847292559,
868
+ "grad_norm": 0.7186923623085022,
869
+ "learning_rate": 1.702723325878174e-05,
870
+ "loss": 0.1903,
871
+ "step": 6150
872
+ },
873
+ {
874
+ "epoch": 0.8050902480197377,
875
+ "grad_norm": 0.5991400480270386,
876
+ "learning_rate": 1.700092093145639e-05,
877
+ "loss": 0.197,
878
+ "step": 6200
879
+ },
880
+ {
881
+ "epoch": 0.8115829113102194,
882
+ "grad_norm": 0.9522245526313782,
883
+ "learning_rate": 1.6974608604131037e-05,
884
+ "loss": 0.1962,
885
+ "step": 6250
886
+ },
887
+ {
888
+ "epoch": 0.8180755746007012,
889
+ "grad_norm": 0.8645381927490234,
890
+ "learning_rate": 1.6948296276805686e-05,
891
+ "loss": 0.1901,
892
+ "step": 6300
893
+ },
894
+ {
895
+ "epoch": 0.8245682378911829,
896
+ "grad_norm": 0.5243034958839417,
897
+ "learning_rate": 1.6921983949480334e-05,
898
+ "loss": 0.194,
899
+ "step": 6350
900
+ },
901
+ {
902
+ "epoch": 0.8310609011816648,
903
+ "grad_norm": 0.5842151641845703,
904
+ "learning_rate": 1.6895671622154982e-05,
905
+ "loss": 0.1917,
906
+ "step": 6400
907
+ },
908
+ {
909
+ "epoch": 0.8375535644721465,
910
+ "grad_norm": 0.6111485362052917,
911
+ "learning_rate": 1.6869359294829627e-05,
912
+ "loss": 0.2042,
913
+ "step": 6450
914
+ },
915
+ {
916
+ "epoch": 0.8440462277626283,
917
+ "grad_norm": 0.6515288949012756,
918
+ "learning_rate": 1.684304696750428e-05,
919
+ "loss": 0.2032,
920
+ "step": 6500
921
+ },
922
+ {
923
+ "epoch": 0.85053889105311,
924
+ "grad_norm": 0.7596396207809448,
925
+ "learning_rate": 1.6816734640178924e-05,
926
+ "loss": 0.193,
927
+ "step": 6550
928
+ },
929
+ {
930
+ "epoch": 0.8570315543435918,
931
+ "grad_norm": 0.6944254636764526,
932
+ "learning_rate": 1.6790422312853575e-05,
933
+ "loss": 0.1899,
934
+ "step": 6600
935
+ },
936
+ {
937
+ "epoch": 0.8635242176340735,
938
+ "grad_norm": 0.6190508604049683,
939
+ "learning_rate": 1.676410998552822e-05,
940
+ "loss": 0.1987,
941
+ "step": 6650
942
+ },
943
+ {
944
+ "epoch": 0.8700168809245552,
945
+ "grad_norm": 1.1515477895736694,
946
+ "learning_rate": 1.673779765820287e-05,
947
+ "loss": 0.196,
948
+ "step": 6700
949
+ },
950
+ {
951
+ "epoch": 0.876509544215037,
952
+ "grad_norm": 0.5803254842758179,
953
+ "learning_rate": 1.6711485330877517e-05,
954
+ "loss": 0.1923,
955
+ "step": 6750
956
+ },
957
+ {
958
+ "epoch": 0.8830022075055187,
959
+ "grad_norm": 0.8052871227264404,
960
+ "learning_rate": 1.6685173003552165e-05,
961
+ "loss": 0.1894,
962
+ "step": 6800
963
+ },
964
+ {
965
+ "epoch": 0.8894948707960005,
966
+ "grad_norm": 0.9313941597938538,
967
+ "learning_rate": 1.6658860676226813e-05,
968
+ "loss": 0.1889,
969
+ "step": 6850
970
+ },
971
+ {
972
+ "epoch": 0.8959875340864822,
973
+ "grad_norm": 0.5186671614646912,
974
+ "learning_rate": 1.663254834890146e-05,
975
+ "loss": 0.1895,
976
+ "step": 6900
977
  }
978
  ],
979
  "logging_steps": 50,
 
993
  "attributes": {}
994
  }
995
  },
996
+ "total_flos": 5.810355262497751e+17,
997
  "train_batch_size": 40,
998
  "trial_name": null,
999
  "trial_params": null