SystemAdmin123 committed on
Commit
f3bc482
·
verified ·
1 Parent(s): 1824c90

Training in progress, step 1600, checkpoint

Browse files
last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:2ffa055769cedc6e19d6f4fa255cbc212741473eed6a1eae92226e6554b6e051
3
  size 2433024
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f461d6c9104ce6907b4e7c31e27d59f5ac89572d6389be328116c94be46dfc90
3
  size 2433024
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:b92316674c1c085567619b898fcbb41987b0645cdc9f7adc511d53180384fe19
3
  size 2498406
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0fceeac08bcbf067a45f25c4cabdca0559cd7fb797d80d8ee1f9a373872298b8
3
  size 2498406
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:a85b7ee4e3e06f8b21d4d23e7eb8bbe5510e7f25d23cfc2ffc16d97845a1be25
3
  size 14244
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ced0ac0d077b41bd2987add3782b7ce1140142ac3cddaf433babda96674c50fb
3
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:427276ae77d918ee2b880ea4152618640d39ea76588856ca2cd62fe2ab8b83d7
3
  size 1064
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9ed6aad8025a80b776f2d50234fd05b8c1e2e758d3d427458fe15ed9bc7f733a
3
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
- "epoch": 0.3551346552234389,
5
  "eval_steps": 200,
6
- "global_step": 1200,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
@@ -903,6 +903,302 @@
903
  "eval_samples_per_second": 40.706,
904
  "eval_steps_per_second": 10.19,
905
  "step": 1200
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
906
  }
907
  ],
908
  "logging_steps": 10,
@@ -922,7 +1218,7 @@
922
  "attributes": {}
923
  }
924
  },
925
- "total_flos": 117590851584.0,
926
  "train_batch_size": 4,
927
  "trial_name": null,
928
  "trial_params": null
 
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
+ "epoch": 0.47351287363125183,
5
  "eval_steps": 200,
6
+ "global_step": 1600,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
 
903
  "eval_samples_per_second": 40.706,
904
  "eval_steps_per_second": 10.19,
905
  "step": 1200
906
+ },
907
+ {
908
+ "epoch": 0.3580941106836342,
909
+ "grad_norm": 0.57421875,
910
+ "learning_rate": 0.0001135169494631497,
911
+ "loss": 10.541,
912
+ "step": 1210
913
+ },
914
+ {
915
+ "epoch": 0.36105356614382955,
916
+ "grad_norm": 0.58984375,
917
+ "learning_rate": 0.00011220516908034601,
918
+ "loss": 10.5805,
919
+ "step": 1220
920
+ },
921
+ {
922
+ "epoch": 0.3640130216040249,
923
+ "grad_norm": 0.52734375,
924
+ "learning_rate": 0.00011089125314635726,
925
+ "loss": 10.6207,
926
+ "step": 1230
927
+ },
928
+ {
929
+ "epoch": 0.3669724770642202,
930
+ "grad_norm": 0.734375,
931
+ "learning_rate": 0.00010957543155842702,
932
+ "loss": 10.6731,
933
+ "step": 1240
934
+ },
935
+ {
936
+ "epoch": 0.36993193252441553,
937
+ "grad_norm": 0.765625,
938
+ "learning_rate": 0.00010825793454723325,
939
+ "loss": 10.5913,
940
+ "step": 1250
941
+ },
942
+ {
943
+ "epoch": 0.37289138798461086,
944
+ "grad_norm": 0.490234375,
945
+ "learning_rate": 0.00010693899263660441,
946
+ "loss": 10.5151,
947
+ "step": 1260
948
+ },
949
+ {
950
+ "epoch": 0.3758508434448062,
951
+ "grad_norm": 0.515625,
952
+ "learning_rate": 0.00010561883660318455,
953
+ "loss": 10.5782,
954
+ "step": 1270
955
+ },
956
+ {
957
+ "epoch": 0.37881029890500145,
958
+ "grad_norm": 1.1015625,
959
+ "learning_rate": 0.00010429769743605407,
960
+ "loss": 10.5898,
961
+ "step": 1280
962
+ },
963
+ {
964
+ "epoch": 0.3817697543651968,
965
+ "grad_norm": 0.60546875,
966
+ "learning_rate": 0.00010297580629631325,
967
+ "loss": 10.5216,
968
+ "step": 1290
969
+ },
970
+ {
971
+ "epoch": 0.3847292098253921,
972
+ "grad_norm": 1.4921875,
973
+ "learning_rate": 0.00010165339447663587,
974
+ "loss": 10.4868,
975
+ "step": 1300
976
+ },
977
+ {
978
+ "epoch": 0.38768866528558743,
979
+ "grad_norm": 0.44140625,
980
+ "learning_rate": 0.00010033069336079952,
981
+ "loss": 10.6363,
982
+ "step": 1310
983
+ },
984
+ {
985
+ "epoch": 0.39064812074578276,
986
+ "grad_norm": 0.515625,
987
+ "learning_rate": 9.900793438320037e-05,
988
+ "loss": 10.55,
989
+ "step": 1320
990
+ },
991
+ {
992
+ "epoch": 0.3936075762059781,
993
+ "grad_norm": 0.703125,
994
+ "learning_rate": 9.768534898835862e-05,
995
+ "loss": 10.6171,
996
+ "step": 1330
997
+ },
998
+ {
999
+ "epoch": 0.3965670316661734,
1000
+ "grad_norm": 0.69140625,
1001
+ "learning_rate": 9.636316859042259e-05,
1002
+ "loss": 10.6343,
1003
+ "step": 1340
1004
+ },
1005
+ {
1006
+ "epoch": 0.39952648712636873,
1007
+ "grad_norm": 1.171875,
1008
+ "learning_rate": 9.504162453267777e-05,
1009
+ "loss": 10.6261,
1010
+ "step": 1350
1011
+ },
1012
+ {
1013
+ "epoch": 0.40248594258656406,
1014
+ "grad_norm": 0.49609375,
1015
+ "learning_rate": 9.372094804706867e-05,
1016
+ "loss": 10.566,
1017
+ "step": 1360
1018
+ },
1019
+ {
1020
+ "epoch": 0.4054453980467594,
1021
+ "grad_norm": 0.54296875,
1022
+ "learning_rate": 9.24013702137397e-05,
1023
+ "loss": 10.5186,
1024
+ "step": 1370
1025
+ },
1026
+ {
1027
+ "epoch": 0.4084048535069547,
1028
+ "grad_norm": 0.5546875,
1029
+ "learning_rate": 9.108312192060298e-05,
1030
+ "loss": 10.6343,
1031
+ "step": 1380
1032
+ },
1033
+ {
1034
+ "epoch": 0.41136430896715004,
1035
+ "grad_norm": 0.83984375,
1036
+ "learning_rate": 8.97664338229395e-05,
1037
+ "loss": 10.6551,
1038
+ "step": 1390
1039
+ },
1040
+ {
1041
+ "epoch": 0.41432376442734536,
1042
+ "grad_norm": 2.859375,
1043
+ "learning_rate": 8.845153630304139e-05,
1044
+ "loss": 10.6436,
1045
+ "step": 1400
1046
+ },
1047
+ {
1048
+ "epoch": 0.41432376442734536,
1049
+ "eval_loss": 10.585193634033203,
1050
+ "eval_runtime": 37.153,
1051
+ "eval_samples_per_second": 40.427,
1052
+ "eval_steps_per_second": 10.12,
1053
+ "step": 1400
1054
+ },
1055
+ {
1056
+ "epoch": 0.4172832198875407,
1057
+ "grad_norm": 0.53125,
1058
+ "learning_rate": 8.713865942990141e-05,
1059
+ "loss": 10.5189,
1060
+ "step": 1410
1061
+ },
1062
+ {
1063
+ "epoch": 0.420242675347736,
1064
+ "grad_norm": 0.53515625,
1065
+ "learning_rate": 8.582803291895758e-05,
1066
+ "loss": 10.4783,
1067
+ "step": 1420
1068
+ },
1069
+ {
1070
+ "epoch": 0.42320213080793134,
1071
+ "grad_norm": 0.5390625,
1072
+ "learning_rate": 8.451988609189987e-05,
1073
+ "loss": 10.6285,
1074
+ "step": 1430
1075
+ },
1076
+ {
1077
+ "epoch": 0.42616158626812667,
1078
+ "grad_norm": 0.63671875,
1079
+ "learning_rate": 8.321444783654524e-05,
1080
+ "loss": 10.7481,
1081
+ "step": 1440
1082
+ },
1083
+ {
1084
+ "epoch": 0.429121041728322,
1085
+ "grad_norm": 1.0703125,
1086
+ "learning_rate": 8.191194656678904e-05,
1087
+ "loss": 10.6971,
1088
+ "step": 1450
1089
+ },
1090
+ {
1091
+ "epoch": 0.4320804971885173,
1092
+ "grad_norm": 0.5078125,
1093
+ "learning_rate": 8.061261018263919e-05,
1094
+ "loss": 10.5323,
1095
+ "step": 1460
1096
+ },
1097
+ {
1098
+ "epoch": 0.43503995264871265,
1099
+ "grad_norm": 0.51171875,
1100
+ "learning_rate": 7.931666603034033e-05,
1101
+ "loss": 10.4634,
1102
+ "step": 1470
1103
+ },
1104
+ {
1105
+ "epoch": 0.437999408108908,
1106
+ "grad_norm": 0.6171875,
1107
+ "learning_rate": 7.80243408625947e-05,
1108
+ "loss": 10.5384,
1109
+ "step": 1480
1110
+ },
1111
+ {
1112
+ "epoch": 0.4409588635691033,
1113
+ "grad_norm": 0.76953125,
1114
+ "learning_rate": 7.673586079888698e-05,
1115
+ "loss": 10.699,
1116
+ "step": 1490
1117
+ },
1118
+ {
1119
+ "epoch": 0.4439183190292986,
1120
+ "grad_norm": 1.5390625,
1121
+ "learning_rate": 7.54514512859201e-05,
1122
+ "loss": 10.5729,
1123
+ "step": 1500
1124
+ },
1125
+ {
1126
+ "epoch": 0.44687777448949395,
1127
+ "grad_norm": 0.455078125,
1128
+ "learning_rate": 7.417133705816837e-05,
1129
+ "loss": 10.5262,
1130
+ "step": 1510
1131
+ },
1132
+ {
1133
+ "epoch": 0.4498372299496893,
1134
+ "grad_norm": 0.6640625,
1135
+ "learning_rate": 7.289574209855559e-05,
1136
+ "loss": 10.5102,
1137
+ "step": 1520
1138
+ },
1139
+ {
1140
+ "epoch": 0.4527966854098846,
1141
+ "grad_norm": 0.5703125,
1142
+ "learning_rate": 7.16248895992645e-05,
1143
+ "loss": 10.6538,
1144
+ "step": 1530
1145
+ },
1146
+ {
1147
+ "epoch": 0.45575614087007993,
1148
+ "grad_norm": 0.83203125,
1149
+ "learning_rate": 7.035900192268464e-05,
1150
+ "loss": 10.4972,
1151
+ "step": 1540
1152
+ },
1153
+ {
1154
+ "epoch": 0.45871559633027525,
1155
+ "grad_norm": 0.90625,
1156
+ "learning_rate": 6.909830056250527e-05,
1157
+ "loss": 10.646,
1158
+ "step": 1550
1159
+ },
1160
+ {
1161
+ "epoch": 0.4616750517904706,
1162
+ "grad_norm": 0.494140625,
1163
+ "learning_rate": 6.784300610496048e-05,
1164
+ "loss": 10.564,
1165
+ "step": 1560
1166
+ },
1167
+ {
1168
+ "epoch": 0.46463450725066585,
1169
+ "grad_norm": 0.57421875,
1170
+ "learning_rate": 6.65933381902329e-05,
1171
+ "loss": 10.5419,
1172
+ "step": 1570
1173
+ },
1174
+ {
1175
+ "epoch": 0.4675939627108612,
1176
+ "grad_norm": 0.5703125,
1177
+ "learning_rate": 6.534951547402322e-05,
1178
+ "loss": 10.6451,
1179
+ "step": 1580
1180
+ },
1181
+ {
1182
+ "epoch": 0.4705534181710565,
1183
+ "grad_norm": 0.7578125,
1184
+ "learning_rate": 6.411175558929152e-05,
1185
+ "loss": 10.7074,
1186
+ "step": 1590
1187
+ },
1188
+ {
1189
+ "epoch": 0.47351287363125183,
1190
+ "grad_norm": 0.98046875,
1191
+ "learning_rate": 6.28802751081779e-05,
1192
+ "loss": 10.5774,
1193
+ "step": 1600
1194
+ },
1195
+ {
1196
+ "epoch": 0.47351287363125183,
1197
+ "eval_loss": 10.587952613830566,
1198
+ "eval_runtime": 36.8828,
1199
+ "eval_samples_per_second": 40.724,
1200
+ "eval_steps_per_second": 10.194,
1201
+ "step": 1600
1202
  }
1203
  ],
1204
  "logging_steps": 10,
 
1218
  "attributes": {}
1219
  }
1220
  },
1221
+ "total_flos": 156755165184.0,
1222
  "train_batch_size": 4,
1223
  "trial_name": null,
1224
  "trial_params": null