Model save
- README.md +5 -5
- all_results.json +10 -10
- eval_results.json +6 -6
- train_results.json +4 -4
- trainer_state.json +109 -109
- training_args.bin +1 -1
README.md
CHANGED
@@ -16,8 +16,8 @@ should probably proofread and complete it, then remove this comment. -->

  This model is a fine-tuned version of [](https://huggingface.co/) on an unknown dataset.
  It achieves the following results on the evaluation set:
- - Loss: 3.
- - Accuracy: 0.
+ - Loss: 3.4619
+ - Accuracy: 0.3767

  ## Model description

@@ -142,9 +142,9 @@ The following hyperparameters were used during training:
  | 3.127 | 9.7035 | 90000 | 0.3942 | 3.3023 |
  | 3.1309 | 9.8113 | 91000 | 0.3945 | 3.3003 |
  | 3.1271 | 9.9191 | 92000 | 0.3946 | 3.2983 |
- | 3.2875 | 10.0270 | 93000 | 3.
- | 3.
- | 3.
+ | 3.2875 | 10.0270 | 93000 | 3.4554 | 0.3782 |
+ | 3.3636 | 10.1348 | 94000 | 3.4681 | 0.3759 |
+ | 3.3804 | 10.2426 | 95000 | 3.4619 | 0.3767 |


  ### Framework versions
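The card above follows the auto-generated Trainer template; the evaluation metrics (loss 3.4619, accuracy 0.3767) together with the perplexity reported in all_results.json below suggest a causal language model. A minimal loading sketch under that assumption, with a placeholder repo id (the card does not name the base model or the repository):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "user/model-name"  # placeholder; the actual repo id is not stated in the card

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id)

inputs = tokenizer("Example prompt", return_tensors="pt")
logits = model(**inputs).logits  # next-token logits, one row per input token
```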
all_results.json
CHANGED
@@ -1,16 +1,16 @@
  {
      "epoch": 10.242587601078167,
-     "eval_accuracy": 0.
-     "eval_loss": 3.
-     "eval_runtime":
+     "eval_accuracy": 0.3942401583029114,
+     "eval_loss": 3.3023483753204346,
+     "eval_runtime": 145.9968,
      "eval_samples": 18011,
-     "eval_samples_per_second":
-     "eval_steps_per_second":
-     "perplexity": 27.
+     "eval_samples_per_second": 123.366,
+     "eval_steps_per_second": 7.712,
+     "perplexity": 27.176384392621944,
      "total_flos": 7.94262454272e+17,
-     "train_loss": 0.
-     "train_runtime":
+     "train_loss": 0.07917592998303866,
+     "train_runtime": 1615.2207,
      "train_samples": 296775,
-     "train_samples_per_second":
-     "train_steps_per_second":
+     "train_samples_per_second": 9186.825,
+     "train_steps_per_second": 287.112
  }
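The derived fields in all_results.json are consistent with each other: perplexity is the exponential of the evaluation loss, and eval_samples_per_second is eval_samples divided by eval_runtime. A quick check of the values above:

```python
import math

eval_loss = 3.3023483753204346
eval_samples = 18011
eval_runtime = 145.9968

print(math.exp(eval_loss))          # ~27.1764, matches "perplexity"
print(eval_samples / eval_runtime)  # ~123.37, matches "eval_samples_per_second"
```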
eval_results.json
CHANGED
@@ -1,10 +1,10 @@
  {
      "epoch": 10.242587601078167,
-     "eval_accuracy": 0.
-     "eval_loss": 3.
-     "eval_runtime":
+     "eval_accuracy": 0.3942401583029114,
+     "eval_loss": 3.3023483753204346,
+     "eval_runtime": 145.9968,
      "eval_samples": 18011,
-     "eval_samples_per_second":
-     "eval_steps_per_second":
-     "perplexity": 27.
+     "eval_samples_per_second": 123.366,
+     "eval_steps_per_second": 7.712,
+     "perplexity": 27.176384392621944
  }
train_results.json
CHANGED
@@ -1,9 +1,9 @@
  {
      "epoch": 10.242587601078167,
      "total_flos": 7.94262454272e+17,
-     "train_loss": 0.
-     "train_runtime":
+     "train_loss": 0.07917592998303866,
+     "train_runtime": 1615.2207,
      "train_samples": 296775,
-     "train_samples_per_second":
-     "train_steps_per_second":
+     "train_samples_per_second": 9186.825,
+     "train_steps_per_second": 287.112
  }
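One thing worth noting: train_loss here (~0.079) is far below the per-step losses (~3.3) logged in trainer_state.json below. That pattern is consistent with a run resumed from a checkpoint shortly before step 92,800, since the Trainer averages the loss accumulated over only the newly executed steps across the full 95,000-step count. A rough, assumption-laden sanity check:

```python
# Assumes the run resumed near step 92,800 and stopped at step 95,000,
# with the newly executed steps averaging roughly the logged ~3.3-3.4 loss.
new_steps = 95_000 - 92_800
mean_new_loss = 3.34          # eyeballed from the log entries below
total_steps = 95_000

print(new_steps * mean_new_loss / total_steps)  # ~0.077, near the reported 0.0792
```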
trainer_state.json
CHANGED
@@ -13823,354 +13823,354 @@
  },
  {
    "epoch": 10.005390835579515,
-   "grad_norm":
+   "grad_norm": 0.9977272748947144,
    "learning_rate": 0.0004800828210934972,
-   "loss": 3.
+   "loss": 3.1867,
    "step": 92800
  },
  {
    "epoch": 10.01078167115903,
-   "grad_norm":
+   "grad_norm": 1.0503565073013306,
    "learning_rate": 0.00048001811711420253,
-   "loss": 3.
+   "loss": 3.2172,
    "step": 92850
  },
  {
    "epoch": 10.016172506738544,
-   "grad_norm": 1.
+   "grad_norm": 1.1011548042297363,
    "learning_rate": 0.00047995341313490777,
-   "loss": 3.
+   "loss": 3.2555,
    "step": 92900
  },
  {
    "epoch": 10.021563342318059,
-   "grad_norm": 0.
+   "grad_norm": 0.9567869305610657,
    "learning_rate": 0.000479888709155613,
-   "loss": 3.
+   "loss": 3.2694,
    "step": 92950
  },
  {
    "epoch": 10.026954177897574,
-   "grad_norm": 1.
+   "grad_norm": 1.1113311052322388,
    "learning_rate": 0.00047982400517631826,
-   "loss": 3.
+   "loss": 3.2875,
    "step": 93000
  },
  {
    "epoch": 10.026954177897574,
-   "eval_accuracy": 0.
-   "eval_loss": 3.
-   "eval_runtime":
-   "eval_samples_per_second":
-   "eval_steps_per_second":
+   "eval_accuracy": 0.37822396206884223,
+   "eval_loss": 3.4554944038391113,
+   "eval_runtime": 146.8369,
+   "eval_samples_per_second": 122.66,
+   "eval_steps_per_second": 7.668,
    "step": 93000
  },
  {
    "epoch": 10.032345013477089,
-   "grad_norm": 1.
+   "grad_norm": 1.1876881122589111,
    "learning_rate": 0.0004797593011970236,
-   "loss": 3.
+   "loss": 3.3014,
    "step": 93050
  },
  {
    "epoch": 10.037735849056604,
-   "grad_norm": 0.
+   "grad_norm": 0.9571216702461243,
    "learning_rate": 0.00047969459721772885,
-   "loss": 3.
+   "loss": 3.2934,
    "step": 93100
  },
  {
    "epoch": 10.04312668463612,
-   "grad_norm": 0.
+   "grad_norm": 0.9346808195114136,
    "learning_rate": 0.0004796298932384341,
-   "loss": 3.
+   "loss": 3.3042,
    "step": 93150
  },
  {
    "epoch": 10.048517520215633,
-   "grad_norm": 0.
+   "grad_norm": 0.9184759259223938,
    "learning_rate": 0.00047956518925913944,
-   "loss": 3.
+   "loss": 3.3273,
    "step": 93200
  },
  {
    "epoch": 10.053908355795148,
-   "grad_norm": 0.
+   "grad_norm": 0.9030526280403137,
    "learning_rate": 0.0004795004852798447,
-   "loss": 3.
+   "loss": 3.3273,
    "step": 93250
  },
  {
    "epoch": 10.059299191374663,
-   "grad_norm": 0.
+   "grad_norm": 0.9374904632568359,
    "learning_rate": 0.0004794357813005499,
-   "loss": 3.
+   "loss": 3.3431,
    "step": 93300
  },
  {
    "epoch": 10.064690026954178,
-   "grad_norm": 1.
+   "grad_norm": 1.0782991647720337,
    "learning_rate": 0.00047937107732125517,
-   "loss": 3.
+   "loss": 3.3481,
    "step": 93350
  },
  {
    "epoch": 10.070080862533693,
-   "grad_norm": 0.
+   "grad_norm": 0.9518354535102844,
    "learning_rate": 0.0004793063733419605,
-   "loss": 3.
+   "loss": 3.3441,
    "step": 93400
  },
  {
    "epoch": 10.075471698113208,
-   "grad_norm": 0.
+   "grad_norm": 0.9269945621490479,
    "learning_rate": 0.00047924166936266576,
-   "loss": 3.
+   "loss": 3.3473,
    "step": 93450
  },
  {
    "epoch": 10.080862533692722,
-   "grad_norm": 0.
+   "grad_norm": 0.9213449954986572,
    "learning_rate": 0.000479176965383371,
-   "loss": 3.
+   "loss": 3.3259,
    "step": 93500
  },
  {
    "epoch": 10.086253369272237,
-   "grad_norm": 0.
+   "grad_norm": 0.9624829292297363,
    "learning_rate": 0.0004791122614040763,
-   "loss": 3.
+   "loss": 3.3459,
    "step": 93550
  },
  {
    "epoch": 10.091644204851752,
-   "grad_norm": 0.
+   "grad_norm": 0.9101693630218506,
    "learning_rate": 0.0004790475574247816,
-   "loss": 3.
+   "loss": 3.3453,
    "step": 93600
  },
  {
    "epoch": 10.097035040431267,
-   "grad_norm": 1.
+   "grad_norm": 1.010877251625061,
    "learning_rate": 0.00047898285344548684,
-   "loss": 3.
+   "loss": 3.3487,
    "step": 93650
  },
  {
    "epoch": 10.102425876010782,
-   "grad_norm": 0.
+   "grad_norm": 0.9577584862709045,
    "learning_rate": 0.00047891814946619213,
-   "loss": 3.
+   "loss": 3.3608,
    "step": 93700
  },
  {
    "epoch": 10.107816711590296,
-   "grad_norm": 0.
+   "grad_norm": 0.9266205430030823,
    "learning_rate": 0.0004788534454868974,
-   "loss": 3.
+   "loss": 3.3607,
    "step": 93750
  },
  {
    "epoch": 10.11320754716981,
-   "grad_norm": 0.
+   "grad_norm": 0.9234276413917542,
    "learning_rate": 0.00047878874150760267,
-   "loss": 3.
+   "loss": 3.3616,
    "step": 93800
  },
  {
    "epoch": 10.118598382749326,
-   "grad_norm": 1.
+   "grad_norm": 1.0343414545059204,
    "learning_rate": 0.00047872403752830797,
-   "loss": 3.
+   "loss": 3.3605,
    "step": 93850
  },
  {
    "epoch": 10.123989218328841,
-   "grad_norm": 0.
+   "grad_norm": 0.823859691619873,
    "learning_rate": 0.0004786593335490132,
-   "loss": 3.
+   "loss": 3.3597,
    "step": 93900
  },
  {
    "epoch": 10.129380053908356,
-   "grad_norm": 0.
+   "grad_norm": 0.8498820066452026,
    "learning_rate": 0.0004785946295697185,
-   "loss": 3.
+   "loss": 3.3508,
    "step": 93950
  },
  {
    "epoch": 10.134770889487871,
-   "grad_norm": 0.
+   "grad_norm": 0.876309871673584,
    "learning_rate": 0.0004785299255904238,
-   "loss": 3.
+   "loss": 3.3637,
    "step": 94000
  },
  {
    "epoch": 10.134770889487871,
-   "eval_accuracy": 0.
-   "eval_loss": 3.
-   "eval_runtime":
-   "eval_samples_per_second":
-   "eval_steps_per_second":
+   "eval_accuracy": 0.3758454417016954,
+   "eval_loss": 3.4686508178710938,
+   "eval_runtime": 146.2986,
+   "eval_samples_per_second": 123.111,
+   "eval_steps_per_second": 7.697,
    "step": 94000
  },
  {
    "epoch": 10.140161725067385,
-   "grad_norm": 0.
+   "grad_norm": 0.8599374294281006,
    "learning_rate": 0.00047846522161112905,
-   "loss": 3.
+   "loss": 3.3606,
    "step": 94050
  },
  {
    "epoch": 10.1455525606469,
-   "grad_norm": 0.
+   "grad_norm": 0.8754469156265259,
    "learning_rate": 0.0004784005176318343,
-   "loss": 3.
+   "loss": 3.3598,
    "step": 94100
  },
  {
    "epoch": 10.150943396226415,
-   "grad_norm": 0.
+   "grad_norm": 0.8947704434394836,
    "learning_rate": 0.00047833581365253964,
-   "loss": 3.
+   "loss": 3.3447,
    "step": 94150
  },
  {
    "epoch": 10.15633423180593,
-   "grad_norm": 0.
+   "grad_norm": 0.9169558882713318,
    "learning_rate": 0.0004782711096732449,
-   "loss": 3.
+   "loss": 3.3796,
    "step": 94200
  },
  {
    "epoch": 10.161725067385445,
-   "grad_norm": 0.
+   "grad_norm": 0.8607390522956848,
    "learning_rate": 0.0004782064056939501,
-   "loss": 3.
+   "loss": 3.3756,
    "step": 94250
  },
  {
    "epoch": 10.167115902964959,
-   "grad_norm": 0.
+   "grad_norm": 0.8300653696060181,
    "learning_rate": 0.00047814170171465536,
-   "loss": 3.
+   "loss": 3.3631,
    "step": 94300
  },
  {
    "epoch": 10.172506738544474,
-   "grad_norm": 0.
+   "grad_norm": 0.9180701375007629,
    "learning_rate": 0.0004780769977353607,
-   "loss": 3.
+   "loss": 3.3624,
    "step": 94350
  },
  {
    "epoch": 10.177897574123989,
-   "grad_norm": 0.
+   "grad_norm": 0.7899641990661621,
    "learning_rate": 0.00047801229375606596,
-   "loss": 3.
+   "loss": 3.3785,
    "step": 94400
  },
  {
    "epoch": 10.183288409703504,
-   "grad_norm": 0.
+   "grad_norm": 0.854278028011322,
    "learning_rate": 0.0004779475897767712,
-   "loss": 3.
+   "loss": 3.3768,
    "step": 94450
  },
  {
    "epoch": 10.18867924528302,
-   "grad_norm": 0.
+   "grad_norm": 0.8559595942497253,
    "learning_rate": 0.00047788288579747655,
-   "loss": 3.
+   "loss": 3.357,
    "step": 94500
  },
  {
    "epoch": 10.194070080862534,
-   "grad_norm": 0.
+   "grad_norm": 0.9118571877479553,
    "learning_rate": 0.0004778181818181818,
-   "loss": 3.
+   "loss": 3.369,
    "step": 94550
  },
  {
    "epoch": 10.199460916442048,
-   "grad_norm": 0.
+   "grad_norm": 0.7602856755256653,
    "learning_rate": 0.00047775347783888703,
-   "loss": 3.
+   "loss": 3.3648,
    "step": 94600
  },
  {
    "epoch": 10.204851752021563,
-   "grad_norm": 0.
+   "grad_norm": 0.8732795715332031,
    "learning_rate": 0.0004776887738595923,
-   "loss": 3.
+   "loss": 3.3724,
    "step": 94650
  },
  {
    "epoch": 10.210242587601078,
-   "grad_norm": 0.
+   "grad_norm": 0.8687312602996826,
    "learning_rate": 0.0004776240698802976,
-   "loss": 3.
+   "loss": 3.384,
    "step": 94700
  },
  {
    "epoch": 10.215633423180593,
-   "grad_norm": 0.
+   "grad_norm": 0.8585965037345886,
    "learning_rate": 0.00047755936590100287,
-   "loss": 3.
+   "loss": 3.3806,
    "step": 94750
  },
  {
    "epoch": 10.221024258760108,
-   "grad_norm": 0.
+   "grad_norm": 0.8676369786262512,
    "learning_rate": 0.0004774946619217081,
-   "loss": 3.
+   "loss": 3.3732,
    "step": 94800
  },
  {
    "epoch": 10.226415094339623,
-   "grad_norm": 0.
+   "grad_norm": 0.8202512264251709,
    "learning_rate": 0.0004774299579424134,
-   "loss": 3.
+   "loss": 3.3866,
    "step": 94850
  },
  {
    "epoch": 10.231805929919137,
-   "grad_norm": 0.
+   "grad_norm": 0.8557911515235901,
    "learning_rate": 0.0004773652539631187,
-   "loss": 3.
+   "loss": 3.3494,
    "step": 94900
  },
  {
    "epoch": 10.237196765498652,
-   "grad_norm": 0.
+   "grad_norm": 0.8179985284805298,
    "learning_rate": 0.00047730054998382395,
-   "loss": 3.
+   "loss": 3.3798,
    "step": 94950
  },
  {
    "epoch": 10.242587601078167,
-   "grad_norm": 0.
+   "grad_norm": 0.882681667804718,
    "learning_rate": 0.00047723584600452924,
-   "loss": 3.
+   "loss": 3.3803,
    "step": 95000
  },
  {
    "epoch": 10.242587601078167,
-   "eval_accuracy": 0.
-   "eval_loss": 3.
-   "eval_runtime":
-   "eval_samples_per_second":
-   "eval_steps_per_second":
+   "eval_accuracy": 0.37679104778434486,
+   "eval_loss": 3.4614784717559814,
+   "eval_runtime": 145.9969,
+   "eval_samples_per_second": 123.366,
+   "eval_steps_per_second": 7.712,
    "step": 95000
  },
  {
    "epoch": 10.242587601078167,
    "step": 95000,
    "total_flos": 7.94262454272e+17,
-   "train_loss": 0.
-   "train_runtime":
-   "train_samples_per_second":
-   "train_steps_per_second":
+   "train_loss": 0.07917592998303866,
+   "train_runtime": 1615.2207,
+   "train_samples_per_second": 9186.825,
+   "train_steps_per_second": 287.112
  }
  ],
  "logging_steps": 50,
training_args.bin
CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:
+ oid sha256:82e051b88a0db5b9879f5b1f004bda11f82d92ed1846f25896bab93912738a10
  size 5304