Shawon16 commited on
Commit
b705ef8
·
verified ·
1 Parent(s): 725e912

Model save

Browse files
Files changed (3) hide show
  1. README.md +78 -0
  2. model.safetensors +1 -1
  3. trainer_state.json +275 -0
README.md ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ library_name: transformers
3
+ license: cc-by-nc-4.0
4
+ base_model: MCG-NJU/videomae-base
5
+ tags:
6
+ - generated_from_trainer
7
+ metrics:
8
+ - accuracy
9
+ model-index:
10
+ - name: VideoMAE_Base_wlasl_2000_longtail_20
11
+ results: []
12
+ ---
13
+
14
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
15
+ should probably proofread and complete it, then remove this comment. -->
16
+
17
+ # VideoMAE_Base_wlasl_2000_longtail_20
18
+
19
+ This model is a fine-tuned version of [MCG-NJU/videomae-base](https://huggingface.co/MCG-NJU/videomae-base) on an unknown dataset.
20
+ It achieves the following results on the evaluation set:
21
+ - Loss: 7.8221
22
+ - Accuracy: 0.0033
23
+
24
+ ## Model description
25
+
26
+ More information needed
27
+
28
+ ## Intended uses & limitations
29
+
30
+ More information needed
31
+
32
+ ## Training and evaluation data
33
+
34
+ More information needed
35
+
36
+ ## Training procedure
37
+
38
+ ### Training hyperparameters
39
+
40
+ The following hyperparameters were used during training:
41
+ - learning_rate: 5e-05
42
+ - train_batch_size: 2
43
+ - eval_batch_size: 2
44
+ - seed: 42
45
+ - gradient_accumulation_steps: 4
46
+ - total_train_batch_size: 8
47
+ - optimizer: adamw_torch with betas=(0.9, 0.999) and epsilon=1e-08; no additional optimizer arguments
48
+ - lr_scheduler_type: linear
49
+ - lr_scheduler_warmup_ratio: 0.1
50
+ - training_steps: 35720
51
+ - mixed_precision_training: Native AMP
52
+
53
+ ### Training results
54
+
55
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy |
56
+ |:-------------:|:-------:|:-----:|:---------------:|:--------:|
57
+ | 30.6409 | 0.05 | 1786 | 7.6310 | 0.0005 |
58
+ | 30.5597 | 1.0500 | 3572 | 7.6175 | 0.0005 |
59
+ | 30.4316 | 2.0500 | 5358 | 7.6035 | 0.0010 |
60
+ | 30.2683 | 3.0500 | 7145 | 7.5938 | 0.0020 |
61
+ | 30.0727 | 4.05 | 8931 | 7.6268 | 0.0018 |
62
+ | 29.84 | 5.0500 | 10717 | 7.6477 | 0.0026 |
63
+ | 29.5721 | 6.0500 | 12503 | 7.6825 | 0.0023 |
64
+ | 29.2352 | 7.0500 | 14290 | 7.7271 | 0.0023 |
65
+ | 28.9425 | 8.05 | 16076 | 7.7662 | 0.0041 |
66
+ | 28.6146 | 9.0500 | 17862 | 7.7746 | 0.0031 |
67
+ | 28.3135 | 10.0500 | 19648 | 7.7994 | 0.0028 |
68
+ | 27.985 | 11.0500 | 21435 | 7.8092 | 0.0036 |
69
+ | 27.6736 | 12.05 | 23221 | 7.8222 | 0.0028 |
70
+ | 27.3741 | 13.0500 | 25007 | 7.8221 | 0.0033 |
71
+
72
+
73
+ ### Framework versions
74
+
75
+ - Transformers 4.46.1
76
+ - Pytorch 2.5.1+cu124
77
+ - Datasets 3.1.0
78
+ - Tokenizers 0.20.1
model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:bcfab36001991fe7f8261d3ff2a152aa70bfa04b1f57cac28db4ca3172467840
3
  size 351083264
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aa8054db7c64d5b52e59c4773862d2ab13e0741de7ba93a0fcbc04e1b1582649
3
  size 351083264
trainer_state.json ADDED
@@ -0,0 +1,275 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": 0.0040858018386108275,
3
+ "best_model_checkpoint": "/media/cse/HDD/Shawon/shawon/10 fold timesformer/VideoMAE_Base_wlasl_2000_longtail_20/checkpoint-16076",
4
+ "epoch": 13.049993001119821,
5
+ "eval_steps": 500,
6
+ "global_step": 25007,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.05,
13
+ "grad_norm": 22.720428466796875,
14
+ "learning_rate": 2.494400895856663e-05,
15
+ "loss": 30.6409,
16
+ "step": 1786
17
+ },
18
+ {
19
+ "epoch": 0.05,
20
+ "eval_accuracy": 0.0005107252298263534,
21
+ "eval_loss": 7.631043910980225,
22
+ "eval_runtime": 277.5295,
23
+ "eval_samples_per_second": 14.11,
24
+ "eval_steps_per_second": 7.055,
25
+ "step": 1786
26
+ },
27
+ {
28
+ "epoch": 1.0499930011198209,
29
+ "grad_norm": 21.318378448486328,
30
+ "learning_rate": 4.994400895856663e-05,
31
+ "loss": 30.5597,
32
+ "step": 3572
33
+ },
34
+ {
35
+ "epoch": 1.0499930011198209,
36
+ "eval_accuracy": 0.0005107252298263534,
37
+ "eval_loss": 7.617450714111328,
38
+ "eval_runtime": 258.7991,
39
+ "eval_samples_per_second": 15.131,
40
+ "eval_steps_per_second": 7.566,
41
+ "step": 3572
42
+ },
43
+ {
44
+ "epoch": 2.0499860022396414,
45
+ "grad_norm": 20.306100845336914,
46
+ "learning_rate": 4.722999875575464e-05,
47
+ "loss": 30.4316,
48
+ "step": 5358
49
+ },
50
+ {
51
+ "epoch": 2.0499860022396414,
52
+ "eval_accuracy": 0.0010214504596527069,
53
+ "eval_loss": 7.6034626960754395,
54
+ "eval_runtime": 245.7692,
55
+ "eval_samples_per_second": 15.934,
56
+ "eval_steps_per_second": 7.967,
57
+ "step": 5358
58
+ },
59
+ {
60
+ "epoch": 3.0500069988801792,
61
+ "grad_norm": 21.882591247558594,
62
+ "learning_rate": 4.445222097797686e-05,
63
+ "loss": 30.2683,
64
+ "step": 7145
65
+ },
66
+ {
67
+ "epoch": 3.0500069988801792,
68
+ "eval_accuracy": 0.0020429009193054137,
69
+ "eval_loss": 7.593833923339844,
70
+ "eval_runtime": 259.6587,
71
+ "eval_samples_per_second": 15.081,
72
+ "eval_steps_per_second": 7.541,
73
+ "step": 7145
74
+ },
75
+ {
76
+ "epoch": 4.05,
77
+ "grad_norm": 19.35872459411621,
78
+ "learning_rate": 4.167444320019908e-05,
79
+ "loss": 30.0727,
80
+ "step": 8931
81
+ },
82
+ {
83
+ "epoch": 4.05,
84
+ "eval_accuracy": 0.001787538304392237,
85
+ "eval_loss": 7.626790523529053,
86
+ "eval_runtime": 259.567,
87
+ "eval_samples_per_second": 15.087,
88
+ "eval_steps_per_second": 7.543,
89
+ "step": 8931
90
+ },
91
+ {
92
+ "epoch": 5.04999300111982,
93
+ "grad_norm": 19.55528450012207,
94
+ "learning_rate": 3.8898220729127785e-05,
95
+ "loss": 29.84,
96
+ "step": 10717
97
+ },
98
+ {
99
+ "epoch": 5.04999300111982,
100
+ "eval_accuracy": 0.002553626149131767,
101
+ "eval_loss": 7.647721290588379,
102
+ "eval_runtime": 258.3287,
103
+ "eval_samples_per_second": 15.159,
104
+ "eval_steps_per_second": 7.579,
105
+ "step": 10717
106
+ },
107
+ {
108
+ "epoch": 6.049986002239642,
109
+ "grad_norm": 19.76344108581543,
110
+ "learning_rate": 3.612044295135001e-05,
111
+ "loss": 29.5721,
112
+ "step": 12503
113
+ },
114
+ {
115
+ "epoch": 6.049986002239642,
116
+ "eval_accuracy": 0.0022982635342185904,
117
+ "eval_loss": 7.682473659515381,
118
+ "eval_runtime": 250.7957,
119
+ "eval_samples_per_second": 15.614,
120
+ "eval_steps_per_second": 7.807,
121
+ "step": 12503
122
+ },
123
+ {
124
+ "epoch": 7.050006998880179,
125
+ "grad_norm": 22.916093826293945,
126
+ "learning_rate": 3.334266517357223e-05,
127
+ "loss": 29.2352,
128
+ "step": 14290
129
+ },
130
+ {
131
+ "epoch": 7.050006998880179,
132
+ "eval_accuracy": 0.0022982635342185904,
133
+ "eval_loss": 7.727109909057617,
134
+ "eval_runtime": 269.1751,
135
+ "eval_samples_per_second": 14.548,
136
+ "eval_steps_per_second": 7.274,
137
+ "step": 14290
138
+ },
139
+ {
140
+ "epoch": 8.05,
141
+ "grad_norm": 20.577028274536133,
142
+ "learning_rate": 3.056644270250094e-05,
143
+ "loss": 28.9425,
144
+ "step": 16076
145
+ },
146
+ {
147
+ "epoch": 8.05,
148
+ "eval_accuracy": 0.0040858018386108275,
149
+ "eval_loss": 7.766170501708984,
150
+ "eval_runtime": 254.6696,
151
+ "eval_samples_per_second": 15.377,
152
+ "eval_steps_per_second": 7.688,
153
+ "step": 16076
154
+ },
155
+ {
156
+ "epoch": 9.049993001119821,
157
+ "grad_norm": 21.435955047607422,
158
+ "learning_rate": 2.7788664924723157e-05,
159
+ "loss": 28.6146,
160
+ "step": 17862
161
+ },
162
+ {
163
+ "epoch": 9.049993001119821,
164
+ "eval_accuracy": 0.0030643513789581204,
165
+ "eval_loss": 7.774607181549072,
166
+ "eval_runtime": 259.5706,
167
+ "eval_samples_per_second": 15.086,
168
+ "eval_steps_per_second": 7.543,
169
+ "step": 17862
170
+ },
171
+ {
172
+ "epoch": 10.049986002239642,
173
+ "grad_norm": 23.16460609436035,
174
+ "learning_rate": 2.5012442453651862e-05,
175
+ "loss": 28.3135,
176
+ "step": 19648
177
+ },
178
+ {
179
+ "epoch": 10.049986002239642,
180
+ "eval_accuracy": 0.0028089887640449437,
181
+ "eval_loss": 7.799375057220459,
182
+ "eval_runtime": 259.0623,
183
+ "eval_samples_per_second": 15.116,
184
+ "eval_steps_per_second": 7.558,
185
+ "step": 19648
186
+ },
187
+ {
188
+ "epoch": 11.050006998880178,
189
+ "grad_norm": 25.342758178710938,
190
+ "learning_rate": 2.2234664675874084e-05,
191
+ "loss": 27.985,
192
+ "step": 21435
193
+ },
194
+ {
195
+ "epoch": 11.050006998880178,
196
+ "eval_accuracy": 0.003575076608784474,
197
+ "eval_loss": 7.809170246124268,
198
+ "eval_runtime": 262.3629,
199
+ "eval_samples_per_second": 14.926,
200
+ "eval_steps_per_second": 7.463,
201
+ "step": 21435
202
+ },
203
+ {
204
+ "epoch": 12.05,
205
+ "grad_norm": 22.567102432250977,
206
+ "learning_rate": 1.9458442204802787e-05,
207
+ "loss": 27.6736,
208
+ "step": 23221
209
+ },
210
+ {
211
+ "epoch": 12.05,
212
+ "eval_accuracy": 0.0028089887640449437,
213
+ "eval_loss": 7.8221635818481445,
214
+ "eval_runtime": 252.5003,
215
+ "eval_samples_per_second": 15.509,
216
+ "eval_steps_per_second": 7.754,
217
+ "step": 23221
218
+ },
219
+ {
220
+ "epoch": 13.049993001119821,
221
+ "grad_norm": 24.473997116088867,
222
+ "learning_rate": 1.6680664427025012e-05,
223
+ "loss": 27.3741,
224
+ "step": 25007
225
+ },
226
+ {
227
+ "epoch": 13.049993001119821,
228
+ "eval_accuracy": 0.003319713993871297,
229
+ "eval_loss": 7.8220720291137695,
230
+ "eval_runtime": 253.0179,
231
+ "eval_samples_per_second": 15.477,
232
+ "eval_steps_per_second": 7.739,
233
+ "step": 25007
234
+ },
235
+ {
236
+ "epoch": 13.049993001119821,
237
+ "step": 25007,
238
+ "total_flos": 2.5371184479755305e+20,
239
+ "train_loss": 29.25168140420682,
240
+ "train_runtime": 23631.6152,
241
+ "train_samples_per_second": 12.092,
242
+ "train_steps_per_second": 1.512
243
+ }
244
+ ],
245
+ "logging_steps": 500,
246
+ "max_steps": 35720,
247
+ "num_input_tokens_seen": 0,
248
+ "num_train_epochs": 9223372036854775807,
249
+ "save_steps": 500,
250
+ "stateful_callbacks": {
251
+ "EarlyStoppingCallback": {
252
+ "args": {
253
+ "early_stopping_patience": 5,
254
+ "early_stopping_threshold": 0.0
255
+ },
256
+ "attributes": {
257
+ "early_stopping_patience_counter": 5
258
+ }
259
+ },
260
+ "TrainerControl": {
261
+ "args": {
262
+ "should_epoch_stop": false,
263
+ "should_evaluate": false,
264
+ "should_log": false,
265
+ "should_save": true,
266
+ "should_training_stop": true
267
+ },
268
+ "attributes": {}
269
+ }
270
+ },
271
+ "total_flos": 2.5371184479755305e+20,
272
+ "train_batch_size": 2,
273
+ "trial_name": null,
274
+ "trial_params": null
275
+ }