Upload folder using huggingface_hub
- .gitattributes +1 -0
- added_tokens.json +16 -0
- chat_template.json +3 -0
- config.json +48 -0
- generation_config.json +14 -0
- merges.txt +0 -0
- model.safetensors +3 -0
- output.20241021134301.log.txt +501 -0
- preprocessor_config.json +29 -0
- special_tokens_map.json +31 -0
- tokenizer.json +3 -0
- tokenizer_config.json +146 -0
- vocab.json +0 -0
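
A commit titled like this one is typically produced by the huggingface_hub client. Below is a minimal sketch of the call that uploads a local folder as a single commit; the local path and repo id are placeholders, not values recorded in this commit:

from huggingface_hub import HfApi

api = HfApi()
# folder_path and repo_id are hypothetical; substitute your own.
api.upload_folder(
    folder_path="./qwen2-vl-2b-finetuned",
    repo_id="user/qwen2-vl-2b-finetuned",
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)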
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
added_tokens.json
ADDED
@@ -0,0 +1,16 @@
+{
+  "<|box_end|>": 151649,
+  "<|box_start|>": 151648,
+  "<|endoftext|>": 151643,
+  "<|im_end|>": 151645,
+  "<|im_start|>": 151644,
+  "<|image_pad|>": 151655,
+  "<|object_ref_end|>": 151647,
+  "<|object_ref_start|>": 151646,
+  "<|quad_end|>": 151651,
+  "<|quad_start|>": 151650,
+  "<|video_pad|>": 151656,
+  "<|vision_end|>": 151653,
+  "<|vision_pad|>": 151654,
+  "<|vision_start|>": 151652
+}
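
added_tokens.json pins each Qwen2-VL special token to a fixed vocabulary id. Once the files are downloaded, the mapping can be verified through the tokenizer; a minimal sketch, with the local path as a placeholder:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./qwen2-vl-2b-finetuned")  # placeholder path
# These ids come straight from added_tokens.json above.
assert tokenizer.convert_tokens_to_ids("<|image_pad|>") == 151655
assert tokenizer.convert_tokens_to_ids("<|im_end|>") == 151645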
chat_template.json
ADDED
@@ -0,0 +1,3 @@
+{
+  "chat_template": "{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n{% endif %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}"
+}
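
This Jinja template is what apply_chat_template renders at inference time: it injects a default system prompt, wraps each turn in <|im_start|>/<|im_end|>, and replaces every image or video entry with <|vision_start|><|image_pad|><|vision_end|> (or the video pad). A minimal sketch of rendering one image-plus-text turn; the message content is illustrative and the path is a placeholder:

from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("./qwen2-vl-2b-finetuned")  # placeholder path
messages = [
    {"role": "user", "content": [
        {"type": "image"},
        {"type": "text", "text": "Describe this image."},
    ]},
]
# Yields the <|im_start|>... string with the vision pad tokens inserted.
text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(text)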
config.json
ADDED
@@ -0,0 +1,48 @@
+{
+  "_name_or_path": "Qwen/Qwen2-VL-2B-Instruct",
+  "architectures": [
+    "Qwen2VLForConditionalGeneration"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 151643,
+  "eos_token_id": 151645,
+  "hidden_act": "silu",
+  "hidden_size": 1536,
+  "image_token_id": 151655,
+  "initializer_range": 0.02,
+  "intermediate_size": 8960,
+  "max_position_embeddings": 32768,
+  "max_window_layers": 28,
+  "model_type": "qwen2_vl",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 28,
+  "num_key_value_heads": 2,
+  "rms_norm_eps": 1e-06,
+  "rope_scaling": {
+    "mrope_section": [
+      16,
+      24,
+      24
+    ],
+    "rope_type": "default",
+    "type": "default"
+  },
+  "rope_theta": 1000000.0,
+  "sliding_window": 32768,
+  "tie_word_embeddings": true,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.45.2",
+  "use_cache": true,
+  "use_sliding_window": false,
+  "video_token_id": 151656,
+  "vision_config": {
+    "hidden_size": 1536,
+    "in_chans": 3,
+    "model_type": "qwen2_vl",
+    "spatial_patch_size": 14
+  },
+  "vision_end_token_id": 151653,
+  "vision_start_token_id": 151652,
+  "vision_token_id": 151654,
+  "vocab_size": 151936
+}
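
config.json describes a bfloat16 Qwen2-VL 2B checkpoint: 28 transformer layers, hidden size 1536, 12 attention heads with 2 key/value heads (grouped-query attention), multimodal RoPE (mrope_section), and a 151936-token vocabulary with tied embeddings. A minimal loading sketch under those settings, path again a placeholder:

import torch
from transformers import Qwen2VLForConditionalGeneration

# The architectures field selects this class; torch_dtype matches the config.
model = Qwen2VLForConditionalGeneration.from_pretrained(
    "./qwen2-vl-2b-finetuned",
    torch_dtype=torch.bfloat16,
    device_map="auto",
)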
generation_config.json
ADDED
@@ -0,0 +1,14 @@
+{
+  "attn_implementation": "flash_attention_2",
+  "bos_token_id": 151643,
+  "do_sample": true,
+  "eos_token_id": [
+    151645,
+    151643
+  ],
+  "pad_token_id": 151643,
+  "temperature": 0.01,
+  "top_k": 1,
+  "top_p": 0.001,
+  "transformers_version": "4.45.2"
+}
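
Note that with top_k=1, top_p=0.001, and temperature=0.01, generation is effectively greedy even though do_sample is true. These defaults are picked up automatically from generation_config.json; a short sketch continuing the loading example above, where inputs is assumed to be a batch prepared by the processor:

# Decodes near-greedily despite do_sample=True, per generation_config.json.
output_ids = model.generate(**inputs, max_new_tokens=128)
print(processor.batch_decode(output_ids, skip_special_tokens=True)[0])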
merges.txt
ADDED
The diff for this file is too large to render; see the raw diff.
model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a111b34aab66a5a53e90df46918080d8137321e4c5aac8490d44260a7d07cd76
+size 4418050848
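
The three lines above are a Git LFS pointer rather than the weights themselves; the real payload is 4418050848 bytes (about 4.4 GB). After a full (non-pointer) download, the file can be checked against the recorded digest; a minimal sketch:

import hashlib, os

path = "./qwen2-vl-2b-finetuned/model.safetensors"  # placeholder path
digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)
# Both values should match the LFS pointer above.
print(digest.hexdigest(), os.path.getsize(path))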
output.20241021134301.log.txt
ADDED
@@ -0,0 +1,501 @@
+2024-10-21 13:43:07-finetune.py:240-INFO >> Batch 2 of epoch 1/10, average training loss of previous 2 batches: 1.383868396282196
+2024-10-21 13:43:08-finetune.py:240-INFO >> Batch 4 of epoch 1/10, average training loss of previous 2 batches: 1.4955955147743225
+2024-10-21 13:43:09-finetune.py:240-INFO >> Batch 6 of epoch 1/10, average training loss of previous 2 batches: 1.2759928703308105
+2024-10-21 13:43:10-finetune.py:240-INFO >> Batch 8 of epoch 1/10, average training loss of previous 2 batches: 1.6038963794708252
+2024-10-21 13:43:11-finetune.py:240-INFO >> Batch 10 of epoch 1/10, average training loss of previous 2 batches: 1.2483851313591003
+2024-10-21 13:43:12-finetune.py:240-INFO >> Batch 12 of epoch 1/10, average training loss of previous 2 batches: 1.3747590780258179
+2024-10-21 13:43:12-finetune.py:240-INFO >> Batch 14 of epoch 1/10, average training loss of previous 2 batches: 1.27400141954422
+2024-10-21 13:43:13-finetune.py:240-INFO >> Batch 16 of epoch 1/10, average training loss of previous 2 batches: 1.441308856010437
+2024-10-21 13:43:14-finetune.py:240-INFO >> Batch 18 of epoch 1/10, average training loss of previous 2 batches: 1.2634166479110718
+2024-10-21 13:43:15-finetune.py:240-INFO >> Batch 20 of epoch 1/10, average training loss of previous 2 batches: 1.1642013788223267
+2024-10-21 13:43:16-finetune.py:240-INFO >> Batch 22 of epoch 1/10, average training loss of previous 2 batches: 1.3199244737625122
+2024-10-21 13:43:17-finetune.py:240-INFO >> Batch 24 of epoch 1/10, average training loss of previous 2 batches: 1.2249361276626587
+2024-10-21 13:43:18-finetune.py:240-INFO >> Batch 26 of epoch 1/10, average training loss of previous 2 batches: 1.3165916800498962
+2024-10-21 13:43:18-finetune.py:240-INFO >> Batch 28 of epoch 1/10, average training loss of previous 2 batches: 1.1227564811706543
+2024-10-21 13:43:19-finetune.py:240-INFO >> Batch 30 of epoch 1/10, average training loss of previous 2 batches: 1.022961974143982
+2024-10-21 13:43:20-finetune.py:240-INFO >> Batch 32 of epoch 1/10, average training loss of previous 2 batches: 1.1248258352279663
+2024-10-21 13:43:21-finetune.py:240-INFO >> Batch 34 of epoch 1/10, average training loss of previous 2 batches: 1.1138423681259155
+2024-10-21 13:43:22-finetune.py:240-INFO >> Batch 36 of epoch 1/10, average training loss of previous 2 batches: 1.2348712086677551
+2024-10-21 13:43:23-finetune.py:240-INFO >> Batch 38 of epoch 1/10, average training loss of previous 2 batches: 1.032978355884552
+2024-10-21 13:43:23-finetune.py:240-INFO >> Batch 40 of epoch 1/10, average training loss of previous 2 batches: 1.0560821890830994
+2024-10-21 13:43:24-finetune.py:240-INFO >> Batch 42 of epoch 1/10, average training loss of previous 2 batches: 1.0707827806472778
+2024-10-21 13:43:25-finetune.py:240-INFO >> Batch 44 of epoch 1/10, average training loss of previous 2 batches: 1.0700677037239075
+2024-10-21 13:43:26-finetune.py:240-INFO >> Batch 46 of epoch 1/10, average training loss of previous 2 batches: 1.1728523969650269
+2024-10-21 13:43:27-finetune.py:240-INFO >> Batch 48 of epoch 1/10, average training loss of previous 2 batches: 1.207688808441162
+2024-10-21 13:43:28-finetune.py:240-INFO >> Batch 50 of epoch 1/10, average training loss of previous 2 batches: 0.9196622967720032
+2024-10-21 13:43:29-finetune.py:240-INFO >> Batch 52 of epoch 1/10, average training loss of previous 2 batches: 1.1481328010559082
+2024-10-21 13:43:29-finetune.py:240-INFO >> Batch 54 of epoch 1/10, average training loss of previous 2 batches: 1.104943871498108
+2024-10-21 13:43:30-finetune.py:240-INFO >> Batch 56 of epoch 1/10, average training loss of previous 2 batches: 0.9173874855041504
+2024-10-21 13:43:31-finetune.py:240-INFO >> Batch 58 of epoch 1/10, average training loss of previous 2 batches: 0.9855371713638306
+2024-10-21 13:43:32-finetune.py:240-INFO >> Batch 60 of epoch 1/10, average training loss of previous 2 batches: 1.1189665794372559
+2024-10-21 13:43:33-finetune.py:240-INFO >> Batch 62 of epoch 1/10, average training loss of previous 2 batches: 0.9940662384033203
+2024-10-21 13:43:34-finetune.py:240-INFO >> Batch 64 of epoch 1/10, average training loss of previous 2 batches: 1.120991289615631
+2024-10-21 13:43:34-finetune.py:240-INFO >> Batch 66 of epoch 1/10, average training loss of previous 2 batches: 0.9912855923175812
+2024-10-21 13:43:35-finetune.py:240-INFO >> Batch 68 of epoch 1/10, average training loss of previous 2 batches: 0.9637334048748016
+2024-10-21 13:43:36-finetune.py:240-INFO >> Batch 70 of epoch 1/10, average training loss of previous 2 batches: 1.0715060234069824
+2024-10-21 13:43:37-finetune.py:240-INFO >> Batch 72 of epoch 1/10, average training loss of previous 2 batches: 0.8575984835624695
+2024-10-21 13:43:38-finetune.py:240-INFO >> Batch 74 of epoch 1/10, average training loss of previous 2 batches: 1.012690782546997
+2024-10-21 13:43:39-finetune.py:240-INFO >> Batch 76 of epoch 1/10, average training loss of previous 2 batches: 0.9796179533004761
+2024-10-21 13:43:40-finetune.py:240-INFO >> Batch 78 of epoch 1/10, average training loss of previous 2 batches: 0.9251889288425446
+2024-10-21 13:43:40-finetune.py:240-INFO >> Batch 80 of epoch 1/10, average training loss of previous 2 batches: 1.2643179893493652
+2024-10-21 13:43:41-finetune.py:240-INFO >> Batch 82 of epoch 1/10, average training loss of previous 2 batches: 1.1511635184288025
+2024-10-21 13:43:42-finetune.py:240-INFO >> Batch 84 of epoch 1/10, average training loss of previous 2 batches: 1.0833743810653687
+2024-10-21 13:43:43-finetune.py:240-INFO >> Batch 86 of epoch 1/10, average training loss of previous 2 batches: 1.0614412426948547
+2024-10-21 13:43:44-finetune.py:240-INFO >> Batch 88 of epoch 1/10, average training loss of previous 2 batches: 1.0845468044281006
+2024-10-21 13:43:45-finetune.py:240-INFO >> Batch 90 of epoch 1/10, average training loss of previous 2 batches: 1.0898866057395935
+2024-10-21 13:43:45-finetune.py:240-INFO >> Batch 92 of epoch 1/10, average training loss of previous 2 batches: 1.106179416179657
+2024-10-21 13:43:46-finetune.py:240-INFO >> Batch 94 of epoch 1/10, average training loss of previous 2 batches: 0.9967300891876221
+2024-10-21 13:43:47-finetune.py:240-INFO >> Batch 96 of epoch 1/10, average training loss of previous 2 batches: 1.0550669431686401
+2024-10-21 13:43:48-finetune.py:240-INFO >> Batch 98 of epoch 1/10, average training loss of previous 2 batches: 1.003680169582367
+2024-10-21 13:43:49-finetune.py:240-INFO >> Batch 100 of epoch 1/10, average training loss of previous 2 batches: 1.0866395235061646
+2024-10-21 13:43:50-finetune.py:240-INFO >> Batch 2 of epoch 2/10, average training loss of previous 2 batches: 0.8128059804439545
+2024-10-21 13:43:51-finetune.py:240-INFO >> Batch 4 of epoch 2/10, average training loss of previous 2 batches: 0.9414032399654388
+2024-10-21 13:43:51-finetune.py:240-INFO >> Batch 6 of epoch 2/10, average training loss of previous 2 batches: 0.8227186799049377
+2024-10-21 13:43:52-finetune.py:240-INFO >> Batch 8 of epoch 2/10, average training loss of previous 2 batches: 1.1816160678863525
+2024-10-21 13:43:53-finetune.py:240-INFO >> Batch 10 of epoch 2/10, average training loss of previous 2 batches: 0.8436281085014343
+2024-10-21 13:43:54-finetune.py:240-INFO >> Batch 12 of epoch 2/10, average training loss of previous 2 batches: 0.9917052984237671
+2024-10-21 13:43:55-finetune.py:240-INFO >> Batch 14 of epoch 2/10, average training loss of previous 2 batches: 0.9916376769542694
+2024-10-21 13:43:56-finetune.py:240-INFO >> Batch 16 of epoch 2/10, average training loss of previous 2 batches: 1.1409175992012024
+2024-10-21 13:43:57-finetune.py:240-INFO >> Batch 18 of epoch 2/10, average training loss of previous 2 batches: 0.9628605842590332
+2024-10-21 13:43:57-finetune.py:240-INFO >> Batch 20 of epoch 2/10, average training loss of previous 2 batches: 0.9078276753425598
+2024-10-21 13:43:58-finetune.py:240-INFO >> Batch 22 of epoch 2/10, average training loss of previous 2 batches: 1.0480610132217407
+2024-10-21 13:43:59-finetune.py:240-INFO >> Batch 24 of epoch 2/10, average training loss of previous 2 batches: 0.9932570159435272
+2024-10-21 13:44:00-finetune.py:240-INFO >> Batch 26 of epoch 2/10, average training loss of previous 2 batches: 1.0300598442554474
+2024-10-21 13:44:01-finetune.py:240-INFO >> Batch 28 of epoch 2/10, average training loss of previous 2 batches: 0.8787409961223602
+2024-10-21 13:44:02-finetune.py:240-INFO >> Batch 30 of epoch 2/10, average training loss of previous 2 batches: 0.8204362094402313
+2024-10-21 13:44:03-finetune.py:240-INFO >> Batch 32 of epoch 2/10, average training loss of previous 2 batches: 0.8951331973075867
+2024-10-21 13:44:03-finetune.py:240-INFO >> Batch 34 of epoch 2/10, average training loss of previous 2 batches: 0.9430911242961884
+2024-10-21 13:44:04-finetune.py:240-INFO >> Batch 36 of epoch 2/10, average training loss of previous 2 batches: 1.037635713815689
+2024-10-21 13:44:05-finetune.py:240-INFO >> Batch 38 of epoch 2/10, average training loss of previous 2 batches: 0.8533597886562347
+2024-10-21 13:44:06-finetune.py:240-INFO >> Batch 40 of epoch 2/10, average training loss of previous 2 batches: 0.8404317200183868
+2024-10-21 13:44:07-finetune.py:240-INFO >> Batch 42 of epoch 2/10, average training loss of previous 2 batches: 0.879459410905838
+2024-10-21 13:44:08-finetune.py:240-INFO >> Batch 44 of epoch 2/10, average training loss of previous 2 batches: 0.9176307320594788
+2024-10-21 13:44:09-finetune.py:240-INFO >> Batch 46 of epoch 2/10, average training loss of previous 2 batches: 0.9854529798030853
+2024-10-21 13:44:09-finetune.py:240-INFO >> Batch 48 of epoch 2/10, average training loss of previous 2 batches: 1.0120484232902527
+2024-10-21 13:44:10-finetune.py:240-INFO >> Batch 50 of epoch 2/10, average training loss of previous 2 batches: 0.7464470267295837
+2024-10-21 13:44:11-finetune.py:240-INFO >> Batch 52 of epoch 2/10, average training loss of previous 2 batches: 0.9358537495136261
+2024-10-21 13:44:12-finetune.py:240-INFO >> Batch 54 of epoch 2/10, average training loss of previous 2 batches: 0.9208785891532898
+2024-10-21 13:44:13-finetune.py:240-INFO >> Batch 56 of epoch 2/10, average training loss of previous 2 batches: 0.7592509090900421
+2024-10-21 13:44:14-finetune.py:240-INFO >> Batch 58 of epoch 2/10, average training loss of previous 2 batches: 0.8204958140850067
+2024-10-21 13:44:15-finetune.py:240-INFO >> Batch 60 of epoch 2/10, average training loss of previous 2 batches: 0.9596169292926788
+2024-10-21 13:44:15-finetune.py:240-INFO >> Batch 62 of epoch 2/10, average training loss of previous 2 batches: 0.8344171643257141
+2024-10-21 13:44:16-finetune.py:240-INFO >> Batch 64 of epoch 2/10, average training loss of previous 2 batches: 0.979409247636795
+2024-10-21 13:44:17-finetune.py:240-INFO >> Batch 66 of epoch 2/10, average training loss of previous 2 batches: 0.8702147006988525
+2024-10-21 13:44:18-finetune.py:240-INFO >> Batch 68 of epoch 2/10, average training loss of previous 2 batches: 0.8352161645889282
+2024-10-21 13:44:19-finetune.py:240-INFO >> Batch 70 of epoch 2/10, average training loss of previous 2 batches: 0.9293053150177002
+2024-10-21 13:44:20-finetune.py:240-INFO >> Batch 72 of epoch 2/10, average training loss of previous 2 batches: 0.7029016017913818
+2024-10-21 13:44:21-finetune.py:240-INFO >> Batch 74 of epoch 2/10, average training loss of previous 2 batches: 0.8921840488910675
+2024-10-21 13:44:21-finetune.py:240-INFO >> Batch 76 of epoch 2/10, average training loss of previous 2 batches: 0.8468500375747681
+2024-10-21 13:44:22-finetune.py:240-INFO >> Batch 78 of epoch 2/10, average training loss of previous 2 batches: 0.8011757731437683
+2024-10-21 13:44:23-finetune.py:240-INFO >> Batch 80 of epoch 2/10, average training loss of previous 2 batches: 1.1114087104797363
+2024-10-21 13:44:24-finetune.py:240-INFO >> Batch 82 of epoch 2/10, average training loss of previous 2 batches: 1.0192084610462189
+2024-10-21 13:44:25-finetune.py:240-INFO >> Batch 84 of epoch 2/10, average training loss of previous 2 batches: 0.941729873418808
+2024-10-21 13:44:26-finetune.py:240-INFO >> Batch 86 of epoch 2/10, average training loss of previous 2 batches: 0.9171375632286072
+2024-10-21 13:44:27-finetune.py:240-INFO >> Batch 88 of epoch 2/10, average training loss of previous 2 batches: 0.9673053622245789
+2024-10-21 13:44:28-finetune.py:240-INFO >> Batch 90 of epoch 2/10, average training loss of previous 2 batches: 0.9618223011493683
+2024-10-21 13:44:28-finetune.py:240-INFO >> Batch 92 of epoch 2/10, average training loss of previous 2 batches: 0.9772896766662598
+2024-10-21 13:44:29-finetune.py:240-INFO >> Batch 94 of epoch 2/10, average training loss of previous 2 batches: 0.8849041163921356
+2024-10-21 13:44:30-finetune.py:240-INFO >> Batch 96 of epoch 2/10, average training loss of previous 2 batches: 0.9389393627643585
+2024-10-21 13:44:31-finetune.py:240-INFO >> Batch 98 of epoch 2/10, average training loss of previous 2 batches: 0.9033451080322266
+2024-10-21 13:44:32-finetune.py:240-INFO >> Batch 100 of epoch 2/10, average training loss of previous 2 batches: 0.970057487487793
+2024-10-21 13:44:33-finetune.py:240-INFO >> Batch 2 of epoch 3/10, average training loss of previous 2 batches: 0.736648291349411
+2024-10-21 13:44:34-finetune.py:240-INFO >> Batch 4 of epoch 3/10, average training loss of previous 2 batches: 0.8554295003414154
+2024-10-21 13:44:34-finetune.py:240-INFO >> Batch 6 of epoch 3/10, average training loss of previous 2 batches: 0.7599872350692749
+2024-10-21 13:44:35-finetune.py:240-INFO >> Batch 8 of epoch 3/10, average training loss of previous 2 batches: 1.066439151763916
+2024-10-21 13:44:36-finetune.py:240-INFO >> Batch 10 of epoch 3/10, average training loss of previous 2 batches: 0.7500385642051697
+2024-10-21 13:44:37-finetune.py:240-INFO >> Batch 12 of epoch 3/10, average training loss of previous 2 batches: 0.8974490761756897
+2024-10-21 13:44:38-finetune.py:240-INFO >> Batch 14 of epoch 3/10, average training loss of previous 2 batches: 0.9173659086227417
+2024-10-21 13:44:39-finetune.py:240-INFO >> Batch 16 of epoch 3/10, average training loss of previous 2 batches: 1.0351774096488953
+2024-10-21 13:44:40-finetune.py:240-INFO >> Batch 18 of epoch 3/10, average training loss of previous 2 batches: 0.8837870657444
+2024-10-21 13:44:41-finetune.py:240-INFO >> Batch 20 of epoch 3/10, average training loss of previous 2 batches: 0.8277487754821777
+2024-10-21 13:44:41-finetune.py:240-INFO >> Batch 22 of epoch 3/10, average training loss of previous 2 batches: 0.9443394541740417
+2024-10-21 13:44:42-finetune.py:240-INFO >> Batch 24 of epoch 3/10, average training loss of previous 2 batches: 0.9046522080898285
+2024-10-21 13:44:43-finetune.py:240-INFO >> Batch 26 of epoch 3/10, average training loss of previous 2 batches: 0.9271535575389862
+2024-10-21 13:44:44-finetune.py:240-INFO >> Batch 28 of epoch 3/10, average training loss of previous 2 batches: 0.7925187945365906
+2024-10-21 13:44:45-finetune.py:240-INFO >> Batch 30 of epoch 3/10, average training loss of previous 2 batches: 0.7540174126625061
+2024-10-21 13:44:46-finetune.py:240-INFO >> Batch 32 of epoch 3/10, average training loss of previous 2 batches: 0.8095866143703461
+2024-10-21 13:44:47-finetune.py:240-INFO >> Batch 34 of epoch 3/10, average training loss of previous 2 batches: 0.8659921884536743
+2024-10-21 13:44:47-finetune.py:240-INFO >> Batch 36 of epoch 3/10, average training loss of previous 2 batches: 0.943943202495575
+2024-10-21 13:44:48-finetune.py:240-INFO >> Batch 38 of epoch 3/10, average training loss of previous 2 batches: 0.7801710069179535
+2024-10-21 13:44:49-finetune.py:240-INFO >> Batch 40 of epoch 3/10, average training loss of previous 2 batches: 0.7536539733409882
+2024-10-21 13:44:50-finetune.py:240-INFO >> Batch 42 of epoch 3/10, average training loss of previous 2 batches: 0.7926651835441589
+2024-10-21 13:44:51-finetune.py:240-INFO >> Batch 44 of epoch 3/10, average training loss of previous 2 batches: 0.8482162654399872
+2024-10-21 13:44:52-finetune.py:240-INFO >> Batch 46 of epoch 3/10, average training loss of previous 2 batches: 0.9039164483547211
+2024-10-21 13:44:53-finetune.py:240-INFO >> Batch 48 of epoch 3/10, average training loss of previous 2 batches: 0.923221081495285
+2024-10-21 13:44:54-finetune.py:240-INFO >> Batch 50 of epoch 3/10, average training loss of previous 2 batches: 0.6753100752830505
+2024-10-21 13:44:54-finetune.py:240-INFO >> Batch 52 of epoch 3/10, average training loss of previous 2 batches: 0.8334802985191345
+2024-10-21 13:44:55-finetune.py:240-INFO >> Batch 54 of epoch 3/10, average training loss of previous 2 batches: 0.8219180703163147
+2024-10-21 13:44:56-finetune.py:240-INFO >> Batch 56 of epoch 3/10, average training loss of previous 2 batches: 0.6824038624763489
+2024-10-21 13:44:57-finetune.py:240-INFO >> Batch 58 of epoch 3/10, average training loss of previous 2 batches: 0.7407535016536713
+2024-10-21 13:44:58-finetune.py:240-INFO >> Batch 60 of epoch 3/10, average training loss of previous 2 batches: 0.8619542419910431
+2024-10-21 13:44:59-finetune.py:240-INFO >> Batch 62 of epoch 3/10, average training loss of previous 2 batches: 0.7519990801811218
+2024-10-21 13:45:00-finetune.py:240-INFO >> Batch 64 of epoch 3/10, average training loss of previous 2 batches: 0.8981192409992218
+2024-10-21 13:45:00-finetune.py:240-INFO >> Batch 66 of epoch 3/10, average training loss of previous 2 batches: 0.795328825712204
+2024-10-21 13:45:01-finetune.py:240-INFO >> Batch 68 of epoch 3/10, average training loss of previous 2 batches: 0.7598937749862671
+2024-10-21 13:45:02-finetune.py:240-INFO >> Batch 70 of epoch 3/10, average training loss of previous 2 batches: 0.8479012846946716
+2024-10-21 13:45:03-finetune.py:240-INFO >> Batch 72 of epoch 3/10, average training loss of previous 2 batches: 0.6232497692108154
+2024-10-21 13:45:04-finetune.py:240-INFO >> Batch 74 of epoch 3/10, average training loss of previous 2 batches: 0.8171462714672089
+2024-10-21 13:45:05-finetune.py:240-INFO >> Batch 76 of epoch 3/10, average training loss of previous 2 batches: 0.7621821761131287
+2024-10-21 13:45:06-finetune.py:240-INFO >> Batch 78 of epoch 3/10, average training loss of previous 2 batches: 0.7309359610080719
+2024-10-21 13:45:07-finetune.py:240-INFO >> Batch 80 of epoch 3/10, average training loss of previous 2 batches: 1.012621521949768
+2024-10-21 13:45:07-finetune.py:240-INFO >> Batch 82 of epoch 3/10, average training loss of previous 2 batches: 0.9371889233589172
+2024-10-21 13:45:08-finetune.py:240-INFO >> Batch 84 of epoch 3/10, average training loss of previous 2 batches: 0.8546919226646423
+2024-10-21 13:45:09-finetune.py:240-INFO >> Batch 86 of epoch 3/10, average training loss of previous 2 batches: 0.8201634585857391
+2024-10-21 13:45:10-finetune.py:240-INFO >> Batch 88 of epoch 3/10, average training loss of previous 2 batches: 0.8924104869365692
+2024-10-21 13:45:11-finetune.py:240-INFO >> Batch 90 of epoch 3/10, average training loss of previous 2 batches: 0.8748036623001099
+2024-10-21 13:45:12-finetune.py:240-INFO >> Batch 92 of epoch 3/10, average training loss of previous 2 batches: 0.8777934610843658
+2024-10-21 13:45:13-finetune.py:240-INFO >> Batch 94 of epoch 3/10, average training loss of previous 2 batches: 0.8174447417259216
+2024-10-21 13:45:14-finetune.py:240-INFO >> Batch 96 of epoch 3/10, average training loss of previous 2 batches: 0.8672417104244232
+2024-10-21 13:45:14-finetune.py:240-INFO >> Batch 98 of epoch 3/10, average training loss of previous 2 batches: 0.8363238275051117
+2024-10-21 13:45:15-finetune.py:240-INFO >> Batch 100 of epoch 3/10, average training loss of previous 2 batches: 0.8956538736820221
+2024-10-21 13:45:16-finetune.py:240-INFO >> Batch 2 of epoch 4/10, average training loss of previous 2 batches: 0.6835333406925201
+2024-10-21 13:45:17-finetune.py:240-INFO >> Batch 4 of epoch 4/10, average training loss of previous 2 batches: 0.7852495908737183
+2024-10-21 13:45:18-finetune.py:240-INFO >> Batch 6 of epoch 4/10, average training loss of previous 2 batches: 0.716625303030014
+2024-10-21 13:45:19-finetune.py:240-INFO >> Batch 8 of epoch 4/10, average training loss of previous 2 batches: 0.984612375497818
+2024-10-21 13:45:20-finetune.py:240-INFO >> Batch 10 of epoch 4/10, average training loss of previous 2 batches: 0.6900284886360168
+2024-10-21 13:45:21-finetune.py:240-INFO >> Batch 12 of epoch 4/10, average training loss of previous 2 batches: 0.8278656899929047
+2024-10-21 13:45:21-finetune.py:240-INFO >> Batch 14 of epoch 4/10, average training loss of previous 2 batches: 0.8600967824459076
+2024-10-21 13:45:22-finetune.py:240-INFO >> Batch 16 of epoch 4/10, average training loss of previous 2 batches: 0.9587431848049164
+2024-10-21 13:45:23-finetune.py:240-INFO >> Batch 18 of epoch 4/10, average training loss of previous 2 batches: 0.8181414604187012
+2024-10-21 13:45:24-finetune.py:240-INFO >> Batch 20 of epoch 4/10, average training loss of previous 2 batches: 0.7698231935501099
+2024-10-21 13:45:25-finetune.py:240-INFO >> Batch 22 of epoch 4/10, average training loss of previous 2 batches: 0.8695583045482635
+2024-10-21 13:45:26-finetune.py:240-INFO >> Batch 24 of epoch 4/10, average training loss of previous 2 batches: 0.8343754410743713
+2024-10-21 13:45:27-finetune.py:240-INFO >> Batch 26 of epoch 4/10, average training loss of previous 2 batches: 0.8548255264759064
+2024-10-21 13:45:28-finetune.py:240-INFO >> Batch 28 of epoch 4/10, average training loss of previous 2 batches: 0.7314669489860535
+2024-10-21 13:45:28-finetune.py:240-INFO >> Batch 30 of epoch 4/10, average training loss of previous 2 batches: 0.7021181285381317
+2024-10-21 13:45:29-finetune.py:240-INFO >> Batch 32 of epoch 4/10, average training loss of previous 2 batches: 0.7473123967647552
+2024-10-21 13:45:30-finetune.py:240-INFO >> Batch 34 of epoch 4/10, average training loss of previous 2 batches: 0.8069173097610474
+2024-10-21 13:45:31-finetune.py:240-INFO >> Batch 36 of epoch 4/10, average training loss of previous 2 batches: 0.8744145035743713
+2024-10-21 13:45:32-finetune.py:240-INFO >> Batch 38 of epoch 4/10, average training loss of previous 2 batches: 0.7218718826770782
+2024-10-21 13:45:33-finetune.py:240-INFO >> Batch 40 of epoch 4/10, average training loss of previous 2 batches: 0.6963265836238861
+2024-10-21 13:45:34-finetune.py:240-INFO >> Batch 42 of epoch 4/10, average training loss of previous 2 batches: 0.7223711013793945
+2024-10-21 13:45:34-finetune.py:240-INFO >> Batch 44 of epoch 4/10, average training loss of previous 2 batches: 0.7927519381046295
+2024-10-21 13:45:35-finetune.py:240-INFO >> Batch 46 of epoch 4/10, average training loss of previous 2 batches: 0.8368312120437622
+2024-10-21 13:45:36-finetune.py:240-INFO >> Batch 48 of epoch 4/10, average training loss of previous 2 batches: 0.8447033166885376
+2024-10-21 13:45:37-finetune.py:240-INFO >> Batch 50 of epoch 4/10, average training loss of previous 2 batches: 0.6274715662002563
+2024-10-21 13:45:38-finetune.py:240-INFO >> Batch 52 of epoch 4/10, average training loss of previous 2 batches: 0.7559156715869904
+2024-10-21 13:45:39-finetune.py:240-INFO >> Batch 54 of epoch 4/10, average training loss of previous 2 batches: 0.7475820183753967
+2024-10-21 13:45:40-finetune.py:240-INFO >> Batch 56 of epoch 4/10, average training loss of previous 2 batches: 0.626720517873764
+2024-10-21 13:45:41-finetune.py:240-INFO >> Batch 58 of epoch 4/10, average training loss of previous 2 batches: 0.6783787608146667
+2024-10-21 13:45:41-finetune.py:240-INFO >> Batch 60 of epoch 4/10, average training loss of previous 2 batches: 0.7799589335918427
+2024-10-21 13:45:42-finetune.py:240-INFO >> Batch 62 of epoch 4/10, average training loss of previous 2 batches: 0.6854929625988007
+2024-10-21 13:45:43-finetune.py:240-INFO >> Batch 64 of epoch 4/10, average training loss of previous 2 batches: 0.8336123526096344
+2024-10-21 13:45:44-finetune.py:240-INFO >> Batch 66 of epoch 4/10, average training loss of previous 2 batches: 0.7306537926197052
+2024-10-21 13:45:45-finetune.py:240-INFO >> Batch 68 of epoch 4/10, average training loss of previous 2 batches: 0.7053666114807129
+2024-10-21 13:45:46-finetune.py:240-INFO >> Batch 70 of epoch 4/10, average training loss of previous 2 batches: 0.7826265394687653
+2024-10-21 13:45:47-finetune.py:240-INFO >> Batch 72 of epoch 4/10, average training loss of previous 2 batches: 0.5642250180244446
+2024-10-21 13:45:48-finetune.py:240-INFO >> Batch 74 of epoch 4/10, average training loss of previous 2 batches: 0.7540428340435028
+2024-10-21 13:45:48-finetune.py:240-INFO >> Batch 76 of epoch 4/10, average training loss of previous 2 batches: 0.6973932385444641
+2024-10-21 13:45:49-finetune.py:240-INFO >> Batch 78 of epoch 4/10, average training loss of previous 2 batches: 0.6745010018348694
+2024-10-21 13:45:50-finetune.py:240-INFO >> Batch 80 of epoch 4/10, average training loss of previous 2 batches: 0.9325462579727173
+2024-10-21 13:45:51-finetune.py:240-INFO >> Batch 82 of epoch 4/10, average training loss of previous 2 batches: 0.8735631704330444
+2024-10-21 13:45:52-finetune.py:240-INFO >> Batch 84 of epoch 4/10, average training loss of previous 2 batches: 0.7908239364624023
+2024-10-21 13:45:53-finetune.py:240-INFO >> Batch 86 of epoch 4/10, average training loss of previous 2 batches: 0.7438755333423615
+2024-10-21 13:45:54-finetune.py:240-INFO >> Batch 88 of epoch 4/10, average training loss of previous 2 batches: 0.830750972032547
+2024-10-21 13:45:55-finetune.py:240-INFO >> Batch 90 of epoch 4/10, average training loss of previous 2 batches: 0.8026508390903473
+2024-10-21 13:45:55-finetune.py:240-INFO >> Batch 92 of epoch 4/10, average training loss of previous 2 batches: 0.7958900928497314
+2024-10-21 13:45:56-finetune.py:240-INFO >> Batch 94 of epoch 4/10, average training loss of previous 2 batches: 0.7622414231300354
+2024-10-21 13:45:57-finetune.py:240-INFO >> Batch 96 of epoch 4/10, average training loss of previous 2 batches: 0.8120144009590149
+2024-10-21 13:45:58-finetune.py:240-INFO >> Batch 98 of epoch 4/10, average training loss of previous 2 batches: 0.7778138220310211
+2024-10-21 13:45:59-finetune.py:240-INFO >> Batch 100 of epoch 4/10, average training loss of previous 2 batches: 0.8295200765132904
+2024-10-21 13:46:00-finetune.py:240-INFO >> Batch 2 of epoch 5/10, average training loss of previous 2 batches: 0.6343702375888824
+2024-10-21 13:46:01-finetune.py:240-INFO >> Batch 4 of epoch 5/10, average training loss of previous 2 batches: 0.7222740948200226
+2024-10-21 13:46:02-finetune.py:240-INFO >> Batch 6 of epoch 5/10, average training loss of previous 2 batches: 0.6714920699596405
+2024-10-21 13:46:02-finetune.py:240-INFO >> Batch 8 of epoch 5/10, average training loss of previous 2 batches: 0.9150407016277313
+2024-10-21 13:46:03-finetune.py:240-INFO >> Batch 10 of epoch 5/10, average training loss of previous 2 batches: 0.6372408270835876
+2024-10-21 13:46:04-finetune.py:240-INFO >> Batch 12 of epoch 5/10, average training loss of previous 2 batches: 0.7676155269145966
+2024-10-21 13:46:05-finetune.py:240-INFO >> Batch 14 of epoch 5/10, average training loss of previous 2 batches: 0.8106114268302917
+2024-10-21 13:46:06-finetune.py:240-INFO >> Batch 16 of epoch 5/10, average training loss of previous 2 batches: 0.8868626058101654
+2024-10-21 13:46:07-finetune.py:240-INFO >> Batch 18 of epoch 5/10, average training loss of previous 2 batches: 0.766834557056427
+2024-10-21 13:46:08-finetune.py:240-INFO >> Batch 20 of epoch 5/10, average training loss of previous 2 batches: 0.716394305229187
+2024-10-21 13:46:09-finetune.py:240-INFO >> Batch 22 of epoch 5/10, average training loss of previous 2 batches: 0.8100038468837738
+2024-10-21 13:46:09-finetune.py:240-INFO >> Batch 24 of epoch 5/10, average training loss of previous 2 batches: 0.770490825176239
+2024-10-21 13:46:10-finetune.py:240-INFO >> Batch 26 of epoch 5/10, average training loss of previous 2 batches: 0.7918453514575958
+2024-10-21 13:46:11-finetune.py:240-INFO >> Batch 28 of epoch 5/10, average training loss of previous 2 batches: 0.6767605841159821
+2024-10-21 13:46:12-finetune.py:240-INFO >> Batch 30 of epoch 5/10, average training loss of previous 2 batches: 0.6582407057285309
+2024-10-21 13:46:13-finetune.py:240-INFO >> Batch 32 of epoch 5/10, average training loss of previous 2 batches: 0.6945566833019257
+2024-10-21 13:46:14-finetune.py:240-INFO >> Batch 34 of epoch 5/10, average training loss of previous 2 batches: 0.7566858530044556
+2024-10-21 13:46:15-finetune.py:240-INFO >> Batch 36 of epoch 5/10, average training loss of previous 2 batches: 0.8148128092288971
+2024-10-21 13:46:16-finetune.py:240-INFO >> Batch 38 of epoch 5/10, average training loss of previous 2 batches: 0.6682511568069458
+2024-10-21 13:46:16-finetune.py:240-INFO >> Batch 40 of epoch 5/10, average training loss of previous 2 batches: 0.6407213807106018
+2024-10-21 13:46:17-finetune.py:240-INFO >> Batch 42 of epoch 5/10, average training loss of previous 2 batches: 0.6640065908432007
+2024-10-21 13:46:18-finetune.py:240-INFO >> Batch 44 of epoch 5/10, average training loss of previous 2 batches: 0.7437308430671692
+2024-10-21 13:46:19-finetune.py:240-INFO >> Batch 46 of epoch 5/10, average training loss of previous 2 batches: 0.7747414410114288
+2024-10-21 13:46:20-finetune.py:240-INFO >> Batch 48 of epoch 5/10, average training loss of previous 2 batches: 0.7784460186958313
+2024-10-21 13:46:21-finetune.py:240-INFO >> Batch 50 of epoch 5/10, average training loss of previous 2 batches: 0.5884788930416107
+2024-10-21 13:46:22-finetune.py:240-INFO >> Batch 52 of epoch 5/10, average training loss of previous 2 batches: 0.6876950860023499
+2024-10-21 13:46:23-finetune.py:240-INFO >> Batch 54 of epoch 5/10, average training loss of previous 2 batches: 0.6799062788486481
+2024-10-21 13:46:23-finetune.py:240-INFO >> Batch 56 of epoch 5/10, average training loss of previous 2 batches: 0.5763688087463379
+2024-10-21 13:46:24-finetune.py:240-INFO >> Batch 58 of epoch 5/10, average training loss of previous 2 batches: 0.6245492696762085
+2024-10-21 13:46:25-finetune.py:240-INFO >> Batch 60 of epoch 5/10, average training loss of previous 2 batches: 0.7115638852119446
+2024-10-21 13:46:26-finetune.py:240-INFO >> Batch 62 of epoch 5/10, average training loss of previous 2 batches: 0.6263779103755951
+2024-10-21 13:46:27-finetune.py:240-INFO >> Batch 64 of epoch 5/10, average training loss of previous 2 batches: 0.7711111307144165
+2024-10-21 13:46:28-finetune.py:240-INFO >> Batch 66 of epoch 5/10, average training loss of previous 2 batches: 0.6676397919654846
+2024-10-21 13:46:29-finetune.py:240-INFO >> Batch 68 of epoch 5/10, average training loss of previous 2 batches: 0.6510529518127441
+2024-10-21 13:46:30-finetune.py:240-INFO >> Batch 70 of epoch 5/10, average training loss of previous 2 batches: 0.719774454832077
+2024-10-21 13:46:30-finetune.py:240-INFO >> Batch 72 of epoch 5/10, average training loss of previous 2 batches: 0.5127301961183548
+2024-10-21 13:46:31-finetune.py:240-INFO >> Batch 74 of epoch 5/10, average training loss of previous 2 batches: 0.6937015056610107
+2024-10-21 13:46:32-finetune.py:240-INFO >> Batch 76 of epoch 5/10, average training loss of previous 2 batches: 0.6372248828411102
+2024-10-21 13:46:33-finetune.py:240-INFO >> Batch 78 of epoch 5/10, average training loss of previous 2 batches: 0.6238736808300018
+2024-10-21 13:46:34-finetune.py:240-INFO >> Batch 80 of epoch 5/10, average training loss of previous 2 batches: 0.8558001518249512
+2024-10-21 13:46:35-finetune.py:240-INFO >> Batch 82 of epoch 5/10, average training loss of previous 2 batches: 0.8063792586326599
+2024-10-21 13:46:36-finetune.py:240-INFO >> Batch 84 of epoch 5/10, average training loss of previous 2 batches: 0.734971433877945
+2024-10-21 13:46:37-finetune.py:240-INFO >> Batch 86 of epoch 5/10, average training loss of previous 2 batches: 0.6761626601219177
+2024-10-21 13:46:37-finetune.py:240-INFO >> Batch 88 of epoch 5/10, average training loss of previous 2 batches: 0.7716780602931976
+2024-10-21 13:46:38-finetune.py:240-INFO >> Batch 90 of epoch 5/10, average training loss of previous 2 batches: 0.7282805144786835
+2024-10-21 13:46:39-finetune.py:240-INFO >> Batch 92 of epoch 5/10, average training loss of previous 2 batches: 0.7160176336765289
+2024-10-21 13:46:40-finetune.py:240-INFO >> Batch 94 of epoch 5/10, average training loss of previous 2 batches: 0.7145473062992096
+2024-10-21 13:46:41-finetune.py:240-INFO >> Batch 96 of epoch 5/10, average training loss of previous 2 batches: 0.7574485838413239
+2024-10-21 13:46:42-finetune.py:240-INFO >> Batch 98 of epoch 5/10, average training loss of previous 2 batches: 0.7201968431472778
+2024-10-21 13:46:43-finetune.py:240-INFO >> Batch 100 of epoch 5/10, average training loss of previous 2 batches: 0.7653315961360931
+2024-10-21 13:46:44-finetune.py:240-INFO >> Batch 2 of epoch 6/10, average training loss of previous 2 batches: 0.5835276246070862
+2024-10-21 13:46:44-finetune.py:240-INFO >> Batch 4 of epoch 6/10, average training loss of previous 2 batches: 0.6678467094898224
+2024-10-21 13:46:45-finetune.py:240-INFO >> Batch 6 of epoch 6/10, average training loss of previous 2 batches: 0.6220656931400299
+2024-10-21 13:46:46-finetune.py:240-INFO >> Batch 8 of epoch 6/10, average training loss of previous 2 batches: 0.8442187011241913
+2024-10-21 13:46:47-finetune.py:240-INFO >> Batch 10 of epoch 6/10, average training loss of previous 2 batches: 0.5918219089508057
+2024-10-21 13:46:48-finetune.py:240-INFO >> Batch 12 of epoch 6/10, average training loss of previous 2 batches: 0.7055420279502869
+2024-10-21 13:46:49-finetune.py:240-INFO >> Batch 14 of epoch 6/10, average training loss of previous 2 batches: 0.7520930171012878
+2024-10-21 13:46:50-finetune.py:240-INFO >> Batch 16 of epoch 6/10, average training loss of previous 2 batches: 0.8211362659931183
+2024-10-21 13:46:51-finetune.py:240-INFO >> Batch 18 of epoch 6/10, average training loss of previous 2 batches: 0.7103014290332794
+2024-10-21 13:46:51-finetune.py:240-INFO >> Batch 20 of epoch 6/10, average training loss of previous 2 batches: 0.6712160110473633
+2024-10-21 13:46:52-finetune.py:240-INFO >> Batch 22 of epoch 6/10, average training loss of previous 2 batches: 0.7529181838035583
+2024-10-21 13:46:53-finetune.py:240-INFO >> Batch 24 of epoch 6/10, average training loss of previous 2 batches: 0.7083960473537445
+2024-10-21 13:46:54-finetune.py:240-INFO >> Batch 26 of epoch 6/10, average training loss of previous 2 batches: 0.7322015464305878
+2024-10-21 13:46:55-finetune.py:240-INFO >> Batch 28 of epoch 6/10, average training loss of previous 2 batches: 0.6274860203266144
+2024-10-21 13:46:56-finetune.py:240-INFO >> Batch 30 of epoch 6/10, average training loss of previous 2 batches: 0.6060699224472046
+2024-10-21 13:46:57-finetune.py:240-INFO >> Batch 32 of epoch 6/10, average training loss of previous 2 batches: 0.6468688249588013
+2024-10-21 13:46:58-finetune.py:240-INFO >> Batch 34 of epoch 6/10, average training loss of previous 2 batches: 0.7065079808235168
+2024-10-21 13:46:58-finetune.py:240-INFO >> Batch 36 of epoch 6/10, average training loss of previous 2 batches: 0.7514010667800903
+2024-10-21 13:46:59-finetune.py:240-INFO >> Batch 38 of epoch 6/10, average training loss of previous 2 batches: 0.6199988424777985
+2024-10-21 13:47:00-finetune.py:240-INFO >> Batch 40 of epoch 6/10, average training loss of previous 2 batches: 0.5867652893066406
+2024-10-21 13:47:01-finetune.py:240-INFO >> Batch 42 of epoch 6/10, average training loss of previous 2 batches: 0.6100884974002838
+2024-10-21 13:47:02-finetune.py:240-INFO >> Batch 44 of epoch 6/10, average training loss of previous 2 batches: 0.688291072845459
+2024-10-21 13:47:03-finetune.py:240-INFO >> Batch 46 of epoch 6/10, average training loss of previous 2 batches: 0.7133190035820007
+2024-10-21 13:47:04-finetune.py:240-INFO >> Batch 48 of epoch 6/10, average training loss of previous 2 batches: 0.7070374190807343
+2024-10-21 13:47:05-finetune.py:240-INFO >> Batch 50 of epoch 6/10, average training loss of previous 2 batches: 0.5412555932998657
+2024-10-21 13:47:05-finetune.py:240-INFO >> Batch 52 of epoch 6/10, average training loss of previous 2 batches: 0.6222940981388092
+2024-10-21 13:47:06-finetune.py:240-INFO >> Batch 54 of epoch 6/10, average training loss of previous 2 batches: 0.6158738136291504
+2024-10-21 13:47:07-finetune.py:240-INFO >> Batch 56 of epoch 6/10, average training loss of previous 2 batches: 0.5234492123126984
+2024-10-21 13:47:08-finetune.py:240-INFO >> Batch 58 of epoch 6/10, average training loss of previous 2 batches: 0.5703796744346619
+2024-10-21 13:47:09-finetune.py:240-INFO >> Batch 60 of epoch 6/10, average training loss of previous 2 batches: 0.6404681205749512
+2024-10-21 13:47:10-finetune.py:240-INFO >> Batch 62 of epoch 6/10, average training loss of previous 2 batches: 0.5724955499172211
+2024-10-21 13:47:11-finetune.py:240-INFO >> Batch 64 of epoch 6/10, average training loss of previous 2 batches: 0.7094117403030396
+2024-10-21 13:47:12-finetune.py:240-INFO >> Batch 66 of epoch 6/10, average training loss of previous 2 batches: 0.6026865541934967
+2024-10-21 13:47:12-finetune.py:240-INFO >> Batch 68 of epoch 6/10, average training loss of previous 2 batches: 0.5966612547636032
+2024-10-21 13:47:13-finetune.py:240-INFO >> Batch 70 of epoch 6/10, average training loss of previous 2 batches: 0.654717355966568
+2024-10-21 13:47:14-finetune.py:240-INFO >> Batch 72 of epoch 6/10, average training loss of previous 2 batches: 0.46248213946819305
+2024-10-21 13:47:15-finetune.py:240-INFO >> Batch 74 of epoch 6/10, average training loss of previous 2 batches: 0.6266474723815918
+2024-10-21 13:47:16-finetune.py:240-INFO >> Batch 76 of epoch 6/10, average training loss of previous 2 batches: 0.5776263475418091
+2024-10-21 13:47:17-finetune.py:240-INFO >> Batch 78 of epoch 6/10, average training loss of previous 2 batches: 0.5670168101787567
+2024-10-21 13:47:18-finetune.py:240-INFO >> Batch 80 of epoch 6/10, average training loss of previous 2 batches: 0.7725215256214142
+2024-10-21 13:47:19-finetune.py:240-INFO >> Batch 82 of epoch 6/10, average training loss of previous 2 batches: 0.737803190946579
+2024-10-21 13:47:19-finetune.py:240-INFO >> Batch 84 of epoch 6/10, average training loss of previous 2 batches: 0.6771756410598755
+2024-10-21 13:47:20-finetune.py:240-INFO >> Batch 86 of epoch 6/10, average training loss of previous 2 batches: 0.6052446365356445
+2024-10-21 13:47:21-finetune.py:240-INFO >> Batch 88 of epoch 6/10, average training loss of previous 2 batches: 0.7067574262619019
+2024-10-21 13:47:22-finetune.py:240-INFO >> Batch 90 of epoch 6/10, average training loss of previous 2 batches: 0.6441330015659332
+2024-10-21 13:47:23-finetune.py:240-INFO >> Batch 92 of epoch 6/10, average training loss of previous 2 batches: 0.6359881162643433
+2024-10-21 13:47:24-finetune.py:240-INFO >> Batch 94 of epoch 6/10, average training loss of previous 2 batches: 0.6587474346160889
+2024-10-21 13:47:25-finetune.py:240-INFO >> Batch 96 of epoch 6/10, average training loss of previous 2 batches: 0.6953521072864532
+2024-10-21 13:47:26-finetune.py:240-INFO >> Batch 98 of epoch 6/10, average training loss of previous 2 batches: 0.6608266532421112
+2024-10-21 13:47:26-finetune.py:240-INFO >> Batch 100 of epoch 6/10, average training loss of previous 2 batches: 0.7046919167041779
+2024-10-21 13:47:27-finetune.py:240-INFO >> Batch 2 of epoch 7/10, average training loss of previous 2 batches: 0.5358765721321106
+2024-10-21 13:47:28-finetune.py:240-INFO >> Batch 4 of epoch 7/10, average training loss of previous 2 batches: 0.6107850074768066
+2024-10-21 13:47:29-finetune.py:240-INFO >> Batch 6 of epoch 7/10, average training loss of previous 2 batches: 0.5688601732254028
+2024-10-21 13:47:30-finetune.py:240-INFO >> Batch 8 of epoch 7/10, average training loss of previous 2 batches: 0.7673291265964508
+2024-10-21 13:47:31-finetune.py:240-INFO >> Batch 10 of epoch 7/10, average training loss of previous 2 batches: 0.5407192707061768
+2024-10-21 13:47:32-finetune.py:240-INFO >> Batch 12 of epoch 7/10, average training loss of previous 2 batches: 0.6451371908187866
+2024-10-21 13:47:33-finetune.py:240-INFO >> Batch 14 of epoch 7/10, average training loss of previous 2 batches: 0.6916760802268982
+2024-10-21 13:47:33-finetune.py:240-INFO >> Batch 16 of epoch 7/10, average training loss of previous 2 batches: 0.7509200572967529
+2024-10-21 13:47:34-finetune.py:240-INFO >> Batch 18 of epoch 7/10, average training loss of previous 2 batches: 0.6493522524833679
+2024-10-21 13:47:35-finetune.py:240-INFO >> Batch 20 of epoch 7/10, average training loss of previous 2 batches: 0.6075893342494965
+2024-10-21 13:47:36-finetune.py:240-INFO >> Batch 22 of epoch 7/10, average training loss of previous 2 batches: 0.6914695501327515
+2024-10-21 13:47:37-finetune.py:240-INFO >> Batch 24 of epoch 7/10, average training loss of previous 2 batches: 0.6442109644412994
+2024-10-21 13:47:38-finetune.py:240-INFO >> Batch 26 of epoch 7/10, average training loss of previous 2 batches: 0.6734310984611511
+2024-10-21 13:47:39-finetune.py:240-INFO >> Batch 28 of epoch 7/10, average training loss of previous 2 batches: 0.5740040689706802
+2024-10-21 13:47:40-finetune.py:240-INFO >> Batch 30 of epoch 7/10, average training loss of previous 2 batches: 0.5496492981910706
+2024-10-21 13:47:40-finetune.py:240-INFO >> Batch 32 of epoch 7/10, average training loss of previous 2 batches: 0.5928905308246613
+2024-10-21 13:47:41-finetune.py:240-INFO >> Batch 34 of epoch 7/10, average training loss of previous 2 batches: 0.650152176618576
+2024-10-21 13:47:42-finetune.py:240-INFO >> Batch 36 of epoch 7/10, average training loss of previous 2 batches: 0.6886909306049347
+2024-10-21 13:47:43-finetune.py:240-INFO >> Batch 38 of epoch 7/10, average training loss of previous 2 batches: 0.5623027682304382
+2024-10-21 13:47:44-finetune.py:240-INFO >> Batch 40 of epoch 7/10, average training loss of previous 2 batches: 0.5302377045154572
+2024-10-21 13:47:45-finetune.py:240-INFO >> Batch 42 of epoch 7/10, average training loss of previous 2 batches: 0.5541674494743347
+2024-10-21 13:47:46-finetune.py:240-INFO >> Batch 44 of epoch 7/10, average training loss of previous 2 batches: 0.6216500401496887
+2024-10-21 13:47:47-finetune.py:240-INFO >> Batch 46 of epoch 7/10, average training loss of previous 2 batches: 0.6431004106998444
+2024-10-21 13:47:47-finetune.py:240-INFO >> Batch 48 of epoch 7/10, average training loss of previous 2 batches: 0.6332841515541077
+2024-10-21 13:47:48-finetune.py:240-INFO >> Batch 50 of epoch 7/10, average training loss of previous 2 batches: 0.4922148138284683
+2024-10-21 13:47:49-finetune.py:240-INFO >> Batch 52 of epoch 7/10, average training loss of previous 2 batches: 0.5581210851669312
+2024-10-21 13:47:50-finetune.py:240-INFO >> Batch 54 of epoch 7/10, average training loss of previous 2 batches: 0.554036945104599
+2024-10-21 13:47:51-finetune.py:240-INFO >> Batch 56 of epoch 7/10, average training loss of previous 2 batches: 0.46584491431713104
+2024-10-21 13:47:52-finetune.py:240-INFO >> Batch 58 of epoch 7/10, average training loss of previous 2 batches: 0.5075838267803192
+2024-10-21 13:47:53-finetune.py:240-INFO >> Batch 60 of epoch 7/10, average training loss of previous 2 batches: 0.5658158361911774
+2024-10-21 13:47:54-finetune.py:240-INFO >> Batch 62 of epoch 7/10, average training loss of previous 2 batches: 0.5088649243116379
+2024-10-21 13:47:54-finetune.py:240-INFO >> Batch 64 of epoch 7/10, average training loss of previous 2 batches: 0.6368188560009003
+2024-10-21 13:47:55-finetune.py:240-INFO >> Batch 66 of epoch 7/10, average training loss of previous 2 batches: 0.5332675874233246
+2024-10-21 13:47:56-finetune.py:240-INFO >> Batch 68 of epoch 7/10, average training loss of previous 2 batches: 0.5267028659582138
+2024-10-21 13:47:57-finetune.py:240-INFO >> Batch 70 of epoch 7/10, average training loss of previous 2 batches: 0.5833128392696381
+2024-10-21 13:47:58-finetune.py:240-INFO >> Batch 72 of epoch 7/10, average training loss of previous 2 batches: 0.40017683804035187
+2024-10-21 13:47:59-finetune.py:240-INFO >> Batch 74 of epoch 7/10, average training loss of previous 2 batches: 0.5564847886562347
+2024-10-21 13:48:00-finetune.py:240-INFO >> Batch 76 of epoch 7/10, average training loss of previous 2 batches: 0.5111153274774551
+2024-10-21 13:48:01-finetune.py:240-INFO >> Batch 78 of epoch 7/10, average training loss of previous 2 batches: 0.510166272521019
+2024-10-21 13:48:01-finetune.py:240-INFO >> Batch 80 of epoch 7/10, average training loss of previous 2 batches: 0.679284930229187
+2024-10-21 13:48:02-finetune.py:240-INFO >> Batch 82 of epoch 7/10, average training loss of previous 2 batches: 0.6608410477638245
+2024-10-21 13:48:03-finetune.py:240-INFO >> Batch 84 of epoch 7/10, average training loss of previous 2 batches: 0.6204186677932739
+2024-10-21 13:48:04-finetune.py:240-INFO >> Batch 86 of epoch 7/10, average training loss of previous 2 batches: 0.5293649286031723
+2024-10-21 13:48:05-finetune.py:240-INFO >> Batch 88 of epoch 7/10, average training loss of previous 2 batches: 0.627184271812439
+2024-10-21 13:48:06-finetune.py:240-INFO >> Batch 90 of epoch 7/10, average training loss of previous 2 batches: 0.5681014955043793
+2024-10-21 13:48:07-finetune.py:240-INFO >> Batch 92 of epoch 7/10, average training loss of previous 2 batches: 0.5675705075263977
+2024-10-21 13:48:08-finetune.py:240-INFO >> Batch 94 of epoch 7/10, average training loss of previous 2 batches: 0.5900659561157227
+2024-10-21 13:48:08-finetune.py:240-INFO >> Batch 96 of epoch 7/10, average training loss of previous 2 batches: 0.6214489638805389
+2024-10-21 13:48:09-finetune.py:240-INFO >> Batch 98 of epoch 7/10, average training loss of previous 2 batches: 0.5980262756347656
+2024-10-21 13:48:10-finetune.py:240-INFO >> Batch 100 of epoch 7/10, average training loss of previous 2 batches: 0.6394449472427368
+2024-10-21 13:48:11-finetune.py:240-INFO >> Batch 2 of epoch 8/10, average training loss of previous 2 batches: 0.4861665964126587
+2024-10-21 13:48:12-finetune.py:240-INFO >> Batch 4 of epoch 8/10, average training loss of previous 2 batches: 0.5529729425907135
+2024-10-21 13:48:13-finetune.py:240-INFO >> Batch 6 of epoch 8/10, average training loss of previous 2 batches: 0.5235056579113007
+2024-10-21 13:48:14-finetune.py:240-INFO >> Batch 8 of epoch 8/10, average training loss of previous 2 batches: 0.6846783459186554
+2024-10-21 13:48:15-finetune.py:240-INFO >> Batch 10 of epoch 8/10, average training loss of previous 2 batches: 0.4859624058008194
+2024-10-21 13:48:15-finetune.py:240-INFO >> Batch 12 of epoch 8/10, average training loss of previous 2 batches: 0.5769771039485931
+2024-10-21 13:48:16-finetune.py:240-INFO >> Batch 14 of epoch 8/10, average training loss of previous 2 batches: 0.6253271400928497
+2024-10-21 13:48:17-finetune.py:240-INFO >> Batch 16 of epoch 8/10, average training loss of previous 2 batches: 0.6685461699962616
+2024-10-21 13:48:18-finetune.py:240-INFO >> Batch 18 of epoch 8/10, average training loss of previous 2 batches: 0.5832309424877167
+2024-10-21 13:48:19-finetune.py:240-INFO >> Batch 20 of epoch 8/10, average training loss of previous 2 batches: 0.5380215048789978
+2024-10-21 13:48:20-finetune.py:240-INFO >> Batch 22 of epoch 8/10, average training loss of previous 2 batches: 0.6208047866821289
+2024-10-21 13:48:21-finetune.py:240-INFO >> Batch 24 of epoch 8/10, average training loss of previous 2 batches: 0.5718256086111069
+2024-10-21 13:48:22-finetune.py:240-INFO >> Batch 26 of epoch 8/10, average training loss of previous 2 batches: 0.5938026905059814
|
| 364 |
+
2024-10-21 13:48:22-finetune.py:240-INFO >> Batch 28 of epoch 8/10, average training loss of previous 2 batches: 0.5184406787157059
|
| 365 |
+
2024-10-21 13:48:23-finetune.py:240-INFO >> Batch 30 of epoch 8/10, average training loss of previous 2 batches: 0.47936880588531494
|
| 366 |
+
2024-10-21 13:48:24-finetune.py:240-INFO >> Batch 32 of epoch 8/10, average training loss of previous 2 batches: 0.5321025252342224
|
| 367 |
+
2024-10-21 13:48:25-finetune.py:240-INFO >> Batch 34 of epoch 8/10, average training loss of previous 2 batches: 0.590179055929184
|
| 368 |
+
2024-10-21 13:48:26-finetune.py:240-INFO >> Batch 36 of epoch 8/10, average training loss of previous 2 batches: 0.6113237738609314
|
| 369 |
+
2024-10-21 13:48:27-finetune.py:240-INFO >> Batch 38 of epoch 8/10, average training loss of previous 2 batches: 0.4939865618944168
|
| 370 |
+
2024-10-21 13:48:28-finetune.py:240-INFO >> Batch 40 of epoch 8/10, average training loss of previous 2 batches: 0.4611608684062958
|
| 371 |
+
2024-10-21 13:48:29-finetune.py:240-INFO >> Batch 42 of epoch 8/10, average training loss of previous 2 batches: 0.48525552451610565
|
| 372 |
+
2024-10-21 13:48:29-finetune.py:240-INFO >> Batch 44 of epoch 8/10, average training loss of previous 2 batches: 0.5413184463977814
|
| 373 |
+
2024-10-21 13:48:30-finetune.py:240-INFO >> Batch 46 of epoch 8/10, average training loss of previous 2 batches: 0.5646709501743317
|
| 374 |
+
2024-10-21 13:48:31-finetune.py:240-INFO >> Batch 48 of epoch 8/10, average training loss of previous 2 batches: 0.5467158555984497
|
| 375 |
+
2024-10-21 13:48:32-finetune.py:240-INFO >> Batch 50 of epoch 8/10, average training loss of previous 2 batches: 0.43738752603530884
|
| 376 |
+
2024-10-21 13:48:33-finetune.py:240-INFO >> Batch 52 of epoch 8/10, average training loss of previous 2 batches: 0.48426760733127594
|
| 377 |
+
2024-10-21 13:48:34-finetune.py:240-INFO >> Batch 54 of epoch 8/10, average training loss of previous 2 batches: 0.48678092658519745
|
| 378 |
+
2024-10-21 13:48:35-finetune.py:240-INFO >> Batch 56 of epoch 8/10, average training loss of previous 2 batches: 0.39716456830501556
|
| 379 |
+
2024-10-21 13:48:36-finetune.py:240-INFO >> Batch 58 of epoch 8/10, average training loss of previous 2 batches: 0.43101413547992706
|
| 380 |
+
2024-10-21 13:48:36-finetune.py:240-INFO >> Batch 60 of epoch 8/10, average training loss of previous 2 batches: 0.48130224645137787
|
| 381 |
+
2024-10-21 13:48:37-finetune.py:240-INFO >> Batch 62 of epoch 8/10, average training loss of previous 2 batches: 0.43233947455883026
|
| 382 |
+
2024-10-21 13:48:38-finetune.py:240-INFO >> Batch 64 of epoch 8/10, average training loss of previous 2 batches: 0.551066517829895
|
| 383 |
+
2024-10-21 13:48:39-finetune.py:240-INFO >> Batch 66 of epoch 8/10, average training loss of previous 2 batches: 0.45367753505706787
|
| 384 |
+
2024-10-21 13:48:40-finetune.py:240-INFO >> Batch 68 of epoch 8/10, average training loss of previous 2 batches: 0.43592333793640137
|
| 385 |
+
2024-10-21 13:48:41-finetune.py:240-INFO >> Batch 70 of epoch 8/10, average training loss of previous 2 batches: 0.5107550472021103
|
| 386 |
+
2024-10-21 13:48:42-finetune.py:240-INFO >> Batch 72 of epoch 8/10, average training loss of previous 2 batches: 0.3210889846086502
|
| 387 |
+
2024-10-21 13:48:43-finetune.py:240-INFO >> Batch 74 of epoch 8/10, average training loss of previous 2 batches: 0.4529934674501419
|
| 388 |
+
2024-10-21 13:48:43-finetune.py:240-INFO >> Batch 76 of epoch 8/10, average training loss of previous 2 batches: 0.4303017109632492
|
| 389 |
+
2024-10-21 13:48:44-finetune.py:240-INFO >> Batch 78 of epoch 8/10, average training loss of previous 2 batches: 0.4270349144935608
|
| 390 |
+
2024-10-21 13:48:45-finetune.py:240-INFO >> Batch 80 of epoch 8/10, average training loss of previous 2 batches: 0.5657246708869934
|
| 391 |
+
2024-10-21 13:48:46-finetune.py:240-INFO >> Batch 82 of epoch 8/10, average training loss of previous 2 batches: 0.5790116190910339
|
| 392 |
+
2024-10-21 13:48:47-finetune.py:240-INFO >> Batch 84 of epoch 8/10, average training loss of previous 2 batches: 0.5355011522769928
|
| 393 |
+
2024-10-21 13:48:48-finetune.py:240-INFO >> Batch 86 of epoch 8/10, average training loss of previous 2 batches: 0.4346868395805359
|
| 394 |
+
2024-10-21 13:48:49-finetune.py:240-INFO >> Batch 88 of epoch 8/10, average training loss of previous 2 batches: 0.5409484803676605
|
| 395 |
+
2024-10-21 13:48:50-finetune.py:240-INFO >> Batch 90 of epoch 8/10, average training loss of previous 2 batches: 0.47924110293388367
|
| 396 |
+
2024-10-21 13:48:50-finetune.py:240-INFO >> Batch 92 of epoch 8/10, average training loss of previous 2 batches: 0.4809097498655319
|
| 397 |
+
2024-10-21 13:48:51-finetune.py:240-INFO >> Batch 94 of epoch 8/10, average training loss of previous 2 batches: 0.5057264566421509
|
| 398 |
+
2024-10-21 13:48:52-finetune.py:240-INFO >> Batch 96 of epoch 8/10, average training loss of previous 2 batches: 0.5394330024719238
|
| 399 |
+
2024-10-21 13:48:53-finetune.py:240-INFO >> Batch 98 of epoch 8/10, average training loss of previous 2 batches: 0.5305484235286713
|
| 400 |
+
2024-10-21 13:48:54-finetune.py:240-INFO >> Batch 100 of epoch 8/10, average training loss of previous 2 batches: 0.5695830583572388
|
| 401 |
+
2024-10-21 13:48:55-finetune.py:240-INFO >> Batch 2 of epoch 9/10, average training loss of previous 2 batches: 0.44237884879112244
|
| 402 |
+
2024-10-21 13:48:56-finetune.py:240-INFO >> Batch 4 of epoch 9/10, average training loss of previous 2 batches: 0.4790229946374893
|
| 403 |
+
2024-10-21 13:48:57-finetune.py:240-INFO >> Batch 6 of epoch 9/10, average training loss of previous 2 batches: 0.44741562008857727
|
| 404 |
+
2024-10-21 13:48:57-finetune.py:240-INFO >> Batch 8 of epoch 9/10, average training loss of previous 2 batches: 0.5887115597724915
|
| 405 |
+
2024-10-21 13:48:58-finetune.py:240-INFO >> Batch 10 of epoch 9/10, average training loss of previous 2 batches: 0.40649476647377014
|
| 406 |
+
2024-10-21 13:48:59-finetune.py:240-INFO >> Batch 12 of epoch 9/10, average training loss of previous 2 batches: 0.49968409538269043
|
| 407 |
+
2024-10-21 13:49:00-finetune.py:240-INFO >> Batch 14 of epoch 9/10, average training loss of previous 2 batches: 0.5478889048099518
|
| 408 |
+
2024-10-21 13:49:01-finetune.py:240-INFO >> Batch 16 of epoch 9/10, average training loss of previous 2 batches: 0.5751430839300156
|
| 409 |
+
2024-10-21 13:49:02-finetune.py:240-INFO >> Batch 18 of epoch 9/10, average training loss of previous 2 batches: 0.5027433037757874
|
| 410 |
+
2024-10-21 13:49:03-finetune.py:240-INFO >> Batch 20 of epoch 9/10, average training loss of previous 2 batches: 0.46165189146995544
|
| 411 |
+
2024-10-21 13:49:04-finetune.py:240-INFO >> Batch 22 of epoch 9/10, average training loss of previous 2 batches: 0.5430303812026978
|
| 412 |
+
2024-10-21 13:49:04-finetune.py:240-INFO >> Batch 24 of epoch 9/10, average training loss of previous 2 batches: 0.49410927295684814
|
| 413 |
+
2024-10-21 13:49:05-finetune.py:240-INFO >> Batch 26 of epoch 9/10, average training loss of previous 2 batches: 0.5107497572898865
|
| 414 |
+
2024-10-21 13:49:06-finetune.py:240-INFO >> Batch 28 of epoch 9/10, average training loss of previous 2 batches: 0.44823019206523895
|
| 415 |
+
2024-10-21 13:49:07-finetune.py:240-INFO >> Batch 30 of epoch 9/10, average training loss of previous 2 batches: 0.39417172968387604
|
| 416 |
+
2024-10-21 13:49:08-finetune.py:240-INFO >> Batch 32 of epoch 9/10, average training loss of previous 2 batches: 0.4584824740886688
|
| 417 |
+
2024-10-21 13:49:09-finetune.py:240-INFO >> Batch 34 of epoch 9/10, average training loss of previous 2 batches: 0.5107963383197784
|
| 418 |
+
2024-10-21 13:49:10-finetune.py:240-INFO >> Batch 36 of epoch 9/10, average training loss of previous 2 batches: 0.5222683548927307
|
| 419 |
+
2024-10-21 13:49:11-finetune.py:240-INFO >> Batch 38 of epoch 9/10, average training loss of previous 2 batches: 0.4061500281095505
|
| 420 |
+
2024-10-21 13:49:11-finetune.py:240-INFO >> Batch 40 of epoch 9/10, average training loss of previous 2 batches: 0.3827553242444992
|
| 421 |
+
2024-10-21 13:49:12-finetune.py:240-INFO >> Batch 42 of epoch 9/10, average training loss of previous 2 batches: 0.40545518696308136
|
| 422 |
+
2024-10-21 13:49:13-finetune.py:240-INFO >> Batch 44 of epoch 9/10, average training loss of previous 2 batches: 0.45334774255752563
|
| 423 |
+
2024-10-21 13:49:14-finetune.py:240-INFO >> Batch 46 of epoch 9/10, average training loss of previous 2 batches: 0.4850813150405884
|
| 424 |
+
2024-10-21 13:49:15-finetune.py:240-INFO >> Batch 48 of epoch 9/10, average training loss of previous 2 batches: 0.4533022940158844
|
| 425 |
+
2024-10-21 13:49:16-finetune.py:240-INFO >> Batch 50 of epoch 9/10, average training loss of previous 2 batches: 0.3548138439655304
|
| 426 |
+
2024-10-21 13:49:17-finetune.py:240-INFO >> Batch 52 of epoch 9/10, average training loss of previous 2 batches: 0.3971509784460068
|
| 427 |
+
2024-10-21 13:49:18-finetune.py:240-INFO >> Batch 54 of epoch 9/10, average training loss of previous 2 batches: 0.41640329360961914
|
| 428 |
+
2024-10-21 13:49:18-finetune.py:240-INFO >> Batch 56 of epoch 9/10, average training loss of previous 2 batches: 0.32541070878505707
|
| 429 |
+
2024-10-21 13:49:19-finetune.py:240-INFO >> Batch 58 of epoch 9/10, average training loss of previous 2 batches: 0.3746839165687561
|
| 430 |
+
2024-10-21 13:49:20-finetune.py:240-INFO >> Batch 60 of epoch 9/10, average training loss of previous 2 batches: 0.39958494901657104
|
| 431 |
+
2024-10-21 13:49:21-finetune.py:240-INFO >> Batch 62 of epoch 9/10, average training loss of previous 2 batches: 0.3565339893102646
|
| 432 |
+
2024-10-21 13:49:22-finetune.py:240-INFO >> Batch 64 of epoch 9/10, average training loss of previous 2 batches: 0.45016323029994965
|
| 433 |
+
2024-10-21 13:49:23-finetune.py:240-INFO >> Batch 66 of epoch 9/10, average training loss of previous 2 batches: 0.3586081713438034
|
| 434 |
+
2024-10-21 13:49:24-finetune.py:240-INFO >> Batch 68 of epoch 9/10, average training loss of previous 2 batches: 0.3396856337785721
|
| 435 |
+
2024-10-21 13:49:25-finetune.py:240-INFO >> Batch 70 of epoch 9/10, average training loss of previous 2 batches: 0.4208012521266937
|
| 436 |
+
2024-10-21 13:49:25-finetune.py:240-INFO >> Batch 72 of epoch 9/10, average training loss of previous 2 batches: 0.2421998679637909
|
| 437 |
+
2024-10-21 13:49:26-finetune.py:240-INFO >> Batch 74 of epoch 9/10, average training loss of previous 2 batches: 0.36714886128902435
|
| 438 |
+
2024-10-21 13:49:27-finetune.py:240-INFO >> Batch 76 of epoch 9/10, average training loss of previous 2 batches: 0.3384854272007942
|
| 439 |
+
2024-10-21 13:49:28-finetune.py:240-INFO >> Batch 78 of epoch 9/10, average training loss of previous 2 batches: 0.34868310391902924
|
| 440 |
+
2024-10-21 13:49:29-finetune.py:240-INFO >> Batch 80 of epoch 9/10, average training loss of previous 2 batches: 0.46110133826732635
|
| 441 |
+
2024-10-21 13:49:30-finetune.py:240-INFO >> Batch 82 of epoch 9/10, average training loss of previous 2 batches: 0.4703945964574814
|
| 442 |
+
2024-10-21 13:49:31-finetune.py:240-INFO >> Batch 84 of epoch 9/10, average training loss of previous 2 batches: 0.44025567173957825
|
| 443 |
+
2024-10-21 13:49:32-finetune.py:240-INFO >> Batch 86 of epoch 9/10, average training loss of previous 2 batches: 0.34290696680545807
|
| 444 |
+
2024-10-21 13:49:32-finetune.py:240-INFO >> Batch 88 of epoch 9/10, average training loss of previous 2 batches: 0.4461369514465332
|
| 445 |
+
2024-10-21 13:49:33-finetune.py:240-INFO >> Batch 90 of epoch 9/10, average training loss of previous 2 batches: 0.38705192506313324
|
| 446 |
+
2024-10-21 13:49:34-finetune.py:240-INFO >> Batch 92 of epoch 9/10, average training loss of previous 2 batches: 0.4243104159832001
|
| 447 |
+
2024-10-21 13:49:35-finetune.py:240-INFO >> Batch 94 of epoch 9/10, average training loss of previous 2 batches: 0.41809484362602234
|
| 448 |
+
2024-10-21 13:49:36-finetune.py:240-INFO >> Batch 96 of epoch 9/10, average training loss of previous 2 batches: 0.47193707525730133
|
| 449 |
+
2024-10-21 13:49:37-finetune.py:240-INFO >> Batch 98 of epoch 9/10, average training loss of previous 2 batches: 0.47036002576351166
|
| 450 |
+
2024-10-21 13:49:38-finetune.py:240-INFO >> Batch 100 of epoch 9/10, average training loss of previous 2 batches: 0.49740438163280487
|
| 451 |
+
2024-10-21 13:49:39-finetune.py:240-INFO >> Batch 2 of epoch 10/10, average training loss of previous 2 batches: 0.37895165383815765
|
| 452 |
+
2024-10-21 13:49:39-finetune.py:240-INFO >> Batch 4 of epoch 10/10, average training loss of previous 2 batches: 0.40976959466934204
|
| 453 |
+
2024-10-21 13:49:40-finetune.py:240-INFO >> Batch 6 of epoch 10/10, average training loss of previous 2 batches: 0.4165492206811905
|
| 454 |
+
2024-10-21 13:49:41-finetune.py:240-INFO >> Batch 8 of epoch 10/10, average training loss of previous 2 batches: 0.517926961183548
|
| 455 |
+
2024-10-21 13:49:42-finetune.py:240-INFO >> Batch 10 of epoch 10/10, average training loss of previous 2 batches: 0.35744038224220276
|
| 456 |
+
2024-10-21 13:49:43-finetune.py:240-INFO >> Batch 12 of epoch 10/10, average training loss of previous 2 batches: 0.41863881051540375
|
| 457 |
+
2024-10-21 13:49:44-finetune.py:240-INFO >> Batch 14 of epoch 10/10, average training loss of previous 2 batches: 0.46624137461185455
|
| 458 |
+
2024-10-21 13:49:45-finetune.py:240-INFO >> Batch 16 of epoch 10/10, average training loss of previous 2 batches: 0.48127393424510956
|
| 459 |
+
2024-10-21 13:49:46-finetune.py:240-INFO >> Batch 18 of epoch 10/10, average training loss of previous 2 batches: 0.4298028498888016
|
| 460 |
+
2024-10-21 13:49:46-finetune.py:240-INFO >> Batch 20 of epoch 10/10, average training loss of previous 2 batches: 0.40097755193710327
|
| 461 |
+
2024-10-21 13:49:47-finetune.py:240-INFO >> Batch 22 of epoch 10/10, average training loss of previous 2 batches: 0.47262363135814667
|
| 462 |
+
2024-10-21 13:49:48-finetune.py:240-INFO >> Batch 24 of epoch 10/10, average training loss of previous 2 batches: 0.4252665340900421
|
| 463 |
+
2024-10-21 13:49:49-finetune.py:240-INFO >> Batch 26 of epoch 10/10, average training loss of previous 2 batches: 0.4307839721441269
|
| 464 |
+
2024-10-21 13:49:50-finetune.py:240-INFO >> Batch 28 of epoch 10/10, average training loss of previous 2 batches: 0.3781176954507828
|
| 465 |
+
2024-10-21 13:49:51-finetune.py:240-INFO >> Batch 30 of epoch 10/10, average training loss of previous 2 batches: 0.32740722596645355
|
| 466 |
+
2024-10-21 13:49:52-finetune.py:240-INFO >> Batch 32 of epoch 10/10, average training loss of previous 2 batches: 0.3956973999738693
|
| 467 |
+
2024-10-21 13:49:53-finetune.py:240-INFO >> Batch 34 of epoch 10/10, average training loss of previous 2 batches: 0.4431634843349457
|
| 468 |
+
2024-10-21 13:49:53-finetune.py:240-INFO >> Batch 36 of epoch 10/10, average training loss of previous 2 batches: 0.4427264481782913
|
| 469 |
+
2024-10-21 13:49:54-finetune.py:240-INFO >> Batch 38 of epoch 10/10, average training loss of previous 2 batches: 0.3347105532884598
|
| 470 |
+
2024-10-21 13:49:55-finetune.py:240-INFO >> Batch 40 of epoch 10/10, average training loss of previous 2 batches: 0.311638668179512
|
| 471 |
+
2024-10-21 13:49:56-finetune.py:240-INFO >> Batch 42 of epoch 10/10, average training loss of previous 2 batches: 0.3355199694633484
|
| 472 |
+
2024-10-21 13:49:57-finetune.py:240-INFO >> Batch 44 of epoch 10/10, average training loss of previous 2 batches: 0.35510262846946716
|
| 473 |
+
2024-10-21 13:49:58-finetune.py:240-INFO >> Batch 46 of epoch 10/10, average training loss of previous 2 batches: 0.39711587131023407
|
| 474 |
+
2024-10-21 13:49:59-finetune.py:240-INFO >> Batch 48 of epoch 10/10, average training loss of previous 2 batches: 0.3645806759595871
|
| 475 |
+
2024-10-21 13:50:00-finetune.py:240-INFO >> Batch 50 of epoch 10/10, average training loss of previous 2 batches: 0.28065013885498047
|
| 476 |
+
2024-10-21 13:50:00-finetune.py:240-INFO >> Batch 52 of epoch 10/10, average training loss of previous 2 batches: 0.33256326615810394
|
| 477 |
+
2024-10-21 13:50:01-finetune.py:240-INFO >> Batch 54 of epoch 10/10, average training loss of previous 2 batches: 0.3533191382884979
|
| 478 |
+
2024-10-21 13:50:02-finetune.py:240-INFO >> Batch 56 of epoch 10/10, average training loss of previous 2 batches: 0.2695348188281059
|
| 479 |
+
2024-10-21 13:50:03-finetune.py:240-INFO >> Batch 58 of epoch 10/10, average training loss of previous 2 batches: 0.2988472878932953
|
| 480 |
+
2024-10-21 13:50:04-finetune.py:240-INFO >> Batch 60 of epoch 10/10, average training loss of previous 2 batches: 0.3416874259710312
|
| 481 |
+
2024-10-21 13:50:05-finetune.py:240-INFO >> Batch 62 of epoch 10/10, average training loss of previous 2 batches: 0.30137424170970917
|
| 482 |
+
2024-10-21 13:50:06-finetune.py:240-INFO >> Batch 64 of epoch 10/10, average training loss of previous 2 batches: 0.35360729694366455
|
| 483 |
+
2024-10-21 13:50:07-finetune.py:240-INFO >> Batch 66 of epoch 10/10, average training loss of previous 2 batches: 0.287218913435936
|
| 484 |
+
2024-10-21 13:50:07-finetune.py:240-INFO >> Batch 68 of epoch 10/10, average training loss of previous 2 batches: 0.254774734377861
|
| 485 |
+
2024-10-21 13:50:08-finetune.py:240-INFO >> Batch 70 of epoch 10/10, average training loss of previous 2 batches: 0.3343530595302582
|
| 486 |
+
2024-10-21 13:50:09-finetune.py:240-INFO >> Batch 72 of epoch 10/10, average training loss of previous 2 batches: 0.18388479948043823
|
| 487 |
+
2024-10-21 13:50:10-finetune.py:240-INFO >> Batch 74 of epoch 10/10, average training loss of previous 2 batches: 0.2653023526072502
|
| 488 |
+
2024-10-21 13:50:11-finetune.py:240-INFO >> Batch 76 of epoch 10/10, average training loss of previous 2 batches: 0.2973957806825638
|
| 489 |
+
2024-10-21 13:50:12-finetune.py:240-INFO >> Batch 78 of epoch 10/10, average training loss of previous 2 batches: 0.30567221343517303
|
| 490 |
+
2024-10-21 13:50:13-finetune.py:240-INFO >> Batch 80 of epoch 10/10, average training loss of previous 2 batches: 0.40487077832221985
|
| 491 |
+
2024-10-21 13:50:14-finetune.py:240-INFO >> Batch 82 of epoch 10/10, average training loss of previous 2 batches: 0.40729472041130066
|
| 492 |
+
2024-10-21 13:50:14-finetune.py:240-INFO >> Batch 84 of epoch 10/10, average training loss of previous 2 batches: 0.365250363945961
|
| 493 |
+
2024-10-21 13:50:15-finetune.py:240-INFO >> Batch 86 of epoch 10/10, average training loss of previous 2 batches: 0.28013309836387634
|
| 494 |
+
2024-10-21 13:50:16-finetune.py:240-INFO >> Batch 88 of epoch 10/10, average training loss of previous 2 batches: 0.3896760493516922
|
| 495 |
+
2024-10-21 13:50:17-finetune.py:240-INFO >> Batch 90 of epoch 10/10, average training loss of previous 2 batches: 0.3475548326969147
|
| 496 |
+
2024-10-21 13:50:18-finetune.py:240-INFO >> Batch 92 of epoch 10/10, average training loss of previous 2 batches: 0.35538066923618317
|
| 497 |
+
2024-10-21 13:50:19-finetune.py:240-INFO >> Batch 94 of epoch 10/10, average training loss of previous 2 batches: 0.37024180591106415
|
| 498 |
+
2024-10-21 13:50:20-finetune.py:240-INFO >> Batch 96 of epoch 10/10, average training loss of previous 2 batches: 0.3847298175096512
|
| 499 |
+
2024-10-21 13:50:21-finetune.py:240-INFO >> Batch 98 of epoch 10/10, average training loss of previous 2 batches: 0.4170578420162201
|
| 500 |
+
2024-10-21 13:50:21-finetune.py:240-INFO >> Batch 100 of epoch 10/10, average training loss of previous 2 batches: 0.44036656618118286
|
| 501 |
+
2024-10-21 13:50:30-finetune.py:118-INFO >> chat template saved in train_output/20241021134301/chat_template.json
|
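Aside (not part of the commit): the logged average drops from roughly 0.57 early in epoch 7 to about 0.44 by the end of epoch 10. A minimal Python sketch for pulling these figures out of the log file for inspection, assuming only the line format shown above (the file name is taken from this commit):

import re
from collections import defaultdict

# Matches the format emitted by finetune.py line 240, e.g.
# "... >> Batch 28 of epoch 7/10, average training loss of previous 2 batches: 0.574..."
LOG_LINE = re.compile(
    r"Batch (\d+) of epoch (\d+)/\d+, "
    r"average training loss of previous 2 batches: ([0-9.]+)"
)

def parse_losses(path):
    """Yield (epoch, batch, loss) tuples from the training log."""
    with open(path) as f:
        for line in f:
            m = LOG_LINE.search(line)
            if m:
                batch, epoch, loss = m.groups()
                yield int(epoch), int(batch), float(loss)

# Mean logged loss per epoch.
totals = defaultdict(lambda: [0.0, 0])
for epoch, _batch, loss in parse_losses("output.20241021134301.log.txt"):
    totals[epoch][0] += loss
    totals[epoch][1] += 1
for epoch, (s, n) in sorted(totals.items()):
    print(f"epoch {epoch}: mean logged loss {s / n:.4f}")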
preprocessor_config.json
ADDED
@@ -0,0 +1,29 @@
+{
+  "do_convert_rgb": true,
+  "do_normalize": true,
+  "do_rescale": true,
+  "do_resize": true,
+  "image_mean": [
+    0.48145466,
+    0.4578275,
+    0.40821073
+  ],
+  "image_processor_type": "Qwen2VLImageProcessor",
+  "image_std": [
+    0.26862954,
+    0.26130258,
+    0.27577711
+  ],
+  "max_pixels": 401408,
+  "merge_size": 2,
+  "min_pixels": 200704,
+  "patch_size": 14,
+  "processor_class": "Qwen2VLProcessor",
+  "resample": 3,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "max_pixels": 12845056,
+    "min_pixels": 3136
+  },
+  "temporal_patch_size": 2
+}
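Aside (an illustration, not part of the commit): in this config, min_pixels = 200704 = 256 * 28 * 28 and max_pixels = 401408 = 512 * 28 * 28, where each 28x28 block corresponds to one merged vision token (patch_size 14, merge_size 2). A minimal sketch of loading this preprocessor with transformers; the local path is an assumption taken from the train_output directory in the log above:

from transformers import AutoProcessor

# Assumed local checkpoint directory; swap in the actual repo id or path.
processor = AutoProcessor.from_pretrained("train_output/20241021134301")

# Images are resized so their pixel count lands between min_pixels and
# max_pixels, i.e. between 256 and 512 merged vision tokens per image.
print(processor.image_processor.min_pixels, processor.image_processor.max_pixels)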
special_tokens_map.json
ADDED
@@ -0,0 +1,31 @@
+{
+  "additional_special_tokens": [
+    "<|im_start|>",
+    "<|im_end|>",
+    "<|object_ref_start|>",
+    "<|object_ref_end|>",
+    "<|box_start|>",
+    "<|box_end|>",
+    "<|quad_start|>",
+    "<|quad_end|>",
+    "<|vision_start|>",
+    "<|vision_end|>",
+    "<|vision_pad|>",
+    "<|image_pad|>",
+    "<|video_pad|>"
+  ],
+  "eos_token": {
+    "content": "<|im_end|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
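A small sanity check (an illustration, not from the commit) that the eos/pad declarations above resolve to the ids registered in tokenizer_config.json below, again assuming the checkpoint directory from the log:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("train_output/20241021134301")  # assumed path

assert tokenizer.eos_token == "<|im_end|>"     # id 151645 per tokenizer_config.json
assert tokenizer.pad_token == "<|endoftext|>"  # id 151643
print(tokenizer.convert_tokens_to_ids(["<|im_end|>", "<|endoftext|>", "<|image_pad|>"]))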
tokenizer.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f33787292af226c4a4842be48a0e614d9524e25dc248e48bb1af0593de5564f9
+size 11420539
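tokenizer.json is committed as a Git LFS pointer (this commit's .gitattributes change routes it through LFS), so the three lines above are the pointer itself: spec version, sha256 oid, and byte size. A sketch for verifying a locally fetched tokenizer.json against that pointer; it assumes the real file has already been downloaded, e.g. via `git lfs pull`:

import hashlib

# Values copied from the LFS pointer above.
EXPECTED_OID = "f33787292af226c4a4842be48a0e614d9524e25dc248e48bb1af0593de5564f9"
EXPECTED_SIZE = 11420539

with open("tokenizer.json", "rb") as f:
    data = f.read()
ok = len(data) == EXPECTED_SIZE and hashlib.sha256(data).hexdigest() == EXPECTED_OID
print("tokenizer.json matches LFS pointer:", ok)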
tokenizer_config.json
ADDED
@@ -0,0 +1,146 @@
+{
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "151643": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151644": {
+      "content": "<|im_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151645": {
+      "content": "<|im_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151646": {
+      "content": "<|object_ref_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151647": {
+      "content": "<|object_ref_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151648": {
+      "content": "<|box_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151649": {
+      "content": "<|box_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151650": {
+      "content": "<|quad_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151651": {
+      "content": "<|quad_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151652": {
+      "content": "<|vision_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151653": {
+      "content": "<|vision_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151654": {
+      "content": "<|vision_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151655": {
+      "content": "<|image_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151656": {
+      "content": "<|video_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "additional_special_tokens": [
+    "<|im_start|>",
+    "<|im_end|>",
+    "<|object_ref_start|>",
+    "<|object_ref_end|>",
+    "<|box_start|>",
+    "<|box_end|>",
+    "<|quad_start|>",
+    "<|quad_end|>",
+    "<|vision_start|>",
+    "<|vision_end|>",
+    "<|vision_pad|>",
+    "<|image_pad|>",
+    "<|video_pad|>"
+  ],
+  "bos_token": null,
+  "chat_template": "{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n{% endif %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "<|im_end|>",
+  "errors": "replace",
+  "max_pixels": 401408,
+  "min_pixels": 200704,
+  "model_max_length": 32768,
+  "pad_token": "<|endoftext|>",
+  "padding_side": "right",
+  "processor_class": "Qwen2VLProcessor",
+  "split_special_tokens": false,
+  "tokenizer_class": "Qwen2Tokenizer",
+  "unk_token": null
+}
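The chat_template field above is the Jinja template that tokenizer.apply_chat_template renders. A minimal usage sketch (an illustration, not from the commit), again assuming the checkpoint directory from the log:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("train_output/20241021134301")  # assumed path

messages = [
    {"role": "user", "content": [
        {"type": "image"},
        {"type": "text", "text": "Describe this picture."},
    ]},
]
prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
print(prompt)
# Per the template: a default system turn is prepended, the image entry becomes
# <|vision_start|><|image_pad|><|vision_end|>, and the string ends with
# "<|im_start|>assistant\n" because add_generation_prompt is True.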
vocab.json
ADDED
The diff for this file is too large to render. See raw diff.