Commit 6d35e97 · verified · 1 Parent(s): 7e3c59d

Training in progress, epoch 0

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
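
The added rule routes tokenizer.json through Git LFS, so a plain clone without LFS installed receives only a three-line pointer stub. A minimal sketch of fetching the resolved file with huggingface_hub; the repo id is a placeholder, since this commit view does not show it:

```python
from huggingface_hub import hf_hub_download

# "user/repo" is a hypothetical repo id -- not shown in this commit view.
# revision pins the download to this commit.
path = hf_hub_download(repo_id="user/repo",
                       filename="tokenizer.json",
                       revision="6d35e97")
print(path)  # local cache path of the resolved (non-pointer) file
```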
adapter_config.json ADDED
@@ -0,0 +1,138 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "google/gemma-3-4b-it",
+   "bias": "none",
+   "corda_config": null,
+   "eva_config": null,
+   "exclude_modules": null,
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_bias": false,
+   "lora_dropout": 0.0,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "up_proj",
+     "model.layers.6.self_attn.v_proj",
+     "model.layers.1.self_attn.v_proj",
+     "model.layers.12.self_attn.k_proj",
+     "model.layers.22.self_attn.v_proj",
+     "model.layers.23.self_attn.v_proj",
+     "model.layers.25.self_attn.v_proj",
+     "33.self_attn.v_proj",
+     "model.layers.10.self_attn.q_proj",
+     "model.layers.16.self_attn.q_proj",
+     "model.layers.7.self_attn.q_proj",
+     "model.layers.20.self_attn.v_proj",
+     "model.layers.21.self_attn.q_proj",
+     "model.layers.24.self_attn.v_proj",
+     "model.layers.15.self_attn.k_proj",
+     "model.layers.8.self_attn.k_proj",
+     "model.layers.21.self_attn.v_proj",
+     "model.layers.9.self_attn.k_proj",
+     "model.layers.6.self_attn.q_proj",
+     "model.layers.0.self_attn.q_proj",
+     "model.layers.1.self_attn.k_proj",
+     "model.layers.19.self_attn.q_proj",
+     "model.layers.15.self_attn.v_proj",
+     "27.self_attn.v_proj",
+     "model.layers.16.self_attn.v_proj",
+     "model.layers.7.self_attn.k_proj",
+     "model.layers.14.self_attn.v_proj",
+     "down_proj",
+     "model.layers.2.self_attn.q_proj",
+     "30.self_attn.v_proj",
+     "model.layers.19.self_attn.k_proj",
+     "28.self_attn.k_proj",
+     "model.layers.15.self_attn.q_proj",
+     "33.self_attn.k_proj",
+     "model.layers.3.self_attn.k_proj",
+     "model.layers.2.self_attn.v_proj",
+     "model.layers.0.self_attn.v_proj",
+     "model.layers.23.self_attn.q_proj",
+     "model.layers.18.self_attn.k_proj",
+     "model.layers.22.self_attn.k_proj",
+     "model.layers.17.self_attn.q_proj",
+     "model.layers.1.self_attn.q_proj",
+     "29.self_attn.q_proj",
+     "model.layers.3.self_attn.q_proj",
+     "model.layers.6.self_attn.k_proj",
+     "model.layers.19.self_attn.v_proj",
+     "model.layers.12.self_attn.q_proj",
+     "model.layers.9.self_attn.q_proj",
+     "model.layers.11.self_attn.v_proj",
+     "28.self_attn.v_proj",
+     "model.layers.11.self_attn.q_proj",
+     "model.layers.8.self_attn.v_proj",
+     "model.layers.13.self_attn.k_proj",
+     "27.self_attn.q_proj",
+     "o_proj",
+     "29.self_attn.k_proj",
+     "28.self_attn.q_proj",
+     "model.layers.17.self_attn.k_proj",
+     "27.self_attn.k_proj",
+     "model.layers.12.self_attn.v_proj",
+     "33.self_attn.q_proj",
+     "30.self_attn.q_proj",
+     "model.layers.5.self_attn.k_proj",
+     "32.self_attn.q_proj",
+     "model.layers.14.self_attn.k_proj",
+     "model.layers.5.self_attn.q_proj",
+     "model.layers.3.self_attn.v_proj",
+     "model.layers.14.self_attn.q_proj",
+     "model.layers.26.self_attn.q_proj",
+     "model.layers.25.self_attn.k_proj",
+     "32.self_attn.k_proj",
+     "model.layers.18.self_attn.q_proj",
+     "model.layers.23.self_attn.k_proj",
+     "model.layers.25.self_attn.q_proj",
+     "29.self_attn.v_proj",
+     "model.layers.13.self_attn.q_proj",
+     "model.layers.16.self_attn.k_proj",
+     "model.layers.9.self_attn.v_proj",
+     "model.layers.8.self_attn.q_proj",
+     "model.layers.26.self_attn.v_proj",
+     "32.self_attn.v_proj",
+     "model.layers.18.self_attn.v_proj",
+     "model.layers.22.self_attn.q_proj",
+     "31.self_attn.v_proj",
+     "model.layers.2.self_attn.k_proj",
+     "model.layers.0.self_attn.k_proj",
+     "31.self_attn.k_proj",
+     "model.layers.4.self_attn.q_proj",
+     "model.layers.10.self_attn.v_proj",
+     "30.self_attn.k_proj",
+     "model.layers.4.self_attn.v_proj",
+     "model.layers.11.self_attn.k_proj",
+     "model.layers.13.self_attn.v_proj",
+     "31.self_attn.q_proj",
+     "model.layers.5.self_attn.v_proj",
+     "model.layers.24.self_attn.q_proj",
+     "model.layers.20.self_attn.k_proj",
+     "model.layers.20.self_attn.q_proj",
+     "model.layers.7.self_attn.v_proj",
+     "model.layers.24.self_attn.k_proj",
+     "model.layers.4.self_attn.k_proj",
+     "model.layers.17.self_attn.v_proj",
+     "model.layers.10.self_attn.k_proj",
+     "model.layers.26.self_attn.k_proj",
+     "gate_proj",
+     "model.layers.21.self_attn.k_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "trainable_token_indices": null,
+   "use_dora": false,
+   "use_rslora": false
+ }
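
The config above describes a PEFT LoRA adapter: rank r=8, lora_alpha=16, dropout 0.0, targeting the attention q/k/v projections across layers plus the o_proj, up_proj, down_proj, and gate_proj modules, on top of google/gemma-3-4b-it. A minimal loading sketch; the adapter repo id is a placeholder, since this commit view does not name it:

```python
from transformers import AutoModelForCausalLM
from peft import PeftModel

# base_model_name_or_path is taken from the config above; the adapter
# repo id ("user/adapter-repo") is hypothetical.
base = AutoModelForCausalLM.from_pretrained("google/gemma-3-4b-it")
model = PeftModel.from_pretrained(base, "user/adapter-repo")
model.eval()  # the saved config sets inference_mode to true
```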
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:15b1ec212c18762501843c304a37bb5ed75e92575be109cb4f396fe82452761f
+ size 59675008
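
The three lines above are a Git LFS pointer: the 59,675,008-byte safetensors blob itself lives in LFS storage, keyed by its SHA-256. A small sketch for checking a downloaded copy against the pointer's oid; the local file name is an assumption:

```python
import hashlib

# Hypothetical local path for the downloaded blob.
path = "adapter_model.safetensors"

# Hash the file in 1 MiB chunks and compare against the pointer's oid.
h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

expected = "15b1ec212c18762501843c304a37bb5ed75e92575be109cb4f396fe82452761f"
assert h.hexdigest() == expected, "blob does not match the LFS pointer"
```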
added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "<image_soft_token>": 262144
+ }
special_tokens_map.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "boi_token": "<start_of_image>",
+   "bos_token": {
+     "content": "<bos>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eoi_token": "<end_of_image>",
+   "eos_token": {
+     "content": "<end_of_turn>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "image_token": "<image_soft_token>",
+   "pad_token": {
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
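
A short sketch of how these mappings surface on a loaded tokenizer; this assumes access to the gated google/gemma-3-4b-it checkpoint:

```python
from transformers import AutoTokenizer

# Assumes access to the gated google/gemma-3-4b-it repo.
tok = AutoTokenizer.from_pretrained("google/gemma-3-4b-it")
print(tok.bos_token)  # <bos>
print(tok.eos_token)  # <end_of_turn>
print(tok.pad_token)  # <pad>
```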
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4667f2089529e8e7657cfb6d1c19910ae71ff5f28aa7ab2ff2763330affad795
+ size 33384568
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1299c11d7cf632ef3b4e11937501358ada021bbdf7c47638d13c0ee982f2e79c
+ size 4689074
tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff
 
trainer_log.jsonl ADDED
@@ -0,0 +1,25 @@
+ {"current_steps": 67, "total_steps": 1337, "loss": 2.8802, "lr": 4.925373134328357e-07, "epoch": 0.05009345794392523, "percentage": 5.01, "elapsed_time": "0:03:06", "remaining_time": "0:59:00"}
+ {"current_steps": 134, "total_steps": 1337, "loss": 2.3089, "lr": 9.925373134328357e-07, "epoch": 0.10018691588785046, "percentage": 10.02, "elapsed_time": "0:06:13", "remaining_time": "0:55:54"}
+ {"current_steps": 201, "total_steps": 1337, "loss": 1.1023, "lr": 9.451371571072319e-07, "epoch": 0.1502803738317757, "percentage": 15.03, "elapsed_time": "0:09:20", "remaining_time": "0:52:49"}
+ {"current_steps": 250, "total_steps": 1337, "eval_loss": 0.8559619188308716, "epoch": 0.18691588785046728, "percentage": 18.7, "elapsed_time": "0:12:44", "remaining_time": "0:55:25"}
+ {"current_steps": 268, "total_steps": 1337, "loss": 0.8727, "lr": 8.894430590191188e-07, "epoch": 0.20037383177570092, "percentage": 20.04, "elapsed_time": "0:13:34", "remaining_time": "0:54:10"}
+ {"current_steps": 335, "total_steps": 1337, "loss": 0.6746, "lr": 8.337489609310058e-07, "epoch": 0.2504672897196262, "percentage": 25.06, "elapsed_time": "0:16:41", "remaining_time": "0:49:55"}
+ {"current_steps": 402, "total_steps": 1337, "loss": 0.5801, "lr": 7.780548628428927e-07, "epoch": 0.3005607476635514, "percentage": 30.07, "elapsed_time": "0:19:47", "remaining_time": "0:46:02"}
+ {"current_steps": 469, "total_steps": 1337, "loss": 0.4428, "lr": 7.223607647547797e-07, "epoch": 0.3506542056074766, "percentage": 35.08, "elapsed_time": "0:22:54", "remaining_time": "0:42:23"}
+ {"current_steps": 500, "total_steps": 1337, "eval_loss": 0.438012033700943, "epoch": 0.37383177570093457, "percentage": 37.4, "elapsed_time": "0:25:28", "remaining_time": "0:42:38"}
+ {"current_steps": 536, "total_steps": 1337, "loss": 0.4403, "lr": 6.666666666666666e-07, "epoch": 0.40074766355140184, "percentage": 40.09, "elapsed_time": "0:27:08", "remaining_time": "0:40:33"}
+ {"current_steps": 603, "total_steps": 1337, "loss": 0.3645, "lr": 6.109725685785536e-07, "epoch": 0.4508411214953271, "percentage": 45.1, "elapsed_time": "0:30:15", "remaining_time": "0:36:50"}
+ {"current_steps": 670, "total_steps": 1337, "loss": 0.2658, "lr": 5.552784704904405e-07, "epoch": 0.5009345794392523, "percentage": 50.11, "elapsed_time": "0:33:24", "remaining_time": "0:33:15"}
+ {"current_steps": 737, "total_steps": 1337, "loss": 0.2573, "lr": 4.995843724023275e-07, "epoch": 0.5510280373831775, "percentage": 55.12, "elapsed_time": "0:36:30", "remaining_time": "0:29:43"}
+ {"current_steps": 750, "total_steps": 1337, "eval_loss": 0.254865825176239, "epoch": 0.5607476635514018, "percentage": 56.1, "elapsed_time": "0:38:14", "remaining_time": "0:29:56"}
+ {"current_steps": 804, "total_steps": 1337, "loss": 0.2482, "lr": 4.438902743142144e-07, "epoch": 0.6011214953271028, "percentage": 60.13, "elapsed_time": "0:40:45", "remaining_time": "0:27:00"}
+ {"current_steps": 871, "total_steps": 1337, "loss": 0.2215, "lr": 3.881961762261014e-07, "epoch": 0.6512149532710281, "percentage": 65.15, "elapsed_time": "0:43:51", "remaining_time": "0:23:27"}
+ {"current_steps": 938, "total_steps": 1337, "loss": 0.2055, "lr": 3.3250207813798835e-07, "epoch": 0.7013084112149532, "percentage": 70.16, "elapsed_time": "0:46:57", "remaining_time": "0:19:58"}
+ {"current_steps": 1000, "total_steps": 1337, "eval_loss": 0.19729501008987427, "epoch": 0.7476635514018691, "percentage": 74.79, "elapsed_time": "0:50:59", "remaining_time": "0:17:10"}
+ {"current_steps": 1005, "total_steps": 1337, "loss": 0.1745, "lr": 2.7680798004987534e-07, "epoch": 0.7514018691588785, "percentage": 75.17, "elapsed_time": "0:51:13", "remaining_time": "0:16:55"}
+ {"current_steps": 1072, "total_steps": 1337, "loss": 0.1734, "lr": 2.2111388196176226e-07, "epoch": 0.8014953271028037, "percentage": 80.18, "elapsed_time": "0:54:20", "remaining_time": "0:13:26"}
+ {"current_steps": 1139, "total_steps": 1337, "loss": 0.1929, "lr": 1.6541978387364923e-07, "epoch": 0.851588785046729, "percentage": 85.19, "elapsed_time": "0:57:27", "remaining_time": "0:09:59"}
+ {"current_steps": 1206, "total_steps": 1337, "loss": 0.1766, "lr": 1.0972568578553615e-07, "epoch": 0.9016822429906542, "percentage": 90.2, "elapsed_time": "1:00:34", "remaining_time": "0:06:34"}
+ {"current_steps": 1250, "total_steps": 1337, "eval_loss": 0.18509575724601746, "epoch": 0.9345794392523364, "percentage": 93.49, "elapsed_time": "1:03:44", "remaining_time": "0:04:26"}
+ {"current_steps": 1273, "total_steps": 1337, "loss": 0.1678, "lr": 5.403158769742311e-08, "epoch": 0.9517757009345794, "percentage": 95.21, "elapsed_time": "1:04:48", "remaining_time": "0:03:15"}
+ {"current_steps": 1337, "total_steps": 1337, "epoch": 0.9996261682242991, "percentage": 100.0, "elapsed_time": "1:07:51", "remaining_time": "0:00:00"}
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:697833aa52010ed5782f7fd8dc967740be6af10937aab104685cd45273eda59f
+ size 5752