itsnotacreativeuser committed on
Commit fc64e92 · verified · 1 Parent(s): 48416ef

End of training

README.md ADDED
@@ -0,0 +1,69 @@
+ ---
+ library_name: peft
+ license: apache-2.0
+ base_model: HuggingFaceTB/SmolVLM-256M-Instruct
+ tags:
+ - base_model:adapter:HuggingFaceTB/SmolVLM-256M-Instruct
+ - lora
+ - transformers
+ model-index:
+ - name: SmolVLM-256M-Instruct-vqav2
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # SmolVLM-256M-Instruct-vqav2
+
+ This model is a fine-tuned version of [HuggingFaceTB/SmolVLM-256M-Instruct](https://huggingface.co/HuggingFaceTB/SmolVLM-256M-Instruct) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.8402
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training (an illustrative `TrainingArguments` sketch follows this list):
+ - learning_rate: 0.0001
+ - train_batch_size: 4
+ - eval_batch_size: 8
+ - seed: 42
+ - gradient_accumulation_steps: 4
+ - total_train_batch_size: 16
+ - optimizer: PAGED_ADAMW_8BIT with betas=(0.9, 0.999), epsilon=1e-08, and no additional optimizer arguments
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_steps: 50
+ - num_epochs: 0.35
+ - mixed_precision_training: Native AMP
+
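For readers who want to reproduce this setup, the list above maps onto a `transformers.TrainingArguments` object roughly as follows. This is an illustrative sketch, not the original training script; the `output_dir` and the 20-step evaluation/logging cadence are assumptions (the cadence is inferred from the results table below), while the remaining values come from the list.

```python
from transformers import TrainingArguments

# Illustrative sketch only: output_dir and the 20-step eval/logging cadence are
# assumptions; the remaining values come from the hyperparameter list above.
training_args = TrainingArguments(
    output_dir="SmolVLM-256M-Instruct-vqav2",  # assumed from the model name
    learning_rate=1e-4,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=8,
    gradient_accumulation_steps=4,  # 4 x 4 = 16 total train batch size
    seed=42,
    optim="paged_adamw_8bit",
    lr_scheduler_type="linear",
    warmup_steps=50,
    num_train_epochs=0.35,
    fp16=True,                      # "Native AMP"; bf16 may have been used instead
    eval_strategy="steps",
    eval_steps=20,                  # assumed from the 20-step cadence in the results table
    logging_steps=20,               # assumed
)
```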
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss |
+ |:-------------:|:------:|:----:|:---------------:|
+ | 2.857 | 0.0599 | 20 | 2.7762 |
+ | 1.8633 | 0.1199 | 40 | 1.8233 |
+ | 1.1164 | 0.1798 | 60 | 1.0640 |
+ | 0.8947 | 0.2397 | 80 | 0.8909 |
+ | 0.8471 | 0.2996 | 100 | 0.8402 |
+
+
+ ### Framework versions
+
+ - PEFT 0.18.0
+ - Transformers 4.57.3
+ - Pytorch 2.9.0+cu126
+ - Datasets 4.0.0
+ - Tokenizers 0.22.1
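The repository ships only the LoRA adapter weights (`adapter_model.safetensors`, about 11.6 MB), so inference requires loading the base model and attaching the adapter on top of it. A minimal sketch with Transformers and PEFT follows; the adapter repo id and the image URL are assumptions for illustration, not values confirmed by this card.

```python
import requests
import torch
from PIL import Image
from peft import PeftModel
from transformers import AutoModelForVision2Seq, AutoProcessor

base_id = "HuggingFaceTB/SmolVLM-256M-Instruct"
adapter_id = "itsnotacreativeuser/SmolVLM-256M-Instruct-vqav2"  # assumed repo id

processor = AutoProcessor.from_pretrained(base_id)
model = AutoModelForVision2Seq.from_pretrained(base_id, torch_dtype=torch.bfloat16)
model = PeftModel.from_pretrained(model, adapter_id)  # attach the LoRA adapter
model.eval()

# Simple VQA-style prompt; the image URL is a placeholder.
image = Image.open(requests.get("https://example.com/cat.jpg", stream=True).raw)
messages = [
    {"role": "user", "content": [{"type": "image"}, {"type": "text", "text": "What is in this image?"}]}
]
prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
inputs = processor(text=prompt, images=[image], return_tensors="pt")
with torch.no_grad():
    generated = model.generate(**inputs, max_new_tokens=64)
print(processor.batch_decode(generated, skip_special_tokens=True)[0])
```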
adapter_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "alora_invocation_tokens": null,
+   "alpha_pattern": {},
+   "arrow_config": null,
+   "auto_mapping": {
+     "base_model_class": "Idefics3ForConditionalGeneration",
+     "parent_library": "transformers.models.idefics3.modeling_idefics3"
+   },
+   "base_model_name_or_path": "HuggingFaceTB/SmolVLM-256M-Instruct",
+   "bias": "none",
+   "corda_config": null,
+   "ensure_weight_tying": false,
+   "eva_config": null,
+   "exclude_modules": null,
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": "gaussian",
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 8,
+   "lora_bias": false,
+   "lora_dropout": 0.1,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "peft_version": "0.18.0",
+   "qalora_group_size": 16,
+   "r": 8,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "v_proj",
+     "down_proj",
+     "up_proj",
+     "o_proj",
+     "gate_proj",
+     "q_proj",
+     "k_proj"
+   ],
+   "target_parameters": null,
+   "task_type": null,
+   "trainable_token_indices": null,
+   "use_dora": false,
+   "use_qalora": false,
+   "use_rslora": false
+ }
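For readability, the JSON above corresponds approximately to the following PEFT `LoraConfig`; this is a reconstruction of the stored configuration, and the JSON file remains the source of truth.

```python
from peft import LoraConfig, get_peft_model
from transformers import AutoModelForVision2Seq

# Reconstruction of the adapter_config.json above (r=8, alpha=8, dropout=0.1,
# gaussian init, no bias, LoRA on all attention and MLP projections).
lora_config = LoraConfig(
    r=8,
    lora_alpha=8,
    lora_dropout=0.1,
    bias="none",
    init_lora_weights="gaussian",
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
)

base = AutoModelForVision2Seq.from_pretrained("HuggingFaceTB/SmolVLM-256M-Instruct")
model = get_peft_model(base, lora_config)  # wrap the base model with LoRA adapters
model.print_trainable_parameters()
```

Targeting all attention and MLP projections at rank 8 keeps the adapter small (about 11.6 MB here) while the base weights stay frozen.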
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7698ec7e067bc991568e5d92156b2db5dce4a75e1d43c3cad576ffc81d50718e
+ size 11608320
runs/Dec11_16-53-56_6db8a76e6710/events.out.tfevents.1765472047.6db8a76e6710.425.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:333c847d6bd2f451717cf28410431ad8a3ff97eb58e8369f401d18d9a02b5a79
+ size 35078
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:be42b7b22367067db071385222d11ba7be079648cef66baf464a48ea9d99065a
+ size 5905