Luna777 committed on
Commit e55be44 · verified · 1 Parent(s): 6efc6cb

End of training

README.md ADDED
@@ -0,0 +1,48 @@
+ ---
+ library_name: transformers
+ license: bsd-3-clause
+ base_model: Salesforce/blip-image-captioning-base
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: model_output
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # model_output
+
+ This model is a fine-tuned version of [Salesforce/blip-image-captioning-base](https://huggingface.co/Salesforce/blip-image-captioning-base) on an unknown dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: AdamW (torch) with betas=(0.9, 0.999) and epsilon=1e-08; no additional optimizer arguments
+ - lr_scheduler_type: linear
+ - num_epochs: 3
+
+ ### Framework versions
+
+ - Transformers 4.48.3
+ - Pytorch 2.5.1+cu124
+ - Tokenizers 0.21.0
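The README describes an image-captioning fine-tune of BLIP, so a short usage sketch may help. This is a minimal, hedged example: the local directory `./model_output` is a placeholder for wherever this checkpoint lives (a Hub repo id works the same way), the image path is hypothetical, and because this commit only adds config and weight files, the processor is loaded from the base checkpoint.

```python
# Minimal captioning sketch; "./model_output" and "example.jpg" are placeholders.
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration

# Processor from the base model, weights from the fine-tuned checkpoint above.
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("./model_output")

image = Image.open("example.jpg").convert("RGB")  # hypothetical input image
inputs = processor(images=image, return_tensors="pt")

# Generate a caption, then decode it back to text.
output_ids = model.generate(**inputs, max_new_tokens=30)
print(processor.decode(output_ids[0], skip_special_tokens=True))
```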
config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "_name_or_path": "Salesforce/blip-image-captioning-base",
+   "architectures": [
+     "BlipForConditionalGeneration"
+   ],
+   "image_text_hidden_size": 256,
+   "initializer_factor": 1.0,
+   "initializer_range": 0.02,
+   "label_smoothing": 0.0,
+   "logit_scale_init_value": 2.6592,
+   "model_type": "blip",
+   "projection_dim": 512,
+   "text_config": {
+     "_attn_implementation_autoset": true,
+     "initializer_factor": 1.0,
+     "model_type": "blip_text_model",
+     "num_attention_heads": 12
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.48.3",
+   "vision_config": {
+     "_attn_implementation_autoset": true,
+     "dropout": 0.0,
+     "initializer_factor": 1.0,
+     "initializer_range": 0.02,
+     "model_type": "blip_vision_model",
+     "num_channels": 3
+   }
+ }
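The config keeps the nested BLIP layout: a top-level `BlipConfig` with `text_config` and `vision_config` sub-configs. A small sketch of inspecting it with transformers, assuming the files above have been downloaded into a local `./model_output` directory (placeholder path):

```python
# Inspect the saved configuration; "./model_output" is an assumed local path.
from transformers import BlipConfig

config = BlipConfig.from_pretrained("./model_output")
print(config.model_type)                        # "blip"
print(config.projection_dim)                    # 512
print(config.text_config.num_attention_heads)   # 12
print(config.vision_config.model_type)          # "blip_vision_model"
```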
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 30522,
+   "eos_token_id": 2,
+   "pad_token_id": 0,
+   "transformers_version": "4.48.3"
+ }
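These are the decoding defaults (`bos`/`eos`/`pad` token ids) that `generate()` picks up automatically. A brief sketch of loading and overriding them, again assuming a local `./model_output` directory:

```python
# Load the stored generation defaults; "./model_output" is an assumed path.
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("./model_output")
print(gen_config.bos_token_id, gen_config.eos_token_id, gen_config.pad_token_id)  # 30522 2 0

# Per-call arguments take precedence over the stored defaults, e.g.:
# model.generate(**inputs, generation_config=gen_config, num_beams=4, max_new_tokens=30)
```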
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:41a358c373bb50419c1fc5dad8c40a283bf4b70001cba90a7624752cc1e1a7f7
+ size 989717056
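The entry above is a Git LFS pointer, not the weights themselves: it records only the SHA-256 and byte size of the real `model.safetensors`. A small sketch for verifying a downloaded copy against the pointer; the local path is an assumption:

```python
# Verify a downloaded model.safetensors against the LFS pointer above.
import hashlib
import os

path = "model_output/model.safetensors"  # hypothetical local path
expected_sha256 = "41a358c373bb50419c1fc5dad8c40a283bf4b70001cba90a7624752cc1e1a7f7"
expected_size = 989_717_056

assert os.path.getsize(path) == expected_size, "size mismatch"

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)
assert h.hexdigest() == expected_sha256, "sha256 mismatch"
print("model.safetensors matches the LFS pointer")
```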
runs/Feb14_19-50-24_3145a4bd5469/events.out.tfevents.1739562625.3145a4bd5469.202.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3ad0c1874df60e1f74e5a45ab0e02fba39e40a152d2ca2e49b39318bfb91d305
+ size 5164
runs/Feb14_19-53-00_3145a4bd5469/events.out.tfevents.1739562780.3145a4bd5469.202.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1e4e711ec385264336dc1a981e592365fdf50fb9b89eb8bcce18552494bd3b0e
+ size 4184
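The two `runs/` entries are TensorBoard event files written during training (one per training launch). The usual way to view them is to point TensorBoard at the `runs/` directory; for programmatic access, a hedged sketch using TensorBoard's `EventAccumulator` is shown below. The scalar tag name (`train/loss`) is an assumption about what the Trainer logged.

```python
# Read logged scalars from a downloaded run directory (path from the file list above).
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("runs/Feb14_19-53-00_3145a4bd5469")
acc.Reload()
print(acc.Tags()["scalars"])              # list the available scalar tags
for event in acc.Scalars("train/loss"):   # tag name is an assumption
    print(event.step, event.value)
```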
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:861f06b5d32ffe5e1aaf85e6fb239b9b9ebe1cb08d1b3718cd6f01483a6f985a
+ size 5304
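`training_args.bin` is the Trainer's serialized `TrainingArguments`. A sketch of an equivalent configuration, reconstructed only from the hyperparameters listed in the README; `output_dir` and the logging backend are assumptions:

```python
# Approximate TrainingArguments matching the README's hyperparameter list.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="model_output",           # assumed; matches the model name above
    learning_rate=5e-05,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    seed=42,
    optim="adamw_torch",                 # AdamW (torch), betas/epsilon at defaults
    lr_scheduler_type="linear",
    num_train_epochs=3,
    report_to=["tensorboard"],           # consistent with the runs/ event files
)
```

The exact stored arguments can typically be recovered by unpickling `training_args.bin` with `torch.load` (on recent PyTorch this requires `weights_only=False`).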