sharmajai901 committed on
Commit 804fc24 · verified · 1 Parent(s): 031075f

End of training

README.md ADDED
@@ -0,0 +1,84 @@
+ ---
+ license: apache-2.0
+ base_model: google/vit-base-patch16-224
+ tags:
+ - generated_from_trainer
+ datasets:
+ - imagefolder
+ metrics:
+ - accuracy
+ model-index:
+ - name: Accomodation_room_classification
+   results:
+   - task:
+       name: Image Classification
+       type: image-classification
+     dataset:
+       name: imagefolder
+       type: imagefolder
+       config: default
+       split: validation
+       args: default
+     metrics:
+     - name: Accuracy
+       type: accuracy
+       value: 0.875
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # Accomodation_room_classification
+
+ This model is a fine-tuned version of [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) on the imagefolder dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.3910
+ - Accuracy: 0.875
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 32
+ - eval_batch_size: 32
+ - seed: 42
+ - gradient_accumulation_steps: 4
+ - total_train_batch_size: 128
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 7
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy |
+ |:-------------:|:-----:|:----:|:---------------:|:--------:|
+ | No log        | 1.0   | 5    | 0.6710          | 0.8182   |
+ | 0.5771        | 2.0   | 10   | 0.5322          | 0.8523   |
+ | 0.5771        | 3.0   | 15   | 0.4599          | 0.8580   |
+ | 0.3947        | 4.0   | 20   | 0.4182          | 0.8636   |
+ | 0.3947        | 5.0   | 25   | 0.3910          | 0.8750   |
+ | 0.3635        | 6.0   | 30   | 0.3867          | 0.8750   |
+ | 0.3635        | 7.0   | 35   | 0.3858          | 0.8580   |
+
+
+ ### Framework versions
+
+ - Transformers 4.40.0
+ - Pytorch 2.2.1+cu121
+ - Datasets 2.19.0
+ - Tokenizers 0.19.1
all_results.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "epoch": 7.0,
+   "eval_accuracy": 0.875,
+   "eval_loss": 0.3910140097141266,
+   "eval_runtime": 4.9449,
+   "eval_samples_per_second": 35.592,
+   "eval_steps_per_second": 1.213,
+   "total_flos": 3.412155785301688e+17,
+   "train_loss": 0.42741406985691616,
+   "train_runtime": 323.3094,
+   "train_samples_per_second": 13.619,
+   "train_steps_per_second": 0.108
+ }
config.json ADDED
@@ -0,0 +1,44 @@
+ {
+   "_name_or_path": "google/vit-base-patch16-224",
+   "architectures": [
+     "ViTForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "encoder_stride": 16,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "amenities",
+     "1": "bathroom",
+     "2": "bedroom",
+     "3": "exterior",
+     "4": "floor_plan",
+     "5": "kitchen",
+     "6": "living_room",
+     "7": "others"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "amenities": 0,
+     "bathroom": 1,
+     "bedroom": 2,
+     "exterior": 3,
+     "floor_plan": 4,
+     "kitchen": 5,
+     "living_room": 6,
+     "others": 7
+   },
+   "layer_norm_eps": 1e-12,
+   "model_type": "vit",
+   "num_attention_heads": 12,
+   "num_channels": 3,
+   "num_hidden_layers": 12,
+   "patch_size": 16,
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.40.0"
+ }
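The `id2label` table above is what turns raw logits into room names at inference time. A sketch of the mapping, reusing the assumed hub id from earlier and a placeholder image path:

```python
# Sketch: map the argmax of the 8-way logits back to a room name via id2label.
import torch
from PIL import Image
from transformers import ViTForImageClassification, ViTImageProcessor

repo = "sharmajai901/Accomodation_room_classification"  # assumed hub id
model = ViTForImageClassification.from_pretrained(repo)
processor = ViTImageProcessor.from_pretrained(repo)

image = Image.open("room_photo.jpg").convert("RGB")  # placeholder path
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 8), one score per room class
class_id = logits.argmax(-1).item()
print(model.config.id2label[class_id])  # e.g. "kitchen"
```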
eval_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 7.0,
+   "eval_accuracy": 0.875,
+   "eval_loss": 0.3910140097141266,
+   "eval_runtime": 4.9449,
+   "eval_samples_per_second": 35.592,
+   "eval_steps_per_second": 1.213
+ }
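As a rough cross-check, runtime times throughput gives the validation-set size implied by these numbers:

```python
# Rough cross-check: eval throughput implies the validation split's size.
eval_runtime = 4.9449                 # seconds, from eval_results.json
samples_per_second = 35.592
n_samples = round(eval_runtime * samples_per_second)
print(n_samples)                      # 176 images
print(round(0.875 * n_samples))       # 154 classified correctly
```

176 is consistent with the other logged accuracies too (e.g. 144/176 = 0.8182 at epoch 1).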
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0b7eff9d81dca60dc599bc43c868acf7790dd9c4dd43424cfdbd67c52d541346
+ size 343242432
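This is a Git LFS pointer, not the weights themselves: the real ~343 MB file is fetched by its `oid`. A sketch for checking a downloaded copy against that digest (the local filename is an assumption):

```python
# Verify a downloaded model.safetensors against the sha256 oid in the pointer.
import hashlib

EXPECTED = "0b7eff9d81dca60dc599bc43c868acf7790dd9c4dd43424cfdbd67c52d541346"

digest = hashlib.sha256()
with open("model.safetensors", "rb") as f:  # assumed local path
    for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
        digest.update(chunk)
assert digest.hexdigest() == EXPECTED, "checksum mismatch"
```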
preprocessor_config.json ADDED
@@ -0,0 +1,36 @@
+ {
+   "_valid_processor_keys": [
+     "images",
+     "do_resize",
+     "size",
+     "resample",
+     "do_rescale",
+     "rescale_factor",
+     "do_normalize",
+     "image_mean",
+     "image_std",
+     "return_tensors",
+     "data_format",
+     "input_data_format"
+   ],
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "ViTImageProcessor",
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 224,
+     "width": 224
+   }
+ }
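In effect, the processor resizes to 224×224, multiplies by `rescale_factor` (1/255), then normalizes with per-channel mean and std 0.5, mapping uint8 pixels into [-1, 1]. A toy check of the arithmetic:

```python
# The rescale + normalize steps from preprocessor_config.json, in plain NumPy.
# (Resizing is omitted; this only checks the pixel-value arithmetic.)
import numpy as np

pixels = np.array([0.0, 128.0, 255.0])          # toy uint8 pixel values
rescaled = pixels * 0.00392156862745098         # do_rescale: 1/255
normalized = (rescaled - 0.5) / 0.5             # do_normalize: mean=std=0.5
print(normalized)                               # [-1.0, 0.0039..., 1.0]
```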
runs/Apr27_12-31-05_37cc8a3686bb/events.out.tfevents.1714221066.37cc8a3686bb.2342.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2bb281e7c5eb782ac3696b6bb3428382281db0f88cc18bf0fbdde5838489c4b3
+ size 4976
runs/Apr27_12-33-11_37cc8a3686bb/events.out.tfevents.1714221204.37cc8a3686bb.2342.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:090d8e487fff6a8ef0b711386a267c81b4a58593c2eacf08b6acd172a1fe0cd9
+ size 7323
runs/Apr27_12-33-11_37cc8a3686bb/events.out.tfevents.1714221455.37cc8a3686bb.2342.2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a5bcc6815cea79c5cd99e1ac1c43a37591832bb81459d00bc704866004804542
+ size 722
runs/Apr27_12-39-16_37cc8a3686bb/events.out.tfevents.1714221573.37cc8a3686bb.2342.3 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2331d8bee942eb5943218e79d64f1a68b461eeff5c8e69fbf9c334b07bfbf6bd
+ size 8241
runs/Apr27_12-39-16_37cc8a3686bb/events.out.tfevents.1714221910.37cc8a3686bb.2342.4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e19e944b63c3a5dff842bc6421a0af43c154e7a98d92344fa85f86155b56e861
+ size 405
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 7.0,
+   "total_flos": 3.412155785301688e+17,
+   "train_loss": 0.42741406985691616,
+   "train_runtime": 323.3094,
+   "train_samples_per_second": 13.619,
+   "train_steps_per_second": 0.108
+ }
trainer_state.json ADDED
@@ -0,0 +1,114 @@
+ {
+   "best_metric": 0.875,
+   "best_model_checkpoint": "Accomodation_room_classification/checkpoint-25",
+   "epoch": 7.0,
+   "eval_steps": 500,
+   "global_step": 35,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "eval_accuracy": 0.8181818181818182,
+       "eval_loss": 0.6709501147270203,
+       "eval_runtime": 4.9691,
+       "eval_samples_per_second": 35.419,
+       "eval_steps_per_second": 1.207,
+       "step": 5
+     },
+     {
+       "epoch": 2.0,
+       "grad_norm": 1.9039503335952759,
+       "learning_rate": 4.032258064516129e-05,
+       "loss": 0.5771,
+       "step": 10
+     },
+     {
+       "epoch": 2.0,
+       "eval_accuracy": 0.8522727272727273,
+       "eval_loss": 0.5321911573410034,
+       "eval_runtime": 5.4706,
+       "eval_samples_per_second": 32.172,
+       "eval_steps_per_second": 1.097,
+       "step": 10
+     },
+     {
+       "epoch": 3.0,
+       "eval_accuracy": 0.8579545454545454,
+       "eval_loss": 0.459867924451828,
+       "eval_runtime": 5.2619,
+       "eval_samples_per_second": 33.448,
+       "eval_steps_per_second": 1.14,
+       "step": 15
+     },
+     {
+       "epoch": 4.0,
+       "grad_norm": 1.7084476947784424,
+       "learning_rate": 2.4193548387096777e-05,
+       "loss": 0.3947,
+       "step": 20
+     },
+     {
+       "epoch": 4.0,
+       "eval_accuracy": 0.8636363636363636,
+       "eval_loss": 0.4181961715221405,
+       "eval_runtime": 5.1139,
+       "eval_samples_per_second": 34.416,
+       "eval_steps_per_second": 1.173,
+       "step": 20
+     },
+     {
+       "epoch": 5.0,
+       "eval_accuracy": 0.875,
+       "eval_loss": 0.3910140097141266,
+       "eval_runtime": 5.509,
+       "eval_samples_per_second": 31.947,
+       "eval_steps_per_second": 1.089,
+       "step": 25
+     },
+     {
+       "epoch": 6.0,
+       "grad_norm": 2.146559238433838,
+       "learning_rate": 8.064516129032258e-06,
+       "loss": 0.3635,
+       "step": 30
+     },
+     {
+       "epoch": 6.0,
+       "eval_accuracy": 0.875,
+       "eval_loss": 0.3867148458957672,
+       "eval_runtime": 5.0182,
+       "eval_samples_per_second": 35.072,
+       "eval_steps_per_second": 1.196,
+       "step": 30
+     },
+     {
+       "epoch": 7.0,
+       "eval_accuracy": 0.8579545454545454,
+       "eval_loss": 0.38580694794654846,
+       "eval_runtime": 5.5963,
+       "eval_samples_per_second": 31.449,
+       "eval_steps_per_second": 1.072,
+       "step": 35
+     },
+     {
+       "epoch": 7.0,
+       "step": 35,
+       "total_flos": 3.412155785301688e+17,
+       "train_loss": 0.42741406985691616,
+       "train_runtime": 323.3094,
+       "train_samples_per_second": 13.619,
+       "train_steps_per_second": 0.108
+     }
+   ],
+   "logging_steps": 10,
+   "max_steps": 35,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 7,
+   "save_steps": 500,
+   "total_flos": 3.412155785301688e+17,
+   "train_batch_size": 32,
+   "trial_name": null,
+   "trial_params": null
+ }
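The logged learning rates are consistent with the card's hyperparameters: 35 optimizer steps, a linear schedule, and warmup ratio 0.1, which rounds up to 4 warmup steps. A sketch that reproduces them, mirroring (under that assumption) the linear schedule in `transformers`:

```python
# Reproduce the learning rates in log_history from the stated hyperparameters:
# linear schedule over 35 steps, warmup_ratio 0.1 -> ceil(3.5) = 4 warmup steps.
import math

base_lr, max_steps = 5e-05, 35
warmup = math.ceil(0.1 * max_steps)  # 4

def lr_at(step: int) -> float:
    if step < warmup:
        return base_lr * step / warmup
    return base_lr * (max_steps - step) / (max_steps - warmup)

for step in (10, 20, 30):
    print(step, lr_at(step))
# ~4.0323e-05, ~2.4194e-05, ~8.0645e-06 — matching log_history above
```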
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fa7deaf974521a2d932fa22fa953ad9729032ba310b2815c726e723c60658187
+ size 5048