abdulrahman-nuzha committed (verified)
Commit d0e9bbb · 1 parent: 7d66a75

End of training
README.md ADDED
@@ -0,0 +1,81 @@
+ ---
+ library_name: transformers
+ license: mit
+ base_model: intfloat/e5-large
+ tags:
+ - generated_from_trainer
+ metrics:
+ - accuracy
+ - precision
+ - recall
+ - f1
+ model-index:
+ - name: intfloat-e5-large-arabic-fp16-allagree
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # intfloat-e5-large-arabic-fp16-allagree
+
+ This model is a fine-tuned version of [intfloat/e5-large](https://huggingface.co/intfloat/e5-large) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.4301
+ - Accuracy: 0.8405
+ - Precision: 0.8470
+ - Recall: 0.8405
+ - F1: 0.8429
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 2e-05
+ - train_batch_size: 64
+ - eval_batch_size: 64
+ - seed: 42
+ - gradient_accumulation_steps: 2
+ - total_train_batch_size: 128
+ - optimizer: AdamW (torch implementation) with betas=(0.9, 0.999) and epsilon=1e-08; no additional optimizer arguments
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_ratio: 0.3
+ - num_epochs: 10
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy | Precision | Recall | F1 |
+ |:-------------:|:------:|:----:|:---------------:|:--------:|:---------:|:------:|:------:|
+ | 1.0133 | 0.7463 | 50 | 0.6945 | 0.7127 | 0.7471 | 0.7127 | 0.6558 |
+ | 0.6316 | 1.4925 | 100 | 0.5125 | 0.8013 | 0.7942 | 0.8013 | 0.7899 |
+ | 0.5287 | 2.2388 | 150 | 0.4711 | 0.8041 | 0.8037 | 0.8041 | 0.8034 |
+ | 0.4814 | 2.9851 | 200 | 0.4799 | 0.7985 | 0.8288 | 0.7985 | 0.8071 |
+ | 0.4304 | 3.7313 | 250 | 0.4373 | 0.8256 | 0.8256 | 0.8256 | 0.8256 |
+ | 0.3706 | 4.4776 | 300 | 0.4595 | 0.8293 | 0.8314 | 0.8293 | 0.8283 |
+ | 0.3455 | 5.2239 | 350 | 0.4738 | 0.8293 | 0.8455 | 0.8293 | 0.8341 |
+ | 0.3053 | 5.9701 | 400 | 0.4301 | 0.8405 | 0.8470 | 0.8405 | 0.8429 |
+ | 0.2437 | 6.7164 | 450 | 0.4713 | 0.8358 | 0.8526 | 0.8358 | 0.8409 |
+ | 0.223 | 7.4627 | 500 | 0.4894 | 0.8414 | 0.8503 | 0.8414 | 0.8431 |
+ | 0.2093 | 8.2090 | 550 | 0.4576 | 0.8554 | 0.8572 | 0.8554 | 0.8562 |
+
+
+ ### Framework versions
+
+ - Transformers 4.51.1
+ - Pytorch 2.6.0+cu124
+ - Datasets 3.5.0
+ - Tokenizers 0.21.1
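The hyperparameters listed in the card map onto the `transformers` `Trainer` API. Below is a minimal sketch of the corresponding `TrainingArguments`, assuming a single device (so a per-device batch size of 64 with 2 gradient-accumulation steps yields the reported total batch size of 128) and treating "Native AMP" as `fp16=True`; the output directory is a placeholder, not taken from the commit.

```python
from transformers import TrainingArguments

# Sketch only: reconstructs the hyperparameters listed in the model card above.
training_args = TrainingArguments(
    output_dir="intfloat-e5-large-arabic-fp16-allagree",  # placeholder path
    learning_rate=2e-5,
    per_device_train_batch_size=64,   # train_batch_size: 64
    per_device_eval_batch_size=64,    # eval_batch_size: 64
    gradient_accumulation_steps=2,    # effective train batch size: 128 on one device
    num_train_epochs=10,
    lr_scheduler_type="linear",
    warmup_ratio=0.3,
    optim="adamw_torch",              # AdamW with default betas=(0.9, 0.999), eps=1e-08
    fp16=True,                        # "Native AMP" mixed precision
    seed=42,
)
```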
config.json ADDED
@@ -0,0 +1,35 @@
+ {
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "id2label": {
+     "0": "negative",
+     "1": "positive",
+     "2": "neutral"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "label2id": {
+     "negative": 0,
+     "neutral": 2,
+     "positive": 1
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.51.1",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
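The `id2label` mapping in this config defines a three-way sentiment head (negative / positive / neutral). As a minimal inference sketch, assuming the repository id `abdulrahman-nuzha/intfloat-e5-large-arabic-fp16-allagree` (inferred from the model name in this commit, not stated in the card):

```python
from transformers import pipeline

# Hypothetical repository id, inferred from the model name in this commit.
model_id = "abdulrahman-nuzha/intfloat-e5-large-arabic-fp16-allagree"

# Sequence-classification pipeline; labels come from id2label in config.json.
classifier = pipeline("text-classification", model=model_id)

# Example Arabic sentence ("The service was excellent").
print(classifier("كانت الخدمة ممتازة"))
# Illustrative output shape: [{'label': 'positive', 'score': ...}]
```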
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d11295a4d801990bfd13191d06a282a1d4fdffef52198a1b8df8090bcdbc6f0a
+ size 1340626860
runs/Apr15_12-07-29_1ce9172a2a99/events.out.tfevents.1744718861.1ce9172a2a99.1097.2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2651be7666a0158366bfe5b833a65a1574743d7f720afc850e27e9aca9623eec
+ size 13035
runs/Apr15_12-07-29_1ce9172a2a99/events.out.tfevents.1744719823.1ce9172a2a99.1097.3 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6bd24c0ba863b6a312c7cdad26d2149152a6790cc025832a4a0ee5afc8d345ad
+ size 560
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e9a0ca98ccddbbb80cbc9d1731c25e39e52c68b9c4cd5e0dea738c412d96c7f4
+ size 5432