Add SetFit model

Files changed:

- 2_Dense/config.json +1 -0
- 2_Dense/model.safetensors +3 -0
- README.md +13 -13
- config.json +15 -16
- config_setfit.json +2 -2
- model.safetensors +2 -2
- model_head.pkl +2 -2
- modules.json +6 -0
- sentence_bert_config.json +1 -1
- tokenizer.json +0 -0
- tokenizer_config.json +8 -43
- vocab.txt +0 -0
2_Dense/config.json
ADDED
@@ -0,0 +1 @@
+{"in_features": 768, "out_features": 512, "bias": true, "activation_function": "torch.nn.modules.activation.Tanh"}
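For orientation: this new config describes the Dense projection that distiluse-base-multilingual-cased-v1 places after pooling, mapping 768-dim pooled DistilBert vectors down to 512-dim sentence embeddings through a Tanh activation. A minimal sketch of the equivalent sentence-transformers module, constructed by hand here purely for illustration:

from sentence_transformers.models import Dense
import torch.nn as nn

# Mirrors 2_Dense/config.json: a 768 -> 512 linear layer with bias and Tanh.
dense = Dense(
    in_features=768,   # pooled DistilBert hidden size ("dim" in config.json)
    out_features=512,  # final sentence-embedding dimensionality
    bias=True,
    activation_function=nn.Tanh(),
)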
2_Dense/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b9cb48efa0eb9798c4b961434f13f179144d9f0397138460c27b5d5d92e0e61
+size 1575072
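The weight files in this commit are stored through Git LFS, so the committed content is a small pointer (spec version, SHA-256 of the actual blob, and its size in bytes) rather than the weights themselves. A minimal sketch of reading such a pointer, assuming the file is checked out without LFS smudging:

# Parse a Git LFS pointer file into its key/value fields.
def parse_lfs_pointer(path: str) -> dict[str, str]:
    fields = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

ptr = parse_lfs_pointer("2_Dense/model.safetensors")
print(ptr["oid"])   # sha256:8b9cb48e...
print(ptr["size"])  # 1575072 (bytes)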
README.md
CHANGED
@@ -1,5 +1,5 @@
 ---
-base_model:
+base_model: sentence-transformers/distiluse-base-multilingual-cased-v1
 library_name: setfit
 metrics:
 - accuracy
@@ -35,7 +35,7 @@ widget:
   فالفندق
 inference: true
 model-index:
-- name: SetFit with
+- name: SetFit with sentence-transformers/distiluse-base-multilingual-cased-v1
   results:
   - task:
       type: text-classification
@@ -46,13 +46,13 @@ model-index:
       split: test
     metrics:
     - type: accuracy
-      value: 0.
+      value: 0.45696969696969697
       name: Accuracy
 ---
 
-# SetFit with
+# SetFit with sentence-transformers/distiluse-base-multilingual-cased-v1
 
-This is a [SetFit](https://github.com/huggingface/setfit) model that can be used for Text Classification. This SetFit model uses [
+This is a [SetFit](https://github.com/huggingface/setfit) model that can be used for Text Classification. This SetFit model uses [sentence-transformers/distiluse-base-multilingual-cased-v1](https://huggingface.co/sentence-transformers/distiluse-base-multilingual-cased-v1) as the Sentence Transformer embedding model. A [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance is used for classification.
 
 The model has been trained using an efficient few-shot learning technique that involves:
 
@@ -63,9 +63,9 @@ The model has been trained using an efficient few-shot learning technique that i
 
 ### Model Description
 - **Model Type:** SetFit
-- **Sentence Transformer body:** [
+- **Sentence Transformer body:** [sentence-transformers/distiluse-base-multilingual-cased-v1](https://huggingface.co/sentence-transformers/distiluse-base-multilingual-cased-v1)
 - **Classification head:** a [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance
-- **Maximum Sequence Length:**
+- **Maximum Sequence Length:** 128 tokens
 - **Number of Classes:** 3 classes
 <!-- - **Training Dataset:** [Unknown](https://huggingface.co/datasets/unknown) -->
 <!-- - **Language:** Unknown -->
@@ -89,7 +89,7 @@ The model has been trained using an efficient few-shot learning technique that i
 ### Metrics
 | Label   | Accuracy |
 |:--------|:---------|
-| **all** | 0.
+| **all** | 0.4570   |
 
 ## Uses
 
@@ -170,11 +170,11 @@ preds = model("مكان راحه البال . المكان نظيف جدا وم
 ### Training Results
 | Epoch  | Step | Training Loss | Validation Loss |
 |:------:|:----:|:-------------:|:---------------:|
-| 0.1667 | 1    | 0.
-| 1.0    | 6    | - | 0.
-| 2.0    | 12   | - | 0.
-| 3.0    | 18   | - | 0.
-| 4.0    | 24   | - | 0.
+| 0.1667 | 1    | 0.3001        | -               |
+| 1.0    | 6    | -             | 0.2727          |
+| 2.0    | 12   | -             | 0.2697          |
+| 3.0    | 18   | -             | 0.2861          |
+| 4.0    | 24   | -             | 0.2927          |
 
 ### Framework Versions
 - Python: 3.10.14
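The card text above refers to the standard SetFit inference snippet, partially visible in the `@@ -170` hunk header. A hedged sketch of that usage; the Hub repo id is a placeholder since the commit page does not show it, and the input sentence is an illustrative Arabic hotel-review phrase:

from setfit import SetFitModel

# "user/repo" is a placeholder; substitute the actual Hub id of this model.
model = SetFitModel.from_pretrained("user/repo")

# Possible outputs per config_setfit.json: "Mixed", "Negative", "Positive".
preds = model("المكان نظيف جدا")  # "The place is very clean"
print(preds)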
config.json
CHANGED
@@ -1,25 +1,24 @@
 {
-  "_name_or_path": "
+  "_name_or_path": "sentence-transformers/distiluse-base-multilingual-cased-v1",
+  "activation": "gelu",
   "architectures": [
-    "
+    "DistilBertModel"
   ],
-  "
-  "
-  "
-  "
-  "hidden_size": 768,
+  "attention_dropout": 0.1,
+  "dim": 768,
+  "dropout": 0.1,
+  "hidden_dim": 3072,
   "initializer_range": 0.02,
-  "intermediate_size": 3072,
-  "layer_norm_eps": 1e-12,
   "max_position_embeddings": 512,
-  "model_type": "
-  "
-  "
+  "model_type": "distilbert",
+  "n_heads": 12,
+  "n_layers": 6,
   "pad_token_id": 0,
-  "
+  "qa_dropout": 0.1,
+  "seq_classif_dropout": 0.2,
+  "sinusoidal_pos_embds": false,
+  "tie_weights_": true,
   "torch_dtype": "float32",
   "transformers_version": "4.45.1",
-  "
-  "use_cache": true,
-  "vocab_size": 64000
+  "vocab_size": 119547
 }
config_setfit.json
CHANGED
@@ -1,8 +1,8 @@
 {
-  "normalize_embeddings": false,
   "labels": [
     "Mixed",
     "Negative",
     "Positive"
-  ]
+  ],
+  "normalize_embeddings": false
 }
model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:d84bc88a6b764709dd4fa7c3e29b7e36f3de9d5cec70e876f2bbb6de150c320c
+size 538947416
model_head.pkl
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:efddb0394f5ff4e41d62c78c799c5d0d3e8c2ce18e25a2029ccd99028a8890cb
+size 13231
modules.json
CHANGED
@@ -10,5 +10,11 @@
     "name": "1",
     "path": "1_Pooling",
     "type": "sentence_transformers.models.Pooling"
+  },
+  {
+    "idx": 2,
+    "name": "2",
+    "path": "2_Dense",
+    "type": "sentence_transformers.models.Dense"
   }
 ]
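With this entry, modules.json now declares a three-stage body: Transformer (0) → Pooling (1) → Dense (2), which is why the 2_Dense/ files are added in this commit. A quick sketch of what loading the body should yield, with the local path as an assumption:

from sentence_transformers import SentenceTransformer

# Load the Sentence Transformer body of this repo (path is illustrative).
body = SentenceTransformer("path/to/this/repo")

# The Dense module projects pooled 768-dim vectors down to 512 dims.
print(body.get_sentence_embedding_dimension())  # expected: 512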
sentence_bert_config.json
CHANGED
@@ -1,4 +1,4 @@
 {
-  "max_seq_length":
+  "max_seq_length": 128,
   "do_lower_case": false
 }
tokenizer.json
CHANGED
(The diff for this file is too large to render; see the raw diff.)
tokenizer_config.json
CHANGED
@@ -8,7 +8,7 @@
       "single_word": false,
       "special": true
     },
-    "
+    "100": {
       "content": "[UNK]",
       "lstrip": false,
       "normalized": false,
@@ -16,7 +16,7 @@
       "single_word": false,
       "special": true
     },
-    "
+    "101": {
       "content": "[CLS]",
       "lstrip": false,
       "normalized": false,
@@ -24,7 +24,7 @@
       "single_word": false,
       "special": true
     },
-    "
+    "102": {
       "content": "[SEP]",
       "lstrip": false,
       "normalized": false,
@@ -32,62 +32,27 @@
       "single_word": false,
       "special": true
     },
-    "
+    "103": {
       "content": "[MASK]",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
-    },
-    "5": {
-      "content": "[رابط]",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": true,
-      "special": true
-    },
-    "6": {
-      "content": "[بريد]",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": true,
-      "special": true
-    },
-    "7": {
-      "content": "[مستخدم]",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": true,
-      "special": true
     }
   },
-  "clean_up_tokenization_spaces":
+  "clean_up_tokenization_spaces": false,
   "cls_token": "[CLS]",
   "do_basic_tokenize": true,
   "do_lower_case": false,
   "mask_token": "[MASK]",
   "max_len": 512,
-  "
-  "
-  "never_split": [
-    "[بريد]",
-    "[مستخدم]",
-    "[رابط]"
-  ],
-  "pad_to_multiple_of": null,
+  "model_max_length": 128,
+  "never_split": null,
   "pad_token": "[PAD]",
-  "pad_token_type_id": 0,
-  "padding_side": "right",
   "sep_token": "[SEP]",
-  "stride": 0,
   "strip_accents": null,
   "tokenize_chinese_chars": true,
-  "tokenizer_class": "
-  "truncation_side": "right",
-  "truncation_strategy": "longest_first",
+  "tokenizer_class": "DistilBertTokenizer",
   "unk_token": "[UNK]"
 }
vocab.txt
CHANGED
(The diff for this file is too large to render; see the raw diff.)