abolton99 committed
Commit fbb23d9 · 1 Parent(s): 5dbfad4

Add SetFit model

README.md CHANGED
@@ -7,7 +7,7 @@ tags:
 pipeline_tag: text-classification
 ---
 
-# /var/folders/gr/47hycvx13rd_q25kzttvfx6h0000gn/T/tmpvau77om3/abolton99/test
+# /var/folders/gr/47hycvx13rd_q25kzttvfx6h0000gn/T/tmpf6is84p9/abolton99/test
 
 This is a [SetFit model](https://github.com/huggingface/setfit) that can be used for text classification. The model has been trained using an efficient few-shot learning technique that involves:
 
@@ -28,7 +28,7 @@ You can then run inference as follows:
 from setfit import SetFitModel
 
 # Download from Hub and run inference
-model = SetFitModel.from_pretrained("/var/folders/gr/47hycvx13rd_q25kzttvfx6h0000gn/T/tmpvau77om3/abolton99/test")
+model = SetFitModel.from_pretrained("/var/folders/gr/47hycvx13rd_q25kzttvfx6h0000gn/T/tmpf6is84p9/abolton99/test")
 # Run inference
 preds = model(["i loved the spiderman movie!", "pineapple on pizza is the worst 🤮"])
 ```
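Both the old and the new card still use a temporary local path as the model id, so the inference snippet only works on the machine that produced it. For orientation, below is a minimal sketch of how a SetFit checkpoint like this one is typically trained and saved, using the setfit 0.x `SetFitTrainer` API. The base checkpoint (matching the `_name_or_path` change in `config.json` below), the two-example dataset, and the output path are illustrative assumptions, not part of this commit.

```python
# Minimal SetFit few-shot training sketch (setfit 0.x API); data and paths are illustrative.
from datasets import Dataset
from sentence_transformers.losses import CosineSimilarityLoss
from setfit import SetFitModel, SetFitTrainer

# Tiny labelled dataset: SetFit is designed to work from a handful of examples per class.
train_dataset = Dataset.from_dict({
    "text": ["i loved the spiderman movie!", "pineapple on pizza is the worst 🤮"],
    "label": [1, 0],
})

# Assumed base checkpoint, consistent with the updated _name_or_path in config.json.
model = SetFitModel.from_pretrained("sentence-transformers/paraphrase-mpnet-base-v2")

trainer = SetFitTrainer(
    model=model,
    train_dataset=train_dataset,
    loss_class=CosineSimilarityLoss,  # contrastive loss over generated sentence pairs
    batch_size=16,
    num_iterations=20,                # text pairs generated per example for contrastive fine-tuning
)
trainer.train()

# Save to a stable location (or push_to_hub) so the card can reference it instead of a temp dir.
model.save_pretrained("setfit-test-model")
```

Loading the saved directory (or a Hub repo id) with `SetFitModel.from_pretrained(...)` then reproduces the inference call shown in the card.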
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "/Users/angusbolton/.cache/torch/sentence_transformers/abolton99_test/",
+  "_name_or_path": "/Users/angusbolton/.cache/torch/sentence_transformers/sentence-transformers_paraphrase-mpnet-base-v2/",
   "architectures": [
     "MPNetModel"
   ],
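The `_name_or_path` now points at the cached `sentence-transformers_paraphrase-mpnet-base-v2` Sentence Transformer rather than the previously exported `abolton99_test` copy, i.e. the MPNet base encoder the SetFit body is derived from. A quick sanity-check sketch, loading that base model by the Hub id inferred from the cache directory name:

```python
from sentence_transformers import SentenceTransformer

# Hub id inferred from the cached directory name in _name_or_path.
base = SentenceTransformer("sentence-transformers/paraphrase-mpnet-base-v2")

# MPNet-base encoder produces 768-dimensional sentence embeddings.
embeddings = base.encode(["i loved the spiderman movie!"])
print(embeddings.shape)  # (1, 768)
```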
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4d1b19479ff9e943f63264e73bd65d37eaf00c717f0438cdd10a606156960dae
+oid sha256:cc5f1fed63fa640aa30abc7cbc2b9b4cb7873bf2a9129ac38e0f7e8d1e346f17
 size 437967672
model_head.pkl CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:390430da931c0415cc0cbe87782cb2ea3ed945470d449b4b18924f9d7f77a029
+oid sha256:0fcdde264b38f9c733ee3e53f1edaf67ad5e9d0c7d40aaea180a269fd5441be9
 size 6991
special_tokens_map.json CHANGED
@@ -9,7 +9,7 @@
   "cls_token": {
     "content": "<s>",
     "lstrip": false,
-    "normalized": false,
+    "normalized": true,
     "rstrip": false,
     "single_word": false
   },
@@ -37,7 +37,7 @@
   "sep_token": {
     "content": "</s>",
     "lstrip": false,
-    "normalized": false,
+    "normalized": true,
     "rstrip": false,
     "single_word": false
   },
tokenizer_config.json CHANGED
@@ -48,19 +48,12 @@
   "do_lower_case": true,
   "eos_token": "</s>",
   "mask_token": "<mask>",
-  "max_length": 512,
   "model_max_length": 512,
   "never_split": null,
-  "pad_to_multiple_of": null,
   "pad_token": "<pad>",
-  "pad_token_type_id": 0,
-  "padding_side": "right",
   "sep_token": "</s>",
-  "stride": 0,
   "strip_accents": null,
   "tokenize_chinese_chars": true,
   "tokenizer_class": "MPNetTokenizer",
-  "truncation_side": "right",
-  "truncation_strategy": "longest_first",
   "unk_token": "[UNK]"
 }
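The keys removed here (`max_length`, `pad_to_multiple_of`, `pad_token_type_id`, `padding_side`, `stride`, `truncation_side`, `truncation_strategy`) are padding/truncation defaults that can equally be supplied per call, so the slimmer config should tokenize identically when those arguments are passed explicitly. A small illustration; loading the tokenizer from the base model's Hub id is an assumption made only to keep the example self-contained:

```python
from transformers import AutoTokenizer

# Assumed source for equivalent MPNet tokenizer files; this repo ships the same tokenizer class.
tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/paraphrase-mpnet-base-v2")

# Padding and truncation passed at call time instead of being persisted in tokenizer_config.json.
encoded = tokenizer(
    ["i loved the spiderman movie!", "pineapple on pizza is the worst 🤮"],
    padding="max_length",
    truncation=True,
    max_length=512,
    return_tensors="pt",
)
print(encoded["input_ids"].shape)  # torch.Size([2, 512])
```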