aimanfadillah committed
Commit 8c9d8ec · verified · 1 Parent(s): 64270b3

Replace model with new fine-tuned weights (clean upload)
.gitattributes DELETED
@@ -1,35 +0,0 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -20,7 +20,7 @@ This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [a
 
 ### Model Description
 - **Model Type:** Sentence Transformer
- - **Base model:** [aimanfadillah/standardized](https://huggingface.co/aimanfadillah/standardized) <!-- at revision 99342a16957cff5b91e97ff545519d79d95d3404 -->
+ - **Base model:** [aimanfadillah/standardized](https://huggingface.co/aimanfadillah/standardized) <!-- at revision 64270b32fa90ff5b0fcdc6621a9a6f1522a5df55 -->
 - **Maximum Sequence Length:** 256 tokens
 - **Output Dimensionality:** 384 dimensions
 - **Similarity Function:** Cosine Similarity
@@ -73,9 +73,9 @@ print(embeddings.shape)
 # Get the similarity scores for the embeddings
 similarities = model.similarity(embeddings, embeddings)
 print(similarities)
- # tensor([[1.0000, 0.6633, 0.1006],
- #         [0.6633, 1.0000, 0.1382],
- #         [0.1006, 0.1382, 1.0000]])
+ # tensor([[1.0000, 0.6651, 0.1016],
+ #         [0.6651, 1.0000, 0.1394],
+ #         [0.1016, 0.1394, 1.0000]])
 ```
 
 <!--
@@ -130,8 +130,8 @@ You can finetune this model on your own dataset.
 * Samples:
   | sentence_0                 | sentence_1                     | label            |
   |:---------------------------|:-------------------------------|:-----------------|
-  | <code>Lock function</code> | <code>Control Lock</code>      | <code>0.8</code> |
   | <code>Cooling Type</code>  | <code>Cooling System</code>    | <code>0.8</code> |
+  | <code>Lock function</code> | <code>Control Lock</code>      | <code>0.8</code> |
   | <code>Interior lamp</code> | <code>Interior Lighting</code> | <code>0.8</code> |
 * Loss: [<code>MultipleNegativesRankingLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#multiplenegativesrankingloss) with these parameters:
   ```json
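
For reference, a minimal sketch of the inference flow the README snippet above exercises. The model id and the example sentences below are placeholders (the diff shows neither the repo id nor the sentences behind the similarity matrix); the calls themselves are the standard sentence-transformers API.

```python
# Minimal sketch of the README usage; the repo id and sentences are placeholders.
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("aimanfadillah/standardized")  # placeholder id

sentences = ["Lock function", "Control Lock", "Interior Lighting"]
embeddings = model.encode(sentences)
print(embeddings.shape)  # (3, 384) -- matches the 384-dim output noted above

# Cosine similarity matrix, as in the README; per the diff, the new weights
# shifted the README's example values slightly (e.g. 0.6633 -> 0.6651).
similarities = model.similarity(embeddings, embeddings)
print(similarities)
```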
eval/similarity_evaluation_en-dev_results.csv DELETED
@@ -1,7 +0,0 @@
- epoch,steps,cosine_pearson,cosine_spearman
- 1.0,1,1.0,0.9999999999999999
- 2.0,2,1.0,0.9999999999999999
- 1.0,1,1.0,0.9999999999999999
- 2.0,2,1.0,0.9999999999999999
- 1.0,1,1.0,0.9999999999999999
- 2.0,2,1.0,0.9999999999999999
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:bd3ba5220ff6e775cc61a49c7e32aab1da16142ef23630d0240479048010af48
+ oid sha256:461f5585c129900516f316afb513a5b06ea0737a0b54e6a6a514175351b145cc
 size 90864192
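
What changed here is a Git LFS pointer: the repo tracks only the sha256 oid and byte size, while the actual ~90.8 MB weights live in LFS storage. A sketch for checking a downloaded model.safetensors against the new pointer; the local path is an assumption:

```python
# Verify a downloaded model.safetensors against the LFS pointer above.
import hashlib

EXPECTED_OID = "461f5585c129900516f316afb513a5b06ea0737a0b54e6a6a514175351b145cc"
EXPECTED_SIZE = 90864192

h = hashlib.sha256()
size = 0
with open("model.safetensors", "rb") as f:  # assumed local path
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)
        size += len(chunk)

assert size == EXPECTED_SIZE, f"size mismatch: {size}"
assert h.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("pointer matches: these are the new fine-tuned weights")
```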
onnx/config.json DELETED
@@ -1,26 +0,0 @@
- {
-   "architectures": [
-     "BertModel"
-   ],
-   "attention_probs_dropout_prob": 0.1,
-   "classifier_dropout": null,
-   "dtype": "float32",
-   "gradient_checkpointing": false,
-   "hidden_act": "gelu",
-   "hidden_dropout_prob": 0.1,
-   "hidden_size": 384,
-   "initializer_range": 0.02,
-   "intermediate_size": 1536,
-   "layer_norm_eps": 1e-12,
-   "max_position_embeddings": 512,
-   "model_type": "bert",
-   "num_attention_heads": 12,
-   "num_hidden_layers": 6,
-   "pad_token_id": 0,
-   "position_embedding_type": "absolute",
-   "torch_dtype": "float32",
-   "transformers_version": "4.53.3",
-   "type_vocab_size": 2,
-   "use_cache": true,
-   "vocab_size": 30522
- }
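
The deleted onnx/config.json describes a 6-layer, 384-hidden, 12-head BERT encoder (MiniLM-L6 sized, consistent with the 384-dimension output and ~90.8 MB weights above). A sketch of the equivalent transformers config, for anyone reconstructing the deleted file:

```python
# Equivalent transformers config for the deleted onnx/config.json above.
from transformers import BertConfig

config = BertConfig(
    vocab_size=30522,
    hidden_size=384,
    num_hidden_layers=6,
    num_attention_heads=12,   # head dim 384 / 12 = 32
    intermediate_size=1536,
    max_position_embeddings=512,
    type_vocab_size=2,
    layer_norm_eps=1e-12,
    hidden_act="gelu",
)
print(config.model_type)  # "bert"
```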
onnx/model.onnx CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:77a56b55864b3adfe8567be61fa709626fbd094f7be81268bd15f47d69dff444
+ oid sha256:79c1e0b9d6357e926265a8eac1a517b23f65cd30efea4524d334d8058d16fe32
 size 90384556
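
Note that this commit updates onnx/model.onnx while deleting the tokenizer files that sat next to it, so an ONNX consumer now has to load the tokenizer from the repo root. A hedged sketch of running the export with onnxruntime; the input names and the mean-pooling step are assumptions based on typical BERT sentence-transformer exports:

```python
# Hedged sketch: run the updated onnx/model.onnx with onnxruntime.
# Input names and mean pooling are assumptions for a BERT-style export.
import numpy as np
import onnxruntime as ort
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(".")  # assumed local checkout
session = ort.InferenceSession("onnx/model.onnx")

enc = tokenizer(["Cooling Type", "Cooling System"],
                padding=True, truncation=True, max_length=256,
                return_tensors="np")
# Keep only the inputs this particular export actually declares.
wanted = {i.name for i in session.get_inputs()}
inputs = {k: v for k, v in enc.items() if k in wanted}
token_embeddings = session.run(None, inputs)[0]  # (batch, seq, 384)

# Mean pooling over non-padding tokens, then L2 normalization.
mask = enc["attention_mask"][..., None].astype(np.float32)
emb = (token_embeddings * mask).sum(1) / np.clip(mask.sum(1), 1e-9, None)
emb = emb / np.linalg.norm(emb, axis=1, keepdims=True)
print(emb @ emb.T)  # cosine similarities
```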
onnx/special_tokens_map.json DELETED
@@ -1,37 +0,0 @@
- {
-   "cls_token": {
-     "content": "[CLS]",
-     "lstrip": false,
-     "normalized": false,
-     "rstrip": false,
-     "single_word": false
-   },
-   "mask_token": {
-     "content": "[MASK]",
-     "lstrip": false,
-     "normalized": false,
-     "rstrip": false,
-     "single_word": false
-   },
-   "pad_token": {
-     "content": "[PAD]",
-     "lstrip": false,
-     "normalized": false,
-     "rstrip": false,
-     "single_word": false
-   },
-   "sep_token": {
-     "content": "[SEP]",
-     "lstrip": false,
-     "normalized": false,
-     "rstrip": false,
-     "single_word": false
-   },
-   "unk_token": {
-     "content": "[UNK]",
-     "lstrip": false,
-     "normalized": false,
-     "rstrip": false,
-     "single_word": false
-   }
- }
onnx/tokenizer.json DELETED
The diff for this file is too large to render. See raw diff
 
onnx/tokenizer_config.json DELETED
@@ -1,65 +0,0 @@
- {
-   "added_tokens_decoder": {
-     "0": {
-       "content": "[PAD]",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "100": {
-       "content": "[UNK]",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "101": {
-       "content": "[CLS]",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "102": {
-       "content": "[SEP]",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "103": {
-       "content": "[MASK]",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     }
-   },
-   "clean_up_tokenization_spaces": false,
-   "cls_token": "[CLS]",
-   "do_basic_tokenize": true,
-   "do_lower_case": true,
-   "extra_special_tokens": {},
-   "mask_token": "[MASK]",
-   "max_length": 128,
-   "model_max_length": 256,
-   "never_split": null,
-   "pad_to_multiple_of": null,
-   "pad_token": "[PAD]",
-   "pad_token_type_id": 0,
-   "padding_side": "right",
-   "sep_token": "[SEP]",
-   "stride": 0,
-   "strip_accents": null,
-   "tokenize_chinese_chars": true,
-   "tokenizer_class": "BertTokenizer",
-   "truncation_side": "right",
-   "truncation_strategy": "longest_first",
-   "unk_token": "[UNK]"
- }
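
The deleted onnx/tokenizer_config.json is a standard uncased WordPiece BertTokenizer configuration (model_max_length 256, matching the maximum sequence length in the README). A sketch reconstructing it; the vocab path is an assumption, since onnx/vocab.txt is deleted in the same commit:

```python
# Reconstructing the deleted tokenizer config; the vocab path is assumed.
from transformers import BertTokenizer

tokenizer = BertTokenizer(
    vocab_file="vocab.txt",  # onnx/vocab.txt was deleted; path assumed
    do_lower_case=True,
    model_max_length=256,
    pad_token="[PAD]", unk_token="[UNK]", cls_token="[CLS]",
    sep_token="[SEP]", mask_token="[MASK]",
)
enc = tokenizer("Interior lamp")
# Ids 101/102 bracket the sequence, matching the added_tokens_decoder above.
print(enc.input_ids)  # [101, ...wordpiece ids..., 102]
```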
onnx/vocab.txt DELETED
The diff for this file is too large to render. See raw diff