Labib11 committed on
Commit c275ebf · verified · 1 Parent(s): aacb05f

Upload folder using huggingface_hub

1_Pooling/config.json ADDED
@@ -0,0 +1,10 @@
+ {
+   "word_embedding_dimension": 1024,
+   "pooling_mode_cls_token": true,
+   "pooling_mode_mean_tokens": false,
+   "pooling_mode_max_tokens": false,
+   "pooling_mode_mean_sqrt_len_tokens": false,
+   "pooling_mode_weightedmean_tokens": false,
+   "pooling_mode_lasttoken": false,
+   "include_prompt": true
+ }
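
This pooling config enables only CLS pooling, so the `[CLS]` token embedding serves as the sentence vector. For context, a minimal sketch (not part of the commit, assuming the `sentence_transformers.models` API) of the equivalent module constructed by hand:

```python
# Minimal sketch: the Pooling module this config describes.
# Only pooling_mode_cls_token is true, so the [CLS] embedding is the sentence vector.
from sentence_transformers import models

pooling = models.Pooling(
    word_embedding_dimension=1024,
    pooling_mode_cls_token=True,
    pooling_mode_mean_tokens=False,  # mean pooling defaults to True, so disable it
    pooling_mode_max_tokens=False,
)
print(pooling.get_pooling_mode_str())  # -> "cls"
```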
README.md CHANGED
@@ -1,3 +1,57 @@
- ---
- license: cc-by-nc-4.0
- ---
+ ---
+ library_name: sentence-transformers
+ pipeline_tag: sentence-similarity
+ tags:
+ - sentence-transformers
+ - feature-extraction
+ - sentence-similarity
+
+ ---
+
+ # {MODEL_NAME}
+
+ This is a [sentence-transformers](https://www.SBERT.net) model: it maps sentences and paragraphs to a 1024-dimensional dense vector space and can be used for tasks such as clustering or semantic search.
+
+ <!--- Describe your model here -->
+
+ ## Usage (Sentence-Transformers)
+
+ Using this model is straightforward once you have [sentence-transformers](https://www.SBERT.net) installed:
+
+ ```
+ pip install -U sentence-transformers
+ ```
+
+ Then you can use the model like this:
+
+ ```python
+ from sentence_transformers import SentenceTransformer
+ sentences = ["This is an example sentence", "Each sentence is converted"]
+
+ model = SentenceTransformer('{MODEL_NAME}')
+ embeddings = model.encode(sentences)
+ print(embeddings)
+ ```
+
+
+
+ ## Evaluation Results
+
+ <!--- Describe how your model was evaluated -->
+
+ For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME})
+
+
+
+ ## Full Model Architecture
+ ```
+ SentenceTransformer(
+   (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: BertModel
+   (1): Pooling({'word_embedding_dimension': 1024, 'pooling_mode_cls_token': True, 'pooling_mode_mean_tokens': False, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})
+   (2): Normalize()
+ )
+ ```
+
+ ## Citing & Authors
+
+ <!--- Describe where people can find more information -->
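
Since the architecture above ends with CLS pooling followed by a Normalize() layer, a hedged sketch of equivalent usage with plain transformers may be helpful ('{MODEL_NAME}' is the README's placeholder; this block is not in the committed README):

```python
# Sketch: reproduce the SentenceTransformer stack with plain transformers.
import torch
import torch.nn.functional as F
from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}')
model = AutoModel.from_pretrained('{MODEL_NAME}')

sentences = ["This is an example sentence", "Each sentence is converted"]
inputs = tokenizer(sentences, padding=True, truncation=True,
                   max_length=512, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# CLS pooling: take the hidden state of the first token ([CLS])
embeddings = outputs.last_hidden_state[:, 0]
# L2-normalize, as the Normalize() module does in the SentenceTransformer stack
embeddings = F.normalize(embeddings, p=2, dim=1)
print(embeddings.shape)  # (2, 1024)
```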
config.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "_name_or_path": "./output_final_bge/checkpoint-1600",
+   "architectures": [
+     "BertModel"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "id2label": {
+     "0": "LABEL_0"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "label2id": {
+     "LABEL_0": 0
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.40.2",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
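
The config above describes a BERT-large-sized encoder (24 layers, hidden size 1024, 16 attention heads). A quick sanity-check sketch, assuming the repo id stands in for the '{MODEL_NAME}' placeholder:

```python
# Sketch: confirm the encoder dimensions recorded in config.json.
from transformers import AutoConfig

config = AutoConfig.from_pretrained('{MODEL_NAME}')
assert config.model_type == "bert"
print(config.num_hidden_layers, config.hidden_size, config.num_attention_heads)
# 24 1024 16
```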
config_sentence_transformers.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "__version__": {
+     "sentence_transformers": "2.7.0",
+     "transformers": "4.40.2",
+     "pytorch": "2.2.0+cu121"
+   },
+   "prompts": {},
+   "default_prompt_name": null
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fa9c8d33d4a75ec79eee12edad70edc5fce3a49332604d5211ed5dbae30cee2e
+ size 1340612432
modules.json ADDED
@@ -0,0 +1,20 @@
+ [
+   {
+     "idx": 0,
+     "name": "0",
+     "path": "",
+     "type": "sentence_transformers.models.Transformer"
+   },
+   {
+     "idx": 1,
+     "name": "1",
+     "path": "1_Pooling",
+     "type": "sentence_transformers.models.Pooling"
+   },
+   {
+     "idx": 2,
+     "name": "2",
+     "path": "2_Normalize",
+     "type": "sentence_transformers.models.Normalize"
+   }
+ ]
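
modules.json wires the three modules into a pipeline: Transformer → Pooling (loaded from 1_Pooling) → Normalize. A sketch of the same stack assembled manually (sentence-transformers does this automatically when loading the repo; '{MODEL_NAME}' is the README's placeholder):

```python
# Sketch: build the three-module pipeline that modules.json describes.
from sentence_transformers import SentenceTransformer, models

transformer = models.Transformer('{MODEL_NAME}', max_seq_length=512)
pooling = models.Pooling(transformer.get_word_embedding_dimension(),
                         pooling_mode='cls')
normalize = models.Normalize()

model = SentenceTransformer(modules=[transformer, pooling, normalize])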
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:19a10c6a1e7dcb1f7668fa8bdf0ff40ad9db6deb3cde8ca4a45d7d63696073ce
+ size 2673063841
rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a4a3d253909d46c30158737382971501f3e075fc208b9496101ccb69908bb3e0
+ size 15984
rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0b868aac597c798c1e10948b0494bfd4d580309159a86f5157c162d97f12e295
+ size 15984
rng_state_2.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c5f5163efa1e323085d4bf282843646e078964b4458c8f2d77af9d64c8fc1b78
+ size 15984
rng_state_3.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4e2cf4e4983f11de7fe56883059b5665b571feff0b789baa0c2040ef90c10273
+ size 15984
rng_state_4.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cf56f8480df876f1ec7b9b4e4321292cf1013e4b22ea52c61ec00b3234eef0e4
+ size 15984
rng_state_5.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bcaae17ca290e776d450a6f6ce0a1fa1f2fa8d170d725a2a90b0d907cec52ca6
+ size 15984
rng_state_6.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c12c03c99f53c9a7192230ae45206237f1a67e22b5bd0b9445c92d92dafd0473
+ size 15984
rng_state_7.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e4d3cdb156a69c8d9e49ea49bb834598bb7a8736c3c74923144fac6b98b9c1ea
+ size 15984
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:166c070a1eed4da8d7b039b8187bd37aff8165f700e5070e8a8d4c177a33ef4a
+ size 1064
sentence_bert_config.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "max_seq_length": 512,
+   "do_lower_case": false
+ }
special_tokens_map.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "cls_token": {
+     "content": "[CLS]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "[MASK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "[PAD]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "[SEP]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "[UNK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,57 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "101": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "102": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "103": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_basic_tokenize": true,
+   "do_lower_case": true,
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "never_split": null,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "unk_token": "[UNK]"
+ }
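
Note that tokenizer_config.json sets "do_lower_case": true while sentence_bert_config.json sets it to false; in sentence-transformers the Transformer-level flag only controls an extra lowercasing pass, so the BertTokenizer's own lowercasing still applies. A quick check (a sketch, using the README's '{MODEL_NAME}' placeholder):

```python
# Sketch: verify that the underlying BertTokenizer lowercases input,
# per "do_lower_case": true in tokenizer_config.json.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}')
print(tokenizer.tokenize("Hello World"))  # expected: ['hello', 'world']
```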
trainer_state.json ADDED
@@ -0,0 +1,133 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 11.940298507462687,
+   "eval_steps": 500,
+   "global_step": 1600,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.746268656716418,
+       "grad_norm": 0.2015659064054489,
+       "learning_rate": 3.73134328358209e-06,
+       "loss": 0.1198,
+       "step": 100
+     },
+     {
+       "epoch": 1.4925373134328357,
+       "grad_norm": 0.16025546193122864,
+       "learning_rate": 7.46268656716418e-06,
+       "loss": 0.0693,
+       "step": 200
+     },
+     {
+       "epoch": 2.2388059701492535,
+       "grad_norm": 0.1018073558807373,
+       "learning_rate": 9.867330016583748e-06,
+       "loss": 0.0496,
+       "step": 300
+     },
+     {
+       "epoch": 2.9850746268656714,
+       "grad_norm": 0.100601427257061,
+       "learning_rate": 9.45273631840796e-06,
+       "loss": 0.0341,
+       "step": 400
+     },
+     {
+       "epoch": 3.7313432835820897,
+       "grad_norm": 0.0992753654718399,
+       "learning_rate": 9.038142620232173e-06,
+       "loss": 0.0216,
+       "step": 500
+     },
+     {
+       "epoch": 4.477611940298507,
+       "grad_norm": 0.08199736475944519,
+       "learning_rate": 8.623548922056384e-06,
+       "loss": 0.0187,
+       "step": 600
+     },
+     {
+       "epoch": 5.223880597014926,
+       "grad_norm": 0.08397415280342102,
+       "learning_rate": 8.208955223880599e-06,
+       "loss": 0.0166,
+       "step": 700
+     },
+     {
+       "epoch": 5.970149253731344,
+       "grad_norm": 0.13193649053573608,
+       "learning_rate": 7.79436152570481e-06,
+       "loss": 0.013,
+       "step": 800
+     },
+     {
+       "epoch": 6.7164179104477615,
+       "grad_norm": 0.06592541933059692,
+       "learning_rate": 7.3797678275290215e-06,
+       "loss": 0.0109,
+       "step": 900
+     },
+     {
+       "epoch": 7.462686567164179,
+       "grad_norm": 0.0223555825650692,
+       "learning_rate": 6.965174129353235e-06,
+       "loss": 0.0105,
+       "step": 1000
+     },
+     {
+       "epoch": 8.208955223880597,
+       "grad_norm": 0.021410465240478516,
+       "learning_rate": 6.550580431177447e-06,
+       "loss": 0.0098,
+       "step": 1100
+     },
+     {
+       "epoch": 8.955223880597014,
+       "grad_norm": 0.09453420341014862,
+       "learning_rate": 6.135986733001659e-06,
+       "loss": 0.0091,
+       "step": 1200
+     },
+     {
+       "epoch": 9.701492537313433,
+       "grad_norm": 0.06435668468475342,
+       "learning_rate": 5.721393034825871e-06,
+       "loss": 0.0084,
+       "step": 1300
+     },
+     {
+       "epoch": 10.447761194029852,
+       "grad_norm": 0.09910629689693451,
+       "learning_rate": 5.306799336650083e-06,
+       "loss": 0.0088,
+       "step": 1400
+     },
+     {
+       "epoch": 11.194029850746269,
+       "grad_norm": 0.03590040281414986,
+       "learning_rate": 4.892205638474296e-06,
+       "loss": 0.0071,
+       "step": 1500
+     },
+     {
+       "epoch": 11.940298507462687,
+       "grad_norm": 0.04040609300136566,
+       "learning_rate": 4.477611940298508e-06,
+       "loss": 0.0073,
+       "step": 1600
+     }
+   ],
+   "logging_steps": 100,
+   "max_steps": 2680,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 20,
+   "save_steps": 800,
+   "total_flos": 0.0,
+   "train_batch_size": 8,
+   "trial_name": null,
+   "trial_params": null
+ }
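
trainer_state.json records the training loss falling from 0.1198 to roughly 0.007 over 1600 of a planned 2680 steps. A small sketch (file path illustrative) for reading the log:

```python
# Sketch: print the loss curve logged every 100 steps in trainer_state.json.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    print(f'step {entry["step"]:>4}: loss={entry["loss"]:.4f}, '
          f'lr={entry["learning_rate"]:.2e}')
```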
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3da4c15b601d6f576dcd179cd8b78a831ce9b72d07fa3dd500671fb02c60c38b
+ size 5240
vocab.txt ADDED
The diff for this file is too large to render. See raw diff