Upload 9 files

- README.md +1 -10
- added_tokens.json +3 -0
- bpe.codes +0 -0
- config.json +30 -0
- model.safetensors +3 -0
- pytorch_model.bin +3 -0
- special_tokens_map.json +9 -0
- tokenizer_config.json +12 -0
- vocab.txt +0 -0
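
To work with these files locally, the whole commit can be fetched with huggingface_hub. A minimal sketch; the repo id is inferred from `_name_or_path` in config.json below and is an assumption:

```python
from huggingface_hub import snapshot_download

# Fetches all nine files, resolving the Git LFS pointers shown below
# into their real payloads. The repo id is an assumption.
local_dir = snapshot_download("linhpn/LinhCSE_training")
print(local_dir)
```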
README.md CHANGED

```diff
@@ -1,10 +1 @@
----
-license: apache-2.0
-language:
-- vi
-metrics:
-- recall
-pipeline_tag: sentence-similarity
-tags:
-- general
----
+Storing intermediate results here only. For the long term, they should be stored in a separate model repository. Besides the binary model, you should also store model metadata such as the date and the size of the training data.
```
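The replacement README recommends keeping model metadata next to the binary model. A minimal sketch of what such a sidecar file could look like; the file name and fields are illustrative assumptions, not part of this upload:

```python
import json
from datetime import date

# Hypothetical sidecar metadata, following the README's recommendation.
metadata = {
    "model_name": "LinhCSE",                  # from _name_or_path in config.json
    "export_date": date.today().isoformat(),
    "training_examples": 1_000_000,           # placeholder for the training-data size
}

with open("model_metadata.json", "w", encoding="utf-8") as f:
    json.dump(metadata, f, indent=2)
```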
added_tokens.json ADDED

```diff
@@ -0,0 +1,3 @@
+{
+  "<mask>": 64000
+}
```
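
This file extends the base PhoBERT vocabulary with a single token: `<mask>` gets id 64000, one past the 64,000 BPE entries, which is why config.json below reports `vocab_size: 64001`. A minimal sketch of the lookup, assuming the upload has been downloaded to `./model`:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./model")  # local path is an assumption

# <mask> is registered as an added token rather than a base vocabulary entry.
assert tokenizer.convert_tokens_to_ids("<mask>") == 64000
```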
bpe.codes ADDED

The diff for this file is too large to render.
config.json ADDED

```diff
@@ -0,0 +1,30 @@
+{
+  "_name_or_path": "linhpn/LinhCSE_training",
+  "architectures": [
+    "BiencoderRobertaModel"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "bos_token_id": 0,
+  "classifier_dropout": null,
+  "eos_token_id": 2,
+  "finetuning_task": "word-level",
+  "gradient_checkpointing": false,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 258,
+  "model_type": "roberta",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 1,
+  "position_embedding_type": "absolute",
+  "tokenizer_class": "PhobertTokenizer",
+  "torch_dtype": "float32",
+  "transformers_version": "4.35.2",
+  "type_vocab_size": 1,
+  "use_cache": true,
+  "vocab_size": 64001
+}
```
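
For reference, a minimal sketch of loading this checkpoint with transformers. `BiencoderRobertaModel` is a custom class that does not ship with the library, so `AutoModel` dispatches on `"model_type": "roberta"` and may report weight keys specific to the biencoder wrapper; the local path is an assumption:

```python
from transformers import AutoConfig, AutoModel

config = AutoConfig.from_pretrained("./model")  # local checkpoint dir is an assumption
print(config.vocab_size)  # 64001: the 64,000-entry base vocabulary plus <mask>

# Dispatched via model_type "roberta"; the custom BiencoderRobertaModel class
# is unknown to transformers, so some wrapper-specific keys may go unmatched.
model = AutoModel.from_pretrained("./model")
```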
model.safetensors ADDED

```diff
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bfe21aeeedbee344b7004ff116173ed595a5a11d9cc2bfca853dd85bd80b805d
+size 542641988
```
pytorch_model.bin ADDED

```diff
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8582283d5194cc25e734de966a732f1020caef3cd64ec13f8d433597fdb79853
+size 542688167
```
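
Both checkpoint files are committed as Git LFS pointers: `oid` records the SHA-256 of the actual payload and `size` its byte count. A minimal sketch for verifying a downloaded file against its pointer; the file path is an assumption:

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file so a 500 MB checkpoint need not fit in memory."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Compare against the oid recorded in the pytorch_model.bin pointer above.
expected = "8582283d5194cc25e734de966a732f1020caef3cd64ec13f8d433597fdb79853"
assert sha256_of("pytorch_model.bin") == expected
```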
special_tokens_map.json ADDED

```diff
@@ -0,0 +1,9 @@
+{
+  "bos_token": "<s>",
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": "<mask>",
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "unk_token": "<unk>"
+}
```
tokenizer_config.json ADDED

```diff
@@ -0,0 +1,12 @@
+{
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": "<mask>",
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "tokenizer_class": "PhobertTokenizer",
+  "unk_token": "<unk>"
+}
```
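
A minimal usage sketch. `model_max_length` holds the transformers "unset" sentinel (`int(1e30)`), so callers should cap sequence length themselves; config.json allows `max_position_embeddings: 258`. PhobertTokenizer reads vocab.txt and bpe.codes from this upload and expects word-segmented Vietnamese input; the local path is an assumption:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./model")  # local path is an assumption

# model_max_length is unset (sentinel value), so truncate explicitly to stay
# within the 258 positions the model config allows.
enc = tokenizer("Hà_Nội là thủ_đô của Việt_Nam .", truncation=True, max_length=256)
print(enc["input_ids"])
```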
vocab.txt ADDED

The diff for this file is too large to render.