bongsoo committed
Commit c039ea6 · 1 Parent(s): fcb9f03

Initial upload of Bert-base-kor-v1

added_tokens.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "[BOS]": 30000,
+   "[EOS]": 30001,
+   "[UNK0]": 30002,
+   "[UNK1]": 30003,
+   "[UNK2]": 30004,
+   "[UNK3]": 30005,
+   "[UNK4]": 30006,
+   "[UNK5]": 30007,
+   "[UNK6]": 30008,
+   "[UNK7]": 30009,
+   "[UNK8]": 30010,
+   "[UNK9]": 30011,
+   "[unused0]": 30012,
+   "[unused1]": 30013,
+   "[unused2]": 30014,
+   "[unused3]": 30015,
+   "[unused4]": 30016,
+   "[unused5]": 30017,
+   "[unused6]": 30018,
+   "[unused7]": 30019,
+   "[unused8]": 30020,
+   "[unused9]": 30021
+ }
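A minimal sketch (not part of this commit) of how added_tokens.json extends the base 30,000-piece vocabulary; the Hub id bongsoo/bert-base-kor-v1 is assumed from the commit message and may differ from the actual repository path:

```python
from transformers import AutoTokenizer

# Assumed Hub id; substitute the actual repository path if it differs.
tokenizer = AutoTokenizer.from_pretrained("bongsoo/bert-base-kor-v1")

# Added tokens occupy ids 30000-30021, directly after the base WordPiece vocab.
print(tokenizer.convert_tokens_to_ids("[BOS]"))  # expected: 30000
print(tokenizer.convert_tokens_to_ids("[EOS]"))  # expected: 30001
print(len(tokenizer))                            # expected: 30022
```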
config.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "architectures": [
+     "BertForPreTraining"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.21.2",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30022
+ }
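A minimal sketch of how this config is consumed when the checkpoint is loaded; the Hub id is again an assumption, while the values come from config.json above:

```python
from transformers import BertConfig, BertForPreTraining

# Assumed Hub id; config values correspond to config.json in this commit.
config = BertConfig.from_pretrained("bongsoo/bert-base-kor-v1")
print(config.vocab_size)   # 30022 = 30000 base pieces + 22 added tokens
print(config.hidden_size)  # 768, i.e. BERT-base geometry

model = BertForPreTraining.from_pretrained("bongsoo/bert-base-kor-v1")
print(sum(p.numel() for p in model.parameters()))  # roughly 110M parameters
```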
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ebbc598cc90a9b72b01580050d0302587ffe0c2b96f0822d3c64819220ef56fd
+ size 438960547
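The file above is a Git LFS pointer, not the weights themselves. A minimal sketch for checking a downloaded pytorch_model.bin against the oid recorded in the pointer; the local path is a placeholder:

```python
import hashlib

def sha256_of(path, chunk_size=1 << 20):
    # Stream the file so the ~438 MB checkpoint is not read into memory at once.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "ebbc598cc90a9b72b01580050d0302587ffe0c2b96f0822d3c64819220ef56fd"
print(sha256_of("pytorch_model.bin") == expected)  # True if the download is intact
```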
special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "additional_special_tokens": [
+     "[BOS]",
+     "[EOS]",
+     "[UNK0]",
+     "[UNK1]",
+     "[UNK2]",
+     "[UNK3]",
+     "[UNK4]",
+     "[UNK5]",
+     "[UNK6]",
+     "[UNK7]",
+     "[UNK8]",
+     "[UNK9]",
+     "[unused0]",
+     "[unused1]",
+     "[unused2]",
+     "[unused3]",
+     "[unused4]",
+     "[unused5]",
+     "[unused6]",
+     "[unused7]",
+     "[unused8]",
+     "[unused9]"
+   ],
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
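A minimal sketch showing that the tokens registered in special_tokens_map.json are kept atomic during tokenization (Hub id assumed as above):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bongsoo/bert-base-kor-v1")

# The bracketed special tokens are never split into WordPieces.
print(tokenizer.tokenize("[BOS] 안녕하세요 [EOS]"))
# -> ['[BOS]', <subwords for 안녕하세요>, '[EOS]']
print(tokenizer.additional_special_tokens[:3])  # expected: ['[BOS]', '[EOS]', '[UNK0]']
```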
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "cls_token": "[CLS]",
+   "do_basic_tokenize": true,
+   "do_lower_case": false,
+   "mask_token": "[MASK]",
+   "max_len": 128,
+   "name_or_path": "../../data11/ai_hub/vocab/tl1-1줄-mecab-30000",
+   "never_split": null,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "special_tokens_map_file": "../../data11/ai_hub/vocab/tl1-1줄-mecab-30000/special_tokens_map.json",
+   "strip_accents": false,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "unk_token": "[UNK]"
+ }
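A minimal sketch of the tokenizer behaviour these settings imply: no lowercasing and no accent stripping. It builds a BertTokenizer directly from the vocab.txt added in this commit:

```python
from transformers import BertTokenizer

tokenizer = BertTokenizer(
    vocab_file="vocab.txt",       # the file added in this commit
    do_lower_case=False,          # casing is preserved
    do_basic_tokenize=True,
    strip_accents=False,          # accents are preserved
    tokenize_chinese_chars=True,
    model_max_length=128,         # mirrors "max_len": 128
)
# Basic tokenization keeps case and accents; the final pieces depend on vocab.txt.
print(tokenizer.tokenize("Café 한국어 BERT"))
```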
vocab.txt ADDED
The diff for this file is too large to render. See raw diff