muruga778 committed on
Commit
3970f96
·
verified ·
1 Parent(s): ca4ae78

Upload 4 files

Browse files
fusion_config (2).json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ {
2
+ "fusion_weight_image": 0.8
3
+ }
label_map (4).json ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "classes": [
3
+ "acneiform",
4
+ "allergic",
5
+ "drug_rash",
6
+ "infectious",
7
+ "inflammatory",
8
+ "other",
9
+ "pigmentary_vascular",
10
+ "unknown"
11
+ ],
12
+ "label2idx": {
13
+ "acneiform": 0,
14
+ "allergic": 1,
15
+ "drug_rash": 2,
16
+ "infectious": 3,
17
+ "inflammatory": 4,
18
+ "other": 5,
19
+ "pigmentary_vascular": 6,
20
+ "unknown": 7
21
+ },
22
+ "idx2label": {
23
+ "0": "acneiform",
24
+ "1": "allergic",
25
+ "2": "drug_rash",
26
+ "3": "infectious",
27
+ "4": "inflammatory",
28
+ "5": "other",
29
+ "6": "pigmentary_vascular",
30
+ "7": "unknown"
31
+ }
32
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "backend": "tokenizers",
3
+ "cls_token": "[CLS]",
4
+ "do_lower_case": true,
5
+ "is_local": false,
6
+ "mask_token": "[MASK]",
7
+ "model_max_length": 512,
8
+ "pad_token": "[PAD]",
9
+ "sep_token": "[SEP]",
10
+ "strip_accents": null,
11
+ "tokenize_chinese_chars": true,
12
+ "tokenizer_class": "BertTokenizer",
13
+ "unk_token": "[UNK]"
14
+ }