areegtarek committed on
Commit 3d86bad · verified · 1 Parent(s): 9489a13

Upload folder using huggingface_hub
README.md ADDED
@@ -0,0 +1,3 @@
+ ---
+ license: apache-2.0
+ ---
config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "_name_or_path": "/home/yu.xin/weishao/Med3DVLM/output/DCFormer_SigLIP",
+   "architectures": [
+     "DEC_CLIP"
+   ],
+   "bias": 0.0,
+   "depth": 12,
+   "dim": 768,
+   "efficient_loss": false,
+   "gather_loss": true,
+   "hidden_size": 768,
+   "input_size": [
+     256,
+     256,
+     128
+   ],
+   "language_model_name_or_path": "medicalai/ClinicalBERT",
+   "local_loss": false,
+   "loss_type": "sigmoid",
+   "mlp_depth": 2,
+   "model_type": "dec_clip",
+   "pretrained_model": null,
+   "siglip_margin": 0.1,
+   "t_prime": 2.659260036932778,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.48.3",
+   "vision_encoder": "dcformer"
+ }
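
To sanity-check the uploaded configuration, here is a minimal sketch that downloads config.json and prints the fields defining the encoder pair. It assumes huggingface_hub is installed and uses a placeholder repo id; since "model_type" is the custom "dec_clip", loading the full model presumably goes through the Med3DVLM codebase rather than a stock transformers class.

import json
from huggingface_hub import hf_hub_download

# Fetch only the config file; the repo id is a placeholder for this repository.
config_path = hf_hub_download(repo_id="<user>/<this-repo>", filename="config.json")

with open(config_path) as f:
    cfg = json.load(f)

# Fields visible in the diff above: a DCFormer vision encoder paired with
# ClinicalBERT, trained with a SigLIP-style sigmoid loss on 256x256x128 volumes.
print(cfg["model_type"])                      # "dec_clip"
print(cfg["vision_encoder"])                  # "dcformer"
print(cfg["language_model_name_or_path"])     # "medicalai/ClinicalBERT"
print(cfg["input_size"], cfg["hidden_size"])  # [256, 256, 128] 768
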
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:97724fdf2f3b95d871032d71c9799e156b5dd0dc70ed4ff361563588e1f5318b
+ size 616820064
pretrained_ViT.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d423383d8ecd375304602e0fdfd3dabcc8144d66c5a5546d6db5bb4c898c415d
+ size 73248672
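
Both binary files above are stored as Git LFS pointers, where the oid line is the SHA-256 digest of the real payload. A minimal sketch for verifying locally downloaded copies against those digests (file names as in this commit, assumed to sit in the working directory):

import hashlib

def sha256_of(path, chunk_size=1 << 20):
    # Stream the file so multi-hundred-MB weights never need to fit in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Expected digests copied from the LFS pointers in this commit.
expected = {
    "model.safetensors": "97724fdf2f3b95d871032d71c9799e156b5dd0dc70ed4ff361563588e1f5318b",
    "pretrained_ViT.bin": "d423383d8ecd375304602e0fdfd3dabcc8144d66c5a5546d6db5bb4c898c415d",
}

for name, digest in expected.items():
    assert sha256_of(name) == digest, f"checksum mismatch for {name}"
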
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,59 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "101": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "102": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "103": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_basic_tokenize": true,
+   "do_lower_case": true,
+   "extra_special_tokens": {},
+   "full_tokenizer_file": null,
+   "mask_token": "[MASK]",
+   "model_max_length": 1000000000000000019884624838656,
+   "never_split": null,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "DistilBertTokenizer",
+   "unk_token": "[UNK]"
+ }
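
The tokenizer files in this commit (tokenizer.json, tokenizer_config.json, special_tokens_map.json, vocab.txt) describe a standard DistilBertTokenizer, so they should load through the regular transformers API. A minimal sketch, assuming the repository has been snapshot-downloaded or cloned to a local folder (the path is a placeholder):

from transformers import AutoTokenizer

# Point at a local copy of this repository; the path is a placeholder.
tokenizer = AutoTokenizer.from_pretrained("./DCFormer_SigLIP")

# Lowercasing and [CLS]/[SEP] wrapping follow the settings shown in
# tokenizer_config.json and special_tokens_map.json above.
enc = tokenizer("Small pleural effusion on the left")
print(tokenizer.convert_ids_to_tokens(enc["input_ids"]))
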
trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
vocab.txt ADDED
The diff for this file is too large to render. See raw diff