shallowblueQAQ commited on
Commit
754bfcf
·
verified ·
1 Parent(s): 900fbdb

init upload

Browse files
relevance_model/config.json ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "/hpc_stor03/sjtu_home/minghao.lv/bert-large-uncased",
3
+ "architectures": [
4
+ "BertForMaskedLM"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.1,
7
+ "classifier_dropout": null,
8
+ "gradient_checkpointing": false,
9
+ "hidden_act": "gelu",
10
+ "hidden_dropout_prob": 0.1,
11
+ "hidden_size": 1024,
12
+ "id2label": {
13
+ "0": "Anger_Irritability",
14
+ "1": "Anxious_Mood",
15
+ "2": "Autonomic_Respiratory_Cardiovascular_symptoms",
16
+ "3": "Decreased_energy_tiredness_fatigue",
17
+ "4": "Depressed_Mood",
18
+ "5": "Gastrointestinal_symptoms",
19
+ "6": "Genitourinary_sexual_symptoms",
20
+ "7": "Hyperactivity_agitation",
21
+ "8": "Impulsivity",
22
+ "9": "Inattention",
23
+ "10": "Suicidal_ideas",
24
+ "11": "Worthlessness_and_guilty",
25
+ "12": "avoidance_of_stimuli",
26
+ "13": "compensatory_behaviors_to_prevent_weight_gain",
27
+ "14": "compulsions",
28
+ "15": "diminished_emotional_expression",
29
+ "16": "risky_behaviors",
30
+ "17": "drastical_shift_in_mood_and_energy",
31
+ "18": "fear_of_gaining_weight",
32
+ "19": "fears_of_being_negatively_evaluated",
33
+ "20": "flight_of_ideas",
34
+ "21": "intrusion_symptoms",
35
+ "22": "loss_of_interest_or_motivation",
36
+ "23": "more_talktive",
37
+ "24": "obsession",
38
+ "25": "panic_fear",
39
+ "26": "poor_memory",
40
+ "27": "sleep_disturbance",
41
+ "28": "somatic_muscle",
42
+ "29": "Derealization&dissociation",
43
+ "30": "somatic_symptoms_sensory",
44
+ "31": "weight_and_appetite_change"
45
+ },
46
+ "initializer_range": 0.02,
47
+ "intermediate_size": 4096,
48
+ "label2id": {
49
+ "Anger_Irritability": 0,
50
+ "Anxious_Mood": 1,
51
+ "Autonomic_Respiratory_Cardiovascular_symptoms": 2,
52
+ "Decreased_energy_tiredness_fatigue": 3,
53
+ "Depressed_Mood": 4,
54
+ "Derealization&dissociation": 29,
55
+ "Gastrointestinal_symptoms": 5,
56
+ "Genitourinary_sexual_symptoms": 6,
57
+ "Hyperactivity_agitation": 7,
58
+ "Impulsivity": 8,
59
+ "Inattention": 9,
60
+ "Suicidal_ideas": 10,
61
+ "Worthlessness_and_guilty": 11,
62
+ "avoidance_of_stimuli": 12,
63
+ "compensatory_behaviors_to_prevent_weight_gain": 13,
64
+ "compulsions": 14,
65
+ "diminished_emotional_expression": 15,
66
+ "drastical_shift_in_mood_and_energy": 17,
67
+ "fear_of_gaining_weight": 18,
68
+ "fears_of_being_negatively_evaluated": 19,
69
+ "flight_of_ideas": 20,
70
+ "intrusion_symptoms": 21,
71
+ "loss_of_interest_or_motivation": 22,
72
+ "more_talktive": 23,
73
+ "obsession": 24,
74
+ "panic_fear": 25,
75
+ "poor_memory": 26,
76
+ "risky_behaviors": 16,
77
+ "sleep_disturbance": 27,
78
+ "somatic_muscle": 28,
79
+ "somatic_symptoms_sensory": 30,
80
+ "weight_and_appetite_change": 31
81
+ },
82
+ "layer_norm_eps": 1e-12,
83
+ "max_position_embeddings": 512,
84
+ "model_type": "bert",
85
+ "num_attention_heads": 16,
86
+ "num_hidden_layers": 24,
87
+ "pad_token_id": 0,
88
+ "position_embedding_type": "absolute",
89
+ "transformers_version": "4.32.0",
90
+ "type_vocab_size": 2,
91
+ "use_cache": true,
92
+ "vocab_size": 30522
93
+ }
relevance_model/model.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import torch
from torch import nn
from transformers import AutoModel, PreTrainedModel, AutoConfig

# NOTE(review): this class is deliberately kept structurally identical to the
# training-time definition so the saved state_dict loads without key remapping.
class BERTDiseaseClassifier(nn.Module):
    """Multi-label symptom classifier: a pretrained BERT encoder + linear head.

    Args:
        model_type: model name or path passed to ``AutoModel.from_pretrained``.
        num_symps: number of output symptoms (one logit per symptom).
    """

    def __init__(self, model_type, num_symps) -> None:
        super().__init__()
        self.model_type = model_type
        self.num_symps = num_symps
        # Multi-label binary classification: one independent logit per symptom.
        self.encoder = AutoModel.from_pretrained(model_type)
        self.dropout = nn.Dropout(self.encoder.config.hidden_dropout_prob)
        self.clf = nn.Linear(self.encoder.config.hidden_size, num_symps)

    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, **kwargs):
        """Return raw logits of shape (batch, num_symps); no sigmoid applied."""
        encoded = self.encoder(input_ids, attention_mask, token_type_ids)
        # [CLS] pooling: take the hidden state of the first token.
        cls_repr = encoded.last_hidden_state[:, 0, :]
        return self.clf(self.dropout(cls_repr))
relevance_model/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a6eba915776904acff969aa0d0d9783c4ca201a77c43c312b12b0637809192a4
3
+ size 1340854767
relevance_model/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "cls_token": "[CLS]",
3
+ "mask_token": "[MASK]",
4
+ "pad_token": "[PAD]",
5
+ "sep_token": "[SEP]",
6
+ "unk_token": "[UNK]"
7
+ }
relevance_model/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
relevance_model/tokenizer_config.json ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "clean_up_tokenization_spaces": true,
3
+ "cls_token": "[CLS]",
4
+ "do_lower_case": true,
5
+ "mask_token": "[MASK]",
6
+ "model_max_length": 512,
7
+ "pad_token": "[PAD]",
8
+ "sep_token": "[SEP]",
9
+ "strip_accents": null,
10
+ "tokenize_chinese_chars": true,
11
+ "tokenizer_class": "BertTokenizer",
12
+ "unk_token": "[UNK]"
13
+ }
relevance_model/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
status_model/config.json ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "mental/mental-bert-base-uncased",
3
+ "architectures": [
4
+ "BertForMaskedLM"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.1,
7
+ "classifier_dropout": null,
8
+ "gradient_checkpointing": false,
9
+ "hidden_act": "gelu",
10
+ "hidden_dropout_prob": 0.1,
11
+ "hidden_size": 768,
12
+ "id2label": {
13
+ "0": "uncertainty"
14
+ },
15
+ "initializer_range": 0.02,
16
+ "intermediate_size": 3072,
17
+ "label2id": {
18
+ "uncertainty": 0
19
+ },
20
+ "layer_norm_eps": 1e-12,
21
+ "max_position_embeddings": 512,
22
+ "model_type": "bert",
23
+ "num_attention_heads": 12,
24
+ "num_hidden_layers": 12,
25
+ "pad_token_id": 0,
26
+ "position_embedding_type": "absolute",
27
+ "torch_dtype": "float32",
28
+ "transformers_version": "4.46.2",
29
+ "type_vocab_size": 2,
30
+ "use_cache": true,
31
+ "vocab_size": 30522
32
+ }
status_model/model.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import torch
from torch import nn
from transformers import AutoModel

class BERTDiseaseClassifier(nn.Module):
    """BERT encoder with a linear classification head over the [CLS] token.

    Args:
        model_type: model name or path passed to ``AutoModel.from_pretrained``.
        num_symps: number of output units of the final linear layer.
    """

    def __init__(self, model_type, num_symps) -> None:
        super().__init__()
        self.model_type = model_type
        self.num_symps = num_symps
        self.encoder = AutoModel.from_pretrained(model_type)
        # Reuse the encoder's configured dropout rate and hidden width.
        self.dropout = nn.Dropout(self.encoder.config.hidden_dropout_prob)
        self.clf = nn.Linear(self.encoder.config.hidden_size, num_symps)

    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, **kwargs):
        """Return raw logits of shape (batch, num_symps); no activation applied."""
        encoded = self.encoder(input_ids, attention_mask, token_type_ids)
        # [CLS] pooling: first-token hidden state summarizes the sequence.
        cls_repr = encoded.last_hidden_state[:, 0, :]
        return self.clf(self.dropout(cls_repr))
status_model/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9354ffb5cc6245b402257af26070a6bf0a120ce417060c9a03a612e36e71b8b0
3
+ size 438012913
status_model/special_tokens_map.json ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cls_token": {
3
+ "content": "[CLS]",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "mask_token": {
10
+ "content": "[MASK]",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": {
17
+ "content": "[PAD]",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "sep_token": {
24
+ "content": "[SEP]",
25
+ "lstrip": false,
26
+ "normalized": false,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ },
30
+ "unk_token": {
31
+ "content": "[UNK]",
32
+ "lstrip": false,
33
+ "normalized": false,
34
+ "rstrip": false,
35
+ "single_word": false
36
+ }
37
+ }
status_model/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
status_model/tokenizer_config.json ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "added_tokens_decoder": {
3
+ "0": {
4
+ "content": "[PAD]",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "100": {
12
+ "content": "[UNK]",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "101": {
20
+ "content": "[CLS]",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "102": {
28
+ "content": "[SEP]",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "103": {
36
+ "content": "[MASK]",
37
+ "lstrip": false,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ }
43
+ },
44
+ "clean_up_tokenization_spaces": false,
45
+ "cls_token": "[CLS]",
46
+ "do_lower_case": true,
47
+ "mask_token": "[MASK]",
48
+ "model_max_length": 512,
49
+ "pad_token": "[PAD]",
50
+ "sep_token": "[SEP]",
51
+ "strip_accents": null,
52
+ "tokenize_chinese_chars": true,
53
+ "tokenizer_class": "BertTokenizer",
54
+ "unk_token": "[UNK]"
55
+ }
status_model/vocab.txt ADDED
The diff for this file is too large to render. See raw diff