SkywalkerLu committed
Commit f98d04c · verified · 1 Parent(s): 79e0f00

Update modeling_transhla2.py

Files changed (1):
  1. modeling_transhla2.py (+176 −176)

modeling_transhla2.py CHANGED
Only the tail of the file changes in content: the AutoConfig/AutoModel registration is commented out. The shared body is shown once below; the changed tail follows as -/+ lines.

@@ -1,176 +1,176 @@
import torch
import torch.nn as nn
from transformers import PreTrainedModel, PretrainedConfig

from peft import LoraConfig, get_peft_model, TaskType
from transformers import EsmModel

class TransHLA2Config(PretrainedConfig):
    model_type = "transhla2"
    def __init__(self, d_model=480, **kwargs):
        super().__init__(**kwargs)
        self.d_model = d_model
        # other custom parameters can be added here
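# d_model defaults to 480, matching the hidden size of the
# facebook/esm2_t12_35M_UR50D backbone used by LoraESM below.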

class LoraESM(nn.Module):
    def __init__(self, d_model=480):
        super().__init__()
        self.model_name_or_path = "facebook/esm2_t12_35M_UR50D"
        self.tokenizer_name_or_path = "facebook/esm2_t12_35M_UR50D"
        self.peft_config = LoraConfig(
            target_modules=['query', 'out_proj', 'value', 'key', 'dense', 'regression'],
            task_type=TaskType.FEATURE_EXTRACTION,
            inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1
        )
        self.esm = EsmModel.from_pretrained(self.model_name_or_path)
        self.lora_esm = get_peft_model(self.esm, self.peft_config)
        self.fc_task = nn.Sequential(
            nn.Linear(d_model, d_model // 4),
            nn.BatchNorm1d(d_model // 4),
            nn.Dropout(0.2),
            nn.SiLU(),
            nn.Linear(d_model // 4, 32),
            nn.BatchNorm1d(32),
        )
        self.classifier = nn.Linear(32, 2)

    def forward(self, x_in):
        lora_outputs = self.lora_esm(x_in)
        last_hidden_state = lora_outputs.last_hidden_state
        out_linear = last_hidden_state.mean(dim=1)
        H = self.fc_task(out_linear)
        output = self.classifier(H)
        return output, last_hidden_state
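
# LoraESM.forward returns (classifier logits, per-token hidden states); TransHLA2
# below uses only the hidden states. The instance is created at module level and
# the fine-tuned LoRA weights are loaded once from Lora_ESM.pt at import time.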
lora_esm = LoraESM()
lora_esm.load_state_dict(torch.load('Lora_ESM.pt'))
class TransHLA2(PreTrainedModel):
    config_class = TransHLA2Config

    def __init__(self, config):
        super().__init__(config)
        n_layers = 4
        n_head = 8
        d_model = config.d_model
        d_ff = 64
        cnn_num_channel = 256
        region_embedding_size = 3
        cnn_kernel_size = 3
        cnn_padding_size = 1
        cnn_stride = 1
        pooling_size = 2

        self.lora_esm = lora_esm

        self.region_cnn1 = nn.Conv1d(d_model, cnn_num_channel, region_embedding_size)
        self.region_cnn2 = nn.Conv1d(d_model, cnn_num_channel, region_embedding_size)
        self.padding1 = nn.ConstantPad1d((1, 1), 0)
        self.padding2 = nn.ConstantPad1d((0, 1), 0)
        self.relu = nn.SiLU()
        self.cnn1 = nn.Conv1d(cnn_num_channel, cnn_num_channel, kernel_size=cnn_kernel_size,
                              padding=cnn_padding_size, stride=cnn_stride)
        self.cnn2 = nn.Conv1d(cnn_num_channel, cnn_num_channel, kernel_size=cnn_kernel_size,
                              padding=cnn_padding_size, stride=cnn_stride)
        self.maxpooling = nn.MaxPool1d(kernel_size=pooling_size)
        self.epitope_transformer_layers = nn.TransformerEncoderLayer(
            d_model=d_model, nhead=n_head, dim_feedforward=d_ff, dropout=0.2)
        self.epitope_transformer_encoder = nn.TransformerEncoder(
            self.epitope_transformer_layers, num_layers=n_layers)
        self.hla_transformer_layers = nn.TransformerEncoderLayer(
            d_model=d_model, nhead=n_head, dim_feedforward=d_ff, dropout=0.2)
        self.hla_transformer_encoder = nn.TransformerEncoder(
            self.hla_transformer_layers, num_layers=n_layers)

        # Cross Attention layers
        self.cross_attention_epitope_layers = nn.ModuleList(
            [nn.MultiheadAttention(d_model, n_head, dropout=0.2) for _ in range(4)])
        self.cross_attention_hla_layers = nn.ModuleList(
            [nn.MultiheadAttention(d_model, n_head, dropout=0.2) for _ in range(4)])

        self.bn1 = nn.BatchNorm1d(cnn_num_channel)
        self.bn2 = nn.BatchNorm1d(cnn_num_channel)
        self.fc_task = nn.Sequential(
            nn.Linear(2 * d_model + 2 * cnn_num_channel, 2 * (d_model + cnn_num_channel) // 4),
            nn.BatchNorm1d(2 * (d_model + cnn_num_channel) // 4),
            nn.Dropout(0.2),
            nn.SiLU(),
            nn.Linear(2 * (d_model + cnn_num_channel) // 4, 96),
            nn.BatchNorm1d(96),
        )
        self.classifier = nn.Linear(96, 2)

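    # The four helpers below form a DPCNN-style residual pyramid: each *_block2
    # call right-pads by one, halves the sequence with stride-2 max pooling, and
    # adds a two-convolution residual on top. The cnn_* blocks serve the epitope
    # branch and the structure_* blocks the HLA branch.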
    def cnn_block1(self, x):
        return self.cnn1(self.relu(x))

    def cnn_block2(self, x):
        x = self.padding2(x)
        px = self.maxpooling(x)
        x = self.relu(px)
        x = self.cnn1(x)
        x = self.relu(x)
        x = self.cnn1(x)
        x = px + x
        return x

    def structure_block1(self, x):
        return self.cnn2(self.relu(x))

    def structure_block2(self, x):
        x = self.padding2(x)
        px = self.maxpooling(x)
        x = self.relu(px)
        x = self.cnn2(x)
        x = self.relu(x)
        x = self.cnn2(x)
        x = px + x
        return x

    def forward(self, epitope_in, hla_in):
        _, epitope_emb = self.lora_esm(epitope_in)
        _, hla_emb = self.lora_esm(hla_in)

        epitope_trans = self.epitope_transformer_encoder(epitope_emb.transpose(0, 1))
        hla_trans = self.hla_transformer_encoder(hla_emb.transpose(0, 1))

        # Cross Attention layers
        for cross_attention_epitope, cross_attention_hla in zip(self.cross_attention_epitope_layers, self.cross_attention_hla_layers):
            epitope_trans, _ = cross_attention_epitope(epitope_trans, hla_trans, hla_trans)
            hla_trans, _ = cross_attention_hla(hla_trans, epitope_trans, epitope_trans)

        # Mean Pooling
        epitope_mean = epitope_trans.mean(dim=0)
        hla_mean = hla_trans.mean(dim=0)

        epitope_cnn_emb = self.region_cnn1(epitope_emb.transpose(1, 2))
        epitope_cnn_emb = self.padding1(epitope_cnn_emb)
        conv = epitope_cnn_emb + self.cnn_block1(self.cnn_block1(epitope_cnn_emb))
        while conv.size(-1) >= 2:
            conv = self.cnn_block2(conv)
        epitope_cnn_out = torch.squeeze(conv, dim=-1)
        epitope_cnn_out = self.bn1(epitope_cnn_out)

        hla_cnn_emb = self.region_cnn2(hla_emb.transpose(1, 2))
        hla_cnn_emb = self.padding1(hla_cnn_emb)
        hla_conv = hla_cnn_emb + self.structure_block1(self.structure_block1(hla_cnn_emb))
        while hla_conv.size(-1) >= 2:
            hla_conv = self.structure_block2(hla_conv)

        hla_cnn_out = torch.squeeze(hla_conv, dim=-1)
        hla_cnn_out = self.bn2(hla_cnn_out)

        representation = torch.cat((epitope_mean, hla_mean, epitope_cnn_out, hla_cnn_out), dim=1)
        reduction_feature = self.fc_task(representation)
        logits_clsf = self.classifier(reduction_feature)
        logits_clsf = torch.nn.functional.softmax(logits_clsf, dim=1)
        return logits_clsf, reduction_feature
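
# Note: forward applies softmax before returning, so the first return value
# holds class probabilities rather than raw logits, despite the logits_clsf name.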

# config = TransHLA2Config(d_model=480)
# model = TransHLA2(config)

# model.load_state_dict(torch.load('pytorch_model.pt'))
# # 2. save in a transformers-compatible format
# model.save_pretrained('pytorch_model.bin', safe_serialization=False)
-from transformers import AutoConfig, AutoModel, CONFIG_MAPPING, MODEL_MAPPING
-
-CONFIG_MAPPING.register("transhla2", TransHLA2Config)
-MODEL_MAPPING.register(TransHLA2Config, TransHLA2)
 
+# from transformers import AutoConfig, AutoModel, CONFIG_MAPPING, MODEL_MAPPING
+
+# CONFIG_MAPPING.register("transhla2", TransHLA2Config)
+# MODEL_MAPPING.register(TransHLA2Config, TransHLA2)
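
With the registration lines commented out, importing this module no longer registers TransHLA2 with the Auto classes; presumably loading is meant to go through auto_map entries in config.json with trust_remote_code=True, or through direct instantiation. Below is a minimal smoke-test sketch of direct instantiation, assuming Lora_ESM.pt is present in the working directory (it is loaded at import time) and that the file is importable as modeling_transhla2; the peptide and HLA strings are placeholders, not real inputs:

import torch
from transformers import AutoTokenizer

# Hypothetical local import; requires Lora_ESM.pt next to the module.
from modeling_transhla2 import TransHLA2, TransHLA2Config

# The model consumes ESM-2 token ids, so the matching tokenizer is assumed.
tokenizer = AutoTokenizer.from_pretrained("facebook/esm2_t12_35M_UR50D")

config = TransHLA2Config(d_model=480)
model = TransHLA2(config)   # untrained weights; load a checkpoint for real use
model.eval()                # BatchNorm requires eval mode at batch size 1

epitope_ids = tokenizer("A" * 9, return_tensors="pt")["input_ids"]   # placeholder 9-mer
hla_ids = tokenizer("A" * 34, return_tensors="pt")["input_ids"]      # placeholder pseudo-sequence

with torch.no_grad():
    probs, features = model(epitope_ids, hla_ids)

print(probs.shape)     # torch.Size([1, 2])  - softmaxed class probabilities
print(features.shape)  # torch.Size([1, 96]) - 96-d reduced representation

For actual predictions one would first load trained weights, e.g. model.load_state_dict(torch.load('pytorch_model.pt')) as in the commented-out lines above.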