SkywalkerLu committed
Commit 1e7969f · verified · 1 Parent(s): b26ea9e

Upload folder using huggingface_hub

Files changed (4)
  1. README.md +15 -3
  2. TransHLA2.0.pt +3 -0
  3. TransHLA2.0.py +163 -0
  4. config.json +5 -0
README.md CHANGED
@@ -1,3 +1,15 @@
- ---
- license: mit
- ---
+ # TransHLA2.0
+
+ **TransHLA2.0** is a protein sequence model that combines an ESM2 backbone, LoRA fine-tuning, and a CNN-Transformer fusion head.
+ It supports `trust_remote_code` loading from the Hugging Face Hub and custom fine-tuning.
+
+ ## Quick Start
+
+ ```python
+ from transformers import AutoModel
+
+ model = AutoModel.from_pretrained(
+     "your-username/TransHLA2.0",
+     trust_remote_code=True
+ )
+ ```
TransHLA2.0.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c97063d5a89f8d2541f0429a1361bece44eaa81d5a905c9b0ad52f45cd51a7e3
+ size 215833895
TransHLA2.0.py ADDED
@@ -0,0 +1,163 @@
+ import torch
+ import torch.nn as nn
+ from transformers import PreTrainedModel, PretrainedConfig
+
+ from peft import LoraConfig, get_peft_model, TaskType
+ from transformers import EsmModel
+
+ class TransHLA2Config(PretrainedConfig):
+     model_type = "transhla2"
+     def __init__(self, d_model=480, **kwargs):
+         super().__init__(**kwargs)
+         self.d_model = d_model
+         # additional custom parameters can be added here
+
+ class LoraESM(nn.Module):
+     def __init__(self, d_model=480):
+         super().__init__()
+         self.model_name_or_path = "facebook/esm2_t12_35M_UR50D"
+         self.tokenizer_name_or_path = "facebook/esm2_t12_35M_UR50D"
+         self.peft_config = LoraConfig(
+             target_modules=['query', 'out_proj', 'value', 'key', 'dense', 'regression'],
+             task_type=TaskType.FEATURE_EXTRACTION,
+             inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1
+         )
+         self.esm = EsmModel.from_pretrained(self.model_name_or_path)
+         self.lora_esm = get_peft_model(self.esm, self.peft_config)
+         self.fc_task = nn.Sequential(
+             nn.Linear(d_model, d_model // 4),
+             nn.BatchNorm1d(d_model // 4),
+             nn.Dropout(0.2),
+             nn.SiLU(),
+             nn.Linear(d_model // 4, 32),
+             nn.BatchNorm1d(32),
+         )
+         self.classifier = nn.Linear(32, 2)
+
+     def forward(self, x_in):
+         lora_outputs = self.lora_esm(x_in)
+         last_hidden_state = lora_outputs.last_hidden_state
+         out_linear = last_hidden_state.mean(dim=1)
+         H = self.fc_task(out_linear)
+         output = self.classifier(H)
+         return output, last_hidden_state
+
+ class TransHLA2(PreTrainedModel):
+     config_class = TransHLA2Config
+
+     def __init__(self, config):
+         super().__init__(config)
+         n_layers = 4
+         n_head = 8
+         d_model = config.d_model
+         d_ff = 64
+         cnn_num_channel = 256
+         region_embedding_size = 3
+         cnn_kernel_size = 3
+         cnn_padding_size = 1
+         cnn_stride = 1
+         pooling_size = 2
+
+         self.lora_esm = LoraESM(d_model=d_model)
+
+         self.region_cnn1 = nn.Conv1d(d_model, cnn_num_channel, region_embedding_size)
+         self.region_cnn2 = nn.Conv1d(d_model, cnn_num_channel, region_embedding_size)
+         self.padding1 = nn.ConstantPad1d((1, 1), 0)
+         self.padding2 = nn.ConstantPad1d((0, 1), 0)
+         self.relu = nn.SiLU()
+         self.cnn1 = nn.Conv1d(cnn_num_channel, cnn_num_channel, kernel_size=cnn_kernel_size,
+                               padding=cnn_padding_size, stride=cnn_stride)
+         self.cnn2 = nn.Conv1d(cnn_num_channel, cnn_num_channel, kernel_size=cnn_kernel_size,
+                               padding=cnn_padding_size, stride=cnn_stride)
+         self.maxpooling = nn.MaxPool1d(kernel_size=pooling_size)
+         self.epitope_transformer_layers = nn.TransformerEncoderLayer(
+             d_model=d_model, nhead=n_head, dim_feedforward=d_ff, dropout=0.2)
+         self.epitope_transformer_encoder = nn.TransformerEncoder(
+             self.epitope_transformer_layers, num_layers=n_layers)
+         self.hla_transformer_layers = nn.TransformerEncoderLayer(
+             d_model=d_model, nhead=n_head, dim_feedforward=d_ff, dropout=0.2)
+         self.hla_transformer_encoder = nn.TransformerEncoder(
+             self.hla_transformer_layers, num_layers=n_layers)
+
+         # Cross Attention layers
+         self.cross_attention_epitope_layers = nn.ModuleList(
+             [nn.MultiheadAttention(d_model, n_head, dropout=0.2) for _ in range(4)])
+         self.cross_attention_hla_layers = nn.ModuleList(
+             [nn.MultiheadAttention(d_model, n_head, dropout=0.2) for _ in range(4)])
+
+         self.bn1 = nn.BatchNorm1d(cnn_num_channel)
+         self.bn2 = nn.BatchNorm1d(cnn_num_channel)
+         self.fc_task = nn.Sequential(
+             nn.Linear(2*d_model + 2*cnn_num_channel, 2 * (d_model + cnn_num_channel) // 4),
+             nn.BatchNorm1d(2 * (d_model + cnn_num_channel) // 4),
+             nn.Dropout(0.2),
+             nn.SiLU(),
+             nn.Linear(2 * (d_model + cnn_num_channel) // 4, 96),
+             nn.BatchNorm1d(96),
+         )
+         self.classifier = nn.Linear(96, 2)
+
+     def cnn_block1(self, x):
+         return self.cnn1(self.relu(x))
+
+     def cnn_block2(self, x):
+         x = self.padding2(x)
+         px = self.maxpooling(x)
+         x = self.relu(px)
+         x = self.cnn1(x)
+         x = self.relu(x)
+         x = self.cnn1(x)
+         x = px + x
+         return x
+
+     def structure_block1(self, x):
+         return self.cnn2(self.relu(x))
+
+     def structure_block2(self, x):
+         x = self.padding2(x)
+         px = self.maxpooling(x)
+         x = self.relu(px)
+         x = self.cnn2(x)
+         x = self.relu(x)
+         x = self.cnn2(x)
+         x = px + x
+         return x
+
+     def forward(self, epitope_in, hla_in):
+         _, epitope_emb = self.lora_esm(epitope_in)
+         _, hla_emb = self.lora_esm(hla_in)
+
+         epitope_trans = self.epitope_transformer_encoder(epitope_emb.transpose(0, 1))
+         hla_trans = self.hla_transformer_encoder(hla_emb.transpose(0, 1))
+
+         # Cross Attention layers
+         for cross_attention_epitope, cross_attention_hla in zip(self.cross_attention_epitope_layers, self.cross_attention_hla_layers):
+             epitope_trans, _ = cross_attention_epitope(epitope_trans, hla_trans, hla_trans)
+             hla_trans, _ = cross_attention_hla(hla_trans, epitope_trans, epitope_trans)
+
+         # Mean Pooling
+         epitope_mean = epitope_trans.mean(dim=0)
+         hla_mean = hla_trans.mean(dim=0)
+
+         epitope_cnn_emb = self.region_cnn1(epitope_emb.transpose(1, 2))
+         epitope_cnn_emb = self.padding1(epitope_cnn_emb)
+         conv = epitope_cnn_emb + self.cnn_block1(self.cnn_block1(epitope_cnn_emb))
+         while conv.size(-1) >= 2:
+             conv = self.cnn_block2(conv)
+         epitope_cnn_out = torch.squeeze(conv, dim=-1)
+         epitope_cnn_out = self.bn1(epitope_cnn_out)
+
+         hla_cnn_emb = self.region_cnn2(hla_emb.transpose(1, 2))
+         hla_cnn_emb = self.padding1(hla_cnn_emb)
+         hla_conv = hla_cnn_emb + self.structure_block1(self.structure_block1(hla_cnn_emb))
+         while hla_conv.size(-1) >= 2:
+             hla_conv = self.structure_block2(hla_conv)
+
+         hla_cnn_out = torch.squeeze(hla_conv, dim=-1)
+         hla_cnn_out = self.bn2(hla_cnn_out)
+
+         representation = torch.cat((epitope_mean, hla_mean, epitope_cnn_out, hla_cnn_out), dim=1)
+         reduction_feature = self.fc_task(representation)
+         logits_clsf = self.classifier(reduction_feature)
+         logits_clsf = torch.nn.functional.softmax(logits_clsf, dim=1)
+         return logits_clsf, reduction_feature
config.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "architectures": ["TransHLA2"],
+   "model_type": "transhla2",
+   "d_model": 480
+ }