Upload Pretraining_pdeeppp.py with huggingface_hub
Pretraining_pdeeppp.py  (ADDED, +119 -0)
import torch
import torch.nn as nn


class PretrainingPDeepPP:
    def __init__(self, embedding_dim=1280, target_length=33, esm_ratio=None, device=None):
        """
        Initialize the PretrainingPDeepPP class.

        Args:
            embedding_dim: Size of the embedding dimension.
            target_length: Target sequence length.
            esm_ratio: Weight ratio between the ESM representation and the
                learned embedding (assigned externally).
            device: Compute device.
        """
        self.embedding_dim = embedding_dim
        self.target_length = target_length
        self.esm_ratio = esm_ratio  # only stored here; no default value is assigned
        self.device = device or torch.device("cuda" if torch.cuda.is_available() else "cpu")
    def extract_esm_representations(self, sequences, esm_model, batch_converter, batch_size=32):
        """
        Extract ESM representations and return a tensor of shape
        (num_sequences, target_length, embedding_dim).
        """
        sequence_representations = []
        print("Sequences to process:", sequences)
        print("Batch size:", batch_size)

        # Attach a dummy label to each sequence, since batch_converter
        # expects (label, sequence) pairs
        labeled_sequences = [(None, seq) for seq in sequences]

        for i in range(0, len(labeled_sequences), batch_size):
            batch = labeled_sequences[i:i + batch_size]
            if len(batch) == 0:
                continue
            # Convert the raw sequences into batch_tokens
            _, batch_strs, batch_tokens = batch_converter(batch)
            batch_tokens = batch_tokens.to(self.device)

            # Extract per-token representations with the ESM model
            with torch.no_grad():
                results = esm_model(batch_tokens, repr_layers=[33], return_contacts=False)

            # Collect the layer-33 representation of each sequence, truncated
            # to target_length (the slice includes the leading BOS token) and
            # zero-padded if shorter, so stacking across batches of different
            # lengths succeeds
            for token_repr in results["representations"][33]:
                token_repr = token_repr[:self.target_length]
                if token_repr.shape[0] < self.target_length:
                    pad = torch.zeros(
                        self.target_length - token_repr.shape[0],
                        token_repr.shape[1],
                        dtype=token_repr.dtype,
                        device=token_repr.device,
                    )
                    token_repr = torch.cat([token_repr, pad], dim=0)
                sequence_representations.append(token_repr)

        if len(sequence_representations) == 0:
            raise ValueError("No ESM representations were generated. Check your input sequences and batch processing logic.")

        # Stack the per-sequence representations into
        # (num_sequences, target_length, embedding_dim)
        return torch.stack(sequence_representations)
    def pad_sequences(self, sequences, max_len=None, pad_value=0):
        if max_len is None:
            max_len = max(len(seq) for seq in sequences)
        # Initialize with pad_value so the padding positions honor the argument
        padded_sequences = torch.full((len(sequences), max_len), pad_value, dtype=torch.long)
        for i, seq in enumerate(sequences):
            seq = seq[:max_len]  # truncate sequences longer than max_len
            padded_sequences[i, :len(seq)] = torch.tensor(seq, dtype=torch.long)
        return padded_sequences

    def seq_to_indices(self, seq, vocab_dict):
        # Map each character to its vocabulary index; out-of-vocabulary
        # characters fall back to index 0
        return [vocab_dict.get(char, 0) for char in seq]
    def create_embeddings(self, sequences, vocab, esm_model, esm_alphabet, batch_size=16):
        """
        Create embedding vectors, using the class's esm_ratio attribute to
        control the weighting dynamically.

        Args:
            sequences: List of input sequences.
            vocab: Character vocabulary.
            esm_model: Pretrained ESM model.
            esm_alphabet: Alphabet of the ESM model.
            batch_size: Batch size.

        Returns:
            Embeddings combining the ESM representation with the learned
            embedding.
        """
        if self.esm_ratio is None:
            raise ValueError("esm_ratio is not set. Please assign a value before creating embeddings.")

        # Build the vocabulary dictionary
        vocab_dict = {char: i for i, char in enumerate(vocab)}

        # Convert the sequences to index lists and pad them to target_length
        indices = [self.seq_to_indices(seq, vocab_dict) for seq in sequences]
        indices_padded = self.pad_sequences(indices, max_len=self.target_length)

        # Define the embedding model
        class EmbeddingPretrainedModel(nn.Module):
            def __init__(self, vocab_size, embedding_dim):
                super().__init__()
                self.embedding = nn.Embedding(vocab_size, embedding_dim)
                self.fc = nn.Linear(embedding_dim, embedding_dim)

            def forward(self, x):
                x = self.embedding(x)
                x = self.fc(x)
                return x

        embedding_model = EmbeddingPretrainedModel(len(vocab), self.embedding_dim).to(self.device)

        # Extract the ESM representations
        esm_representations = self.extract_esm_representations(
            sequences,
            esm_model,
            esm_alphabet.get_batch_converter(),
            batch_size=batch_size
        )

        # Compute the learned embedding representation
        with torch.no_grad():
            embedding_output = embedding_model(indices_padded.to(self.device))

        # Blend the ESM and embedding representations using esm_ratio
        combined_representations = self.esm_ratio * esm_representations + (1 - self.esm_ratio) * embedding_output

        return combined_representations
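
A minimal usage sketch, not part of the uploaded file: it assumes the fair-esm package (facebookresearch/esm) and the esm2_t33_650M_UR50D checkpoint, whose layer-33 representations are 1280-dimensional and therefore match the class defaults; the vocabulary and sequences below are illustrative placeholders.

# Hypothetical usage sketch -- assumes `pip install fair-esm` and the
# esm2_t33_650M_UR50D checkpoint (33 layers, 1280-dim), matching the
# defaults above. Vocabulary and sequences are placeholders.
import esm
import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
esm_model, esm_alphabet = esm.pretrained.esm2_t33_650M_UR50D()
esm_model = esm_model.to(device).eval()

pretrainer = PretrainingPDeepPP(embedding_dim=1280, target_length=33, device=device)
pretrainer.esm_ratio = 0.8  # must be assigned before create_embeddings

vocab = "ACDEFGHIKLMNPQRSTVWY"  # 20 standard amino acids (assumed vocabulary)
sequences = [
    "MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ",
    "MLELLPTAVEGVSQAQITGRPEWIWLALGTALM",
]

embeddings = pretrainer.create_embeddings(sequences, vocab, esm_model, esm_alphabet)
print(embeddings.shape)  # expected: (2, 33, 1280)

Note that esm_ratio must be set explicitly before calling create_embeddings, since __init__ deliberately leaves it unset and create_embeddings raises a ValueError otherwise.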