# LocPred-Prok / app.py
import os, shutil

# ==========================
# 🚧 0. Prevent Hugging Face cache overflow (unchanged)
# ==========================
# Set the cache locations *before* importing transformers, since the cache
# paths are resolved when those libraries are imported.
os.environ["HF_HOME"] = "/tmp/hf_cache"
os.environ["TRANSFORMERS_CACHE"] = "/tmp/hf_cache"
os.environ["HF_HUB_DISABLE_SYMLINKS_WARNING"] = "1"
# Clear any stale caches and recreate the directories; Spaces containers have
# limited disk, so everything is redirected to /tmp.
for path in ["/tmp/hf_cache", os.path.expanduser("~/.cache/huggingface")]:
    shutil.rmtree(path, ignore_errors=True)
    os.makedirs(path, exist_ok=True)

import json, re
import torch
import torch.nn as nn
import torch.nn.functional as F
import gradio as gr
from transformers import AutoTokenizer, AutoModel
# ==========================
# 1. Model Definition (unchanged)
# ==========================
class AttentionPooling(nn.Module):
    """Mask-aware attention pooling over token embeddings."""
    def __init__(self, d_model):
        super().__init__()
        self.attention_net = nn.Linear(d_model, 1)

    def forward(self, x, mask):
        # x: (B, L, D); mask: (B, L) with 1 for real tokens, 0 for padding.
        attn_logits = self.attention_net(x).squeeze(2)      # (B, L)
        attn_logits.masked_fill_(mask == 0, -float('inf'))  # ignore padding
        attn_weights = F.softmax(attn_logits, dim=1)        # (B, L)
        return torch.bmm(attn_weights.unsqueeze(1), x).squeeze(1)  # (B, D)
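
# Masking sketch (illustrative): for x of shape (B, L, D) and a 0/1 mask of
# shape (B, L), padded positions receive -inf logits, so softmax gives them
# exactly zero weight and the result is an attention-weighted sum over the
# real tokens only, with shape (B, D).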
class ProtDualBranchEnhancedClassifier(nn.Module):
    """Dual-branch head: a projected CLS embedding plus CNN-refined,
    attention-pooled token embeddings, fused via a sigmoid gate."""
    def __init__(self, d_model, projection_dim, num_classes, dropout, kernel_size):
        super().__init__()
        self.cls_projector = nn.Linear(d_model, projection_dim)
        self.token_refiner = nn.Sequential(
            nn.Conv1d(d_model, d_model, kernel_size, padding='same'),
            nn.ReLU()
        )
        self.attention_pooling = AttentionPooling(d_model)
        self.tok_projector = nn.Linear(d_model, projection_dim)
        fused_dim = projection_dim * 2
        self.gate = nn.Sequential(
            nn.Linear(fused_dim, fused_dim),
            nn.Sigmoid()
        )
        self.classifier_head = nn.Sequential(
            nn.LayerNorm(fused_dim),
            nn.Linear(fused_dim, fused_dim * 2),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(fused_dim * 2, num_classes)
        )

    def forward(self, cls_embedding, token_embeddings, mask):
        # Semantic branch: project the CLS embedding.
        z_cls = self.cls_projector(cls_embedding)
        # Structural branch: Conv1d expects (B, D, L), so permute in and out.
        tok_emb_permuted = token_embeddings.permute(0, 2, 1)
        refined_tok_emb = self.token_refiner(tok_emb_permuted).permute(0, 2, 1)
        z_tok_pooled = self.attention_pooling(refined_tok_emb, mask)
        z_tok = self.tok_projector(z_tok_pooled)
        # Gated fusion of the two branches before classification.
        z_fused_concat = torch.cat([z_cls, z_tok], dim=1)
        gate_values = self.gate(z_fused_concat)
        z_fused_gated = z_fused_concat * gate_values
        return self.classifier_head(z_fused_gated)
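
# Shape smoke test (an illustrative sketch, not executed at startup; the
# num_classes=6 here is arbitrary, the app derives it from label_map.json):
#   head = ProtDualBranchEnhancedClassifier(640, 32, 6, 0.3, 3)
#   cls = torch.randn(2, 640)                    # (B, d_model) CLS embeddings
#   tok = torch.randn(2, 100, 640)               # (B, L, d_model) token embeddings
#   mask = torch.ones(2, 100, dtype=torch.long)  # (B, L) attention mask
#   head(cls, tok, mask).shape                   # -> torch.Size([2, 6])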
# ==========================
# 2. Load Models and Files (unchanged)
# ==========================
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
PLM_MODEL_NAME = "facebook/esm2_t30_150M_UR50D"
CLASSIFIER_PATH = "best_model_esm2_t30_150M_UR50D.pth"
LABEL_MAP_PATH = "label_map.json"
if not os.path.exists(LABEL_MAP_PATH):
    raise FileNotFoundError(f"Error: Missing '{LABEL_MAP_PATH}'.")
with open(LABEL_MAP_PATH, 'r') as f:
    label_to_idx = json.load(f)
idx_to_label = {v: k for k, v in label_to_idx.items()}
NUM_CLASSES = len(idx_to_label)
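
# Expected label_map.json shape (illustrative; the actual labels ship with the
# Space and may differ):
#   {"Cytoplasmic": 0, "Cytoplasmic Membrane": 1, "Extracellular": 2, ...}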
D_MODEL = 640  # hidden size of esm2_t30_150M_UR50D
print("🔹 Loading Protein Language Model...")
tokenizer = AutoTokenizer.from_pretrained(PLM_MODEL_NAME)
plm_model = AutoModel.from_pretrained(PLM_MODEL_NAME).to(DEVICE)
plm_model.eval()
print("✅ PLM loaded.")
print("🔹 Loading classifier...")
classifier = ProtDualBranchEnhancedClassifier(
    d_model=D_MODEL, projection_dim=32, num_classes=NUM_CLASSES,
    dropout=0.3, kernel_size=3
).to(DEVICE)
if not os.path.exists(CLASSIFIER_PATH):
    raise FileNotFoundError(f"Error: Could not find '{CLASSIFIER_PATH}'.")
classifier.load_state_dict(torch.load(CLASSIFIER_PATH, map_location=DEVICE))
classifier.eval()
print("✅ System Ready.")
# ==========================
# 3. Prediction Function (minor tweaks)
# ==========================
def predict(sequence_input):
    if not sequence_input or sequence_input.isspace():
        # Raising a Gradio error keeps the Label component clean instead of
        # showing an empty dict.
        raise gr.Error("Sequence cannot be empty.")
    # Strip a FASTA header line if present, then keep only A-Z residue letters.
    sequence = "".join(sequence_input.split('\n')[1:]) if sequence_input.startswith('>') else sequence_input
    sequence = re.sub(r'[^A-Z]', '', sequence.upper())
    if not sequence:
        raise gr.Error("Invalid sequence format.")
    with torch.no_grad():
        inputs = tokenizer(sequence, return_tensors="pt", truncation=True, max_length=1024).to(DEVICE)
        outputs = plm_model(**inputs)
        hidden_states = outputs.last_hidden_state
        # ESM-2 wraps the sequence as [CLS] residues [EOS]: the CLS token feeds
        # the semantic branch, the residue tokens feed the structural branch.
        cls_embedding = hidden_states[:, 0, :]
        token_embeddings = hidden_states[:, 1:-1, :]
        token_mask = inputs['attention_mask'][:, 1:-1]
        logits = classifier(cls_embedding, token_embeddings, token_mask)
        probabilities = F.softmax(logits, dim=1)[0]
    confidences = {idx_to_label[i]: float(prob) for i, prob in enumerate(probabilities)}
    return confidences
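
# Example call (illustrative; the label names below are placeholders, the real
# ones come from label_map.json):
#   predict("MKFKLTAGCLAVAGVLLASSFGADA")
#   # -> {"Cytoplasmic": 0.87, "Cytoplasmic Membrane": 0.06, ...}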
# ==========================
# 4. Modernized Gradio Interface
# ==========================
# Custom CSS: gradient header, drop shadows, rounded corners.
custom_css = """
.gradio-container {
    font-family: 'IBM Plex Sans', sans-serif;
}
.main-header {
    text-align: center;
    background: linear-gradient(135deg, #3b82f6 0%, #06b6d4 100%);
    color: white;
    padding: 2rem;
    border-radius: 12px;
    margin-bottom: 1.5rem;
    box-shadow: 0 4px 6px -1px rgba(0, 0, 0, 0.1), 0 2px 4px -1px rgba(0, 0, 0, 0.06);
}
.main-header h1 {
    color: white;
    margin-bottom: 0.5rem;
    font-size: 2.2rem;
}
.main-header p {
    color: #e0f2fe;
    font-size: 1.1rem;
}
.input-card, .output-card {
    border: 1px solid #e5e7eb;
    border-radius: 12px;
    padding: 1.5rem;
    background: white;
    box-shadow: 0 1px 3px 0 rgba(0, 0, 0, 0.1);
}
"""
# A cleaner teal theme, a good fit for a bioinformatics tool.
theme = gr.themes.Soft(
    primary_hue="teal",
    secondary_hue="blue",
    neutral_hue="slate",
    font=[gr.themes.GoogleFont("IBM Plex Sans"), "ui-sans-serif", "system-ui", "sans-serif"],
).set(
    button_primary_background_fill="*primary_600",
    button_primary_background_fill_hover="*primary_700",
    block_shadow="*shadow_drop_lg"
)
with gr.Blocks(theme=theme, css=custom_css, title="LocPred-Prok") as app:
    # --- Top header ---
    with gr.Column(elem_classes="main-header"):
        gr.Markdown(
            """
            # 🧬 Prokaryotic Subcellular Localization
            ### Dual-Branch Architecture with Protein Language Models
            Identify where your protein functions using state-of-the-art deep learning.
            """
        )
    # --- Main content ---
    with gr.Row(equal_height=False):
        # Left: input panel
        with gr.Column(scale=5, elem_classes="input-card"):
            gr.Markdown("### 📥 Input Sequence")
            gr.Markdown("Paste your amino acid sequence (FASTA format supported).")
            sequence_input = gr.Textbox(
                lines=8,
                label="",
                placeholder=">Example Header\nMKFKLTAGCLAVAGVLLASSFGADAEIVV...",
                show_label=False
            )
            with gr.Row():
                clear_btn = gr.ClearButton(components=[sequence_input], value="Clear")
                submit_btn = gr.Button("✨ Run Prediction", variant="primary", scale=2)
            gr.Markdown("#### 💡 Example Sequences")
            gr.Examples(
                examples=[
                    [">sp|P27361|PBP2_ECOLI Penicillin-binding protein 2\nMKFKLTAGCLAVAGVLLASSFGADAEIVVNAIYDQVARTEDGVYTQGQLTGRRIELLNKLGIEPEDSLASTVIHEFVARVGDDHGIETIIDEFYRQHPSASL"],
                    ["MSKLVKTLTISEISKAQNNGGKPAWCWYTLAMCGAGYDSGTCDYMYSHCFGIKHHSSGSSSYHC"],
                ],
                inputs=sequence_input,
                label=None
            )
        # Right: output panel
        with gr.Column(scale=4, elem_classes="output-card"):
            gr.Markdown("### 📊 Prediction Results")
            output_label = gr.Label(
                num_top_classes=NUM_CLASSES,
                label="Probability Distribution",
                show_label=False
            )
            # Collapsible info panel
            with gr.Accordion("📘 Model Architecture & Details", open=False):
                gr.Markdown(
                    """
                    This model utilizes a **Dual-Branch Architecture**:
                    1. **Semantic Branch**: Extracts global features using the `ESM-2 (150M)` CLS token.
                    2. **Structural Branch**: Refines residue-level embeddings via CNN and attention pooling.

                    **Citation:**
                    *LocPred-Prok: Prokaryotic protein subcellular localization prediction with a dual-branch architecture.*
                    """
                )
    # --- Footer ---
    gr.Markdown(
        """
        <div style="text-align: center; margin-top: 2rem; color: #64748b; font-size: 0.9rem;">
            © 2025 iSysLab HUST | Powered by ESM-2 & PyTorch
        </div>
        """
    )
    # --- Event bindings ---
    submit_btn.click(fn=predict, inputs=sequence_input, outputs=output_label)
    # ClearButton already clears the input; this handler also resets the output.
    clear_btn.click(lambda: None, outputs=[output_label])

# Launch the app
app.launch()