# Single_Cell_Classifier / unified_cell_classifier.py
import torch
import torch.nn as nn
import json
import os
from typing import Dict, Optional, List
from huggingface_hub import snapshot_download
from transformers.modeling_outputs import SequenceClassifierOutput
from safetensors.torch import load_file, save_file
class MLPBlock(nn.Module):
def __init__(self, input_dim: int, output_dim: int, dropout_rate: float = 0.2, use_residual: bool = False):
super().__init__()
self.use_residual = use_residual and (input_dim == output_dim)
self.linear = nn.Linear(input_dim, output_dim)
self.bn = nn.BatchNorm1d(output_dim)
self.activation = nn.GELU()
self.dropout = nn.Dropout(dropout_rate)
def forward(self, x: torch.Tensor) -> torch.Tensor:
identity = x
x = self.linear(x)
x = self.bn(x)
x = self.activation(x)
x = self.dropout(x)
if self.use_residual:
x = x + identity
return x
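# Usage sketch (illustrative, not part of the original file): the residual
# connection is only active when input and output widths match, per the
# (input_dim == output_dim) guard above.
#
#   block = MLPBlock(input_dim=128, output_dim=128, use_residual=True)
#   y = block(torch.randn(4, 128))           # -> [4, 128], with skip connection
#   narrowing = MLPBlock(128, 64, use_residual=True)  # guard disables the skip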
class AdvancedMLPClassifier(nn.Module):
def __init__(
self,
input_dim: int,
hidden_dims: List[int],
output_dim: int,
dropout_rate: float = 0.2,
use_residual_in_hidden: bool = True,
loss_fn: Optional[nn.Module] = None
):
super().__init__()
self.initial_bn = nn.BatchNorm1d(input_dim)
all_dims = [input_dim] + hidden_dims
mlp_layers = []
for i in range(len(all_dims) - 1):
mlp_layers.append(
MLPBlock(
input_dim=all_dims[i],
output_dim=all_dims[i + 1],
dropout_rate=dropout_rate,
use_residual=use_residual_in_hidden and (all_dims[i] == all_dims[i + 1])
)
)
self.hidden_network = nn.Sequential(*mlp_layers)
self.output_projection = nn.Linear(all_dims[-1], output_dim)
self.loss_fn = loss_fn if loss_fn is not None else nn.CrossEntropyLoss()
self._initialize_weights()
def forward(
self,
input_ids: torch.Tensor,
labels: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
return_dict: Optional[bool] = True,
**kwargs
) -> SequenceClassifierOutput:
if input_ids.ndim > 2:
input_ids = input_ids.view(input_ids.size(0), -1) # Flatten if necessary
x = self.initial_bn(input_ids)
x = self.hidden_network(x)
logits = self.output_projection(x)
loss = self.loss_fn(logits, labels) if labels is not None else None
if not return_dict:
return (logits, loss) if loss is not None else (logits,)
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=None,
attentions=None
)
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Linear):
                # Kaiming init assumes a ReLU gain; it is a common default for GELU networks too
                nn.init.kaiming_normal_(m.weight, nonlinearity='relu')
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
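# Standalone usage sketch (dimensions are hypothetical; match input_dim to the
# width of your cell-embedding model). The forward signature mimics a
# transformers classifier, so `input_ids` actually carries dense embeddings:
#
#   clf = AdvancedMLPClassifier(input_dim=512, hidden_dims=[256, 256], output_dim=9)
#   out = clf(input_ids=torch.randn(8, 512), labels=torch.randint(0, 9, (8,)))
#   out.loss.backward()   # SequenceClassifierOutput exposes .loss and .logits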
class UnifiedCellClassifier(nn.Module):
    def __init__(self,
                 sub_classifier_names: Optional[List[str]] = None,
                 main_classifier_config: Optional[Dict] = None,
                 sub_classifiers_config: Optional[Dict] = None,
                 **kwargs):
"""
Args:
sub_classifier_names: Lista nomi sub-classificatori
main_classifier_config: Configurazione per il classificatore principale
sub_classifiers_config: Configurazioni per i sub-classificatori
"""
super().__init__()
        # Store the configuration
self.sub_classifier_names = sub_classifier_names or []
self.main_classifier_config = main_classifier_config or {}
self.sub_classifiers_config = sub_classifiers_config or {}
        # Initialize placeholders (populated in from_pretrained)
self.main_classifier = None
self.sub_classifiers = nn.ModuleDict()
self.main_labels = {}
self.sub_labels = {}
        # Mapping: macro-category index -> sub-classifier name
self.macro_to_sub = self._build_default_macro_to_sub_mapping()
def _build_default_macro_to_sub_mapping(self):
"""Mapping di default - può essere sovrascritto dal file macro_to_sub.json"""
return {
"0": "B_cells_classifier",
"1": "CD4plus_T_cells_classifier",
"4": "Myeloid_cells_classifier",
"5": "NK_cells_classifier",
"7": "TRAV1_2_CD8plus_T_cells_classifier",
"8": "gd_T_cells_classifier"
}
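    # The default above can be overridden by a macro_to_sub.json file in the
    # checkpoint directory (see from_pretrained). Expected format - keys are
    # macro-class indices as strings (values shown are from the default map):
    #
    #   {"0": "B_cells_classifier", "5": "NK_cells_classifier"}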
def _create_classifier_from_config(self, config: Dict):
"""Crea un AdvancedMLPClassifier dalla configurazione"""
return AdvancedMLPClassifier(
input_dim=config['input_dim'],
hidden_dims=config['hidden_dims'],
output_dim=config['output_dim'],
dropout_rate=config.get('dropout_rate', 0.2),
use_residual_in_hidden=config.get('use_residual_in_hidden', True),
loss_fn=nn.CrossEntropyLoss()
)
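    # Sketch of the config dict consumed above (values are illustrative):
    #
    #   {"input_dim": 512, "hidden_dims": [256, 128], "output_dim": 5,
    #    "dropout_rate": 0.2, "use_residual_in_hidden": True}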
def forward(self, x: torch.Tensor, return_probabilities: bool = False):
"""
Forward pass gerarchico
Args:
x: Input embeddings [batch_size, embedding_dim]
return_probabilities: Se True, restituisce anche le probabilità
Returns:
Dict con macro_prediction, sub_prediction, final_prediction
"""
if self.main_classifier is None:
            raise RuntimeError("Model not loaded. Use from_pretrained() to load the model.")
        # Main classification (inference only, so gradients are not needed)
with torch.no_grad():
main_output = self.main_classifier(x, return_dict=True)
main_logits = main_output.logits
main_probs = torch.softmax(main_logits, dim=-1)
main_pred = torch.argmax(main_logits, dim=-1)
        # Secondary (sub-type) classification, one sample at a time
batch_size = x.shape[0]
sub_predictions = []
sub_probabilities = [] if return_probabilities else None
for i in range(batch_size):
macro_idx = str(main_pred[i].item())
macro_label = self.main_labels.get(macro_idx, f"unknown_{macro_idx}")
            # Check whether a sub-classifier exists for this macro-category
if macro_idx in self.macro_to_sub:
sub_classifier_name = self.macro_to_sub[macro_idx]
if sub_classifier_name in self.sub_classifiers:
                    # Route the sample through the sub-classifier
with torch.no_grad():
sub_output = self.sub_classifiers[sub_classifier_name](x[i:i+1], return_dict=True)
sub_logits = sub_output.logits
sub_probs = torch.softmax(sub_logits, dim=-1)
sub_pred = torch.argmax(sub_logits, dim=-1)
sub_idx = str(sub_pred.item())
sub_label = self.sub_labels[sub_classifier_name].get(sub_idx, f"unknown_{sub_idx}")
final_prediction = f"{macro_label}_{sub_label}"
if return_probabilities:
sub_probabilities.append(sub_probs[0])
else:
                    # Sub-classifier not loaded, fall back to the macro label
final_prediction = macro_label
if return_probabilities:
sub_probabilities.append(None)
else:
                # No sub-classifier for this macro-category, fall back to the macro label
final_prediction = macro_label
if return_probabilities:
sub_probabilities.append(None)
sub_predictions.append(final_prediction)
result = {
'macro_predictions': [self.main_labels.get(str(idx.item()), f"unknown_{idx.item()}")
for idx in main_pred],
'final_predictions': sub_predictions
}
if return_probabilities:
result['macro_probabilities'] = main_probs
result['sub_probabilities'] = sub_probabilities
return result
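    # Example return value for a batch of two cells (labels are illustrative;
    # the second cell falls back to its macro label):
    #
    #   {'macro_predictions': ['B_cells', 'NK_cells'],
    #    'final_predictions': ['B_cells_Naive', 'NK_cells']}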
def predict(self, x: torch.Tensor):
"""Metodo semplificato per predizione"""
return self.forward(x, return_probabilities=False)['final_predictions']
    @classmethod
def from_pretrained(cls, repo_id_or_path: str, **kwargs):
"""
Carica il modello da HuggingFace Hub o da un path locale in modo robusto.
"""
        # 1. Resolve a unified local path
if os.path.isdir(repo_id_or_path):
local_model_path = repo_id_or_path
else:
local_model_path = snapshot_download(repo_id=repo_id_or_path)
        # 2. Load the general configuration
config_path = os.path.join(local_model_path, "config.json")
with open(config_path) as f:
config = json.load(f)
        # 3. Instantiate the model "container"
model = cls(**config)
        # 4. Load the main classifier
        # a) Build the main classifier architecture from its configuration
if 'main_classifier_config' in config:
main_config = config['main_classifier_config']
            # Build the classifier architecture from its stored configuration
model.main_classifier = model._create_classifier_from_config(main_config)
        # b) Now load the weights, since model.main_classifier exists
main_weights_path = os.path.join(local_model_path, "main_classifier/main_classifier.safetensors")
main_state_dict = load_file(main_weights_path)
model.main_classifier.load_state_dict(main_state_dict, strict=False)
main_labels_path = os.path.join(local_model_path, "main_classifier/id2label_main.json")
with open(main_labels_path) as f:
model.main_labels = json.load(f)
        # 5. Load the sub-classifiers
if 'sub_classifiers_config' in config:
for sub_name in model.sub_classifier_names:
try:
                    # a) Build the sub-classifier architecture
sub_config = config['sub_classifiers_config'][sub_name]
model.sub_classifiers[sub_name] = model._create_classifier_from_config(sub_config)
                    # b) Now load its weights
sub_weights_path = os.path.join(local_model_path, f"sub_classifiers/{sub_name}/{sub_name}.safetensors")
sub_state_dict = load_file(sub_weights_path)
model.sub_classifiers[sub_name].load_state_dict(sub_state_dict, strict=False)
                    # c) Load its labels
sub_labels_path = os.path.join(local_model_path, f"sub_classifiers/{sub_name}/{sub_name}_id2label.json")
with open(sub_labels_path) as f:
model.sub_labels[sub_name] = json.load(f)
except Exception as e:
print(f"⚠️ Avviso: impossibile caricare il sub-classificatore {sub_name}. Errore: {e}")
continue
        # 6. Load the macro_to_sub mapping if present
macro_to_sub_path = os.path.join(local_model_path, "macro_to_sub.json")
if os.path.exists(macro_to_sub_path):
with open(macro_to_sub_path) as f:
model.macro_to_sub = json.load(f)
else:
print("File macro_to_sub.json non trovato, uso mapping di default.")
model.eval()
return model
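    # Usage sketch (the repo id below is a placeholder, not a released checkpoint):
    #
    #   model = UnifiedCellClassifier.from_pretrained("user/unified-cell-classifier")
    #   labels = model.predict(embeddings)   # embeddings: [batch, input_dim] float tensor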
    def save_pretrained(self, save_directory: str):
        """
        Save the model in the directory layout expected by from_pretrained
        (safetensors weights plus JSON configs and label maps).
        """
        os.makedirs(save_directory, exist_ok=True)
        # Save the general configuration
        config = {
            'sub_classifier_names': self.sub_classifier_names,
            'main_classifier_config': self.main_classifier_config,
            'sub_classifiers_config': self.sub_classifiers_config
        }
        with open(os.path.join(save_directory, "config.json"), 'w') as f:
            json.dump(config, f, indent=2)
        # Save the main classifier (same paths that from_pretrained reads)
        if self.main_classifier is not None:
            main_dir = os.path.join(save_directory, "main_classifier")
            os.makedirs(main_dir, exist_ok=True)
            save_file(self.main_classifier.state_dict(),
                      os.path.join(main_dir, "main_classifier.safetensors"))
            with open(os.path.join(main_dir, "id2label_main.json"), 'w') as f:
                json.dump(self.main_labels, f, indent=2)
        # Save the sub-classifiers, one sub-directory per classifier
        for name, classifier in self.sub_classifiers.items():
            sub_dir = os.path.join(save_directory, "sub_classifiers", name)
            os.makedirs(sub_dir, exist_ok=True)
            save_file(classifier.state_dict(),
                      os.path.join(sub_dir, f"{name}.safetensors"))
            with open(os.path.join(sub_dir, f"{name}_id2label.json"), 'w') as f:
                json.dump(self.sub_labels[name], f, indent=2)
        # Save the mapping
        with open(os.path.join(save_directory, "macro_to_sub.json"), 'w') as f:
            json.dump(self.macro_to_sub, f, indent=2)
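if __name__ == "__main__":
    # Smoke test (sketch): wire up a tiny hierarchy with random weights and run
    # one hierarchical forward pass. All dimensions and labels are illustrative
    # and do not correspond to any released checkpoint.
    toy_cfg = {"input_dim": 32, "hidden_dims": [16], "output_dim": 2}
    model = UnifiedCellClassifier(
        sub_classifier_names=["B_cells_classifier"],
        main_classifier_config=toy_cfg,
        sub_classifiers_config={"B_cells_classifier": toy_cfg},
    )
    model.main_classifier = model._create_classifier_from_config(toy_cfg)
    model.sub_classifiers["B_cells_classifier"] = model._create_classifier_from_config(toy_cfg)
    model.main_labels = {"0": "B_cells", "1": "Other"}
    model.sub_labels = {"B_cells_classifier": {"0": "Naive", "1": "Memory"}}
    model.eval()  # put BatchNorm layers in inference mode
    print(model.predict(torch.randn(4, 32)))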