# /// script
# dependencies = [
# "transformers>=4.38.0",
# "datasets>=2.16.0",
# "torch>=2.1.0",
# "scikit-learn>=1.3.0",
# "accelerate>=0.26.0",
# ]
# ///
"""
SAPBERT Training on Extended FDA LOINC2SDTM Dataset
Multi-label classification for 8 SDTM fields
FIXED VERSION with better error handling and logging
"""
import os
import sys
import json
import traceback
from datasets import load_dataset
from transformers import (
AutoTokenizer,
AutoModel,
TrainingArguments,
Trainer,
)
import torch
import torch.nn as nn
def log(msg):
"""Print with flush to ensure immediate output"""
print(msg, flush=True)
try:
log("=" * 80)
log("SAPBERT TRAINING - Extended FDA Dataset (8 SDTM Fields)")
log("FIXED VERSION - Enhanced error handling and logging")
log("=" * 80)
# Configuration
BASE_MODEL = "cambridgeltl/SapBERT-from-PubMedBERT-fulltext"
DATASET_NAME = "panikos/loinc2sdtm-fda-extended"
OUTPUT_DIR = "loinc2sdtm-sapbert-extended-model"
HF_USERNAME = "panikos"
# Fields to train on (using only the 8 core SDTM fields)
TRAIN_FIELDS = [
'lbtestcd',
'lbtest',
'lbspec',
'lbstresu',
'lbmethod',
'lbptfl',
'lbrestyp',
'lbresscl',
]
log("\n[1/7] Loading extended FDA structured dataset...")
log(f" Dataset: {DATASET_NAME}")
try:
dataset = load_dataset(DATASET_NAME, split="train")
log(f" βœ“ Loaded {len(dataset)} examples from FDA source")
log(f" βœ“ Training on {len(TRAIN_FIELDS)} SDTM fields")
log(f" βœ“ Dataset features: {list(dataset.features.keys())}")
except Exception as e:
log(f" βœ— FAILED to load dataset!")
log(f" Error: {str(e)}")
traceback.print_exc()
sys.exit(1)
# Build vocabularies
log("\n[2/7] Building field vocabularies...")
vocabularies = {field: set() for field in TRAIN_FIELDS}
try:
for i, example in enumerate(dataset):
if i % 500 == 0:
log(f" Processing example {i}/{len(dataset)}...")
for field in TRAIN_FIELDS:
value = example.get(field, '')
if value and value.strip():
vocabularies[field].add(value.upper().strip())
vocabularies = {k: sorted(list(v)) for k, v in vocabularies.items()}
log(" βœ“ Vocabulary sizes:")
for field, vocab in vocabularies.items():
log(f" {field.upper()}: {len(vocab)} unique values")
except Exception as e:
log(f" βœ— FAILED to build vocabularies!")
log(f" Error: {str(e)}")
traceback.print_exc()
sys.exit(1)
# Create label mappings
try:
label2id = {
field: {label: idx for idx, label in enumerate(vocab)}
for field, vocab in vocabularies.items()
}
id2label = {
field: {idx: label for label, idx in mapping.items()}
for field, mapping in label2id.items()
}
log(" βœ“ Label mappings created")
except Exception as e:
log(f" βœ— FAILED to create label mappings!")
log(f" Error: {str(e)}")
traceback.print_exc()
sys.exit(1)
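    # Quick sanity check (illustrative; by construction this always holds):
    # id2label must exactly invert label2id for every field.
    for field in TRAIN_FIELDS:
        for label, idx in label2id[field].items():
            assert id2label[field][idx] == label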
log("\n[3/7] Loading SAPBERT model...")
log(f" Base model: {BASE_MODEL}")
try:
tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL)
log(" βœ“ Tokenizer loaded")
base_model = AutoModel.from_pretrained(BASE_MODEL)
log(" βœ“ Base model loaded successfully!")
except Exception as e:
log(f" βœ— FAILED to load SAPBERT model!")
log(f" Error: {str(e)}")
traceback.print_exc()
sys.exit(1)
# Multi-label classifier with LOINC metadata as input
class LOINC2SDTMClassifier(nn.Module):
def __init__(self, base_model, num_classes_dict):
super().__init__()
self.encoder = base_model
self.config = base_model.config
self.hidden_size = base_model.config.hidden_size
self.classifiers = nn.ModuleDict({
field: nn.Sequential(
nn.Linear(self.hidden_size, self.hidden_size // 2),
nn.ReLU(),
nn.Dropout(0.1),
nn.Linear(self.hidden_size // 2, num_classes)
)
for field, num_classes in num_classes_dict.items()
})
def forward(self, input_ids, attention_mask, labels=None):
outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask)
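            # Pool with the [CLS] token embedding; SapBERT is trained to encode
            # entity names into this token's representation.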
cls_embedding = outputs.last_hidden_state[:, 0, :]
logits = {
field: classifier(cls_embedding)
for field, classifier in self.classifiers.items()
}
loss = None
if labels is not None:
loss_fct = nn.CrossEntropyLoss(ignore_index=-100)
losses = []
for field in logits.keys():
if field in labels:
field_loss = loss_fct(logits[field], labels[field])
if not torch.isnan(field_loss):
losses.append(field_loss)
if losses:
loss = sum(losses) / len(losses)
return {'loss': loss, 'logits': logits}
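    # Illustrative decoding sketch (not executed during training): at inference
    # time each head's argmax indexes into id2label, e.g.
    #   enc = tokenizer("2345-7 Glucose MCnc Ser/Plas", return_tensors="pt")
    #   out = model(enc["input_ids"], enc["attention_mask"])
    #   pred = {f: id2label[f][int(l.argmax(-1))] for f, l in out["logits"].items()}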
try:
num_classes_dict = {field: len(vocab) for field, vocab in vocabularies.items()}
model = LOINC2SDTMClassifier(base_model, num_classes_dict)
total_params = sum(p.numel() for p in model.parameters())
trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
log(f"\n[4/7] Classifier created:")
log(f" Total parameters: {total_params:,}")
log(f" Trainable parameters: {trainable_params:,}")
log(f" βœ“ Model architecture initialized")
except Exception as e:
log(f" βœ— FAILED to create classifier!")
log(f" Error: {str(e)}")
traceback.print_exc()
sys.exit(1)
# Prepare dataset
class LOINC2SDTMDataset(torch.utils.data.Dataset):
def __init__(self, dataset, tokenizer, label2id, train_fields):
self.examples = []
log(f" Creating dataset wrapper for {len(dataset)} examples...")
for i, example in enumerate(dataset):
if i % 500 == 0:
log(f" Processed {i}/{len(dataset)} examples...")
# Create rich input combining LOINC code and metadata
loinc_code = example['loinc_code']
component = example.get('component', '')
property_val = example.get('property', '')
system = example.get('system', '')
# Rich input: LOINC code + key metadata
input_text = f"{loinc_code} {component} {property_val} {system}"
# Tokenize input
encoding = tokenizer(
input_text,
padding='max_length',
truncation=True,
max_length=64,
return_tensors='pt'
)
# Get labels for trained fields
labels = {}
for field in train_fields:
value = example.get(field, '')
if value and value.strip():
value_upper = value.upper().strip()
if value_upper in label2id[field]:
labels[field] = label2id[field][value_upper]
else:
labels[field] = -100
else:
labels[field] = -100
self.examples.append({
'input_ids': encoding['input_ids'].squeeze(0),
'attention_mask': encoding['attention_mask'].squeeze(0),
'labels': labels
})
def __len__(self):
return len(self.examples)
def __getitem__(self, idx):
return self.examples[idx]
log("\n[5/7] Preparing training data...")
try:
train_dataset = LOINC2SDTMDataset(dataset, tokenizer, label2id, TRAIN_FIELDS)
log(f" βœ“ Prepared {len(train_dataset)} training examples")
except Exception as e:
log(f" βœ— FAILED to prepare training data!")
log(f" Error: {str(e)}")
traceback.print_exc()
sys.exit(1)
# Custom collator
def collate_fn(batch):
input_ids = torch.stack([item['input_ids'] for item in batch])
attention_mask = torch.stack([item['attention_mask'] for item in batch])
labels = {
field: torch.tensor([item['labels'][field] for item in batch])
for field in TRAIN_FIELDS
}
return {
'input_ids': input_ids,
'attention_mask': attention_mask,
'labels': labels
}
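    # A collated batch looks like {'input_ids': (B, 64), 'attention_mask': (B, 64),
    # 'labels': {field: (B,)}}; missing values are encoded as -100 so that
    # CrossEntropyLoss(ignore_index=-100) skips them in the loss.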
# Training args
training_args = TrainingArguments(
output_dir=OUTPUT_DIR,
num_train_epochs=10,
per_device_train_batch_size=32,
gradient_accumulation_steps=1,
learning_rate=2e-5,
lr_scheduler_type="cosine",
warmup_ratio=0.1,
logging_steps=10, # More frequent logging
logging_first_step=True,
save_strategy="epoch",
save_total_limit=2,
fp16=False,
bf16=True,
report_to="none",
push_to_hub=True,
hub_model_id=f"{HF_USERNAME}/{OUTPUT_DIR}",
hub_strategy="end",
)
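    # Note: bf16=True assumes an Ampere-or-newer GPU (e.g. the A10G targeted
    # below); on older hardware switch to fp16=True or plain FP32.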
log("\n[6/7] Training configuration:")
log(f" Epochs: {training_args.num_train_epochs}")
log(f" Batch size: {training_args.per_device_train_batch_size}")
log(f" Learning rate: {training_args.learning_rate}")
log(f" Steps per epoch: ~{len(train_dataset) // training_args.per_device_train_batch_size}")
log(f" Total steps: ~{(len(train_dataset) // training_args.per_device_train_batch_size) * training_args.num_train_epochs}")
log(f" Input: LOINC code + metadata (component, property, system)")
log(f" Output: {len(TRAIN_FIELDS)} SDTM fields")
log(f" Mixed precision: {'BF16' if training_args.bf16 else 'FP16' if training_args.fp16 else 'FP32'}")
# Custom trainer
class MultiLabelTrainer(Trainer):
def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=None):
labels = inputs.pop("labels")
outputs = model(**inputs, labels=labels)
loss = outputs["loss"]
# Log loss periodically
if self.state.global_step % 10 == 0:
log(f" Step {self.state.global_step}: loss = {loss.item():.4f}")
return (loss, outputs) if return_outputs else loss
def get_train_dataloader(self):
from torch.utils.data import DataLoader
return DataLoader(
self.train_dataset,
batch_size=self.args.per_device_train_batch_size,
collate_fn=collate_fn,
shuffle=True
)
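    # Design note: overriding get_train_dataloader keeps the dict-valued labels
    # intact; passing data_collator=collate_fn to the Trainer constructor would
    # likely work as well, since Trainer hands batching off to the collator.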
try:
trainer = MultiLabelTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
)
log(" βœ“ Trainer initialized")
except Exception as e:
log(f" βœ— FAILED to initialize trainer!")
log(f" Error: {str(e)}")
traceback.print_exc()
sys.exit(1)
log("\n[7/7] Starting training...")
log("=" * 80)
log("This will take approximately 15-20 minutes on A10G GPU")
log("=" * 80)
try:
trainer.train()
log("\n" + "=" * 80)
log("βœ“ Training completed successfully!")
log("=" * 80)
except Exception as e:
log(f"\nβœ— TRAINING FAILED!")
log(f"Error: {str(e)}")
traceback.print_exc()
sys.exit(1)
log("\nSaving model and vocabularies...")
try:
trainer.save_model(OUTPUT_DIR)
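        # The classifier is a plain nn.Module, so Trainer.save_model stores its
        # state dict rather than a full PreTrainedModel checkpoint.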
log(" βœ“ Model saved")
tokenizer.save_pretrained(OUTPUT_DIR)
log(" βœ“ Tokenizer saved")
# Save vocabularies and metadata
vocab_file = os.path.join(OUTPUT_DIR, "vocabularies.json")
with open(vocab_file, 'w') as f:
json.dump({
'vocabularies': vocabularies,
'label2id': label2id,
'id2label': id2label,
'train_fields': TRAIN_FIELDS
}, f, indent=2)
log(" βœ“ Vocabularies saved")
except Exception as e:
log(f" βœ— FAILED to save model!")
log(f" Error: {str(e)}")
traceback.print_exc()
sys.exit(1)
log("\nPushing to Hub...")
try:
trainer.push_to_hub()
log(" βœ“ Model pushed to Hub")
except Exception as e:
log(f" βœ— FAILED to push to Hub!")
log(f" Error: {str(e)}")
traceback.print_exc()
sys.exit(1)
log("\n" + "=" * 80)
log("βœ“ SUCCESS! Model training and upload complete!")
log("=" * 80)
log(f"Model available at: https://huggingface.co/{HF_USERNAME}/{OUTPUT_DIR}")
log(f"Trained on {len(TRAIN_FIELDS)} SDTM fields with rich LOINC metadata")
log(f"Total examples: {len(train_dataset)}")
log("=" * 80)
except Exception as e:
log("\n" + "=" * 80)
log("βœ— FATAL ERROR - Training script crashed!")
log("=" * 80)
log(f"Error: {str(e)}")
log("\nFull traceback:")
traceback.print_exc()
sys.exit(1)