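"""
Gradio app for breast cancer survival prediction with BERT.

Supports first and second fine-tuning (Full Fine-tuning, LoRA, AdaLoRA),
a baseline comparison against untrained BERT, side-by-side evaluation on
new test data, and single-text prediction.
"""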
import gradio as gr
import pandas as pd
import torch
from torch import nn
from transformers import (
BertTokenizer,
BertForSequenceClassification,
TrainingArguments,
Trainer
)
from datasets import Dataset
from sklearn.metrics import (
accuracy_score,
precision_recall_fscore_support,
roc_auc_score,
confusion_matrix
)
import numpy as np
from datetime import datetime
import json
import os
import gc
import random
# Set Gradio to use English
os.environ['GRADIO_ANALYTICS_ENABLED'] = 'False'
# ==================== 🎲 Random Seed Setup ====================
RANDOM_SEED = 42
def set_seed(seed=42):
"""
⭐ Set all random seeds to ensure complete reproducibility ⭐
"""
print(f"\n{'='*70}")
print(f"🎲 Setting random seed: {seed}")
print(f"{'='*70}")
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
os.environ['PYTHONHASHSEED'] = str(seed)
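    # Required for deterministic cuBLAS matmuls on CUDA >= 10.2 when
    # torch.use_deterministic_algorithms(True) is enabled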
os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'
    try:
        torch.use_deterministic_algorithms(True)
    except Exception:
        # Older PyTorch versions may not support this API
        pass
print(f"✅ Random seed setup complete - Results should be fully reproducible")
print(f" - Python random seed: {seed}")
print(f" - NumPy seed: {seed}")
print(f" - PyTorch seed: {seed}")
print(f" - CUDA deterministic mode: ON")
print(f"{'='*70}\n")
# Set seed immediately on program startup
set_seed(RANDOM_SEED)
# PEFT related imports (LoRA and AdaLoRA)
try:
from peft import (
LoraConfig,
AdaLoraConfig,
get_peft_model,
TaskType,
PeftModel
)
PEFT_AVAILABLE = True
except ImportError:
PEFT_AVAILABLE = False
print("⚠️ PEFT not installed, LoRA and AdaLoRA features will be unavailable")
# Check GPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
LAST_MODEL_PATH = None
LAST_TOKENIZER = None
LAST_TUNING_METHOD = None
# ==================== Your Original Functions - Completely Unchanged ====================
def evaluate_baseline_bert(eval_dataset, df_clean):
"""
Evaluate original BERT (never seen the data)
This part is extracted from your cell 5 baseline comparison logic
"""
print("\n" + "=" * 80)
print("Evaluating Baseline Pure BERT (Never Seen Data)")
print("=" * 80)
# Load pure BERT
baseline_tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
baseline_model = BertForSequenceClassification.from_pretrained(
"bert-base-uncased",
num_labels=2
).to(device)
baseline_model.eval()
print(" ⚠️ This model has not been trained with your data at all")
# Reprocess validation set
baseline_dataset = Dataset.from_pandas(df_clean[['text', 'label']])
def baseline_preprocess(examples):
return baseline_tokenizer(examples['text'], truncation=True, padding='max_length', max_length=256)
baseline_tokenized = baseline_dataset.map(baseline_preprocess, batched=True)
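    # Use the same test_size and seed as the main pipeline (Step 2) so the
    # baseline is scored on the identical 20% validation split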
baseline_split = baseline_tokenized.train_test_split(test_size=0.2, seed=42)
baseline_eval_dataset = baseline_split['test']
# Create Baseline Trainer
baseline_trainer_args = TrainingArguments(
output_dir='./temp_baseline',
per_device_eval_batch_size=32,
report_to="none"
)
baseline_trainer = Trainer(
model=baseline_model,
args=baseline_trainer_args,
)
# Evaluate Baseline
print("📄 Evaluating pure BERT...")
predictions_output = baseline_trainer.predict(baseline_eval_dataset)
all_preds = predictions_output.predictions.argmax(-1)
all_labels = predictions_output.label_ids
probs = torch.nn.functional.softmax(torch.tensor(predictions_output.predictions), dim=-1)[:, 1].numpy()
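    # Column 1 of the softmax output is the predicted probability of death
    # (label 1), the continuous score that ROC AUC requires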
# Calculate metrics
precision, recall, f1, _ = precision_recall_fscore_support(
all_labels, all_preds, average='binary', pos_label=1, zero_division=0
)
acc = accuracy_score(all_labels, all_preds)
    try:
        auc = roc_auc_score(all_labels, probs)
    except ValueError:
        # AUC is undefined when only one class is present
        auc = 0.0
cm = confusion_matrix(all_labels, all_preds)
if cm.shape == (2, 2):
tn, fp, fn, tp = cm.ravel()
sensitivity = tp / (tp + fn) if (tp + fn) > 0 else 0
specificity = tn / (tn + fp) if (tn + fp) > 0 else 0
else:
sensitivity = specificity = 0
tn = fp = fn = tp = 0
baseline_results = {
'f1': float(f1),
'accuracy': float(acc),
'precision': float(precision),
'recall': float(recall),
'sensitivity': float(sensitivity),
'specificity': float(specificity),
'auc': float(auc),
'tp': int(tp),
'tn': int(tn),
'fp': int(fp),
'fn': int(fn)
}
print("✅ Baseline evaluation complete")
# Cleanup
del baseline_model
del baseline_trainer
torch.cuda.empty_cache()
gc.collect()
return baseline_results
def run_original_code_with_tuning(
file_path,
weight_multiplier,
epochs,
batch_size,
learning_rate,
warmup_steps,
tuning_method,
best_metric,
# LoRA parameters
lora_r,
lora_alpha,
lora_dropout,
lora_modules,
# AdaLoRA parameters
adalora_init_r,
adalora_target_r,
adalora_tinit,
adalora_tfinal,
adalora_delta_t,
# New: whether this is second fine-tuning
is_second_finetuning=False,
base_model_path=None
):
"""
Your original code + different tuning methods + Baseline comparison
Core logic unchanged, just added conditional logic in model initialization
New parameters:
- is_second_finetuning: whether this is second fine-tuning
- base_model_path: path to first fine-tuning model (only used for second fine-tuning)
"""
# ⭐⭐⭐ Re-set random seed before training to ensure reproducibility ⭐⭐⭐
print("\n" + "="*80)
print("🔄 Re-confirming random seed before training...")
print("="*80)
set_seed(RANDOM_SEED)
global LAST_MODEL_PATH, LAST_TOKENIZER, LAST_TUNING_METHOD
# ==================== Clear memory (before training) ====================
torch.cuda.empty_cache()
gc.collect()
print("🧹 Memory cleared")
# ==================== Your original code starts here ====================
# Read uploaded file
df_original = pd.read_csv(file_path)
df_clean = pd.DataFrame({
'text': df_original['Text'],
'label': df_original['label']
})
df_clean = df_clean.dropna()
training_type = "Second Fine-tuning" if is_second_finetuning else "First Fine-tuning"
print("\n" + "=" * 80)
print(f"Breast Cancer Survival Prediction BERT {training_type} - {tuning_method} Method")
print("=" * 80)
print(f"Start Time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
print(f"Training Type: {training_type}")
print(f"Fine-tuning Method: {tuning_method}")
print(f"Optimization Metric: {best_metric}")
if is_second_finetuning:
print(f"Base Model: {base_model_path}")
print("=" * 80)
# Load Tokenizer
print("\n📦 Loading BERT Tokenizer...")
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
print("✅ Tokenizer loaded")
# Evaluation function - completely your original code, unchanged
def compute_metrics(pred):
labels = pred.label_ids
preds = pred.predictions.argmax(-1)
probs = torch.nn.functional.softmax(torch.tensor(pred.predictions), dim=-1)[:, 1].numpy()
precision, recall, f1, _ = precision_recall_fscore_support(
labels, preds, average='binary', pos_label=1, zero_division=0
)
acc = accuracy_score(labels, preds)
        try:
            auc = roc_auc_score(labels, probs)
        except ValueError:
            # AUC is undefined when only one class is present
            auc = 0.0
cm = confusion_matrix(labels, preds)
if cm.shape == (2, 2):
tn, fp, fn, tp = cm.ravel()
else:
if len(np.unique(preds)) == 1:
if preds[0] == 0:
tn, fp, fn, tp = sum(labels == 0), 0, sum(labels == 1), 0
else:
tn, fp, fn, tp = 0, sum(labels == 0), 0, sum(labels == 1)
else:
tn = fp = fn = tp = 0
sensitivity = tp / (tp + fn) if (tp + fn) > 0 else 0
specificity = tn / (tn + fp) if (tn + fp) > 0 else 0
return {
'accuracy': acc, 'f1': f1, 'precision': precision, 'recall': recall,
'auc': auc, 'sensitivity': sensitivity, 'specificity': specificity,
'tp': int(tp), 'tn': int(tn), 'fp': int(fp), 'fn': int(fn)
}
# ============================================================================
# Step 1: Prepare data (no balancing) - Your original code
# ============================================================================
print("\n" + "=" * 80)
print("Step 1: Prepare data (maintain original ratio)")
print("=" * 80)
print(f"\nOriginal data distribution:")
print(f" Survival (0): {sum(df_clean['label']==0)} samples ({sum(df_clean['label']==0)/len(df_clean)*100:.1f}%)")
print(f" Death (1): {sum(df_clean['label']==1)} samples ({sum(df_clean['label']==1)/len(df_clean)*100:.1f}%)")
ratio = sum(df_clean['label']==0) / sum(df_clean['label']==1)
print(f" Imbalance ratio: {ratio:.1f}:1")
# ============================================================================
# Step 2: Tokenization - Your original code
# ============================================================================
print("\n" + "=" * 80)
print("Step 2: Tokenization")
print("=" * 80)
dataset = Dataset.from_pandas(df_clean[['text', 'label']])
def preprocess_function(examples):
return tokenizer(examples['text'], truncation=True, padding='max_length', max_length=256)
tokenized_dataset = dataset.map(preprocess_function, batched=True)
train_test_split = tokenized_dataset.train_test_split(test_size=0.2, seed=42)
train_dataset = train_test_split['train']
eval_dataset = train_test_split['test']
print(f"\n✅ Dataset preparation complete:")
print(f" Training set: {len(train_dataset)} samples")
print(f" Validation set: {len(eval_dataset)} samples")
# ============================================================================
# Step 3: Set weights - Your original code
# ============================================================================
print("\n" + "=" * 80)
print(f"Step 3: Set class weights({weight_multiplier}x multiplier)")
print("=" * 80)
weight_0 = 1.0
weight_1 = ratio * weight_multiplier
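    # Worked example: with a 4:1 imbalance ratio and a 0.8 multiplier, the
    # death class gets weight 4.0 * 0.8 = 3.2 versus 1.0 for survival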
print(f"\nWeight configuration:")
print(f" Multiplier: {weight_multiplier}x")
print(f" Survival class weight: {weight_0:.3f}")
print(f" Death class weight: {weight_1:.3f} (= {ratio:.1f} × {weight_multiplier})")
class_weights = torch.tensor([weight_0, weight_1], dtype=torch.float).to(device)
# ============================================================================
    # Step 4: Train the model - Second Fine-tuning logic added here
# ============================================================================
print("\n" + "=" * 80)
print(f"Step 4: Training {tuning_method} BERT model ({training_type})")
print("=" * 80)
print(f"\n🔄 Initializing model ({tuning_method})...")
# 【New】Second Fine-tuning: Load First Fine-tuning model
if is_second_finetuning and base_model_path:
print(f"📦 Loading First Fine-tuning model: {base_model_path}")
# Reading first model info
with open('./saved_models_list.json', 'r') as f:
models_list = json.load(f)
base_model_info = None
for model_info in models_list:
if model_info['model_path'] == base_model_path:
base_model_info = model_info
break
if base_model_info is None:
raise ValueError(f"Cannot find base model info: {base_model_path}")
base_tuning_method = base_model_info['tuning_method']
print(f" First Fine-tuningMethod: {base_tuning_method}")
# Loading model based on first method
if base_tuning_method in ["LoRA", "AdaLoRA"] and PEFT_AVAILABLE:
# Loading PEFT model
base_bert = BertForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)
model = PeftModel.from_pretrained(base_bert, base_model_path)
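            # PeftModel.from_pretrained attaches the saved adapter weights on
            # top of the freshly initialized base BERT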
print(f" ✅ Loaded {base_tuning_method} model")
else:
# Loading regular model
model = BertForSequenceClassification.from_pretrained(base_model_path, num_labels=2)
print(f" ✅ Loaded Full Fine-tuning model")
model = model.to(device)
print(f" ⚠️ Note: Second Fine-tuning will use the same method as first fine-tuning ({base_tuning_method})")
        # The second fine-tuning is forced to reuse the first round's method
tuning_method = base_tuning_method
else:
# 【Original Logic】First Fine-tuning: Start from pure BERT
model = BertForSequenceClassification.from_pretrained(
"bert-base-uncased", num_labels=2, problem_type="single_label_classification"
)
# Setup model based on selected fine-tuning method
if tuning_method == "Full Fine-tuning":
# Your original method - unchanged
model = model.to(device)
print("✅ Using Full Fine-tuning (all parameters trainable)")
trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
all_params = sum(p.numel() for p in model.parameters())
print(f" Trainable parameters: {trainable_params:,} / {all_params:,} ({100 * trainable_params / all_params:.2f}%)")
elif tuning_method == "LoRA" and PEFT_AVAILABLE:
# LoRA configuration
target_modules = lora_modules.split(",") if lora_modules else ["query", "value"]
target_modules = [m.strip() for m in target_modules]
peft_config = LoraConfig(
task_type=TaskType.SEQ_CLS,
r=int(lora_r),
lora_alpha=int(lora_alpha),
lora_dropout=float(lora_dropout),
target_modules=target_modules
)
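        # LoRA freezes the base weights and trains a low-rank update BA per
        # target module; the update is scaled by lora_alpha / r (2.0 at the
        # default 32/16)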
model = get_peft_model(model, peft_config)
model = model.to(device)
print("✅ Using LoRA fine-tuning")
print(f" LoRA rank (r): {lora_r}")
print(f" LoRA alpha: {lora_alpha}")
print(f" LoRA dropout: {lora_dropout}")
print(f" Target Modules: {target_modules}")
model.print_trainable_parameters()
elif tuning_method == "AdaLoRA" and PEFT_AVAILABLE:
# AdaLoRA configuration
target_modules = lora_modules.split(",") if lora_modules else ["query", "value"]
target_modules = [m.strip() for m in target_modules]
peft_config = AdaLoraConfig(
task_type=TaskType.SEQ_CLS,
init_r=int(adalora_init_r),
target_r=int(adalora_target_r),
tinit=int(adalora_tinit),
tfinal=int(adalora_tfinal),
deltaT=int(adalora_delta_t),
lora_alpha=int(lora_alpha),
lora_dropout=float(lora_dropout),
target_modules=target_modules
)
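        # AdaLoRA starts every target module at init_r and prunes the total
        # rank budget down to target_r: no pruning for the first tinit steps,
        # a budget-allocation pass every deltaT steps, and ranks fixed for the
        # final tfinal steps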
model = get_peft_model(model, peft_config)
model = model.to(device)
print("✅ Using AdaLoRA fine-tuning")
print(f" Initial rank: {adalora_init_r}")
print(f" Target rank: {adalora_target_r}")
print(f" Tinit: {adalora_tinit}, Tfinal: {adalora_tfinal}, DeltaT: {adalora_delta_t}")
model.print_trainable_parameters()
else:
# Default to Full Fine-tuning
model = model.to(device)
print("⚠️ PEFT not installed or invalid method, using Full Fine-tuning")
# Custom Trainer (using weights) - Your original code
class WeightedTrainer(Trainer):
        # **kwargs absorbs extra arguments (e.g. num_items_in_batch) passed by
        # newer transformers versions
        def compute_loss(self, model, inputs, return_outputs=False, **kwargs):
labels = inputs.pop("labels")
outputs = model(**inputs)
loss_fct = nn.CrossEntropyLoss(weight=class_weights)
loss = loss_fct(outputs.logits.view(-1, 2), labels.view(-1))
return (loss, outputs) if return_outputs else loss
# Training configuration - adjusted based on selected metric
metric_map = {
"f1": "f1",
"accuracy": "accuracy",
"precision": "precision",
"recall": "recall",
"sensitivity": "sensitivity",
"specificity": "specificity",
"auc": "auc"
}
training_args = TrainingArguments(
output_dir='./results_weight',
num_train_epochs=epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size*2,
warmup_steps=warmup_steps,
weight_decay=0.01,
learning_rate=learning_rate,
logging_steps=50,
evaluation_strategy="epoch",
save_strategy="epoch",
load_best_model_at_end=True,
metric_for_best_model=metric_map.get(best_metric, "f1"),
report_to="none",
greater_is_better=True,
seed=RANDOM_SEED, # ⭐ Add random seed
data_seed=RANDOM_SEED, # ⭐ Data loading seed
dataloader_num_workers=0 # ⭐ Single thread to ensure reproducibility
)
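    # load_best_model_at_end requires evaluation_strategy and save_strategy to
    # match (both "epoch" here); the checkpoint that maximizes
    # metric_for_best_model is restored when training finishes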
trainer = WeightedTrainer(
model=model, args=training_args,
train_dataset=train_dataset, eval_dataset=eval_dataset,
compute_metrics=compute_metrics
)
print(f"\n🚀 Starting training ({epochs} epochs)...")
print(f" Optimization Metric: {best_metric}")
print("-" * 80)
trainer.train()
print("\n✅ model training complete!")
# Evaluating model
print("\n📊 Evaluating model...")
results = trainer.evaluate()
print(f"\n{training_type} {tuning_method} BERT ({weight_multiplier}x weights) Performance:")
print(f" F1 Score: {results['eval_f1']:.4f}")
print(f" Accuracy: {results['eval_accuracy']:.4f}")
print(f" Precision: {results['eval_precision']:.4f}")
print(f" Recall: {results['eval_recall']:.4f}")
print(f" Sensitivity: {results['eval_sensitivity']:.4f}")
print(f" Specificity: {results['eval_specificity']:.4f}")
print(f" AUC: {results['eval_auc']:.4f}")
print(f" Confusion Matrix: Tp={results['eval_tp']}, Tn={results['eval_tn']}, "
f"Fp={results['eval_fp']}, Fn={results['eval_fn']}")
# ============================================================================
    # Step 5: Baseline comparison (Pure BERT) - only runs during First Fine-tuning
# ============================================================================
if not is_second_finetuning:
print("\n" + "=" * 80)
print("Step 5: Baseline comparison - Pure BERT (never seen data)")
print("=" * 80)
baseline_results = evaluate_baseline_bert(eval_dataset, df_clean)
# ============================================================================
        # Step 6: Comparison results
# ============================================================================
print("\n" + "=" * 80)
print(f"📊 【Comparison Results】Pure BERT vs {tuning_method} BERT")
print("=" * 80)
print("\n📋 Detailed Comparison Table:")
print("-" * 100)
print(f"{'metric':<15} {'Pure BERT':<20} {tuning_method:<20} {'Improvement':<20}")
print("-" * 100)
metrics_to_compare = [
('F1 Score', 'f1', 'eval_f1'),
('Accuracy', 'accuracy', 'eval_accuracy'),
('Precision', 'precision', 'eval_precision'),
('Recall', 'recall', 'eval_recall'),
('Sensitivity', 'sensitivity', 'eval_sensitivity'),
('Specificity', 'specificity', 'eval_specificity'),
('AUC', 'auc', 'eval_auc')
]
for name, baseline_key, finetuned_key in metrics_to_compare:
baseline_val = baseline_results[baseline_key]
finetuned_val = results[finetuned_key]
improvement = ((finetuned_val - baseline_val) / baseline_val * 100) if baseline_val > 0 else 0
print(f"{name:<15} {baseline_val:<20.4f} {finetuned_val:<20.4f} {improvement:>+18.1f}%")
print("-" * 100)
else:
baseline_results = None
    # Save the model
training_label = "second" if is_second_finetuning else "first"
save_dir = f'./breast_cancer_bert_{tuning_method.lower().replace(" ", "_")}_{training_label}_{datetime.now().strftime("%Y%m%d_%H%M%S")}'
if tuning_method in ["LoRA", "AdaLoRA"] and PEFT_AVAILABLE:
# PEFT modelSaving方式
model.save_pretrained(save_dir)
tokenizer.save_pretrained(save_dir)
else:
# regularmodelSaving方式
model.save_pretrained(save_dir)
tokenizer.save_pretrained(save_dir)
    # Save model information to a JSON file (used by the prediction page selector)
model_info = {
'model_path': save_dir,
'tuning_method': tuning_method,
'training_type': training_type,
'best_metric': best_metric,
'best_metric_value': float(results[f'eval_{metric_map.get(best_metric, "f1")}']),
'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
'weight_multiplier': weight_multiplier,
'epochs': epochs,
'is_second_finetuning': is_second_finetuning,
'base_model_path': base_model_path if is_second_finetuning else None
}
    # Read the existing model list
models_list_file = './saved_models_list.json'
if os.path.exists(models_list_file):
with open(models_list_file, 'r') as f:
models_list = json.load(f)
else:
models_list = []
    # Append the new model information
models_list.append(model_info)
    # Save the updated list
with open(models_list_file, 'w') as f:
json.dump(models_list, f, indent=2)
    # Store in globals for use by the prediction page
LAST_MODEL_PATH = save_dir
LAST_TOKENIZER = tokenizer
LAST_TUNING_METHOD = tuning_method
print(f"\n💾 modelalreadysaved to: {save_dir}")
print("\n" + "=" * 80)
print("🎉 Training complete!")
print("=" * 80)
print(f"Completion time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
# ==================== Clear Memory (After Training) ====================
del model
del trainer
torch.cuda.empty_cache()
gc.collect()
print("🧹 Post-training memory cleared")
# Add all info to results
results['tuning_method'] = tuning_method
results['training_type'] = training_type
results['best_metric'] = best_metric
results['best_metric_value'] = results[f'eval_{metric_map.get(best_metric, "f1")}']
results['baseline_results'] = baseline_results
results['model_path'] = save_dir
results['is_second_finetuning'] = is_second_finetuning
return results
# ==================== New: Test on New Data Function ====================
def test_on_new_data(test_file_path, baseline_model_path, first_model_path, second_model_path):
"""
innewtestdataupcomparisonthreemodelperformance:
1. Pure BERT (baseline)
2. First Fine-tuningmodel
3. Second Fine-tuningmodel
"""
print("\n" + "=" * 80)
print("📊 newdatatest - threemodelcomparison")
print("=" * 80)
# Loading test data
df_test = pd.read_csv(test_file_path)
df_clean = pd.DataFrame({
'text': df_test['Text'],
'label': df_test['label']
})
df_clean = df_clean.dropna()
print(f"\nTest data:")
print(f" Total samples: {len(df_clean)}")
print(f" Survival (0): {sum(df_clean['label']==0)} samples")
print(f" Death (1): {sum(df_clean['label']==1)} samples")
# Preparing test data
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
test_dataset = Dataset.from_pandas(df_clean[['text', 'label']])
def preprocess_function(examples):
return tokenizer(examples['text'], truncation=True, padding='max_length', max_length=256)
test_tokenized = test_dataset.map(preprocess_function, batched=True)
# Evaluation function
def evaluate_model(model, dataset_name):
model.eval()
trainer_args = TrainingArguments(
output_dir='./temp_test',
per_device_eval_batch_size=32,
report_to="none"
)
trainer = Trainer(
model=model,
args=trainer_args,
)
predictions_output = trainer.predict(test_tokenized)
all_preds = predictions_output.predictions.argmax(-1)
all_labels = predictions_output.label_ids
probs = torch.nn.functional.softmax(torch.tensor(predictions_output.predictions), dim=-1)[:, 1].numpy()
precision, recall, f1, _ = precision_recall_fscore_support(
all_labels, all_preds, average='binary', pos_label=1, zero_division=0
)
acc = accuracy_score(all_labels, all_preds)
        try:
            auc = roc_auc_score(all_labels, probs)
        except ValueError:
            # AUC is undefined when only one class is present
            auc = 0.0
cm = confusion_matrix(all_labels, all_preds)
if cm.shape == (2, 2):
tn, fp, fn, tp = cm.ravel()
sensitivity = tp / (tp + fn) if (tp + fn) > 0 else 0
specificity = tn / (tn + fp) if (tn + fp) > 0 else 0
else:
sensitivity = specificity = 0
tn = fp = fn = tp = 0
results = {
'f1': float(f1),
'accuracy': float(acc),
'precision': float(precision),
'recall': float(recall),
'sensitivity': float(sensitivity),
'specificity': float(specificity),
'auc': float(auc),
'tp': int(tp),
'tn': int(tn),
'fp': int(fp),
'fn': int(fn)
}
print(f"\n✅ Evaluation complete")
del trainer
torch.cuda.empty_cache()
gc.collect()
return results
all_results = {}
# 1. Evaluate Pure BERT
if baseline_model_path != "Skip":
print("\n" + "-" * 80)
print("1️⃣ Evaluate Pure BERT (Baseline)")
print("-" * 80)
baseline_model = BertForSequenceClassification.from_pretrained(
"bert-base-uncased",
num_labels=2
).to(device)
all_results['baseline'] = evaluate_model(baseline_model, "Pure BERT")
del baseline_model
torch.cuda.empty_cache()
else:
all_results['baseline'] = None
    # 2. Evaluate First Fine-tuning model
if first_model_path != "Please Select":
print("\n" + "-" * 80)
print("2️⃣ evaluationFirst Fine-tuningmodel")
print("-" * 80)
# Reading model info
with open('./saved_models_list.json', 'r') as f:
models_list = json.load(f)
first_model_info = None
for model_info in models_list:
if model_info['model_path'] == first_model_path:
first_model_info = model_info
break
if first_model_info:
tuning_method = first_model_info['tuning_method']
if tuning_method in ["LoRA", "AdaLoRA"] and PEFT_AVAILABLE:
base_model = BertForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)
first_model = PeftModel.from_pretrained(base_model, first_model_path)
first_model = first_model.to(device)
else:
first_model = BertForSequenceClassification.from_pretrained(first_model_path).to(device)
            all_results['first'] = evaluate_model(first_model, "First Fine-tuning model")
del first_model
torch.cuda.empty_cache()
else:
all_results['first'] = None
else:
all_results['first'] = None
    # 3. Evaluate Second Fine-tuning model
if second_model_path != "Please Select":
print("\n" + "-" * 80)
print("3️⃣ evaluationSecond Fine-tuningmodel")
print("-" * 80)
# Reading model info
with open('./saved_models_list.json', 'r') as f:
models_list = json.load(f)
second_model_info = None
for model_info in models_list:
if model_info['model_path'] == second_model_path:
second_model_info = model_info
break
if second_model_info:
tuning_method = second_model_info['tuning_method']
if tuning_method in ["LoRA", "AdaLoRA"] and PEFT_AVAILABLE:
base_model = BertForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)
second_model = PeftModel.from_pretrained(base_model, second_model_path)
second_model = second_model.to(device)
else:
second_model = BertForSequenceClassification.from_pretrained(second_model_path).to(device)
            all_results['second'] = evaluate_model(second_model, "Second Fine-tuning model")
del second_model
torch.cuda.empty_cache()
else:
all_results['second'] = None
else:
all_results['second'] = None
print("\n" + "=" * 80)
print("✅ New data test complete")
print("=" * 80)
return all_results
# ==================== Prediction Function (kept as-is) ====================
def predict_text(model_choice, text_input):
    """
    Prediction - select a trained model and show results from both the
    non-finetuned and the fine-tuned BERT side by side
    """
if not text_input or text_input.strip() == "":
return "Please input text", "Please input text"
try:
        # ==================== Non-finetuned BERT prediction ====================
        print("\nPredicting with non-finetuned BERT...")
baseline_tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
baseline_model = BertForSequenceClassification.from_pretrained(
"bert-base-uncased",
num_labels=2
).to(device)
baseline_model.eval()
        # Tokenize input (non-finetuned)
baseline_inputs = baseline_tokenizer(
text_input,
truncation=True,
padding='max_length',
max_length=256,
return_tensors='pt'
).to(device)
        # Predict (non-finetuned)
with torch.no_grad():
baseline_outputs = baseline_model(**baseline_inputs)
baseline_probs = torch.nn.functional.softmax(baseline_outputs.logits, dim=-1)
baseline_pred_class = baseline_probs.argmax(-1).item()
baseline_confidence = baseline_probs[0][baseline_pred_class].item()
baseline_result = "Survival" if baseline_pred_class == 0 else "Death"
baseline_prob_survive = baseline_probs[0][0].item()
baseline_prob_death = baseline_probs[0][1].item()
baseline_output = f"""
# 🔵 Non-finetuned BERT Prediction Results
## Prediction class: **{baseline_result}**
## Confidence: **{baseline_confidence:.1%}**
## Probability distribution:
- 🟢 **Survival Probability**: {baseline_prob_survive:.2%}
- 🔴 **Death Probability**: {baseline_prob_death:.2%}
---
**Note**: This is the original BERT model, not trained on any domain-specific data.
"""
# Clearing memory
del baseline_model
del baseline_tokenizer
torch.cuda.empty_cache()
        # ==================== Fine-tuned BERT prediction ====================
if model_choice == "Please train model first":
finetuned_output = """
# 🟢 Fine-tuned BERT Prediction Results
❌ No model trained yet. Please train a model first in the training page.
"""
return baseline_output, finetuned_output
        # Parse the selected model path
model_path = model_choice.split(" | ")[0].replace("Path: ", "")
        # Read model info from JSON
with open('./saved_models_list.json', 'r') as f:
models_list = json.load(f)
selected_model_info = None
for model_info in models_list:
if model_info['model_path'] == model_path:
selected_model_info = model_info
break
if selected_model_info is None:
finetuned_output = f"""
# 🟢 Fine-tuned BERT Prediction Results
❌ Cannot find model: {model_path}
"""
return baseline_output, finetuned_output
print(f"\nUsingfine-tuningmodel: {model_path}")
# Loading tokenizer
finetuned_tokenizer = BertTokenizer.from_pretrained(model_path)
        # Load the model
tuning_method = selected_model_info['tuning_method']
if tuning_method in ["LoRA", "AdaLoRA"] and PEFT_AVAILABLE:
# Loading PEFT model
base_model = BertForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)
finetuned_model = PeftModel.from_pretrained(base_model, model_path)
finetuned_model = finetuned_model.to(device)
else:
# Loading regular model
finetuned_model = BertForSequenceClassification.from_pretrained(model_path).to(device)
finetuned_model.eval()
        # Tokenize input (fine-tuned)
finetuned_inputs = finetuned_tokenizer(
text_input,
truncation=True,
padding='max_length',
max_length=256,
return_tensors='pt'
).to(device)
        # Predict (fine-tuned)
with torch.no_grad():
finetuned_outputs = finetuned_model(**finetuned_inputs)
finetuned_probs = torch.nn.functional.softmax(finetuned_outputs.logits, dim=-1)
finetuned_pred_class = finetuned_probs.argmax(-1).item()
finetuned_confidence = finetuned_probs[0][finetuned_pred_class].item()
finetuned_result = "Survival" if finetuned_pred_class == 0 else "Death"
finetuned_prob_survive = finetuned_probs[0][0].item()
finetuned_prob_death = finetuned_probs[0][1].item()
training_type_label = "Second Fine-tuning" if selected_model_info.get('is_second_finetuning', False) else "First Fine-tuning"
finetuned_output = f"""
# 🟢 Fine-tuned BERT 預測results
## Prediction class: **{finetuned_result}**
## Confidence: **{finetuned_confidence:.1%}**
## Probability distribution:
- 🟢 **Survival機率**: {finetuned_prob_survive:.2%}
- 🔴 **Death機率**: {finetuned_prob_death:.2%}
---
### modelinformation:
- **Training Type**: {training_type_label}
- **fine-tuningMethod**: {selected_model_info['tuning_method']}
- **mostoptimization metric**: {selected_model_info['best_metric']}
- **trainingduringbetween**: {selected_model_info['timestamp']}
- **modelPath**: {model_path}
---
**Note**: This prediction is for reference only. Actual medical decisions should be made by professional physicians.
"""
# Clearing memory
del finetuned_model
del finetuned_tokenizer
torch.cuda.empty_cache()
return baseline_output, finetuned_output
except Exception as e:
import traceback
error_msg = f"❌ Prediction error: {str(e)}\n\nDetailed error message:\n{traceback.format_exc()}"
return error_msg, error_msg
def get_available_models():
"""
Getallalreadytrainingmodellist
"""
models_list_file = './saved_models_list.json'
if not os.path.exists(models_list_file):
return ["請先trainingmodel"]
with open(models_list_file, 'r') as f:
models_list = json.load(f)
if len(models_list) == 0:
return ["請先trainingmodel"]
# 格式化modeloption
model_choices = []
for i, model_info in enumerate(models_list, 1):
training_type = model_info.get('training_type', 'First Fine-tuning')
choice = f"Path: {model_info['model_path']} | Type: {training_type} | Method: {model_info['tuning_method']} | Trained at: {model_info['timestamp']}"
model_choices.append(choice)
return model_choices
def get_first_finetuning_models():
"""
GetallFirst Fine-tuningmodel(用atSecond Fine-tuningSelect)
"""
models_list_file = './saved_models_list.json'
if not os.path.exists(models_list_file):
return ["請先performFirst Fine-tuning"]
with open(models_list_file, 'r') as f:
models_list = json.load(f)
    # Only return first fine-tuning models
first_models = [m for m in models_list if not m.get('is_second_finetuning', False)]
if len(first_models) == 0:
return ["請先performFirst Fine-tuning"]
model_choices = []
for model_info in first_models:
choice = f"{model_info['model_path']}"
model_choices.append(choice)
return model_choices
# ==================== Wrapper function ====================
def train_first_wrapper(
file, tuning_method, weight_mult, epochs, batch_size, lr, warmup, best_metric,
lora_r, lora_alpha, lora_dropout, lora_modules,
adalora_init_r, adalora_target_r, adalora_tinit, adalora_tfinal, adalora_delta_t
):
"""First Fine-tuning wrapper function"""
if file is None:
return "Please upload CSV file", "", ""
try:
results = run_original_code_with_tuning(
file_path=file.name,
weight_multiplier=weight_mult,
epochs=int(epochs),
batch_size=int(batch_size),
learning_rate=lr,
warmup_steps=int(warmup),
tuning_method=tuning_method,
best_metric=best_metric,
lora_r=lora_r,
lora_alpha=lora_alpha,
lora_dropout=lora_dropout,
lora_modules=lora_modules,
adalora_init_r=adalora_init_r,
adalora_target_r=adalora_target_r,
adalora_tinit=adalora_tinit,
adalora_tfinal=adalora_tfinal,
adalora_delta_t=adalora_delta_t,
is_second_finetuning=False
)
baseline_results = results['baseline_results']
        # Format output
        data_info = f"""
# 📊 Data Information (First Fine-tuning)
## 🔧 Training Configuration
- **Fine-tuning Method**: {results['tuning_method']}
- **Optimization Metric**: {results['best_metric']}
- **Best Metric Value**: {results['best_metric_value']:.4f}
## ⚙️ Training Parameters
- **Weight Multiplier**: {weight_mult}x
- **Training Epochs**: {epochs}
- **Batch Size**: {batch_size}
- **Learning Rate**: {lr}
- **Warmup Steps**: {warmup}
✅ First Fine-tuning complete! You can now perform second fine-tuning or prediction!
"""
baseline_output = f"""
# 🔵 Pure BERT (Baseline)
### 📈 Evaluation Metrics
| Metric | Value |
|------|------|
| **F1 Score** | {baseline_results['f1']:.4f} |
| **Accuracy** | {baseline_results['accuracy']:.4f} |
| **Precision** | {baseline_results['precision']:.4f} |
| **Recall** | {baseline_results['recall']:.4f} |
| **Sensitivity** | {baseline_results['sensitivity']:.4f} |
| **Specificity** | {baseline_results['specificity']:.4f} |
| **AUC** | {baseline_results['auc']:.4f} |
### 📈 Confusion Matrix
| | Predicted: Survival | Predicted: Death |
|---|-----------|-----------|
| **Actual: Survival** | TN={baseline_results['tn']} | FP={baseline_results['fp']} |
| **Actual: Death** | FN={baseline_results['fn']} | TP={baseline_results['tp']} |
"""
finetuned_output = f"""
# 🟢 First Fine-tuning BERT
### 📈 Evaluation Metrics
| Metric | Value |
|------|------|
| **F1 Score** | {results['eval_f1']:.4f} |
| **Accuracy** | {results['eval_accuracy']:.4f} |
| **Precision** | {results['eval_precision']:.4f} |
| **Recall** | {results['eval_recall']:.4f} |
| **Sensitivity** | {results['eval_sensitivity']:.4f} |
| **Specificity** | {results['eval_specificity']:.4f} |
| **AUC** | {results['eval_auc']:.4f} |
### 📈 Confusion Matrix
| | Predicted: Survival | Predicted: Death |
|---|-----------|-----------|
| **Actual: Survival** | TN={results['eval_tn']} | FP={results['eval_fp']} |
| **Actual: Death** | FN={results['eval_fn']} | TP={results['eval_tp']} |
"""
return data_info, baseline_output, finetuned_output
except Exception as e:
import traceback
error_msg = f"❌ error:{str(e)}\n\ndetailederror訊息:\n{traceback.format_exc()}"
return error_msg, "", ""
def train_second_wrapper(
base_model_choice, file, weight_mult, epochs, batch_size, lr, warmup, best_metric
):
"""Second Fine-tuning wrapper function"""
if base_model_choice == "Please perform first fine-tuning first":
return "Please train a model in the 'First Fine-tuning' page first", ""
if file is None:
return "Please upload new training data CSV file", ""
try:
        # Parse the base model path
base_model_path = base_model_choice
# Reading first model info
with open('./saved_models_list.json', 'r') as f:
models_list = json.load(f)
base_model_info = None
for model_info in models_list:
if model_info['model_path'] == base_model_path:
base_model_info = model_info
break
if base_model_info is None:
return "Cannot find base model information", ""
        # Use the first round's settings (second fine-tuning cannot change the method)
tuning_method = base_model_info['tuning_method']
        # PEFT parameters (first-round defaults)
lora_r = 16
lora_alpha = 32
lora_dropout = 0.1
lora_modules = "query,value"
adalora_init_r = 12
adalora_target_r = 8
adalora_tinit = 0
adalora_tfinal = 0
adalora_delta_t = 1
results = run_original_code_with_tuning(
file_path=file.name,
weight_multiplier=weight_mult,
epochs=int(epochs),
batch_size=int(batch_size),
learning_rate=lr,
warmup_steps=int(warmup),
tuning_method=tuning_method,
best_metric=best_metric,
lora_r=lora_r,
lora_alpha=lora_alpha,
lora_dropout=lora_dropout,
lora_modules=lora_modules,
adalora_init_r=adalora_init_r,
adalora_target_r=adalora_target_r,
adalora_tinit=adalora_tinit,
adalora_tfinal=adalora_tfinal,
adalora_delta_t=adalora_delta_t,
is_second_finetuning=True,
base_model_path=base_model_path
)
data_info = f"""
# 📊 Second Fine-tuningresults
## 🔧 trainingconfigurationplace
- **basemodel**: {base_model_path}
- **Fine-tuning Method**: {results['tuning_method']} (inherited from first)
- **mostoptimization metric**: {results['best_metric']}
- **mostbest metric value**: {results['best_metric_value']:.4f}
## ⚙️ trainingparameters
- **Weight Multiplier**: {weight_mult}x
- **Training Epochs**: {epochs}
- **Batch Size**: {batch_size}
- **Learning Rate**: {lr}
- **Warmup Steps**: {warmup}
✅ Second Fine-tuningcomplete!can make predictionsornewdatatest!
"""
finetuned_output = f"""
# 🟢 Second Fine-tuning BERT
### 📈 Evaluation Metrics
| Metric | Value |
|------|------|
| **F1 Score** | {results['eval_f1']:.4f} |
| **Accuracy** | {results['eval_accuracy']:.4f} |
| **Precision** | {results['eval_precision']:.4f} |
| **Recall** | {results['eval_recall']:.4f} |
| **Sensitivity** | {results['eval_sensitivity']:.4f} |
| **Specificity** | {results['eval_specificity']:.4f} |
| **AUC** | {results['eval_auc']:.4f} |
### 📈 Confusion Matrix
| | Predicted: Survival | Predicted: Death |
|---|-----------|-----------|
| **Actual: Survival** | TN={results['eval_tn']} | FP={results['eval_fp']} |
| **Actual: Death** | FN={results['eval_fn']} | TP={results['eval_tp']} |
"""
return data_info, finetuned_output
except Exception as e:
import traceback
error_msg = f"❌ error:{str(e)}\n\ndetailederror訊息:\n{traceback.format_exc()}"
return error_msg, ""
def test_new_data_wrapper(test_file, baseline_choice, first_choice, second_choice):
"""Wrapper function for testing on new data"""
if test_file is None:
return "Please upload test data CSV file", "", ""
try:
all_results = test_on_new_data(
test_file.name,
baseline_choice,
first_choice,
second_choice
)
        # Format output
outputs = []
# 1. Pure BERT
if all_results['baseline']:
r = all_results['baseline']
baseline_output = f"""
# 🔵 Pure BERT (Baseline)
| Metric | Value |
|------|------|
| **F1 Score** | {r['f1']:.4f} |
| **Accuracy** | {r['accuracy']:.4f} |
| **Precision** | {r['precision']:.4f} |
| **Recall** | {r['recall']:.4f} |
| **Sensitivity** | {r['sensitivity']:.4f} |
| **Specificity** | {r['specificity']:.4f} |
| **AUC** | {r['auc']:.4f} |
### Confusion Matrix
| | Predicted: Survival | Predicted: Death |
|---|-----------|-----------|
| **Actual: Survival** | TN={r['tn']} | FP={r['fp']} |
| **Actual: Death** | FN={r['fn']} | TP={r['tp']} |
"""
else:
baseline_output = "Not selected to evaluate Pure BERT"
outputs.append(baseline_output)
# 2. First Fine-tuning
if all_results['first']:
r = all_results['first']
first_output = f"""
# 🟢 First Fine-tuning Model
| Metric | Value |
|------|------|
| **F1 Score** | {r['f1']:.4f} |
| **Accuracy** | {r['accuracy']:.4f} |
| **Precision** | {r['precision']:.4f} |
| **Recall** | {r['recall']:.4f} |
| **Sensitivity** | {r['sensitivity']:.4f} |
| **Specificity** | {r['specificity']:.4f} |
| **AUC** | {r['auc']:.4f} |
### Confusion Matrix
| | Predicted: Survival | Predicted: Death |
|---|-----------|-----------|
| **Actual: Survival** | TN={r['tn']} | FP={r['fp']} |
| **Actual: Death** | FN={r['fn']} | TP={r['tp']} |
"""
else:
first_output = "No first fine-tuning model selected"
outputs.append(first_output)
# 3. Second Fine-tuning
if all_results['second']:
r = all_results['second']
second_output = f"""
# 🟡 Second Fine-tuning Model
| Metric | Value |
|------|------|
| **F1 Score** | {r['f1']:.4f} |
| **Accuracy** | {r['accuracy']:.4f} |
| **Precision** | {r['precision']:.4f} |
| **Recall** | {r['recall']:.4f} |
| **Sensitivity** | {r['sensitivity']:.4f} |
| **Specificity** | {r['specificity']:.4f} |
| **AUC** | {r['auc']:.4f} |
### Confusion Matrix
| | Predicted: Survival | Predicted: Death |
|---|-----------|-----------|
| **Actual: Survival** | TN={r['tn']} | FP={r['fp']} |
| **Actual: Death** | FN={r['fn']} | TP={r['tp']} |
"""
else:
second_output = "No second fine-tuning model selected"
outputs.append(second_output)
return outputs[0], outputs[1], outputs[2]
except Exception as e:
import traceback
error_msg = f"❌ Error: {str(e)}\n\nDetailed error message:\n{traceback.format_exc()}"
return error_msg, "", ""
# ============================================================================
# Gradio interface
# ============================================================================
with gr.Blocks(title="BERT Second Fine-tuning Platform", theme=gr.themes.Soft()) as demo:
gr.Markdown("""
# 🥼 Smart Colon Health Chatbox
### 🌟 Features:
- 🎯 First Fine-tuning: Start training from pure BERT
- 🔄 Second Fine-tuning: Continue training based on first model with new data
- 📊 Test on New Data: Compare three models' performance on new data
- 🔮 Prediction: Use trained models to make predictions
""")
# Tab 1: First Fine-tuning
with gr.Tab("1️⃣ First Fine-tuning"):
with gr.Row():
with gr.Column(scale=1):
gr.Markdown("### 📤 Upload Training Data")
file_input_first = gr.File(
label="Upload Training Data CSV",
file_types=[".csv"],
file_count="single"
)
gr.Markdown("### 🔧 Select Fine-tuning Method")
tuning_method_first = gr.Radio(
choices=["Full Fine-tuning", "LoRA", "AdaLoRA"],
value="Full Fine-tuning",
label="Select Fine-tuning Method"
)
gr.Markdown("### 🎯 Select Optimization Metric")
best_metric_first = gr.Dropdown(
choices=["f1", "accuracy", "precision", "recall", "sensitivity", "specificity", "auc"],
value="f1",
label="Select Optimization Metric"
)
gr.Markdown("### ⚙️ Training Parameters")
weight_slider_first = gr.Slider(0.1, 2.0, value=0.8, step=0.1, label="Weight Multiplier")
epochs_input_first = gr.Number(value=3, label="Training Epochs")
batch_size_input_first = gr.Number(value=16, label="Batch Size")
lr_input_first = gr.Number(value=2e-5, label="Learning Rate")
warmup_input_first = gr.Number(value=200, label="Warmup Steps")
# LoRA parameters
with gr.Column(visible=False) as lora_params_first:
gr.Markdown("### 🔷 LoRA parameters")
lora_r_first = gr.Slider(4, 64, value=16, step=4, label="LoRA Rank (r)")
lora_alpha_first = gr.Slider(8, 128, value=32, step=8, label="LoRA Alpha")
lora_dropout_first = gr.Slider(0.0, 0.5, value=0.1, step=0.05, label="LoRA Dropout")
lora_modules_first = gr.Textbox(value="query,value", label="Target Modules")
# AdaLoRA parameters
with gr.Column(visible=False) as adalora_params_first:
gr.Markdown("### 🔶 AdaLoRA parameters")
adalora_init_r_first = gr.Slider(4, 64, value=12, step=4, label="Initial Rank")
adalora_target_r_first = gr.Slider(4, 64, value=8, step=4, label="Target Rank")
adalora_tinit_first = gr.Number(value=0, label="Tinit")
adalora_tfinal_first = gr.Number(value=0, label="Tfinal")
adalora_delta_t_first = gr.Number(value=1, label="Delta T")
train_button_first = gr.Button("🚀 Start First Fine-tuning", variant="primary", size="lg")
with gr.Column(scale=2):
gr.Markdown("### 📊 First Fine-tuning Results")
data_info_output_first = gr.Markdown(value="Waiting for training...")
with gr.Row():
baseline_output_first = gr.Markdown(value="### Pure BERT\nWaiting for training...")
finetuned_output_first = gr.Markdown(value="### First Fine-tuning\nWaiting for training...")
# Tab 2: Second Fine-tuning
with gr.Tab("2️⃣ Second Fine-tuning"):
with gr.Row():
with gr.Column(scale=1):
gr.Markdown("### 🔄 Select Base Model")
base_model_dropdown = gr.Dropdown(
label="Select First Fine-tuning model",
choices=["Please perform first fine-tuning first"],
value="Please perform first fine-tuning first"
)
refresh_base_models = gr.Button("🔄 Refresh Model List", size="sm")
gr.Markdown("### 📤 Upload New Training Data")
file_input_second = gr.File(
label="Upload New Training Data CSV",
file_types=[".csv"],
file_count="single"
)
gr.Markdown("### ⚙️ Training Parameters")
gr.Markdown("⚠️ Fine-tuning method will automatically inherit from first fine-tuning")
best_metric_second = gr.Dropdown(
choices=["f1", "accuracy", "precision", "recall", "sensitivity", "specificity", "auc"],
value="f1",
label="Select Optimization Metric"
)
weight_slider_second = gr.Slider(0.1, 2.0, value=0.8, step=0.1, label="Weight Multiplier")
epochs_input_second = gr.Number(value=3, label="Training Epochs", info="Recommend less than first")
batch_size_input_second = gr.Number(value=16, label="Batch Size")
lr_input_second = gr.Number(value=1e-5, label="Learning Rate", info="Recommend smaller than first")
warmup_input_second = gr.Number(value=100, label="Warmup Steps")
train_button_second = gr.Button("🚀 Start Second Fine-tuning", variant="primary", size="lg")
with gr.Column(scale=2):
gr.Markdown("### 📊 Second Fine-tuning Results")
data_info_output_second = gr.Markdown(value="Waiting for training...")
finetuned_output_second = gr.Markdown(value="### Second Fine-tuning\nWaiting for training...")
    # Tab 3: Test on New Data
with gr.Tab("3️⃣ Test on New Data"):
with gr.Row():
with gr.Column(scale=1):
gr.Markdown("### 📤 Upload Test Data")
test_file_input = gr.File(
label="Upload Test Data CSV",
file_types=[".csv"],
file_count="single"
)
gr.Markdown("### 🎯 Select Models to Compare")
gr.Markdown("Select 1-3 models for comparison")
baseline_test_choice = gr.Radio(
choices=["Evaluate Pure BERT", "Skip"],
value="Evaluate Pure BERT",
label="Pure BERT (Baseline)"
)
first_model_test_dropdown = gr.Dropdown(
label="First Fine-tuning Model",
choices=["Please Select"],
value="Please Select"
)
second_model_test_dropdown = gr.Dropdown(
label="Second Fine-tuning Model",
choices=["Please Select"],
value="Please Select"
)
refresh_test_models = gr.Button("🔄 Refresh Model List", size="sm")
test_button = gr.Button("📊 Start Testing", variant="primary", size="lg")
with gr.Column(scale=2):
gr.Markdown("### 📊 newdatatestresults - threemodelcomparison")
with gr.Row():
baseline_test_output = gr.Markdown(value="### Pure BERT\nWaiting for testing...")
first_test_output = gr.Markdown(value="### First Fine-tuning\nWaiting for testing...")
second_test_output = gr.Markdown(value="### Second Fine-tuning\nWaiting for testing...")
    # Tab 4: Prediction
with gr.Tab("4️⃣ Model Prediction"):
gr.Markdown("""
### Use Trained Model for Prediction
Select a trained model and input medical text for prediction.
""")
with gr.Row():
with gr.Column():
model_dropdown = gr.Dropdown(
label="Selectmodel",
choices=["Please train model first"],
value="Please train model first"
)
refresh_predict_models = gr.Button("🔄 Refresh Model List", size="sm")
text_input = gr.Textbox(
label="Input Medical Text",
placeholder="Please enter patient medical description (English)...",
lines=10
)
predict_button = gr.Button("🔮 Start Prediction", variant="primary", size="lg")
with gr.Column():
gr.Markdown("### Prediction Results Comparison")
baseline_prediction_output = gr.Markdown(label="Non-finetuned BERT", value="Waiting for prediction...")
finetuned_prediction_output = gr.Markdown(label="Fine-tuned BERT", value="Waiting for prediction...")
    # Tab 5: Instructions
with gr.Tab("📖 Instructions"):
gr.Markdown("""
## 🔄 Second Fine-tuning step description
### Step 1: First Fine-tuning
1. Upload training data A (CSV format: Text, label)
2. Select fine-tuning method (Full Fine-tuning / LoRA / AdaLoRA)
3. Adjust training parameters
4. Start training
5. System will automatically compare Pure BERT vs First Fine-tuning performance
### Step 2: Second Fine-tuning
1. Select trained first fine-tuning model
2. Upload new training data B
3. Adjust training parameters (Recommend fewer epochs, smaller learning rate)
4. Start training (Method automatically inherited from first)
5. Model will continue learning based on first fine-tuning weights
### Step 3: Test on New Data
1. Upload test data C
2. Select models to compare (Pure BERT / First / Second)
3. System will display all three models' performance side by side
### Step 4: Prediction
1. Select any trained model
2. Input Medical Text
3. View prediction results
## ⚠️ Important Notes
- CSV format must contain `Text` and `label` columns
- Second fine-tuning will automatically use first fine-tuning method
- Recommend smaller learning rate for second fine-tuning to avoid catastrophic forgetting of learned knowledge
- New data test can evaluate up to 3 models simultaneously
## 📊 Metrics Explanation
- **F1 Score**: Balanced metric, considers both precision and recall
- **Accuracy**: Overall accuracy
- **Precision**: Accuracy of death predictions
- **Recall/Sensitivity**: Proportion of actual deaths correctly identified
- **Specificity**: Proportion of actual survivals correctly identified
- **AUC**: Area under ROC curve, overall classification ability
""")
# ==================== Event Bindings ====================
    # First Fine-tuning - show/hide parameter panels
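    # The AdaLoRA branch also shows the LoRA panel because it reuses
    # lora_alpha, lora_dropout, and the target modules from that panel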
def update_first_params(method):
if method == "LoRA":
return gr.update(visible=True), gr.update(visible=False)
elif method == "AdaLoRA":
return gr.update(visible=True), gr.update(visible=True)
else:
return gr.update(visible=False), gr.update(visible=False)
tuning_method_first.change(
fn=update_first_params,
inputs=[tuning_method_first],
outputs=[lora_params_first, adalora_params_first]
)
    # First Fine-tuning button
train_button_first.click(
fn=train_first_wrapper,
inputs=[
file_input_first, tuning_method_first, weight_slider_first,
epochs_input_first, batch_size_input_first, lr_input_first,
warmup_input_first, best_metric_first,
lora_r_first, lora_alpha_first, lora_dropout_first, lora_modules_first,
adalora_init_r_first, adalora_target_r_first, adalora_tinit_first,
adalora_tfinal_first, adalora_delta_t_first
],
outputs=[data_info_output_first, baseline_output_first, finetuned_output_first]
)
    # Refresh base model list
def refresh_base_models_list():
choices = get_first_finetuning_models()
return gr.update(choices=choices, value=choices[0])
refresh_base_models.click(
fn=refresh_base_models_list,
outputs=[base_model_dropdown]
)
    # Second Fine-tuning button
train_button_second.click(
fn=train_second_wrapper,
inputs=[
base_model_dropdown, file_input_second, weight_slider_second,
epochs_input_second, batch_size_input_second, lr_input_second,
warmup_input_second, best_metric_second
],
outputs=[data_info_output_second, finetuned_output_second]
)
    # Refresh test model lists
    def refresh_test_models_list():
        first_models = get_first_finetuning_models()
        # Filter second fine-tuning models (guard against a missing list file)
        second_models = []
        if os.path.exists('./saved_models_list.json'):
            with open('./saved_models_list.json', 'r') as f:
                models_list = json.load(f)
            second_models = [m['model_path'] for m in models_list if m.get('is_second_finetuning', False)]
if len(second_models) == 0:
second_models = ["Please Select"]
return (
gr.update(choices=first_models if first_models[0] != "Please perform first fine-tuning first" else ["Please Select"], value="Please Select"),
gr.update(choices=second_models, value="Please Select")
)
refresh_test_models.click(
fn=refresh_test_models_list,
outputs=[first_model_test_dropdown, second_model_test_dropdown]
)
    # Test button
test_button.click(
fn=test_new_data_wrapper,
inputs=[test_file_input, baseline_test_choice, first_model_test_dropdown, second_model_test_dropdown],
outputs=[baseline_test_output, first_test_output, second_test_output]
)
    # Refresh prediction model list
def refresh_predict_models_list():
choices = get_available_models()
return gr.update(choices=choices, value=choices[0])
refresh_predict_models.click(
fn=refresh_predict_models_list,
outputs=[model_dropdown]
)
    # Prediction button
predict_button.click(
fn=predict_text,
inputs=[model_dropdown, text_input],
outputs=[baseline_prediction_output, finetuned_prediction_output]
)
if __name__ == "__main__":
demo.launch(
server_name="0.0.0.0",
server_port=7860,
share=False
)