# NOTE: this file was exported from a Hugging Face Space (UI status: "Paused").
| import gradio as gr | |
| import pandas as pd | |
| import torch | |
| from torch import nn | |
| from transformers import ( | |
| BertTokenizer, | |
| BertForSequenceClassification, | |
| TrainingArguments, | |
| Trainer | |
| ) | |
| from datasets import Dataset | |
| from sklearn.metrics import ( | |
| accuracy_score, | |
| precision_recall_fscore_support, | |
| roc_auc_score, | |
| confusion_matrix | |
| ) | |
| import numpy as np | |
| from datetime import datetime | |
| import json | |
| import os | |
| import gc | |
| import random | |
# ==================== 🎲 Random Seed Setup ====================
RANDOM_SEED = 42

def set_seed(seed=42):
    """
    Seed every RNG used by this app (Python, NumPy, PyTorch CPU and CUDA)
    and switch PyTorch into deterministic mode so training runs are
    reproducible.

    Parameters
    ----------
    seed : int
        Seed value applied to all random number generators.
    """
    print(f"\n{'='*70}")
    print(f"🎲 Setting random seed: {seed}")
    print(f"{'='*70}")
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # The CUDA seeding calls are safe no-ops when no GPU is present.
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    os.environ['PYTHONHASHSEED'] = str(seed)
    # Required by torch.use_deterministic_algorithms for cuBLAS kernels.
    os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'
    try:
        torch.use_deterministic_algorithms(True)
    except Exception:
        # Older PyTorch versions may lack this API; reproducibility is then
        # best-effort.  (Was a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt — narrowed to Exception.)
        pass
    print(f"✅ Random Seed Setupcomplete - results應該完全可重現")
    print(f" - Python random seed: {seed}")
    print(f" - NumPy seed: {seed}")
    print(f" - PyTorch seed: {seed}")
    print(f" - CUDA deterministic mode: ON")
    print(f"{'='*70}\n")

# Set seed immediately on program startup
set_seed(RANDOM_SEED)
# PEFT related imports (LoRA and AdaLoRA) — optional dependency.
try:
    from peft import (
        LoraConfig,
        AdaLoraConfig,
        get_peft_model,
        TaskType,
        PeftModel
    )
    PEFT_AVAILABLE = True
except ImportError:
    PEFT_AVAILABLE = False
    print("⚠️ PEFT not installed,LoRA and AdaLoRA features will be unavailable")
# Check GPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Globals describing the most recently trained model; written by
# run_original_code_with_tuning() and read by the prediction page.
# BUG FIX: this default was previously declared as `_MODEL_PATH`, but the
# training code declares and assigns `global LAST_MODEL_PATH`, so the
# intended module-level default never existed under that name.
LAST_MODEL_PATH = None
LAST_TOKENIZER = None
LAST_TUNING_METHOD = None
| # ==================== Your original functions - Unchanged ==================== | |
def evaluate_baseline_bert(eval_dataset, df_clean):
    """
    Evaluate a stock pretrained BERT (never fine-tuned on this data) on the
    same 20% validation split used for the fine-tuned model, so the two can
    be compared fairly.

    Parameters
    ----------
    eval_dataset : datasets.Dataset
        Unused; kept for backward compatibility with existing callers.  The
        validation split is re-derived from ``df_clean`` with the same
        ``test_size``/``seed`` as training so it matches that split.
    df_clean : pandas.DataFrame
        Cleaned data with 'text' and 'label' columns (label 1 = Death).

    Returns
    -------
    dict
        f1 / accuracy / precision / recall / sensitivity / specificity / auc
        plus confusion-matrix counts (tp, tn, fp, fn) as plain Python numbers.
    """
    print("\n" + "=" * 80)
    print("evaluation Baseline Pure BERT(完全沒看過data)")
    print("=" * 80)
    # Load the stock pretrained BERT with a randomly initialized 2-class head.
    baseline_tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    baseline_model = BertForSequenceClassification.from_pretrained(
        "bert-base-uncased",
        num_labels=2
    ).to(device)
    baseline_model.eval()
    print(" ⚠️ 這models完全沒有using您的datatraining")
    # Rebuild the validation split (same test_size/seed as the training run).
    baseline_dataset = Dataset.from_pandas(df_clean[['text', 'label']])

    def baseline_preprocess(examples):
        return baseline_tokenizer(examples['text'], truncation=True, padding='max_length', max_length=256)

    baseline_tokenized = baseline_dataset.map(baseline_preprocess, batched=True)
    baseline_split = baseline_tokenized.train_test_split(test_size=0.2, seed=42)
    baseline_eval_dataset = baseline_split['test']
    # Minimal Trainer used purely for batched inference.
    baseline_trainer_args = TrainingArguments(
        output_dir='./temp_baseline',
        per_device_eval_batch_size=32,
        report_to="none"
    )
    baseline_trainer = Trainer(
        model=baseline_model,
        args=baseline_trainer_args,
    )
    print("📄 Evaluate pure BERT...")
    predictions_output = baseline_trainer.predict(baseline_eval_dataset)
    all_preds = predictions_output.predictions.argmax(-1)
    all_labels = predictions_output.label_ids
    # Probability of the positive class (Death, label 1).
    probs = torch.nn.functional.softmax(torch.tensor(predictions_output.predictions), dim=-1)[:, 1].numpy()
    precision, recall, f1, _ = precision_recall_fscore_support(
        all_labels, all_preds, average='binary', pos_label=1, zero_division=0
    )
    acc = accuracy_score(all_labels, all_preds)
    try:
        auc = roc_auc_score(all_labels, probs)
    except ValueError:
        # roc_auc_score raises ValueError when only one class is present in
        # the labels.  (Was a bare `except:` — narrowed.)
        auc = 0.0
    cm = confusion_matrix(all_labels, all_preds)
    if cm.shape == (2, 2):
        tn, fp, fn, tp = cm.ravel()
        sensitivity = tp / (tp + fn) if (tp + fn) > 0 else 0
        specificity = tn / (tn + fp) if (tn + fp) > 0 else 0
    else:
        # Degenerate case: predictions/labels cover a single class only.
        sensitivity = specificity = 0
        tn = fp = fn = tp = 0
    baseline_results = {
        'f1': float(f1),
        'accuracy': float(acc),
        'precision': float(precision),
        'recall': float(recall),
        'sensitivity': float(sensitivity),
        'specificity': float(specificity),
        'auc': float(auc),
        'tp': int(tp),
        'tn': int(tn),
        'fp': int(fp),
        'fn': int(fn)
    }
    print("✅ Baseline evaluationcomplete")
    # Free GPU memory before returning.
    del baseline_model
    del baseline_trainer
    torch.cuda.empty_cache()
    gc.collect()
    return baseline_results
def run_original_code_with_tuning(
    file_path,
    weight_multiplier,
    epochs,
    batch_size,
    learning_rate,
    warmup_steps,
    tuning_method,
    best_metric,
    # LoRA parameters
    lora_r,
    lora_alpha,
    lora_dropout,
    lora_modules,
    # AdaLoRA parameters
    adalora_init_r,
    adalora_target_r,
    adalora_tinit,
    adalora_tfinal,
    adalora_delta_t,
    # New: second-stage fine-tuning options
    is_second_finetuning=False,
    base_model_path=None
):
    """
    End-to-end training pipeline: load a CSV ('Text'/'label' columns), train
    a class-weighted BERT classifier with the selected fine-tuning method
    ("Full Fine-tuning", "LoRA" or "AdaLoRA"), evaluate it, compare against
    a never-fine-tuned baseline (first fine-tuning only), save the model and
    append its metadata to ./saved_models_list.json.

    Parameters
    ----------
    file_path : str
        Path to the training CSV; must contain 'Text' and 'label' columns.
    weight_multiplier : float
        Multiplier applied to the minority-class (label 1, Death) loss weight.
    epochs, batch_size, learning_rate, warmup_steps :
        Standard HuggingFace TrainingArguments hyper-parameters.
    tuning_method : str
        "Full Fine-tuning", "LoRA" or "AdaLoRA" (PEFT methods need peft
        installed; otherwise falls back to full fine-tuning).
    best_metric : str
        Metric used for best-checkpoint selection (unknown names fall back
        to "f1").
    lora_r, lora_alpha, lora_dropout, lora_modules :
        LoRA hyper-parameters; lora_modules is a comma-separated module list
        (default "query,value").
    adalora_init_r, adalora_target_r, adalora_tinit, adalora_tfinal, adalora_delta_t :
        AdaLoRA rank-allocation schedule hyper-parameters.
    is_second_finetuning : bool
        When True, continue training from a previously saved model instead of
        starting from "bert-base-uncased"; the first run's tuning method is
        then reused regardless of the `tuning_method` argument.
    base_model_path : str or None
        Path of the first-stage model (only used when is_second_finetuning).

    Returns
    -------
    dict
        Trainer.evaluate() metrics (eval_* keys) augmented with run metadata:
        'tuning_method', 'training_type', 'best_metric', 'best_metric_value',
        'baseline_results' (None on second fine-tuning), 'model_path',
        'is_second_finetuning'.

    Raises
    ------
    ValueError
        If base_model_path is not found in ./saved_models_list.json.
    """
    # Re-seed right before training so every run is reproducible.
    print("\n" + "="*80)
    print("🔄 training前重新確認隨機種子...")
    print("="*80)
    set_seed(RANDOM_SEED)
    global LAST_MODEL_PATH, LAST_TOKENIZER, LAST_TUNING_METHOD
    # ==================== Clear memory (before training) ====================
    torch.cuda.empty_cache()
    gc.collect()
    print("🧹 Memory cleared")
    # ==================== Original pipeline starts ====================
    # Read the uploaded file, keep only the two needed columns, drop NaNs.
    df_original = pd.read_csv(file_path)
    df_clean = pd.DataFrame({
        'text': df_original['Text'],
        'label': df_original['label']
    })
    df_clean = df_clean.dropna()
    training_type = "Second Fine-tuning" if is_second_finetuning else "First Fine-tuning"
    print("\n" + "=" * 80)
    print(f"Breast Cancer Survival Prediction BERT {training_type} - {tuning_method} method")
    print("=" * 80)
    print(f"starts時間: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    print(f"training類型: {training_type}")
    print(f"Fine-tuning Method: {tuning_method}")
    print(f"最佳化metrics: {best_metric}")
    if is_second_finetuning:
        print(f"基礎model: {base_model_path}")
    print("=" * 80)
    # Load the tokenizer.
    print("\n📦 Loading BERT Tokenizer...")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    print("✅ Tokenizer Loadingcomplete")

    # Metric function handed to the Trainer (original logic, unchanged).
    def compute_metrics(pred):
        labels = pred.label_ids
        preds = pred.predictions.argmax(-1)
        # Probability of the positive class (label 1) for AUC.
        probs = torch.nn.functional.softmax(torch.tensor(pred.predictions), dim=-1)[:, 1].numpy()
        precision, recall, f1, _ = precision_recall_fscore_support(
            labels, preds, average='binary', pos_label=1, zero_division=0
        )
        acc = accuracy_score(labels, preds)
        try:
            auc = roc_auc_score(labels, probs)
        except:
            # roc_auc_score fails when only one class appears in labels.
            auc = 0.0
        cm = confusion_matrix(labels, preds)
        if cm.shape == (2, 2):
            tn, fp, fn, tp = cm.ravel()
        else:
            # Confusion matrix collapsed: all predictions are one class.
            if len(np.unique(preds)) == 1:
                if preds[0] == 0:
                    tn, fp, fn, tp = sum(labels == 0), 0, sum(labels == 1), 0
                else:
                    tn, fp, fn, tp = 0, sum(labels == 0), 0, sum(labels == 1)
            else:
                tn = fp = fn = tp = 0
        sensitivity = tp / (tp + fn) if (tp + fn) > 0 else 0
        specificity = tn / (tn + fp) if (tn + fp) > 0 else 0
        return {
            'accuracy': acc, 'f1': f1, 'precision': precision, 'recall': recall,
            'auc': auc, 'sensitivity': sensitivity, 'specificity': specificity,
            'tp': int(tp), 'tn': int(tn), 'fp': int(fp), 'fn': int(fn)
        }

    # ============================================================================
    # Step 1: prepare data (no rebalancing; keep the original class ratio)
    # ============================================================================
    print("\n" + "=" * 80)
    print("Step 1:準備data(保持原始比例)")
    print("=" * 80)
    print(f"\n原始data分布:")
    print(f" Survival (0): {sum(df_clean['label']==0)} 筆 ({sum(df_clean['label']==0)/len(df_clean)*100:.1f}%)")
    print(f" Death (1): {sum(df_clean['label']==1)} 筆 ({sum(df_clean['label']==1)/len(df_clean)*100:.1f}%)")
    # Class imbalance ratio (majority / minority); drives the loss weight.
    ratio = sum(df_clean['label']==0) / sum(df_clean['label']==1)
    print(f" 不平衡比例: {ratio:.1f}:1")
    # ============================================================================
    # Step 2: tokenization
    # ============================================================================
    print("\n" + "=" * 80)
    print("Step 2:Tokenization")
    print("=" * 80)
    dataset = Dataset.from_pandas(df_clean[['text', 'label']])

    def preprocess_function(examples):
        return tokenizer(examples['text'], truncation=True, padding='max_length', max_length=256)

    tokenized_dataset = dataset.map(preprocess_function, batched=True)
    # Fixed seed so the split matches evaluate_baseline_bert's split.
    train_test_split = tokenized_dataset.train_test_split(test_size=0.2, seed=42)
    train_dataset = train_test_split['train']
    eval_dataset = train_test_split['test']
    print(f"\n✅ data集準備complete:")
    print(f" training集: {len(train_dataset)} 筆")
    print(f" Validation set: {len(eval_dataset)} 筆")
    # ============================================================================
    # Step 3: class weights
    # ============================================================================
    print("\n" + "=" * 80)
    print(f"Step 3:設定Class權重({weight_multiplier}x 倍數)")
    print("=" * 80)
    # Majority class keeps weight 1.0; minority weight scales with the
    # imbalance ratio times the user-chosen multiplier.
    weight_0 = 1.0
    weight_1 = ratio * weight_multiplier
    print(f"\n權重設定:")
    print(f" 倍數: {weight_multiplier}x")
    print(f" Survival類權重: {weight_0:.3f}")
    print(f" Death類權重: {weight_1:.3f} (= {ratio:.1f} × {weight_multiplier})")
    class_weights = torch.tensor([weight_0, weight_1], dtype=torch.float).to(device)
    # ============================================================================
    # Step 4: train the model (second-fine-tuning logic added here)
    # ============================================================================
    print("\n" + "=" * 80)
    print(f"Step 4:training {tuning_method} BERT model ({training_type})")
    print("=" * 80)
    print(f"\n🔄 初始化model ({tuning_method})...")
    # [New] Second fine-tuning: load the first-stage model and continue.
    if is_second_finetuning and base_model_path:
        print(f"📦 LoadingFirst Fine-tuningmodel: {base_model_path}")
        # Look up how the first model was fine-tuned in the JSON registry.
        with open('./saved_models_list.json', 'r') as f:
            models_list = json.load(f)
        base_model_info = None
        for model_info in models_list:
            if model_info['model_path'] == base_model_path:
                base_model_info = model_info
                break
        if base_model_info is None:
            raise ValueError(f"找不到基礎model資訊: {base_model_path}")
        base_tuning_method = base_model_info['tuning_method']
        print(f" 第一次Fine-tuning Method: {base_tuning_method}")
        # Load the model according to the first run's method.
        if base_tuning_method in ["LoRA", "AdaLoRA"] and PEFT_AVAILABLE:
            # PEFT adapters are loaded on top of the stock pretrained BERT.
            base_bert = BertForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)
            model = PeftModel.from_pretrained(base_bert, base_model_path)
            print(f" ✅ 已Loading {base_tuning_method} model")
        else:
            # Plain (full fine-tuning) checkpoint.
            model = BertForSequenceClassification.from_pretrained(base_model_path, num_labels=2)
            print(f" ✅ 已Loading Full Fine-tuning model")
        model = model.to(device)
        print(f" ⚠️ 注意:Second Fine-tuning將using與第一次相同的method ({base_tuning_method})")
        # Second fine-tuning forcibly reuses the first run's method.
        tuning_method = base_tuning_method
    else:
        # [Original path] First fine-tuning: start from the stock BERT.
        model = BertForSequenceClassification.from_pretrained(
            "bert-base-uncased", num_labels=2, problem_type="single_label_classification"
        )
        # Configure the model according to the selected fine-tuning method.
        if tuning_method == "Full Fine-tuning":
            # Original method: all parameters trainable.
            model = model.to(device)
            print("✅ using完整 Fine-tuning(所有parameters可training)")
            trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
            all_params = sum(p.numel() for p in model.parameters())
            print(f" 可trainingparameters: {trainable_params:,} / {all_params:,} ({100 * trainable_params / all_params:.2f}%)")
        elif tuning_method == "LoRA" and PEFT_AVAILABLE:
            # LoRA configuration; default target modules are query/value.
            target_modules = lora_modules.split(",") if lora_modules else ["query", "value"]
            target_modules = [m.strip() for m in target_modules]
            peft_config = LoraConfig(
                task_type=TaskType.SEQ_CLS,
                r=int(lora_r),
                lora_alpha=int(lora_alpha),
                lora_dropout=float(lora_dropout),
                target_modules=target_modules
            )
            model = get_peft_model(model, peft_config)
            model = model.to(device)
            print("✅ using LoRA 微調")
            print(f" LoRA rank (r): {lora_r}")
            print(f" LoRA alpha: {lora_alpha}")
            print(f" LoRA dropout: {lora_dropout}")
            print(f" Target Modules: {target_modules}")
            model.print_trainable_parameters()
        elif tuning_method == "AdaLoRA" and PEFT_AVAILABLE:
            # AdaLoRA configuration: rank is adapted from init_r down to
            # target_r over the tinit/tfinal/deltaT schedule.
            target_modules = lora_modules.split(",") if lora_modules else ["query", "value"]
            target_modules = [m.strip() for m in target_modules]
            peft_config = AdaLoraConfig(
                task_type=TaskType.SEQ_CLS,
                init_r=int(adalora_init_r),
                target_r=int(adalora_target_r),
                tinit=int(adalora_tinit),
                tfinal=int(adalora_tfinal),
                deltaT=int(adalora_delta_t),
                lora_alpha=int(lora_alpha),
                lora_dropout=float(lora_dropout),
                target_modules=target_modules
            )
            model = get_peft_model(model, peft_config)
            model = model.to(device)
            print("✅ using AdaLoRA 微調")
            print(f" 初始 rank: {adalora_init_r}")
            print(f" 目標 rank: {adalora_target_r}")
            print(f" Tinit: {adalora_tinit}, Tfinal: {adalora_tfinal}, DeltaT: {adalora_delta_t}")
            model.print_trainable_parameters()
        else:
            # Fallback: full fine-tuning (PEFT missing or unknown method).
            model = model.to(device)
            print("⚠️ PEFT not installed或method無效,using Full Fine-tuning")

    # Custom Trainer applying the class weights in the loss (original code).
    class WeightedTrainer(Trainer):
        def compute_loss(self, model, inputs, return_outputs=False):
            labels = inputs.pop("labels")
            outputs = model(**inputs)
            loss_fct = nn.CrossEntropyLoss(weight=class_weights)
            loss = loss_fct(outputs.logits.view(-1, 2), labels.view(-1))
            return (loss, outputs) if return_outputs else loss

    # Maps UI metric names to compute_metrics keys (currently the identity;
    # kept as an explicit table so unknown names fall back to "f1" below).
    metric_map = {
        "f1": "f1",
        "accuracy": "accuracy",
        "precision": "precision",
        "recall": "recall",
        "sensitivity": "sensitivity",
        "specificity": "specificity",
        "auc": "auc"
    }
    # NOTE(review): `evaluation_strategy` was renamed `eval_strategy` in
    # newer transformers releases — confirm against the pinned version.
    training_args = TrainingArguments(
        output_dir='./results_weight',
        num_train_epochs=epochs,
        per_device_train_batch_size=batch_size,
        per_device_eval_batch_size=batch_size*2,
        warmup_steps=warmup_steps,
        weight_decay=0.01,
        learning_rate=learning_rate,
        logging_steps=50,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        load_best_model_at_end=True,
        metric_for_best_model=metric_map.get(best_metric, "f1"),
        report_to="none",
        greater_is_better=True,
        seed=RANDOM_SEED,  # seed the Trainer's RNGs
        data_seed=RANDOM_SEED,  # seed data sampling/shuffling
        dataloader_num_workers=0  # single worker for reproducibility
    )
    trainer = WeightedTrainer(
        model=model, args=training_args,
        train_dataset=train_dataset, eval_dataset=eval_dataset,
        compute_metrics=compute_metrics
    )
    print(f"\n🚀 Start training({epochs} epochs)...")
    print(f" 最佳化metrics: {best_metric}")
    print("-" * 80)
    trainer.train()
    print("\n✅ modeltrainingcomplete!")
    # Evaluate the (best) model on the validation split.
    print("\n📊 evaluationmodel...")
    results = trainer.evaluate()
    print(f"\n{training_type} {tuning_method} BERT ({weight_multiplier}x 權重) 表現:")
    print(f" F1 Score: {results['eval_f1']:.4f}")
    print(f" Accuracy: {results['eval_accuracy']:.4f}")
    print(f" Precision: {results['eval_precision']:.4f}")
    print(f" Recall: {results['eval_recall']:.4f}")
    print(f" Sensitivity: {results['eval_sensitivity']:.4f}")
    print(f" Specificity: {results['eval_specificity']:.4f}")
    print(f" AUC: {results['eval_auc']:.4f}")
    print(f" Confusion Matrix: Tp={results['eval_tp']}, Tn={results['eval_tn']}, "
          f"Fp={results['eval_fp']}, Fn={results['eval_fn']}")
    # ============================================================================
    # Step 5: baseline comparison (pure BERT) — first fine-tuning only
    # ============================================================================
    if not is_second_finetuning:
        print("\n" + "=" * 80)
        print("Step 5:Baseline 比較 - Pure BERT(完全沒看過data)")
        print("=" * 80)
        baseline_results = evaluate_baseline_bert(eval_dataset, df_clean)
        # ============================================================================
        # Step 6: print the comparison table
        # ============================================================================
        print("\n" + "=" * 80)
        print(f"📊 【對比results】Pure BERT vs {tuning_method} BERT")
        print("=" * 80)
        print("\n📋 詳細比較表:")
        print("-" * 100)
        print(f"{'metrics':<15} {'Pure BERT':<20} {tuning_method:<20} {'改善幅度':<20}")
        print("-" * 100)
        # (display name, key into baseline_results, key into results)
        metrics_to_compare = [
            ('F1 Score', 'f1', 'eval_f1'),
            ('Accuracy', 'accuracy', 'eval_accuracy'),
            ('Precision', 'precision', 'eval_precision'),
            ('Recall', 'recall', 'eval_recall'),
            ('Sensitivity', 'sensitivity', 'eval_sensitivity'),
            ('Specificity', 'specificity', 'eval_specificity'),
            ('AUC', 'auc', 'eval_auc')
        ]
        for name, baseline_key, finetuned_key in metrics_to_compare:
            baseline_val = baseline_results[baseline_key]
            finetuned_val = results[finetuned_key]
            # Relative improvement in percent; 0 when the baseline is 0.
            improvement = ((finetuned_val - baseline_val) / baseline_val * 100) if baseline_val > 0 else 0
            print(f"{name:<15} {baseline_val:<20.4f} {finetuned_val:<20.4f} {improvement:>+18.1f}%")
        print("-" * 100)
    else:
        baseline_results = None
    # Save the model under a timestamped directory.
    training_label = "second" if is_second_finetuning else "first"
    save_dir = f'./breast_cancer_bert_{tuning_method.lower().replace(" ", "_")}_{training_label}_{datetime.now().strftime("%Y%m%d_%H%M%S")}'
    if tuning_method in ["LoRA", "AdaLoRA"] and PEFT_AVAILABLE:
        # PEFT save path (currently identical to the plain path below, since
        # PeftModel also exposes save_pretrained; branches kept separate).
        model.save_pretrained(save_dir)
        tokenizer.save_pretrained(save_dir)
    else:
        # Plain model save path.
        model.save_pretrained(save_dir)
        tokenizer.save_pretrained(save_dir)
    # Record the model's metadata for the prediction page's dropdown.
    model_info = {
        'model_path': save_dir,
        'tuning_method': tuning_method,
        'training_type': training_type,
        'best_metric': best_metric,
        'best_metric_value': float(results[f'eval_{metric_map.get(best_metric, "f1")}']),
        'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
        'weight_multiplier': weight_multiplier,
        'epochs': epochs,
        'is_second_finetuning': is_second_finetuning,
        'base_model_path': base_model_path if is_second_finetuning else None
    }
    # Append to the existing registry (created on first run).
    models_list_file = './saved_models_list.json'
    if os.path.exists(models_list_file):
        with open(models_list_file, 'r') as f:
            models_list = json.load(f)
    else:
        models_list = []
    models_list.append(model_info)
    with open(models_list_file, 'w') as f:
        json.dump(models_list, f, indent=2)
    # Expose the freshly trained model to the prediction page via globals.
    LAST_MODEL_PATH = save_dir
    LAST_TOKENIZER = tokenizer
    LAST_TUNING_METHOD = tuning_method
    print(f"\n💾 model已儲存至: {save_dir}")
    print("\n" + "=" * 80)
    print("🎉 trainingcomplete!")
    print("=" * 80)
    print(f"complete時間: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    # ==================== Clear memory (after training) ====================
    del model
    del trainer
    torch.cuda.empty_cache()
    gc.collect()
    print("🧹 training後Memory cleared")
    # Attach run metadata to the returned metrics dict.
    results['tuning_method'] = tuning_method
    results['training_type'] = training_type
    results['best_metric'] = best_metric
    results['best_metric_value'] = results[f'eval_{metric_map.get(best_metric, "f1")}']
    results['baseline_results'] = baseline_results
    results['model_path'] = save_dir
    results['is_second_finetuning'] = is_second_finetuning
    return results
# ==================== New-data testing function ====================
def test_on_new_data(test_file_path, baseline_model_path, first_model_path, second_model_path):
    """
    Evaluate up to three models on a fresh test CSV and return their metrics:

    1. Pure pretrained BERT (skipped when baseline_model_path == "Skip")
    2. First fine-tuned model (skipped when first_model_path == "Please select")
    3. Second fine-tuned model (skipped when second_model_path == "Please select")

    Parameters
    ----------
    test_file_path : str
        CSV with 'Text' and 'label' columns.
    baseline_model_path : str
        "Skip" to skip the baseline; any other value evaluates pure BERT.
    first_model_path, second_model_path : str
        Paths of saved models recorded in ./saved_models_list.json, or the
        "Please select" placeholder to skip that slot.

    Returns
    -------
    dict
        {'baseline': ..., 'first': ..., 'second': ...}; each value is a metric
        dict (f1/accuracy/.../tp/tn/fp/fn) or None when skipped or not found
        in the registry.
    """
    print("\n" + "=" * 80)
    print("📊 新datatesting - 三model比較")
    print("=" * 80)
    # Load and clean the test data.
    df_test = pd.read_csv(test_file_path)
    df_clean = pd.DataFrame({
        'text': df_test['Text'],
        'label': df_test['label']
    })
    df_clean = df_clean.dropna()
    print(f"\ntestingdata:")
    print(f" 總筆數: {len(df_clean)}")
    print(f" Survival (0): {sum(df_clean['label']==0)} 筆")
    print(f" Death (1): {sum(df_clean['label']==1)} 筆")
    # Tokenize once; the same tokenized set is reused for every model.
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    test_dataset = Dataset.from_pandas(df_clean[['text', 'label']])

    def preprocess_function(examples):
        return tokenizer(examples['text'], truncation=True, padding='max_length', max_length=256)

    test_tokenized = test_dataset.map(preprocess_function, batched=True)

    def evaluate_model(model, dataset_name):
        """Run batched inference with a throwaway Trainer and compute metrics."""
        model.eval()
        trainer_args = TrainingArguments(
            output_dir='./temp_test',
            per_device_eval_batch_size=32,
            report_to="none"
        )
        trainer = Trainer(
            model=model,
            args=trainer_args,
        )
        predictions_output = trainer.predict(test_tokenized)
        all_preds = predictions_output.predictions.argmax(-1)
        all_labels = predictions_output.label_ids
        # Probability of the positive class (Death, label 1).
        probs = torch.nn.functional.softmax(torch.tensor(predictions_output.predictions), dim=-1)[:, 1].numpy()
        precision, recall, f1, _ = precision_recall_fscore_support(
            all_labels, all_preds, average='binary', pos_label=1, zero_division=0
        )
        acc = accuracy_score(all_labels, all_preds)
        try:
            auc = roc_auc_score(all_labels, probs)
        except ValueError:
            # roc_auc_score raises ValueError when only one class is present.
            # (Was a bare `except:` — narrowed.)
            auc = 0.0
        cm = confusion_matrix(all_labels, all_preds)
        if cm.shape == (2, 2):
            tn, fp, fn, tp = cm.ravel()
            sensitivity = tp / (tp + fn) if (tp + fn) > 0 else 0
            specificity = tn / (tn + fp) if (tn + fp) > 0 else 0
        else:
            sensitivity = specificity = 0
            tn = fp = fn = tp = 0
        results = {
            'f1': float(f1),
            'accuracy': float(acc),
            'precision': float(precision),
            'recall': float(recall),
            'sensitivity': float(sensitivity),
            'specificity': float(specificity),
            'auc': float(auc),
            'tp': int(tp),
            'tn': int(tn),
            'fp': int(fp),
            'fn': int(fn)
        }
        print(f"\n✅ {dataset_name} evaluationcomplete")
        del trainer
        torch.cuda.empty_cache()
        gc.collect()
        return results

    def evaluate_saved_model(model_path, header, dataset_name):
        """Load a model recorded in ./saved_models_list.json (PEFT adapter or
        full checkpoint) and evaluate it; returns None when the path is not
        found in the registry.  Factored out of two previously duplicated
        blocks for the first/second fine-tuned models."""
        print("\n" + "-" * 80)
        print(header)
        print("-" * 80)
        # Look up the model's metadata in the registry.
        with open('./saved_models_list.json', 'r') as f:
            models_list = json.load(f)
        saved_info = None
        for model_info in models_list:
            if model_info['model_path'] == model_path:
                saved_info = model_info
                break
        if saved_info is None:
            return None
        tuning_method = saved_info['tuning_method']
        if tuning_method in ["LoRA", "AdaLoRA"] and PEFT_AVAILABLE:
            # PEFT adapters are loaded on top of the stock pretrained BERT.
            base_model = BertForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)
            loaded_model = PeftModel.from_pretrained(base_model, model_path)
            loaded_model = loaded_model.to(device)
        else:
            loaded_model = BertForSequenceClassification.from_pretrained(model_path).to(device)
        metrics = evaluate_model(loaded_model, dataset_name)
        del loaded_model
        torch.cuda.empty_cache()
        return metrics

    all_results = {}
    # 1. Pure pretrained BERT (baseline).
    if baseline_model_path != "Skip":
        print("\n" + "-" * 80)
        print("1️⃣ Evaluate pure BERT (Baseline)")
        print("-" * 80)
        baseline_model = BertForSequenceClassification.from_pretrained(
            "bert-base-uncased",
            num_labels=2
        ).to(device)
        all_results['baseline'] = evaluate_model(baseline_model, "Pure BERT")
        del baseline_model
        torch.cuda.empty_cache()
    else:
        all_results['baseline'] = None
    # 2. First fine-tuned model.
    if first_model_path != "Please select":
        all_results['first'] = evaluate_saved_model(
            first_model_path, "2️⃣ evaluationFirst Fine-tuningmodel", "First Fine-tuningmodel"
        )
    else:
        all_results['first'] = None
    # 3. Second fine-tuned model.
    if second_model_path != "Please select":
        all_results['second'] = evaluate_saved_model(
            second_model_path, "3️⃣ evaluation第Second Fine-tuningmodel", "第Second Fine-tuningmodel"
        )
    else:
        all_results['second'] = None
    print("\n" + "=" * 80)
    print("✅ 新datatestingcomplete")
    print("=" * 80)
    return all_results
# ==================== Prediction function (kept as-is) ====================
def predict_text(model_choice, text_input):
    """
    Predict survival for one input text with two models side by side: the
    stock (never fine-tuned) BERT and the fine-tuned model selected in the
    UI dropdown.

    Parameters
    ----------
    model_choice : str
        Dropdown entry formatted by get_available_models()
        ("路徑: <path> | 類型: ... | ...") or the
        "Please train a model first" placeholder.
    text_input : str
        Free text to classify; label 0 = Survival, 1 = Death.

    Returns
    -------
    tuple[str, str]
        (baseline_markdown, finetuned_markdown); on any failure both
        elements carry the same error message including a traceback.
    """
    if not text_input or text_input.strip() == "":
        return "請輸入文本", "請輸入文本"
    try:
        # ==================== Prediction with the untuned BERT ====================
        print("\nusingUnfine-tuned BERT prediction...")
        baseline_tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
        baseline_model = BertForSequenceClassification.from_pretrained(
            "bert-base-uncased",
            num_labels=2
        ).to(device)
        baseline_model.eval()
        # Tokenize the input (baseline model)
        baseline_inputs = baseline_tokenizer(
            text_input,
            truncation=True,
            padding='max_length',
            max_length=256,
            return_tensors='pt'
        ).to(device)
        # Predict (baseline model)
        with torch.no_grad():
            baseline_outputs = baseline_model(**baseline_inputs)
            baseline_probs = torch.nn.functional.softmax(baseline_outputs.logits, dim=-1)
            baseline_pred_class = baseline_probs.argmax(-1).item()
            baseline_confidence = baseline_probs[0][baseline_pred_class].item()
        baseline_result = "Survival" if baseline_pred_class == 0 else "Death"
        baseline_prob_survive = baseline_probs[0][0].item()
        baseline_prob_death = baseline_probs[0][1].item()
        baseline_output = f"""
# 🔵 Unfine-tuned BERT predictionresults
## predictionClass: **{baseline_result}**
## Confidence: **{baseline_confidence:.1%}**
## Probability Distribution:
- 🟢 **Survival機率**: {baseline_prob_survive:.2%}
- 🔴 **Death機率**: {baseline_prob_death:.2%}
---
**說明**: 此為原始 BERT model,未經任何領域datatraining
"""
        # Free the baseline model before loading the fine-tuned one
        del baseline_model
        del baseline_tokenizer
        torch.cuda.empty_cache()
        # ==================== Prediction with the fine-tuned BERT ====================
        if model_choice == "Please train a model first":
            finetuned_output = """
# 🟢 Fine-tuned BERT predictionresults
❌ 尚未training任何model,請先在「modeltraining」頁面trainingmodel
"""
            return baseline_output, finetuned_output
        # Parse the model path out of the dropdown entry
        model_path = model_choice.split(" | ")[0].replace("路徑: ", "")
        # Look up the model's metadata in the JSON registry
        with open('./saved_models_list.json', 'r') as f:
            models_list = json.load(f)
        selected_model_info = None
        for model_info in models_list:
            if model_info['model_path'] == model_path:
                selected_model_info = model_info
                break
        if selected_model_info is None:
            finetuned_output = f"""
# 🟢 Fine-tuned BERT predictionresults
❌ 找不到model:{model_path}
"""
            return baseline_output, finetuned_output
        print(f"\nusing微調model: {model_path}")
        # Load the tokenizer saved alongside the model
        finetuned_tokenizer = BertTokenizer.from_pretrained(model_path)
        # Load the model according to how it was fine-tuned
        tuning_method = selected_model_info['tuning_method']
        if tuning_method in ["LoRA", "AdaLoRA"] and PEFT_AVAILABLE:
            # PEFT adapters are applied on top of the stock pretrained BERT
            base_model = BertForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)
            finetuned_model = PeftModel.from_pretrained(base_model, model_path)
            finetuned_model = finetuned_model.to(device)
        else:
            # Plain (full fine-tuning) checkpoint
            finetuned_model = BertForSequenceClassification.from_pretrained(model_path).to(device)
        finetuned_model.eval()
        # Tokenize the input (fine-tuned model)
        finetuned_inputs = finetuned_tokenizer(
            text_input,
            truncation=True,
            padding='max_length',
            max_length=256,
            return_tensors='pt'
        ).to(device)
        # Predict (fine-tuned model)
        with torch.no_grad():
            finetuned_outputs = finetuned_model(**finetuned_inputs)
            finetuned_probs = torch.nn.functional.softmax(finetuned_outputs.logits, dim=-1)
            finetuned_pred_class = finetuned_probs.argmax(-1).item()
            finetuned_confidence = finetuned_probs[0][finetuned_pred_class].item()
        finetuned_result = "Survival" if finetuned_pred_class == 0 else "Death"
        finetuned_prob_survive = finetuned_probs[0][0].item()
        finetuned_prob_death = finetuned_probs[0][1].item()
        training_type_label = "Second Fine-tuning" if selected_model_info.get('is_second_finetuning', False) else "First Fine-tuning"
        finetuned_output = f"""
# 🟢 Fine-tuned BERT predictionresults
## predictionClass: **{finetuned_result}**
## Confidence: **{finetuned_confidence:.1%}**
## Probability Distribution:
- 🟢 **Survival機率**: {finetuned_prob_survive:.2%}
- 🔴 **Death機率**: {finetuned_prob_death:.2%}
---
### Model Information:
- **training類型**: {training_type_label}
- **Fine-tuning Method**: {selected_model_info['tuning_method']}
- **最佳化metrics**: {selected_model_info['best_metric']}
- **training時間**: {selected_model_info['timestamp']}
- **model路徑**: {model_path}
---
**注意**: 此prediction僅供參考,實際醫療決策應由專業醫師判斷。
"""
        # Release the fine-tuned model
        del finetuned_model
        del finetuned_tokenizer
        torch.cuda.empty_cache()
        return baseline_output, finetuned_output
    except Exception as e:
        # UI boundary handler: surface the full traceback instead of crashing
        import traceback
        error_msg = f"❌ prediction錯誤:{str(e)}\n\n詳細錯誤訊息:\n{traceback.format_exc()}"
        return error_msg, error_msg
def get_available_models():
    """Return formatted dropdown choices for every trained model.

    Reads ./saved_models_list.json and builds one human-readable option per
    saved model entry.  Returns a single placeholder entry when the registry
    file does not exist yet or contains no models.
    """
    models_list_file = './saved_models_list.json'
    if not os.path.exists(models_list_file):
        return ["Please train a model first"]
    with open(models_list_file, 'r') as f:
        models_list = json.load(f)
    if not models_list:
        return ["Please train a model first"]
    # One formatted option per saved model.  NOTE(review): this string is
    # richer than the raw model_path; any downstream matching on model_path
    # must parse the path back out of the chosen option — verify callers.
    return [
        f"路徑: {model_info['model_path']} | 類型: {model_info.get('training_type', 'First Fine-tuning')} | method: {model_info['tuning_method']} | 時間: {model_info['timestamp']}"
        for model_info in models_list
    ]
def get_first_finetuning_models():
    """Return the model paths of all first-round fine-tuned models.

    Used to populate the base-model dropdown for second fine-tuning.
    Entries flagged with is_second_finetuning are excluded; a placeholder
    entry is returned when no first-round model exists yet.
    """
    models_list_file = './saved_models_list.json'
    if not os.path.exists(models_list_file):
        return ["Please complete first fine-tuning first"]
    with open(models_list_file, 'r') as f:
        models_list = json.load(f)
    # Keep only first-round models (missing flag counts as first-round).
    first_models = [m for m in models_list if not m.get('is_second_finetuning', False)]
    if not first_models:
        return ["Please complete first fine-tuning first"]
    return [m['model_path'] for m in first_models]
| # ==================== Wrapper 函數 ==================== | |
def train_first_wrapper(
    file, tuning_method, weight_mult, epochs, batch_size, lr, warmup, best_metric,
    lora_r, lora_alpha, lora_dropout, lora_modules,
    adalora_init_r, adalora_target_r, adalora_tinit, adalora_tfinal, adalora_delta_t
):
    """Gradio wrapper for the first fine-tuning run.

    Validates the uploaded CSV, forwards all UI parameters to
    run_original_code_with_tuning (defined elsewhere in this file) with
    is_second_finetuning=False, and formats three Markdown panels:
    data/config info, pure-BERT baseline metrics, and fine-tuned metrics.
    On any failure the traceback text is returned in the first slot and the
    other two slots are empty strings.
    """
    # Guard: a training CSV must be uploaded before anything runs.
    if file is None:
        return "請上傳 CSV 檔案", "", ""
    try:
        results = run_original_code_with_tuning(
            file_path=file.name,
            weight_multiplier=weight_mult,
            epochs=int(epochs),
            batch_size=int(batch_size),
            learning_rate=lr,
            warmup_steps=int(warmup),
            tuning_method=tuning_method,
            best_metric=best_metric,
            lora_r=lora_r,
            lora_alpha=lora_alpha,
            lora_dropout=lora_dropout,
            lora_modules=lora_modules,
            adalora_init_r=adalora_init_r,
            adalora_target_r=adalora_target_r,
            adalora_tinit=adalora_tinit,
            adalora_tfinal=adalora_tfinal,
            adalora_delta_t=adalora_delta_t,
            is_second_finetuning=False
        )
        # Baseline metrics are computed on the untuned model by the trainer.
        baseline_results = results['baseline_results']
        # Format the Markdown output panels.
        data_info = f"""
# 📊 data資訊 (First Fine-tuning)
## 🔧 trainingconfiguration
- **Fine-tuning Method**: {results['tuning_method']}
- **最佳化metrics**: {results['best_metric']}
- **最佳metrics值**: {results['best_metric_value']:.4f}
## ⚙️ trainingparameters
- **Weight Multiplier**: {weight_mult}x
- **Training Epochs**: {epochs}
- **Batch Size**: {batch_size}
- **Learning Rate**: {lr}
- **Warmup Steps**: {warmup}
✅ First Fine-tuningcomplete!可進行Second Fine-tuning或prediction!
"""
        baseline_output = f"""
# 🔵 Pure BERT (Baseline)
### 📈 Evaluation Metrics
| metrics | 數值 |
|------|------|
| **F1 Score** | {baseline_results['f1']:.4f} |
| **Accuracy** | {baseline_results['accuracy']:.4f} |
| **Precision** | {baseline_results['precision']:.4f} |
| **Recall** | {baseline_results['recall']:.4f} |
| **Sensitivity** | {baseline_results['sensitivity']:.4f} |
| **Specificity** | {baseline_results['specificity']:.4f} |
| **AUC** | {baseline_results['auc']:.4f} |
### 📈 Confusion Matrix
| | prediction:Survival | prediction:Death |
|---|-----------|-----------|
| **實際:Survival** | TN={baseline_results['tn']} | FP={baseline_results['fp']} |
| **實際:Death** | FN={baseline_results['fn']} | TP={baseline_results['tp']} |
"""
        finetuned_output = f"""
# 🟢 First Fine-tuning BERT
### 📈 Evaluation Metrics
| metrics | 數值 |
|------|------|
| **F1 Score** | {results['eval_f1']:.4f} |
| **Accuracy** | {results['eval_accuracy']:.4f} |
| **Precision** | {results['eval_precision']:.4f} |
| **Recall** | {results['eval_recall']:.4f} |
| **Sensitivity** | {results['eval_sensitivity']:.4f} |
| **Specificity** | {results['eval_specificity']:.4f} |
| **AUC** | {results['eval_auc']:.4f} |
### 📈 Confusion Matrix
| | prediction:Survival | prediction:Death |
|---|-----------|-----------|
| **實際:Survival** | TN={results['eval_tn']} | FP={results['eval_fp']} |
| **實際:Death** | FN={results['eval_fn']} | TP={results['eval_tp']} |
"""
        return data_info, baseline_output, finetuned_output
    except Exception as e:
        # Surface the full traceback in the UI instead of failing silently.
        import traceback
        error_msg = f"❌ 錯誤:{str(e)}\n\n詳細錯誤訊息:\n{traceback.format_exc()}"
        return error_msg, "", ""
def train_second_wrapper(
    base_model_choice, file, weight_mult, epochs, batch_size, lr, warmup, best_metric
):
    """Gradio wrapper for the second fine-tuning run.

    Continues training the selected first-round model on a new CSV.  The
    fine-tuning method — and, when recorded in the registry, the PEFT
    hyper-parameters — are inherited from the first run's saved metadata
    rather than taken from the UI.  Returns (info markdown, metrics
    markdown); on error the first slot carries the traceback text.
    """
    if base_model_choice == "Please complete first fine-tuning first":
        return "請先在「First Fine-tuning」頁面trainingmodel", ""
    if file is None:
        return "請Upload New Training Data CSV 檔案", ""
    try:
        # The dropdown value is the raw model path (see get_first_finetuning_models).
        base_model_path = base_model_choice
        # Look up the first-round model's saved metadata.
        with open('./saved_models_list.json', 'r') as f:
            models_list = json.load(f)
        base_model_info = next(
            (m for m in models_list if m['model_path'] == base_model_path), None
        )
        if base_model_info is None:
            return "找不到基礎model資訊", ""
        # Second fine-tuning keeps the method chosen the first time.
        tuning_method = base_model_info['tuning_method']
        # FIX: inherit the first run's PEFT hyper-parameters when they were
        # saved with the model.  Previously these were always hard-coded,
        # contradicting the stated intent of reusing the first run's
        # parameters.  The old constants remain as fallbacks for registry
        # entries saved before these keys existed.
        lora_r = base_model_info.get('lora_r', 16)
        lora_alpha = base_model_info.get('lora_alpha', 32)
        lora_dropout = base_model_info.get('lora_dropout', 0.1)
        lora_modules = base_model_info.get('lora_modules', "query,value")
        adalora_init_r = base_model_info.get('adalora_init_r', 12)
        adalora_target_r = base_model_info.get('adalora_target_r', 8)
        adalora_tinit = base_model_info.get('adalora_tinit', 0)
        adalora_tfinal = base_model_info.get('adalora_tfinal', 0)
        adalora_delta_t = base_model_info.get('adalora_delta_t', 1)
        results = run_original_code_with_tuning(
            file_path=file.name,
            weight_multiplier=weight_mult,
            epochs=int(epochs),
            batch_size=int(batch_size),
            learning_rate=lr,
            warmup_steps=int(warmup),
            tuning_method=tuning_method,
            best_metric=best_metric,
            lora_r=lora_r,
            lora_alpha=lora_alpha,
            lora_dropout=lora_dropout,
            lora_modules=lora_modules,
            adalora_init_r=adalora_init_r,
            adalora_target_r=adalora_target_r,
            adalora_tinit=adalora_tinit,
            adalora_tfinal=adalora_tfinal,
            adalora_delta_t=adalora_delta_t,
            is_second_finetuning=True,
            base_model_path=base_model_path
        )
        data_info = f"""
# 📊 Second Fine-tuningresults
## 🔧 trainingconfiguration
- **基礎model**: {base_model_path}
- **Fine-tuning Method**: {results['tuning_method']} (inherited自第一次)
- **最佳化metrics**: {results['best_metric']}
- **最佳metrics值**: {results['best_metric_value']:.4f}
## ⚙️ trainingparameters
- **Weight Multiplier**: {weight_mult}x
- **Training Epochs**: {epochs}
- **Batch Size**: {batch_size}
- **Learning Rate**: {lr}
- **Warmup Steps**: {warmup}
✅ Second Fine-tuningcomplete!可進行prediction或新datatesting!
"""
        finetuned_output = f"""
# 🟢 Second Fine-tuning BERT
### 📈 Evaluation Metrics
| metrics | 數值 |
|------|------|
| **F1 Score** | {results['eval_f1']:.4f} |
| **Accuracy** | {results['eval_accuracy']:.4f} |
| **Precision** | {results['eval_precision']:.4f} |
| **Recall** | {results['eval_recall']:.4f} |
| **Sensitivity** | {results['eval_sensitivity']:.4f} |
| **Specificity** | {results['eval_specificity']:.4f} |
| **AUC** | {results['eval_auc']:.4f} |
### 📈 Confusion Matrix
| | prediction:Survival | prediction:Death |
|---|-----------|-----------|
| **實際:Survival** | TN={results['eval_tn']} | FP={results['eval_fp']} |
| **實際:Death** | FN={results['eval_fn']} | TP={results['eval_tp']} |
"""
        return data_info, finetuned_output
    except Exception as e:
        # Surface the full traceback in the UI instead of failing silently.
        import traceback
        error_msg = f"❌ 錯誤:{str(e)}\n\n詳細錯誤訊息:\n{traceback.format_exc()}"
        return error_msg, ""
def test_new_data_wrapper(test_file, baseline_choice, first_choice, second_choice):
    """Gradio wrapper for evaluating up to three models on a new test CSV.

    Delegates to test_on_new_data (defined elsewhere in this file) and turns
    each of the three optional result dicts ('baseline', 'first', 'second')
    into a Markdown metrics panel.  A placeholder string fills any slot the
    user chose not to evaluate.  On error all three slots share the
    traceback text.
    """
    # Guard: a test CSV must be uploaded.
    if test_file is None:
        return "請Upload Test Data CSV 檔案", "", ""
    try:
        all_results = test_on_new_data(
            test_file.name,
            baseline_choice,
            first_choice,
            second_choice
        )
        # Build the three Markdown panels in a fixed order.
        outputs = []
        # 1. Pure BERT baseline
        if all_results['baseline']:
            r = all_results['baseline']
            baseline_output = f"""
# 🔵 Pure BERT (Baseline)
| metrics | 數值 |
|------|------|
| **F1 Score** | {r['f1']:.4f} |
| **Accuracy** | {r['accuracy']:.4f} |
| **Precision** | {r['precision']:.4f} |
| **Recall** | {r['recall']:.4f} |
| **Sensitivity** | {r['sensitivity']:.4f} |
| **Specificity** | {r['specificity']:.4f} |
| **AUC** | {r['auc']:.4f} |
### Confusion Matrix
| | prediction:Survival | prediction:Death |
|---|-----------|-----------|
| **實際:Survival** | TN={r['tn']} | FP={r['fp']} |
| **實際:Death** | FN={r['fn']} | TP={r['tp']} |
"""
        else:
            baseline_output = "未選擇Evaluate pure BERT"
        outputs.append(baseline_output)
        # 2. First fine-tuned model
        if all_results['first']:
            r = all_results['first']
            first_output = f"""
# 🟢 First Fine-tuningmodel
| metrics | 數值 |
|------|------|
| **F1 Score** | {r['f1']:.4f} |
| **Accuracy** | {r['accuracy']:.4f} |
| **Precision** | {r['precision']:.4f} |
| **Recall** | {r['recall']:.4f} |
| **Sensitivity** | {r['sensitivity']:.4f} |
| **Specificity** | {r['specificity']:.4f} |
| **AUC** | {r['auc']:.4f} |
### Confusion Matrix
| | prediction:Survival | prediction:Death |
|---|-----------|-----------|
| **實際:Survival** | TN={r['tn']} | FP={r['fp']} |
| **實際:Death** | FN={r['fn']} | TP={r['tp']} |
"""
        else:
            first_output = "未選擇First Fine-tuningmodel"
        outputs.append(first_output)
        # 3. Second fine-tuned model
        if all_results['second']:
            r = all_results['second']
            second_output = f"""
# 🟡 第Second Fine-tuningmodel
| metrics | 數值 |
|------|------|
| **F1 Score** | {r['f1']:.4f} |
| **Accuracy** | {r['accuracy']:.4f} |
| **Precision** | {r['precision']:.4f} |
| **Recall** | {r['recall']:.4f} |
| **Sensitivity** | {r['sensitivity']:.4f} |
| **Specificity** | {r['specificity']:.4f} |
| **AUC** | {r['auc']:.4f} |
### Confusion Matrix
| | prediction:Survival | prediction:Death |
|---|-----------|-----------|
| **實際:Survival** | TN={r['tn']} | FP={r['fp']} |
| **實際:Death** | FN={r['fn']} | TP={r['tp']} |
"""
        else:
            second_output = "未選擇第Second Fine-tuningmodel"
        outputs.append(second_output)
        return outputs[0], outputs[1], outputs[2]
    except Exception as e:
        # Surface the full traceback in every panel.
        import traceback
        error_msg = f"❌ 錯誤:{str(e)}\n\n詳細錯誤訊息:\n{traceback.format_exc()}"
        return error_msg, "", ""
| # ============================================================================ | |
| # Gradio 介面 | |
| # ============================================================================ | |
| with gr.Blocks(title="Smart Colon Health Chatbox", theme=gr.themes.Soft()) as demo: | |
| gr.Markdown(""" | |
| # 🥼 BERT乳癌存prediction大型微調應用(Fine-tuning) | |
| ### 🌟 Features: | |
| - 🎯 First Fine-tuning:從Pure BERT Start training | |
| - 🔄 第Second Fine-tuning:基於第一次model用新datato continue training | |
| - 📊 新datatesting:比較三models在新data performance | |
| - 🔮 prediction功能:usingtraining好的model進行prediction | |
| """) | |
| # Tab 1: First Fine-tuning | |
| with gr.Tab("1️⃣ First Fine-tuning"): | |
| with gr.Row(): | |
| with gr.Column(scale=1): | |
| gr.Markdown("### 📤 data上傳") | |
| file_input_first = gr.File(label="Upload Training Data CSV", file_types=[".csv"]) | |
| gr.Markdown("### 🔧 Fine-tuning Method選擇") | |
| tuning_method_first = gr.Radio( | |
| choices=["Full Fine-tuning", "LoRA", "AdaLoRA"], | |
| value="Full Fine-tuning", | |
| label="Select fine-tuning method" | |
| ) | |
| gr.Markdown("### 🎯 Select Best Model Metric") | |
| best_metric_first = gr.Dropdown( | |
| choices=["f1", "accuracy", "precision", "recall", "sensitivity", "specificity", "auc"], | |
| value="f1", | |
| label="Select Optimization Metric" | |
| ) | |
| gr.Markdown("### ⚙️ Training Parameters") | |
| weight_slider_first = gr.Slider(0.1, 2.0, value=0.8, step=0.1, label="Weight Multiplier") | |
| epochs_input_first = gr.Number(value=3, label="Training Epochs") | |
| batch_size_input_first = gr.Number(value=16, label="Batch Size") | |
| lr_input_first = gr.Number(value=2e-5, label="Learning Rate") | |
| warmup_input_first = gr.Number(value=200, label="Warmup Steps") | |
| # LoRA parameters | |
| with gr.Column(visible=False) as lora_params_first: | |
| gr.Markdown("### 🔷 LoRA Parameters") | |
| lora_r_first = gr.Slider(4, 64, value=16, step=4, label="LoRA Rank (r)") | |
| lora_alpha_first = gr.Slider(8, 128, value=32, step=8, label="LoRA Alpha") | |
| lora_dropout_first = gr.Slider(0.0, 0.5, value=0.1, step=0.05, label="LoRA Dropout") | |
| lora_modules_first = gr.Textbox(value="query,value", label="Target Modules") | |
| # AdaLoRA parameters | |
| with gr.Column(visible=False) as adalora_params_first: | |
| gr.Markdown("### 🔶 AdaLoRA Parameters") | |
| adalora_init_r_first = gr.Slider(4, 64, value=12, step=4, label="初始 Rank") | |
| adalora_target_r_first = gr.Slider(4, 64, value=8, step=4, label="目標 Rank") | |
| adalora_tinit_first = gr.Number(value=0, label="Tinit") | |
| adalora_tfinal_first = gr.Number(value=0, label="Tfinal") | |
| adalora_delta_t_first = gr.Number(value=1, label="Delta T") | |
| train_button_first = gr.Button("🚀 Start First Fine-tuning", variant="primary", size="lg") | |
| with gr.Column(scale=2): | |
| gr.Markdown("### 📊 First Fine-tuning Results") | |
| data_info_output_first = gr.Markdown(value="Waiting for training...") | |
| with gr.Row(): | |
| baseline_output_first = gr.Markdown(value="### Pure BERT\nWaiting for training...") | |
| finetuned_output_first = gr.Markdown(value="### First Fine-tuning\nWaiting for training...") | |
| # Tab 2: Second Fine-tuning | |
| with gr.Tab("2️⃣ Second Fine-tuning"): | |
| with gr.Row(): | |
| with gr.Column(scale=1): | |
| gr.Markdown("### 🔄 Select Base Model") | |
| base_model_dropdown = gr.Dropdown( | |
| label="Select First Fine-tuned Model", | |
| choices=["Please complete first fine-tuning first"], | |
| value="Please complete first fine-tuning first" | |
| ) | |
| refresh_base_models = gr.Button("🔄 Refresh Model List", size="sm") | |
| gr.Markdown("### 📤 Upload New Training Data") | |
| file_input_second = gr.File(label="Upload New Training Data CSV", file_types=[".csv"]) | |
| gr.Markdown("### ⚙️ Training Parameters") | |
| gr.Markdown("⚠️ Fine-tuning Method將自動inheritedFirst Fine-tuning的method") | |
| best_metric_second = gr.Dropdown( | |
| choices=["f1", "accuracy", "precision", "recall", "sensitivity", "specificity", "auc"], | |
| value="f1", | |
| label="Select Optimization Metric" | |
| ) | |
| weight_slider_second = gr.Slider(0.1, 2.0, value=0.8, step=0.1, label="Weight Multiplier") | |
| epochs_input_second = gr.Number(value=3, label="Training Epochs", info="Recommended: fewer than first time") | |
| batch_size_input_second = gr.Number(value=16, label="Batch Size") | |
| lr_input_second = gr.Number(value=1e-5, label="Learning Rate", info="Recommended: smaller than first time") | |
| warmup_input_second = gr.Number(value=100, label="Warmup Steps") | |
| train_button_second = gr.Button("🚀 Start Second Fine-tuning", variant="primary", size="lg") | |
| with gr.Column(scale=2): | |
| gr.Markdown("### 📊 Second Fine-tuning Results") | |
| data_info_output_second = gr.Markdown(value="Waiting for training...") | |
| finetuned_output_second = gr.Markdown(value="### Second Fine-tuning\nWaiting for training...") | |
| # Tab 3: 新datatesting | |
| with gr.Tab("3️⃣ New Data Testing"): | |
| with gr.Row(): | |
| with gr.Column(scale=1): | |
| gr.Markdown("### 📤 Upload Test Data") | |
| test_file_input = gr.File(label="Upload Test Data CSV", file_types=[".csv"]) | |
| gr.Markdown("### 🎯 Select Models to Compare") | |
| gr.Markdown("Select 1-3 models進行比較") | |
| baseline_test_choice = gr.Radio( | |
| choices=["Evaluate pure BERT", "Skip"], | |
| value="Evaluate pure BERT", | |
| label="Pure BERT (Baseline)" | |
| ) | |
| first_model_test_dropdown = gr.Dropdown( | |
| label="First Fine-tuningmodel", | |
| choices=["Please select"], | |
| value="Please select" | |
| ) | |
| second_model_test_dropdown = gr.Dropdown( | |
| label="第Second Fine-tuningmodel", | |
| choices=["Please select"], | |
| value="Please select" | |
| ) | |
| refresh_test_models = gr.Button("🔄 Refresh Model List", size="sm") | |
| test_button = gr.Button("📊 Start Testing", variant="primary", size="lg") | |
| with gr.Column(scale=2): | |
| gr.Markdown("### 📊 New Data Testing Results - Three Model Comparison") | |
| with gr.Row(): | |
| baseline_test_output = gr.Markdown(value="### Pure BERT\nWaiting for testing...") | |
| first_test_output = gr.Markdown(value="### First Fine-tuning\nWaiting for testing...") | |
| second_test_output = gr.Markdown(value="### Second Fine-tuning\nWaiting for testing...") | |
| # Tab 4: prediction | |
| with gr.Tab("4️⃣ Model Prediction"): | |
| gr.Markdown(""" | |
| ### Use Trained Model for Prediction | |
| 選擇已training的model,Input Medical Record Text進行prediction。 | |
| """) | |
| with gr.Row(): | |
| with gr.Column(): | |
| model_dropdown = gr.Dropdown( | |
| label="Select Model", | |
| choices=["Please train a model first"], | |
| value="Please train a model first" | |
| ) | |
| refresh_predict_models = gr.Button("🔄 Refresh Model List", size="sm") | |
| text_input = gr.Textbox( | |
| label="Input Medical Record Text", | |
| placeholder="Please enter patient medical record description(English)...", | |
| lines=10 | |
| ) | |
| predict_button = gr.Button("🔮 Start Prediction", variant="primary", size="lg") | |
| with gr.Column(): | |
| gr.Markdown("### Prediction Results Comparison") | |
| baseline_prediction_output = gr.Markdown(label="Unfine-tuned BERT", value="Waiting for prediction...") | |
| finetuned_prediction_output = gr.Markdown(label="Fine-tuned BERT", value="Waiting for prediction...") | |
| # Tab 5: using說明 | |
| with gr.Tab("📖 User Guide"): | |
| gr.Markdown(""" | |
| ## 🔄 Second Fine-tuning流程說明 | |
| ### Step 1: First Fine-tuning | |
| 1. Upload training data A (CSV format: Text, label) | |
| 2. Select fine-tuning method (Full Fine-tuning / LoRA / AdaLoRA) | |
| 3. Adjust training parameters | |
| 4. Start training | |
| 5. 系統會自動比較Pure BERT vs First Fine-tuning performance | |
| ### Step 2: Second Fine-tuning | |
| 1. 選擇已training的First Fine-tuningmodel | |
| 2. Upload new training data B | |
| 3. Adjust training parameters (recommended epochs fewer, learning rate smaller) | |
| 4. Start training (method automatically inherited from first time) | |
| 5. Model will continue learning based on first fine-tuning weights | |
| ### Step 3: New Data Testing | |
| 1. Upload test data C | |
| 2. 選擇要比較的model (Pure BERT / 第一次 / 第二次) | |
| 3. 系統會並排顯示三models performance | |
| ### Step 4: Prediction | |
| 1. 選擇任一已trainingmodel | |
| 2. Input Medical Record Text | |
| 3. View prediction results | |
| ## ⚠️ Notes | |
| - CSV format必須包含 `Text` and `label` 欄位 | |
| - 第Second Fine-tuning會自動using第一次的Fine-tuning Method | |
| - recommended第二次的Learning Rate比第一次小,to avoid destroying learned knowledge | |
| - New data testing can evaluate up to 3 models | |
| ## 📊 Metrics Description | |
| - **F1 Score**: Balanced metric,綜合考慮精確率and召回率 | |
| - **Accuracy**: Overall accuracy | |
| - **Precision**: Accuracy of death predictions | |
| - **Recall/Sensitivity**: Proportion of actual deaths correctly identified | |
| - **Specificity**: Proportion of actual survivals correctly identified | |
| - **AUC**: ROC Area under curve,Overall classification ability | |
| """) | |
| # ==================== Event Bindings ==================== | |
| # First Fine-tuning - parameters面板顯示/隱藏 | |
| def update_first_params(method): | |
| if method == "LoRA": | |
| return gr.update(visible=True), gr.update(visible=False) | |
| elif method == "AdaLoRA": | |
| return gr.update(visible=True), gr.update(visible=True) | |
| else: | |
| return gr.update(visible=False), gr.update(visible=False) | |
    # Re-evaluate panel visibility whenever the fine-tuning method changes.
    tuning_method_first.change(
        fn=update_first_params,
        inputs=[tuning_method_first],
        outputs=[lora_params_first, adalora_params_first]
    )
    # First fine-tuning button: all UI parameters feed train_first_wrapper.
    train_button_first.click(
        fn=train_first_wrapper,
        inputs=[
            file_input_first, tuning_method_first, weight_slider_first,
            epochs_input_first, batch_size_input_first, lr_input_first,
            warmup_input_first, best_metric_first,
            lora_r_first, lora_alpha_first, lora_dropout_first, lora_modules_first,
            adalora_init_r_first, adalora_target_r_first, adalora_tinit_first,
            adalora_tfinal_first, adalora_delta_t_first
        ],
        outputs=[data_info_output_first, baseline_output_first, finetuned_output_first]
    )
    # Refresh the base-model list for second fine-tuning
| def refresh_base_models_list(): | |
| choices = get_first_finetuning_models() | |
| return gr.update(choices=choices, value=choices[0]) | |
    refresh_base_models.click(
        fn=refresh_base_models_list,
        outputs=[base_model_dropdown]
    )
    # Second fine-tuning button
    train_button_second.click(
        fn=train_second_wrapper,
        inputs=[
            base_model_dropdown, file_input_second, weight_slider_second,
            epochs_input_second, batch_size_input_second, lr_input_second,
            warmup_input_second, best_metric_second
        ],
        outputs=[data_info_output_second, finetuned_output_second]
    )
    # Refresh the model lists on the testing tab
| def refresh_test_models_list(): | |
| all_models = get_available_models() | |
| first_models = get_first_finetuning_models() | |
| # Filter第Second Fine-tuningmodel | |
| with open('./saved_models_list.json', 'r') as f: | |
| models_list = json.load(f) | |
| second_models = [m['model_path'] for m in models_list if m.get('is_second_finetuning', False)] | |
| if len(second_models) == 0: | |
| second_models = ["Please select"] | |
| return ( | |
| gr.update(choices=first_models if first_models[0] != "Please complete first fine-tuning first" else ["Please select"], value="Please select"), | |
| gr.update(choices=second_models, value="Please select") | |
| ) | |
    refresh_test_models.click(
        fn=refresh_test_models_list,
        outputs=[first_model_test_dropdown, second_model_test_dropdown]
    )
    # Testing button: evaluate the selected models on the uploaded CSV.
    test_button.click(
        fn=test_new_data_wrapper,
        inputs=[test_file_input, baseline_test_choice, first_model_test_dropdown, second_model_test_dropdown],
        outputs=[baseline_test_output, first_test_output, second_test_output]
    )
    # Refresh the prediction model list
| def refresh_predict_models_list(): | |
| choices = get_available_models() | |
| return gr.update(choices=choices, value=choices[0]) | |
    refresh_predict_models.click(
        fn=refresh_predict_models_list,
        outputs=[model_dropdown]
    )
    # Prediction button: run predict_text (defined earlier in this file)
    # and show baseline vs fine-tuned outputs side by side.
    predict_button.click(
        fn=predict_text,
        inputs=[model_dropdown, text_input],
        outputs=[baseline_prediction_output, finetuned_prediction_output]
    )
# Launch the Gradio app only when executed as a script (not on import).
if __name__ == "__main__":
    demo.launch()