| """ |
| train_sft.py — TeenEmo SFT(教師あり微調整) |
| |
| フロー: |
| 1. LFM2.5-1.2B-Base を HF Hub からロード |
| 2. LoRA アダプタを設定 |
| 3. SFT データセットを HF Hub から取得・変換 |
| 4. SFTTrainer で学習 |
| 5. SFT 完了 → HF Hub にチェックポイントを push(STEP 1/3) |
| ※ GGUF保存は DPO 完了後に train_dpo.py が行う |
| |
| 実行例: |
| python train_sft.py |
| SFT_EPOCHS=2 SFT_BATCH_SIZE=16 python train_sft.py |
| """ |
|
|
from __future__ import annotations

import os
import sys
import traceback
from datetime import datetime, timezone
from pathlib import Path

# Fail fast: verify the token before the heavy unsloth/trl imports below.
if not os.environ.get("HF_TOKEN"):
    print("[ERROR] HF_TOKEN is not set. Run: export HF_TOKEN='hf_...'")
    sys.exit(1)

import torch
# unsloth should be imported before trl so its fast-path patches apply.
from unsloth import FastLanguageModel, is_bfloat16_supported
from trl import SFTTrainer, SFTConfig

import train_config as cfg
from train_utils import (
    setup_logger, log_gpu_info, log_training_config,
    load_sft_dataset, apply_chat_template_sft,
)


def main() -> None:
    start_time = datetime.now(timezone.utc)
    log_dir = Path(cfg.SFT_OUTPUT_DIR) / "logs"
    log_dir.mkdir(parents=True, exist_ok=True)
    log_file = log_dir / f"sft_{start_time.strftime('%Y%m%d_%H%M%S')}.log"

    logger = setup_logger("sft", str(log_file))
    logger.info(f"=== TeenEmo SFT started [{start_time.isoformat()}] ===")

    log_gpu_info(logger)
    log_training_config(logger, "SFT")

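    # Step 1: load the base model. dtype=None lets Unsloth auto-select
    # bf16/fp16 for the GPU; load_in_4bit=False keeps full precision
    # (presumably the 1.2B model fits in VRAM unquantized).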
| logger.info(f"モデルロード中: {cfg.BASE_MODEL}") |
| try: |
| model, tokenizer = FastLanguageModel.from_pretrained( |
| model_name=cfg.BASE_MODEL, |
| max_seq_length=cfg.MAX_SEQ_LENGTH, |
| dtype=None, |
| load_in_4bit=False, |
| token=cfg.HF_TOKEN or None, |
| ) |
| logger.info("モデルロード完了 ✅") |
| except Exception as e: |
| logger.error(f"モデルロードエラー: {e}") |
| logger.debug(traceback.format_exc()) |
| raise |
|
|
| |
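    # Step 2: attach the LoRA adapter. Only the adapter weights are
    # trainable; the base model stays frozen. The rank/alpha/dropout and
    # target modules come from train_config.py, and
    # use_gradient_checkpointing="unsloth" is Unsloth's VRAM-saving
    # checkpointing variant.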
| logger.info("LoRA アダプタ設定中...") |
| try: |
| model = FastLanguageModel.get_peft_model( |
| model, |
| r=cfg.LORA_R, |
| target_modules=cfg.LORA_TARGET_MODULES, |
| lora_alpha=cfg.LORA_ALPHA, |
| lora_dropout=cfg.LORA_DROPOUT, |
| bias="none", |
| use_gradient_checkpointing="unsloth", |
| random_state=3407, |
| use_rslora=False, |
| loftq_config=None, |
| ) |
| total_params = sum(p.numel() for p in model.parameters()) |
| trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad) |
| logger.info(f" 全パラメータ数: {total_params:,}") |
| logger.info(f" 学習可能パラメータ数: {trainable_params:,} ({trainable_params/total_params*100:.2f}%)") |
| except Exception as e: |
| logger.error(f"LoRA 設定エラー: {e}") |
| logger.debug(traceback.format_exc()) |
| raise |
|
|
| |
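    # Step 3: fetch and convert the dataset. load_sft_dataset and
    # apply_chat_template_sft are project helpers from train_utils.py;
    # rows the chat template leaves empty are filtered out below so they
    # cannot end up in training.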
| logger.info("データセット準備中...") |
| try: |
| raw_ds = load_sft_dataset(logger) |
| logger.info("チャットテンプレート適用中...") |
| ds = raw_ds.map( |
| lambda x: apply_chat_template_sft(x, tokenizer, logger), |
| batched=True, |
| remove_columns=raw_ds.column_names, |
| desc="チャットテンプレート適用", |
| ) |
| before = len(ds) |
| ds = ds.filter(lambda x: len(x["text"]) > 0) |
| logger.info(f" 変換後: {before} → {len(ds)} 件") |
| logger.debug(f" サンプル[0]:\n{ds[0]['text'][:300]}") |
| except Exception as e: |
| logger.error(f"データセット準備エラー: {e}") |
| logger.debug(traceback.format_exc()) |
| raise |
|
|
| |
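    # Step 4: build the trainer. Hyperparameters come from
    # train_config.py (overridable via environment variables, per the
    # module docstring). As a quick sanity check, log the effective
    # batch size (per-device batch × grad-accum steps; single GPU
    # assumed).
    logger.info(f"Effective batch size: {cfg.SFT_BATCH_SIZE * cfg.SFT_GRAD_ACCUM}")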
| logger.info("SFTTrainer 初期化中...") |
| try: |
| trainer = SFTTrainer( |
| model=model, |
| tokenizer=tokenizer, |
| train_dataset=ds, |
| dataset_text_field="text", |
| max_seq_length=cfg.MAX_SEQ_LENGTH, |
| packing=cfg.SFT_PACKING, |
| args=SFTConfig( |
| output_dir=cfg.SFT_OUTPUT_DIR, |
| per_device_train_batch_size=cfg.SFT_BATCH_SIZE, |
| gradient_accumulation_steps=cfg.SFT_GRAD_ACCUM, |
| num_train_epochs=cfg.SFT_EPOCHS, |
| learning_rate=cfg.SFT_LR, |
| warmup_ratio=cfg.SFT_WARMUP_RATIO, |
| lr_scheduler_type=cfg.SFT_LR_SCHEDULER, |
| weight_decay=cfg.SFT_WEIGHT_DECAY, |
| fp16=not is_bfloat16_supported(), |
| bf16=is_bfloat16_supported(), |
| logging_steps=cfg.SFT_LOGGING_STEPS, |
| save_steps=cfg.SFT_SAVE_STEPS, |
| save_total_limit=2, |
| optim="adamw_8bit", |
| seed=3407, |
| report_to="none", |
| dataset_num_proc=2, |
| ), |
| ) |
| logger.info("SFTTrainer 初期化完了 ✅") |
| except Exception as e: |
| logger.error(f"SFTTrainer 初期化エラー: {e}") |
| logger.debug(traceback.format_exc()) |
| raise |
|
|
| |
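    # trainer.train() returns a TrainOutput; its .metrics dict carries
    # the runtime and throughput figures logged below.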
| logger.info("SFT 学習開始...") |
| try: |
| train_result = trainer.train() |
| logger.info("SFT 学習完了 ✅") |
| logger.info(f" train_loss: {train_result.training_loss:.4f}") |
| logger.info(f" train_runtime: {train_result.metrics.get('train_runtime', 0):.0f}s") |
| logger.info(f" samples/sec: {train_result.metrics.get('train_samples_per_second', 0):.2f}") |
| except Exception as e: |
| logger.error(f"SFT 学習エラー: {e}") |
| logger.debug(traceback.format_exc()) |
| raise |
|
|
| |
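    # Save only the LoRA adapter weights locally; merged weights and the
    # GGUF export are deferred to train_dpo.py (see module docstring).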
    lora_dir = Path(cfg.SFT_OUTPUT_DIR) / "lora"
    logger.info(f"Saving LoRA adapter: {lora_dir}")
    try:
        model.save_pretrained(str(lora_dir))
        tokenizer.save_pretrained(str(lora_dir))
        logger.info("LoRA adapter saved ✅")
    except Exception as e:
        logger.error(f"LoRA save error: {e}")
        logger.debug(traceback.format_exc())
        raise

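    # Step 5 (STEP 1/3 of the pipeline): push the SFT checkpoint
    # (train_dpo.py presumably resumes from it). Push failures are
    # logged but not fatal, since the adapter was already saved locally.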
    if cfg.PUSH_TO_HUB and cfg.HF_TOKEN:
        logger.info(f"[STEP 1/3] Pushing SFT checkpoint to the HF Hub: {cfg.SFT_HF_REPO}")
        try:
            model.push_to_hub(cfg.SFT_HF_REPO, token=cfg.HF_TOKEN)
            tokenizer.push_to_hub(cfg.SFT_HF_REPO, token=cfg.HF_TOKEN)
            logger.info(f"  ✅ https://huggingface.co/{cfg.SFT_HF_REPO}")
        except Exception as e:
            logger.error(f"SFT push error: {e}")
            logger.debug(traceback.format_exc())

    elapsed = (datetime.now(timezone.utc) - start_time).total_seconds()
    logger.info(f"=== SFT finished ({elapsed/60:.1f} min) ===")
    logger.info("Next step: python train_dpo.py")
    logger.info(f"Log file: {log_file}")


if __name__ == "__main__":
    main()