# youtube-thumbnail-analysis / train_script.py
# Uploaded by epinfomax via huggingface_hub (commit 90068b2, verified)
# /// script
# dependencies = ["trl>=0.12.0", "peft>=0.7.0", "trackio", "datasets"]
# ///
from datasets import load_dataset
from peft import LoraConfig
from trl import SFTTrainer, SFTConfig
import trackio
# Load the YouTube-thumbnail analysis corpus from the Hugging Face Hub.
_DATASET_REPO = "epinfomax/youtube-thumbnail-analysis"
print("데이터셋 로드 중...")
dataset = load_dataset(_DATASET_REPO, split="train")
print(f"데이터셋 크기: {len(dataset)}개")

# Hold out 10% of the examples for evaluation; the fixed seed keeps the
# split reproducible across runs.
dataset_split = dataset.train_test_split(test_size=0.1, seed=42)
# LoRA 설정
# LoRA adapter configuration: rank-16 adapters applied to the attention
# projection matrices (q/k/v/o) of the base model, trained as a causal LM.
_LORA_SETTINGS = dict(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
    task_type="CAUSAL_LM",
)
peft_config = LoraConfig(**_LORA_SETTINGS)
# 학습 설정
# Trainer hyper-parameters, collected in one place and unpacked into
# SFTConfig below.
_TRAINING_SETTINGS = {
    # Local checkpoint directory.
    "output_dir": "./outputs",
    # Hub publishing (required for this workflow): push on every save.
    "push_to_hub": True,
    "hub_model_id": "epinfomax/youtube-thumbnail-trend-analyzer",
    "hub_strategy": "every_save",
    # Core optimization schedule.
    "num_train_epochs": 3,
    "per_device_train_batch_size": 2,
    "gradient_accumulation_steps": 4,  # effective batch = 2 * 4 = 8 per device
    "learning_rate": 2e-4,
    "warmup_ratio": 0.1,
    # Periodic evaluation on the held-out split.
    "eval_strategy": "steps",
    "eval_steps": 20,
    # Checkpointing: keep only the two most recent saves.
    "save_strategy": "steps",
    "save_steps": 50,
    "save_total_limit": 2,
    # Memory/precision trade-offs.
    "gradient_checkpointing": True,
    "bf16": True,
    # Metrics reporting via trackio.
    "report_to": "trackio",
    "run_name": "youtube-thumbnail-trainer",
    # Logging cadence.
    "logging_steps": 10,
}
training_args = SFTConfig(**_TRAINING_SETTINGS)
# Build the trainer. TRL accepts the model as a Hub id string and loads
# the checkpoint itself; peft_config wraps it with the LoRA adapters.
print("트레이너 초기화 중...")
trainer = SFTTrainer(
    model="Qwen/Qwen2.5-0.5B",
    train_dataset=dataset_split["train"],
    eval_dataset=dataset_split["test"],
    peft_config=peft_config,
    args=training_args,
)

# Run fine-tuning. With push_to_hub=True and hub_strategy="every_save",
# intermediate checkpoints are already uploaded during training.
print("학습 시작!")
trainer.train()

# Explicit final push of the trained adapter to the Hub.
print("모델 Hub에 저장 중...")
trainer.push_to_hub()
print("학습 완료!")
# Fix: the original used an f-string with no placeholders (ruff F541);
# output is byte-identical.
print("모델 저장됨: https://huggingface.co/epinfomax/youtube-thumbnail-trend-analyzer")