tellang committed on
Commit 85d9fd1 · verified · 1 Parent(s): 1528d6f

Add phase1_dora_training.ipynb with auto-resume feature

Files changed (1)
  1. phase1_dora_training.ipynb +673 -0
phase1_dora_training.ipynb ADDED
@@ -0,0 +1,673 @@
+ {
+ "nbformat": 4,
+ "nbformat_minor": 0,
+ "metadata": {
+ "colab": {
+ "provenance": [],
+ "gpuType": "A100"
+ },
+ "kernelspec": {
+ "name": "python3",
+ "display_name": "Python 3"
+ },
+ "accelerator": "GPU"
+ },
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "source": [
+ "# 🔮 YEJI Phase 1: QDoRA Fine-tuning (PEFT + TRL v0.26+)\n",
+ "\n",
+ "Fine-tunes the Qwen3-8B-Base model with **PEFT + QDoRA**.\n",
+ "\n",
+ "## Key configuration\n",
+ "- **Model**: Qwen/Qwen3-8B-Base (4-bit NF4)\n",
+ "- **Method**: QDoRA via PEFT (`use_dora=True`)\n",
+ "- **Data**: 40K balanced + 500 multi-turn examples\n",
+ "- **Environment**: Colab A100 40GB\n",
+ "- **TRL**: v0.26+ (SFTConfig style)"
+ ],
+ "metadata": {
+ "id": "cell-0"
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "---\n",
+ "## 1️⃣ Environment Setup"
+ ],
+ "metadata": {
+ "id": "cell-1"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Check the GPU\n",
+ "!nvidia-smi"
+ ],
+ "metadata": {
+ "id": "cell-2"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Install packages (TRL v0.26+)\n",
+ "!pip install --no-cache-dir -q transformers accelerate peft bitsandbytes\n",
+ "!pip install --no-cache-dir -q \"trl>=0.26\" datasets huggingface_hub\n",
+ "\n",
+ "print(\"✅ Packages installed!\")"
+ ],
+ "metadata": {
+ "id": "cell-3"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Version check and imports\n",
+ "import json\n",
+ "import gc\n",
+ "import time\n",
+ "from datetime import datetime\n",
+ "\n",
+ "import torch\n",
+ "import transformers\n",
+ "import peft\n",
+ "import trl\n",
+ "\n",
+ "print(f\"PyTorch: {torch.__version__}\")\n",
+ "print(f\"Transformers: {transformers.__version__}\")\n",
+ "print(f\"PEFT: {peft.__version__}\")\n",
+ "print(f\"TRL: {trl.__version__}\")\n",
+ "print(f\"CUDA: {torch.cuda.is_available()}\")\n",
+ "if torch.cuda.is_available():\n",
+ "    print(f\"GPU: {torch.cuda.get_device_name(0)}\")\n",
+ "    print(f\"VRAM: {torch.cuda.get_device_properties(0).total_memory / 1e9:.1f} GB\")"
+ ],
+ "metadata": {
+ "id": "cell-4"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "---\n",
+ "## 2️⃣ Configuration"
+ ],
+ "metadata": {
+ "id": "cell-5"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# ============================================================\n",
+ "# Global configuration\n",
+ "# ============================================================\n",
+ "CONFIG = {\n",
+ "    # Data\n",
+ "    \"balanced_dataset\": \"tellang/yeji-fortune-telling-ko-balanced\",\n",
+ "    \"multiturn_dataset\": \"tellang/yeji-fortune-telling-ko-multiturn\",\n",
+ "\n",
+ "    # Model\n",
+ "    \"base_model\": \"Qwen/Qwen3-8B-Base\",\n",
+ "    \"output_repo\": \"tellang/yeji-8b-qdora-v1\",\n",
+ "\n",
+ "    # QDoRA settings\n",
+ "    \"lora_r\": 8,\n",
+ "    \"lora_alpha\": 16,\n",
+ "    \"use_dora\": True,\n",
+ "\n",
+ "    # Training\n",
+ "    \"num_epochs\": 3,\n",
+ "    \"batch_size\": 2,\n",
+ "    \"grad_accum_steps\": 4,\n",
+ "    \"learning_rate\": 2e-4,\n",
+ "    \"max_seq_length\": 2048,\n",
+ "\n",
+ "    # Checkpointing\n",
+ "    \"save_steps\": 500,\n",
+ "    \"eval_steps\": 500,\n",
+ "    \"save_total_limit\": 3,\n",
+ "\n",
+ "    # Auto-shutdown after completion\n",
+ "    \"auto_shutdown\": \"unassign\",  # None / \"unassign\" / \"terminate\"\n",
+ "}\n",
+ "\n",
+ "# System prompt (Korean, kept verbatim as a training artifact). Rough gloss:\n",
+ "# \"You are the professional fortune-teller 'Yeji'. You give expert readings of saju,\n",
+ "# tarot, and horoscopes, counsel in a friendly, warm tone, and offer concrete,\n",
+ "# practical advice.\"\n",
+ "SYSTEM_PROMPT = \"\"\"당신은 전문 점술가 '예지'입니다. 사주팔자, 타로, 호로스코프를 전문적으로 해석합니다.\n",
+ "친근하고 따뜻한 말투로 상담하며, 구체적이고 실용적인 조언을 제공합니다.\"\"\"\n",
+ "\n",
+ "print(\"✅ Configuration ready\")\n",
+ "print(f\"  model: {CONFIG['base_model']}\")\n",
+ "print(f\"  DoRA: {CONFIG['use_dora']}\")\n",
+ "print(f\"  r={CONFIG['lora_r']}, alpha={CONFIG['lora_alpha']}\")"
+ ],
+ "metadata": {
+ "id": "cell-6"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# ============================================================\n",
+ "# Utility functions\n",
+ "# ============================================================\n",
+ "def shutdown_colab(mode):\n",
+ "    \"\"\"Shut down the Colab session\"\"\"\n",
+ "    if mode is None:\n",
+ "        return\n",
+ "    try:\n",
+ "        from google.colab import runtime\n",
+ "        if mode == \"unassign\":\n",
+ "            print(\"🔌 Releasing the GPU...\")\n",
+ "            runtime.unassign()\n",
+ "        elif mode == \"terminate\":\n",
+ "            import os\n",
+ "            os._exit(0)\n",
+ "    except Exception as e:\n",
+ "        print(f\"⚠️ Shutdown failed: {e}\")\n",
+ "\n",
+ "print(\"✅ Utility functions defined\")"
+ ],
+ "metadata": {
+ "id": "cell-7"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "---\n",
+ "## 3️⃣ HuggingFace Login"
+ ],
+ "metadata": {
+ "id": "cell-8"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": "# HuggingFace login\nfrom huggingface_hub import login\n\ndef extract_token(obj):\n    \"\"\"Recursively extract a token\"\"\"\n    if isinstance(obj, str) and obj.startswith('hf_'):\n        return obj\n    if isinstance(obj, dict):\n        for key in ['token', 'HF_TOKEN', 'hf_token']:\n            if key in obj:\n                result = extract_token(obj[key])\n                if result:\n                    return result\n        for v in obj.values():\n            result = extract_token(v)\n            if result:\n                return result\n    return None\n\nHF_TOKEN = None\n\n# 1. Colab secrets\ntry:\n    from google.colab import userdata\n    raw = userdata.get('HF_TOKEN')\n    HF_TOKEN = extract_token(raw) if isinstance(raw, dict) else raw\nexcept Exception:\n    pass\n\n# 2. Environment variable\nif not HF_TOKEN:\n    import os\n    HF_TOKEN = os.environ.get('HF_TOKEN')\n\n# 3. Manual input\nif not HF_TOKEN or not isinstance(HF_TOKEN, str):\n    HF_TOKEN = input(\"Enter HuggingFace token: \")\n\nlogin(token=HF_TOKEN)\nprint(\"✅ Logged in to HuggingFace!\")",
+ "metadata": {
+ "id": "cell-9"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "---\n",
+ "## 4️⃣ Data Preparation"
+ ],
+ "metadata": {
+ "id": "cell-10"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Load the tokenizer\n",
+ "from transformers import AutoTokenizer\n",
+ "\n",
+ "tokenizer = AutoTokenizer.from_pretrained(\n",
+ "    CONFIG[\"base_model\"],\n",
+ "    trust_remote_code=True,\n",
+ ")\n",
+ "\n",
+ "if tokenizer.pad_token is None:\n",
+ "    tokenizer.pad_token = tokenizer.eos_token\n",
+ "    tokenizer.pad_token_id = tokenizer.eos_token_id\n",
+ "\n",
+ "print(\"✅ Tokenizer loaded\")\n",
+ "print(f\"  vocab_size: {tokenizer.vocab_size}\")"
+ ],
+ "metadata": {
+ "id": "cell-11"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Load and convert the datasets\n",
+ "from datasets import load_dataset, concatenate_datasets\n",
+ "\n",
+ "print(\"📥 Loading datasets...\")\n",
+ "\n",
+ "# Balanced data\n",
+ "balanced_ds = load_dataset(CONFIG[\"balanced_dataset\"], split=\"train\")\n",
+ "print(f\"  balanced: {len(balanced_ds):,} examples\")\n",
+ "\n",
+ "# Multi-turn data\n",
+ "multiturn_ds = load_dataset(CONFIG[\"multiturn_dataset\"], split=\"train\")\n",
+ "print(f\"  multi-turn: {len(multiturn_ds):,} examples\")\n",
+ "\n",
+ "# Alpaca → text conversion\n",
+ "def format_alpaca(example):\n",
+ "    messages = [\n",
+ "        {\"role\": \"system\", \"content\": SYSTEM_PROMPT},\n",
+ "        {\"role\": \"user\", \"content\": example[\"instruction\"] + (\"\\n\" + example[\"input\"] if example.get(\"input\") else \"\")},\n",
+ "        {\"role\": \"assistant\", \"content\": example[\"output\"]},\n",
+ "    ]\n",
+ "    return {\"text\": tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=False)}\n",
+ "\n",
+ "# ShareGPT → text conversion\n",
+ "def format_sharegpt(example):\n",
+ "    convs = json.loads(example[\"conversations\"])\n",
+ "    messages = [{\"role\": \"system\", \"content\": SYSTEM_PROMPT}]\n",
+ "    for msg in convs:\n",
+ "        role = \"user\" if msg[\"role\"] == \"user\" else \"assistant\"\n",
+ "        messages.append({\"role\": role, \"content\": msg[\"content\"]})\n",
+ "    return {\"text\": tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=False)}\n",
+ "\n",
+ "print(\"🔄 Converting data...\")\n",
+ "balanced_formatted = balanced_ds.map(format_alpaca, remove_columns=balanced_ds.column_names, num_proc=4)\n",
+ "multiturn_formatted = multiturn_ds.map(format_sharegpt, remove_columns=multiturn_ds.column_names, num_proc=4)\n",
+ "\n",
+ "# Merge and shuffle\n",
+ "train_ds = concatenate_datasets([balanced_formatted, multiturn_formatted]).shuffle(seed=42)\n",
+ "print(f\"\\n✅ Data ready: {len(train_ds):,} examples\")"
+ ],
+ "metadata": {
+ "id": "cell-12"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Train/eval split (95:5)\n",
+ "split = train_ds.train_test_split(test_size=0.05, seed=42)\n",
+ "train_dataset = split[\"train\"]\n",
+ "eval_dataset = split[\"test\"]\n",
+ "\n",
+ "print(\"📊 Data split:\")\n",
+ "print(f\"  train: {len(train_dataset):,} examples\")\n",
+ "print(f\"  eval: {len(eval_dataset):,} examples\")\n",
+ "\n",
+ "# Inspect a sample\n",
+ "print(\"\\n📝 Sample:\")\n",
+ "print(train_dataset[0][\"text\"][:300] + \"...\")"
+ ],
+ "metadata": {
+ "id": "cell-13"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "---\n",
+ "## 5️⃣ Model Preparation (QDoRA)"
+ ],
+ "metadata": {
+ "id": "cell-14"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Load the model with 4-bit quantization\n",
+ "from transformers import AutoModelForCausalLM, BitsAndBytesConfig\n",
+ "\n",
+ "print(f\"📥 Loading model: {CONFIG['base_model']}\")\n",
+ "print(\"  (takes about 2-3 minutes)\")\n",
+ "\n",
+ "try:\n",
+ "    bnb_config = BitsAndBytesConfig(\n",
+ "        load_in_4bit=True,\n",
+ "        bnb_4bit_quant_type=\"nf4\",\n",
+ "        bnb_4bit_compute_dtype=torch.bfloat16,\n",
+ "        bnb_4bit_use_double_quant=True,\n",
+ "    )\n",
+ "\n",
+ "    model = AutoModelForCausalLM.from_pretrained(\n",
+ "        CONFIG[\"base_model\"],\n",
+ "        quantization_config=bnb_config,\n",
+ "        device_map=\"auto\",\n",
+ "        trust_remote_code=True,\n",
+ "        attn_implementation=\"eager\",\n",
+ "    )\n",
+ "\n",
+ "    print(\"\\n✅ Model loaded!\")\n",
+ "    print(f\"  VRAM: {torch.cuda.memory_allocated() / 1e9:.1f} GB\")\n",
+ "\n",
+ "except Exception as e:\n",
+ "    print(f\"❌ Model load failed: {e}\")\n",
+ "    shutdown_colab(\"unassign\")\n",
+ "    raise"
+ ],
+ "metadata": {
+ "id": "cell-15"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Apply the QDoRA adapter\n",
+ "from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training, TaskType\n",
+ "\n",
+ "print(f\"🔧 Applying QDoRA adapter... (use_dora={CONFIG['use_dora']})\")\n",
+ "\n",
+ "model = prepare_model_for_kbit_training(model)\n",
+ "\n",
+ "lora_config = LoraConfig(\n",
+ "    task_type=TaskType.CAUSAL_LM,\n",
+ "    r=CONFIG[\"lora_r\"],\n",
+ "    lora_alpha=CONFIG[\"lora_alpha\"],\n",
+ "    target_modules=[\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\", \"gate_proj\", \"up_proj\", \"down_proj\"],\n",
+ "    lora_dropout=0.05,\n",
+ "    bias=\"none\",\n",
+ "    use_dora=CONFIG[\"use_dora\"],\n",
+ ")\n",
+ "\n",
+ "model = get_peft_model(model, lora_config)\n",
+ "\n",
+ "print(\"\\n✅ QDoRA applied!\")\n",
+ "model.print_trainable_parameters()"
+ ],
+ "metadata": {
+ "id": "cell-16"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "---\n",
+ "## 6️⃣ Baseline Measurement"
+ ],
+ "metadata": {
+ "id": "cell-17"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Baseline quality measurement\n",
+ "# (Prompts and keywords stay in Korean: the model is trained and evaluated in Korean.)\n",
+ "TEST_PROMPTS = {\n",
+ "    \"사주\": \"1985년 12월 25일 자시(23시)에 태어난 사람의 사주팔자를 분석해주세요.\",\n",
+ "    \"타로\": \"취업 운세를 보려고 합니다. 타로 카드 '황제', '세계', '심판'이 나왔어요.\",\n",
+ "    \"호로스코프\": \"사자자리의 2024년 연간 운세를 알려주세요.\",\n",
+ "}\n",
+ "\n",
+ "KEYWORDS = {\n",
+ "    \"사주\": [\"년\", \"월\", \"일\", \"시\"],\n",
+ "    \"타로\": [\"황제\", \"세계\", \"심판\", \"의미\"],\n",
+ "    \"호로스코프\": [\"사자\", \"운\"],\n",
+ "}\n",
+ "\n",
+ "def generate_response(prompt, max_new_tokens=256):\n",
+ "    messages = [\n",
+ "        {\"role\": \"system\", \"content\": SYSTEM_PROMPT},\n",
+ "        {\"role\": \"user\", \"content\": prompt},\n",
+ "    ]\n",
+ "    text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n",
+ "    inputs = tokenizer(text, return_tensors=\"pt\").to(model.device)\n",
+ "\n",
+ "    with torch.no_grad():\n",
+ "        outputs = model.generate(\n",
+ "            **inputs,\n",
+ "            max_new_tokens=max_new_tokens,\n",
+ "            do_sample=True,\n",
+ "            temperature=0.7,\n",
+ "            top_p=0.9,\n",
+ "            pad_token_id=tokenizer.pad_token_id,\n",
+ "        )\n",
+ "    return tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True).strip()\n",
+ "\n",
+ "def evaluate_quality():\n",
+ "    results = {}\n",
+ "    total_score = 0\n",
+ "\n",
+ "    for domain, prompt in TEST_PROMPTS.items():\n",
+ "        response = generate_response(prompt)\n",
+ "        keywords = KEYWORDS[domain]\n",
+ "        found = [kw for kw in keywords if kw in response]\n",
+ "        score = len(found) / len(keywords) * 100\n",
+ "\n",
+ "        results[domain] = {\"score\": score, \"found\": found, \"length\": len(response)}\n",
+ "        total_score += score\n",
+ "\n",
+ "        status = \"✅\" if score >= 50 else \"⚠️\"\n",
+ "        print(f\"{status} {domain}: {score:.0f}%\")\n",
+ "        print(f\"  keywords: {found}\")\n",
+ "        print(f\"  response length: {len(response)} chars\")\n",
+ "\n",
+ "    avg = total_score / len(TEST_PROMPTS)\n",
+ "    print(f\"\\n📊 Overall score: {avg:.0f}%\")\n",
+ "    return results, avg\n",
+ "\n",
+ "print(\"=\"*50)\n",
+ "print(\"📊 Baseline (before training)\")\n",
+ "print(\"=\"*50)\n",
+ "baseline_results, baseline_avg = evaluate_quality()"
+ ],
+ "metadata": {
+ "id": "cell-18"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "---\n",
+ "## 7️⃣ Training"
+ ],
+ "metadata": {
+ "id": "cell-19"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": "# SFTConfig setup\n# NOTE: CONFIG[\"max_seq_length\"] is defined above but not wired in here; recent TRL\n# releases expose the sequence-length cap as SFTConfig's max_length argument.\nfrom trl import SFTConfig\n\nsft_config = SFTConfig(\n    output_dir=\"./yeji-qdora-v1\",\n\n    # Training\n    num_train_epochs=CONFIG[\"num_epochs\"],\n    per_device_train_batch_size=CONFIG[\"batch_size\"],\n    per_device_eval_batch_size=CONFIG[\"batch_size\"],\n    gradient_accumulation_steps=CONFIG[\"grad_accum_steps\"],\n\n    # Optimizer\n    learning_rate=CONFIG[\"learning_rate\"],\n    lr_scheduler_type=\"cosine\",\n    warmup_ratio=0.05,\n    weight_decay=0.01,\n    optim=\"paged_adamw_8bit\",\n\n    # Precision & Memory\n    bf16=True,\n    gradient_checkpointing=True,\n    max_grad_norm=0.3,\n\n    # Saving\n    save_strategy=\"steps\",\n    save_steps=CONFIG[\"save_steps\"],\n    save_total_limit=CONFIG[\"save_total_limit\"],\n    logging_steps=50,\n\n    # Evaluation\n    eval_strategy=\"steps\",\n    eval_steps=CONFIG[\"eval_steps\"],\n    load_best_model_at_end=True,\n    metric_for_best_model=\"eval_loss\",\n\n    # Hub\n    push_to_hub=True,\n    hub_model_id=CONFIG[\"output_repo\"],\n    hub_strategy=\"checkpoint\",\n\n    report_to=\"none\",\n)\n\nprint(\"✅ SFTConfig ready\")\nprint(f\"  epochs: {sft_config.num_train_epochs}\")\nprint(f\"  effective_batch: {sft_config.per_device_train_batch_size * sft_config.gradient_accumulation_steps}\")",
+ "metadata": {
+ "id": "cell-20"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": "# Initialize the SFTTrainer\nfrom trl import SFTTrainer\n\ntrainer = SFTTrainer(\n    model=model,\n    processing_class=tokenizer,\n    args=sft_config,\n    train_dataset=train_dataset,\n    eval_dataset=eval_dataset,\n)\n\nprint(\"✅ SFTTrainer initialized!\")",
+ "metadata": {
+ "id": "cell-21"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": "# ============================================================\n# [TEST] Checkpoint detection test (pre-training check)\n# ============================================================\n# Verifies that an existing checkpoint would be detected before actual training.\n# This cell is optional - safe to skip.\n\nfrom huggingface_hub import HfApi\n\ndef test_checkpoint_detection():\n    \"\"\"Checkpoint detection test\"\"\"\n    print(\"🧪 Checkpoint detection test\")\n    print(\"=\" * 50)\n\n    try:\n        api = HfApi()\n\n        # Does the repo exist?\n        try:\n            files = api.list_repo_files(CONFIG[\"output_repo\"])\n            print(f\"✅ Repo exists: {CONFIG['output_repo']}\")\n        except Exception:\n            print(f\"📭 Repo not found: {CONFIG['output_repo']}\")\n            print(\"  → Training will start from scratch.\")\n            return\n\n        # Look for checkpoint files\n        checkpoint_files = [f for f in files if f.startswith(\"last-checkpoint/\")]\n\n        if checkpoint_files:\n            print(f\"✅ Found {len(checkpoint_files)} checkpoint file(s):\")\n            for f in checkpoint_files[:5]:\n                print(f\"  - {f}\")\n            if len(checkpoint_files) > 5:\n                print(f\"  ... and {len(checkpoint_files) - 5} more\")\n\n            # Inspect trainer_state\n            if \"last-checkpoint/trainer_state.json\" in files:\n                import requests\n                url = f\"https://huggingface.co/{CONFIG['output_repo']}/raw/main/last-checkpoint/trainer_state.json\"\n                state = requests.get(url).json()\n\n                print(\"\\n📊 Training progress:\")\n                print(f\"  Step: {state.get('global_step', '?')}\")\n                print(f\"  Epoch: {state.get('epoch', 0):.2f} / {CONFIG['num_epochs']}\")\n                print(f\"  Best Loss: {state.get('best_metric', '?')}\")\n\n                # Estimate remaining work\n                total_steps = len(train_dataset) // (CONFIG[\"batch_size\"] * CONFIG[\"grad_accum_steps\"]) * CONFIG[\"num_epochs\"]\n                current_step = state.get('global_step', 0)\n                remaining = ((total_steps - current_step) / total_steps) * 100\n\n                print(\"\\n📈 Estimated progress:\")\n                print(f\"  done: {100 - remaining:.1f}%\")\n                print(f\"  remaining: {remaining:.1f}%\")\n\n            print(\"\\n✅ Test passed! A full run will resume from this checkpoint.\")\n        else:\n            print(\"📭 No checkpoint found\")\n            print(\"  → Training will start from scratch.\")\n\n    except Exception as e:\n        print(f\"❌ Test failed: {e}\")\n\ntest_checkpoint_detection()",
+ "metadata": {},
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": "# Start training (auto-detects checkpoints)\nfrom huggingface_hub import HfApi, snapshot_download\n\ndef check_checkpoint():\n    \"\"\"Check for and download a checkpoint from the HuggingFace Hub\"\"\"\n    try:\n        api = HfApi()\n        files = api.list_repo_files(CONFIG[\"output_repo\"])\n\n        if \"last-checkpoint/trainer_state.json\" in files:\n            print(\"📦 Existing checkpoint found!\")\n\n            # Download the checkpoint\n            print(\"  Downloading...\")\n            checkpoint_path = snapshot_download(\n                CONFIG[\"output_repo\"],\n                allow_patterns=[\"last-checkpoint/*\"],\n                local_dir=\"./resume_checkpoint\",\n            )\n\n            # Inspect progress (guard the formats: these keys may be missing)\n            import json\n            state_path = f\"{checkpoint_path}/last-checkpoint/trainer_state.json\"\n            with open(state_path) as f:\n                state = json.load(f)\n\n            epoch = state.get('epoch')\n            best = state.get('best_metric')\n            print(f\"  ✅ Step: {state.get('global_step', '?')}\")\n            print(f\"  ✅ Epoch: {epoch:.2f}\" if epoch is not None else \"  ✅ Epoch: ?\")\n            print(f\"  ✅ Best Loss: {best:.4f}\" if best is not None else \"  ✅ Best Loss: ?\")\n\n            return f\"{checkpoint_path}/last-checkpoint\"\n        else:\n            print(\"📭 No checkpoint - training from scratch\")\n            return None\n\n    except Exception as e:\n        print(f\"📭 Checkpoint check failed: {e}\")\n        print(\"  Training from scratch.\")\n        return None\n\n# Check for a checkpoint\nprint(\"=\" * 50)\nprint(\"🔍 Checking for a checkpoint...\")\nprint(\"=\" * 50)\nresume_path = check_checkpoint()\n\n# Start training\nprint(\"\\n\" + \"=\" * 50)\nif resume_path:\n    print(\"🔄 Resuming training!\")\nelse:\n    print(\"🚀 Starting fresh training!\")\nprint(f\"  start: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\")\nprint(f\"  data: {len(train_dataset):,} examples\")\nprint(f\"  baseline: {baseline_avg:.0f}%\")\nprint(\"=\" * 50)\n\nstart_time = time.time()\n\ntry:\n    if resume_path:\n        train_result = trainer.train(resume_from_checkpoint=resume_path)\n    else:\n        train_result = trainer.train()\n\n    elapsed = time.time() - start_time\n    print(\"\\n✅ Training complete!\")\n    print(f\"  elapsed: {elapsed/60:.1f} min\")\n    print(f\"  Train Loss: {train_result.training_loss:.4f}\")\n\nexcept Exception as e:\n    print(f\"\\n❌ Training failed: {e}\")\n    shutdown_colab(\"unassign\")\n    raise",
+ "metadata": {
+ "id": "cell-22"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "---\n",
+ "## 8️⃣ Evaluation"
+ ],
+ "metadata": {
+ "id": "cell-23"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Eval loss\n",
+ "eval_result = trainer.evaluate()\n",
+ "print(f\"📊 Eval Loss: {eval_result['eval_loss']:.4f}\")"
+ ],
+ "metadata": {
+ "id": "cell-24"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Post-training quality measurement\n",
+ "print(\"=\"*50)\n",
+ "print(\"📊 Fine-tuned (after training)\")\n",
+ "print(\"=\"*50)\n",
+ "finetuned_results, finetuned_avg = evaluate_quality()\n",
+ "\n",
+ "# Comparison\n",
+ "print(\"\\n\" + \"=\"*50)\n",
+ "print(\"📊 Baseline vs Fine-tuned\")\n",
+ "print(\"=\"*50)\n",
+ "for domain in TEST_PROMPTS:\n",
+ "    b = baseline_results[domain][\"score\"]\n",
+ "    f = finetuned_results[domain][\"score\"]\n",
+ "    diff = f - b\n",
+ "    print(f\"{domain}: {b:.0f}% → {f:.0f}% ({'+' if diff >= 0 else ''}{diff:.0f}%)\")\n",
+ "\n",
+ "improvement = finetuned_avg - baseline_avg\n",
+ "print(f\"\\nOverall: {baseline_avg:.0f}% → {finetuned_avg:.0f}% ({'+' if improvement >= 0 else ''}{improvement:.0f}%)\")"
+ ],
+ "metadata": {
+ "id": "cell-25"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "---\n",
+ "## 9️⃣ Save & Upload"
+ ],
+ "metadata": {
+ "id": "cell-26"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Save and upload\n",
+ "print(\"💾 Saving...\")\n",
+ "trainer.save_model(\"./yeji-qdora-v1-final\")\n",
+ "tokenizer.save_pretrained(\"./yeji-qdora-v1-final\")\n",
+ "\n",
+ "print(f\"📤 Uploading to the Hub: {CONFIG['output_repo']}\")\n",
+ "trainer.push_to_hub(\n",
+ "    commit_message=f\"YEJI QDoRA v1 (Loss: {train_result.training_loss:.4f}, {baseline_avg:.0f}%→{finetuned_avg:.0f}%)\"\n",
+ ")\n",
+ "\n",
+ "print(\"\\n✅ Done!\")\n",
+ "print(f\"  https://huggingface.co/{CONFIG['output_repo']}\")"
+ ],
+ "metadata": {
+ "id": "cell-27"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Results summary\n",
+ "print(\"\\n\" + \"=\"*50)\n",
+ "print(\"📊 Training results summary\")\n",
+ "print(\"=\"*50)\n",
+ "print(f\"Model: {CONFIG['base_model']}\")\n",
+ "print(f\"Method: QDoRA (r={CONFIG['lora_r']}, alpha={CONFIG['lora_alpha']})\")\n",
+ "print(f\"Data: {len(train_dataset):,} examples\")\n",
+ "print(f\"Train Loss: {train_result.training_loss:.4f}\")\n",
+ "print(f\"Eval Loss: {eval_result['eval_loss']:.4f}\")\n",
+ "print(f\"Quality: {baseline_avg:.0f}% → {finetuned_avg:.0f}%\")\n",
+ "print(f\"Output: {CONFIG['output_repo']}\")"
+ ],
+ "metadata": {
+ "id": "cell-28"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "---\n",
+ "## 🔟 Resource Cleanup"
+ ],
+ "metadata": {
+ "id": "cell-29"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Free memory\n",
+ "del model\n",
+ "del trainer\n",
+ "gc.collect()\n",
+ "torch.cuda.empty_cache()\n",
+ "print(\"✅ Memory cleared\")"
+ ],
+ "metadata": {
+ "id": "cell-30"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Auto-shutdown\n",
+ "if CONFIG[\"auto_shutdown\"]:\n",
+ "    print(\"⏰ Releasing the GPU in 5 seconds...\")\n",
+ "    time.sleep(5)\n",
+ "    shutdown_colab(CONFIG[\"auto_shutdown\"])"
+ ],
+ "metadata": {
+ "id": "cell-31"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# (Utility) Manually release the GPU\n",
+ "# from google.colab import runtime\n",
+ "# runtime.unassign()"
+ ],
+ "metadata": {
+ "id": "cell-32"
+ },
+ "execution_count": null,
+ "outputs": []
+ }
+ ]
+ }