kgrabko commited on
Commit
1f6b1d9
·
verified ·
1 Parent(s): ce3ac9d

Upload fine_tune_jit_with_validation_cuda_1b.py

Browse files
fine_tune_jit_with_validation_cuda_1b.py ADDED
@@ -0,0 +1,465 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ # install tokenizer before run
4
+ mkdir -p tokenizer
5
+ wget -O tokenizer/tokenizer.json https://huggingface.co/gpt2/resolve/main/tokenizer.json
6
+ wget -O tokenizer/vocab.json https://huggingface.co/gpt2/resolve/main/vocab.json
7
+ wget -O tokenizer/merges.txt https://huggingface.co/gpt2/resolve/main/merges.txt
8
+ wget -O tokenizer/tokenizer_config.json https://huggingface.co/gpt2/resolve/main/tokenizer_config.json
9
+
10
+ Updated fine-tuning script, version "prefer Python nn.Module with gradient checkpointing".
11
+
12
+ What it does:
13
+ - Tries to load a local Python implementation of the model (as torch.nn.Module). If found — uses it and
14
+ enables gradient_checkpointing (if the model supports it).
15
+ - If no Python model class is found — falls back to JIT ScriptModule (as before).
16
+ - If the original weights are only available as JIT, attempts to extract state_dict() from the ScriptModule
17
+ and load it into the nn.Module (best-effort).
18
+ - Saves the final trained model as a JIT torch.jit.save at the end, or as state_dict if something fails.
19
+ - Saves the tokenizer locally (./tokenizer) and uses it. Gives a helpful message if the tokenizer is missing.
20
+ - Supports AMP (autocast + GradScaler) on GPU.
21
+ - Optional support for bitsandbytes 8-bit optimizer (if installed).
22
+ - Comments and console messages are in Russian.
23
+
24
+ Before running: if you have a Python file with the model implementation
25
+ (for example gpt_modern_1b.py or gpt_modern_1b_class.py), place it in the same folder
26
+ and make sure it contains a class named JiRackPyTorch (or one of the other names the script looks for).
27
+ If no such file exists — the script will just use the JIT model as before.
28
+ """
29
+
30
+ import os
31
+ os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "max_split_size_mb:128,garbage_collection_threshold:0.6")
32
+
33
+ import sys
34
+ import importlib
35
+ import math
36
+ import shutil
37
+ import re
38
+ from pathlib import Path
39
+ from typing import Optional
40
+
41
+ import torch
42
+ import torch.nn as nn
43
+ import torch.optim as optim
44
+ from torch.utils.data import IterableDataset, DataLoader
45
+ from transformers import GPT2TokenizerFast
46
+ from tqdm import tqdm
47
+ from torch.cuda.amp import GradScaler, autocast
48
+
# ========================= SETTINGS =========================
# Every hyperparameter below can be overridden via an environment variable
# of the same name.
TRAIN_SEQ_LEN = int(os.environ.get("TRAIN_SEQ_LEN", 64))          # tokens per training example
BATCH_SIZE = int(os.environ.get("BATCH_SIZE", 1))                 # sequences per optimizer step
EPOCHS = int(os.environ.get("EPOCHS", 999))                       # effectively "run until interrupted"
LEARNING_RATE = float(os.environ.get("LEARNING_RATE", 6e-6))
WEIGHT_DECAY = float(os.environ.get("WEIGHT_DECAY", 0.01))
GRAD_CLIP = float(os.environ.get("GRAD_CLIP", 1.0))               # max gradient norm for clipping
KEEP_LAST_EPOCHS = int(os.environ.get("KEEP_LAST_EPOCHS", 3))     # epoch checkpoint dirs to retain
VAL_SPLIT_RATIO = float(os.environ.get("VAL_SPLIT_RATIO", 0.05))  # fraction of sequences held out for validation

# Model artifact locations: base JIT export, rolling "last trained" copy,
# and an optional raw state_dict for the Python nn.Module path.
BASE_MODEL_PATH = Path("models/gpt_modern_1b_class.script.pt")
LAST_TRAINED_PATH = Path("models/gpt_1b_last_trained.script.pt")
PT_STATE_DICT_PATH = Path("models/gpt_modern_1b_class.state_dict.pt")
BACKUP_DIR = Path("models/backups")
BACKUP_DIR.mkdir(parents=True, exist_ok=True)  # NOTE: side effect at import time

# Dataset files: raw text and its whitespace-cleaned derivative.
RAW_PATH = Path("datasets/dialogues_text.txt")
CLEAN_PATH = Path("datasets/dialogues_text_clean.txt")
TOKENIZER_LOCAL_DIR = Path("./tokenizer")

OUTPUT_DIR = Path("build/fine_tuning_output")  # per-epoch and final checkpoints live here
MODEL_SAVE_NAME = "gpt_finetuned.script.pt"

# Single-GPU or CPU; all tensors/models are moved to this device explicitly.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")
# ========================= Tokenizer helper =========================
def _load_tokenizer_local(tokenizer_name: str = "gpt2"):
    """Load a GPT-2 tokenizer strictly from local files.

    Probes a list of candidate locations in order — the TOKENIZER_PATH
    environment variable (if set), ./tokenizer, the given tokenizer name,
    then the current directory — and returns the first one that loads.

    Raises:
        RuntimeError: with setup instructions when no candidate works.
    """
    env_path = os.environ.get("TOKENIZER_PATH")
    candidates = ([env_path] if env_path else []) + [
        str(TOKENIZER_LOCAL_DIR),
        tokenizer_name,
        "./",
    ]

    for location in candidates:
        try:
            tokenizer = GPT2TokenizerFast.from_pretrained(location, local_files_only=True)
            # GPT-2 ships without a pad token; reuse EOS so padding works downstream.
            if getattr(tokenizer, "pad_token", None) is None:
                tokenizer.pad_token = tokenizer.eos_token
            print(f"Tokenizer loaded from: {location}")
            return tokenizer
        except Exception:
            continue  # best-effort: try the next candidate location

    raise RuntimeError(
        "Local tokenizer not found. Place tokenizer.json or (vocab.json + merges.txt) into ./tokenizer\n"
        "OR set the path via TOKENIZER_PATH environment variable.\n"
        "Example: export TOKENIZER_PATH=/path/to/tokenizer\n"
        "If you have internet access, you can temporarily use transformers.GPT2TokenizerFast.from_pretrained('gpt2')"
    )
# ========================= Dataset =========================
class LazyTextDataset(IterableDataset):
    """Iterable next-token-prediction dataset over a single text file.

    Despite the name, the whole file is read and tokenized once in
    ``__init__``; iteration then yields ``(input, label)`` pairs where the
    label sequence is the input shifted right by one token. The token
    stream is partitioned into contiguous train/validation slices
    according to ``val_ratio``.
    """

    def __init__(self, text_file: Path, seq_len: int = TRAIN_SEQ_LEN, tokenizer_name: str = "gpt2",
                 split_type: str = 'train', val_ratio: float = VAL_SPLIT_RATIO):
        self.seq_len = seq_len
        self.tokenizer = _load_tokenizer_local(tokenizer_name)
        self.text_file = Path(text_file)
        self.split_type = split_type
        self.val_ratio = val_ratio

        print(f"Loading and tokenizing {self.text_file} (one-time tokenization into ids)...")
        with open(self.text_file, "r", encoding="utf-8") as f:
            raw_text = f.read()
        self.tokens = self.tokenizer.encode(raw_text)

        # One label token is needed past each input window, hence len - 1.
        usable_tokens = max(0, len(self.tokens) - 1)
        n_sequences = usable_tokens // seq_len if seq_len > 0 else 0
        n_val = int(n_sequences * val_ratio)
        n_train = n_sequences - n_val
        if split_type == 'train':
            self.start, self.stop = 0, n_train
        elif split_type == 'val':
            self.start, self.stop = n_train, n_train + n_val
        else:
            raise ValueError("split_type must be 'train' or 'val'")
        self.total_sequences = max(0, self.stop - self.start)
        print(f"Split {split_type}: {self.total_sequences} sequences (out of {n_sequences})")

    def __iter__(self):
        step = self.seq_len
        for offset in range(self.start * step, self.stop * step, step):
            # Need seq_len inputs plus one extra token for the shifted labels.
            if offset + step + 1 > len(self.tokens):
                break
            window = self.tokens[offset: offset + step + 1]
            yield (torch.tensor(window[:-1], dtype=torch.long),
                   torch.tensor(window[1:], dtype=torch.long))

    def __len__(self):
        return self.total_sequences
# ========================= Try to load Python nn.Module model =========================
def try_load_python_model():
    """Attempt to find and import a local Python model implementation (nn.Module).

    Scans a fixed list of candidate module names; within each importable
    module, tries a fixed list of candidate class names. The first class
    that can be instantiated with no arguments wins.

    Returns:
        (model_instance, "python:<module>.<class>") on success,
        (None, None) when no suitable implementation is found.
    """
    # BUGFIX: the file only does `import importlib`, which does not guarantee
    # the `importlib.util` submodule is bound — accessing importlib.util could
    # raise AttributeError. Import the submodule explicitly before use.
    import importlib.util

    candidate_modules = [
        "gpt_modern_1b_class",
        "gpt_modern_1b",
        "gpt_modern_1b_class_fixed",
        "model", "ji_rack_model",
    ]
    candidate_classes = [
        "JiRackPyTorch",
        "JiRackPyTorch1B",
        "GPTModel",
        "JiRackModel",
    ]

    for modname in candidate_modules:
        try:
            if importlib.util.find_spec(modname) is None:
                continue
            mod = importlib.import_module(modname)
            for cls_name in candidate_classes:
                if not hasattr(mod, cls_name):
                    continue
                cls = getattr(mod, cls_name)
                try:
                    inst = cls()
                except Exception as e:
                    # Class exists but cannot be constructed; keep looking.
                    print(f"Found class {cls_name} in {modname} but instantiation failed: {e}")
                    continue
                print(f"Loaded Python model class {cls_name} from module {modname}")
                return inst, f"python:{modname}.{cls_name}"
        except Exception:
            # Broken / partially importable module — best-effort, skip it.
            continue
    return None, None
# ========================= Utility: load weights from JIT script into nn.Module =========================
def load_weights_from_script_to_module(script_path: Path, module_model: nn.Module):
    """Best-effort transfer of weights from a saved ScriptModule into *module_model*.

    Loads the TorchScript archive on CPU, extracts its state_dict, and loads
    it non-strictly so name/shape mismatches are tolerated.

    Returns:
        True when the (possibly partial) load succeeded, False on any failure.
    """
    try:
        scripted = torch.jit.load(script_path, map_location="cpu")
    except Exception as e:
        print(f"Cannot load script {script_path}: {e}")
        return False

    try:
        weights = scripted.state_dict()
    except Exception as e:
        print(f"ScriptModule.state_dict() failed: {e}")
        return False

    try:
        # strict=False: tolerate missing/unexpected keys between the two graphs.
        module_model.load_state_dict(weights, strict=False)
    except Exception as e:
        print(f"load_state_dict failed: {e}")
        return False

    print("Weights successfully loaded from ScriptModule into Python nn.Module (strict=False).")
    return True
# ========================= Helper to get logits from any model type =========================
def get_logits_from_model(model, inputs: torch.Tensor):
    """Run *model* on *inputs* (moved to the global device) and return logits.

    Models handled here may return either a bare logits tensor or a
    tuple/list whose first element is the logits; both are normalized
    to a single tensor.
    """
    output = model(inputs.to(device))
    return output[0] if isinstance(output, (tuple, list)) else output
# ========================= Evaluation =========================
def evaluate(model, dataloader, criterion):
    """Return the mean per-batch loss of *model* over *dataloader*.

    Switches the model to eval mode for the duration (restored to train
    mode afterwards) and runs without gradient tracking. An empty loader
    yields 0.0 (division by max(1, batches)).
    """
    model.eval()
    running_loss, batches = 0.0, 0
    with torch.no_grad():
        for batch_inputs, batch_targets in dataloader:
            batch_inputs = batch_inputs.to(device)
            batch_targets = batch_targets.to(device)
            logits = get_logits_from_model(model, batch_inputs)
            flat_logits = logits.contiguous().view(-1, logits.size(-1))
            # Truncate targets in case the model emitted fewer positions.
            flat_targets = batch_targets.contiguous().view(-1)[:flat_logits.shape[0]]
            running_loss += float(criterion(flat_logits, flat_targets).item())
            batches += 1
    model.train()
    return running_loss / max(1, batches)
# ========================= Training loop =========================
def _cleanup_old_epochs():
    """Delete stale per-epoch checkpoint dirs, keeping the newest KEEP_LAST_EPOCHS.

    Epoch directories are named "epoch<N>" under OUTPUT_DIR; anything that
    does not match that pattern is left untouched. Failures are non-fatal
    (best-effort cleanup).
    """
    epoch_dirs = []
    for entry in OUTPUT_DIR.glob("epoch*"):
        m = re.fullmatch(r"epoch(\d+)", entry.name)
        if m and entry.is_dir():
            epoch_dirs.append((int(m.group(1)), entry))
    epoch_dirs.sort()
    stale = epoch_dirs[:-KEEP_LAST_EPOCHS] if KEEP_LAST_EPOCHS > 0 else epoch_dirs
    for _, path in stale:
        try:
            shutil.rmtree(path)
            print(f"Removed old checkpoint dir: {path}")
        except Exception as e:
            print(f"Could not remove {path}: {e}")


def train():
    """Fine-tune the model with AMP, per-epoch checkpoints and validation.

    Model resolution order: a local Python nn.Module implementation
    (preferred, with gradient checkpointing when supported) falling back to
    a JIT ScriptModule. After each epoch: validation, a traced-JIT (or
    state_dict) checkpoint, and cleanup of old checkpoints. At the end the
    final model and tokenizer are saved and the rolling "last trained" copy
    is refreshed (previous copy backed up).
    """
    OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
    print("Loading model...")

    python_model, python_source = try_load_python_model()
    model = None
    model_source = None

    # Prefer Python nn.Module if available
    if python_model is not None:
        model = python_model
        model_source = python_source
        loaded = False
        # Try to load latest weights (state_dict first, then JIT → state_dict)
        if PT_STATE_DICT_PATH.exists():
            try:
                sd = torch.load(PT_STATE_DICT_PATH, map_location="cpu")
                model.load_state_dict(sd, strict=False)
                print(f"Loaded state_dict from {PT_STATE_DICT_PATH}")
                loaded = True
            except Exception as e:
                print(f"Failed to load state_dict from {PT_STATE_DICT_PATH}: {e}")
        if not loaded and LAST_TRAINED_PATH.exists():
            if load_weights_from_script_to_module(LAST_TRAINED_PATH, model):
                loaded = True
        if not loaded and BASE_MODEL_PATH.exists():
            if load_weights_from_script_to_module(BASE_MODEL_PATH, model):
                loaded = True
    else:
        # Fallback to JIT ScriptModule
        if LAST_TRAINED_PATH.exists():
            model = torch.jit.load(LAST_TRAINED_PATH, map_location=device)
            model_source = f"jit:{LAST_TRAINED_PATH}"
        elif BASE_MODEL_PATH.exists():
            model = torch.jit.load(BASE_MODEL_PATH, map_location=device)
            model_source = f"jit:{BASE_MODEL_PATH}"
        else:
            print("ERROR: No model found (neither Python module nor JIT). Place a model file or Python implementation.")
            return

    print(f"Model loaded from: {model_source}")

    # BUGFIX: torch.jit.ScriptModule subclasses nn.Module, so a plain
    # isinstance(model, nn.Module) check routed JIT models down the
    # Python-module path (gradient-checkpointing attempts, re-tracing on
    # save). Explicitly exclude ScriptModule.
    is_python_module = isinstance(model, nn.Module) and not isinstance(model, torch.jit.ScriptModule)
    if is_python_module:
        model.to(device)
        model.train()
        try:
            model.gradient_checkpointing_enable()
            print("Gradient checkpointing ENABLED on Python nn.Module.")
        except Exception:
            try:
                model.gradient_checkpointing = True
                print("Set attribute gradient_checkpointing = True (best-effort).")
            except Exception:
                print("Gradient checkpointing not available on this Python model.")
    else:
        # ScriptModule path
        try:
            model.to(device)
        except Exception:
            print("Warning: model.to(device) failed for ScriptModule; trying best-effort buffer move.")
        model.train()
        print("Training on ScriptModule (gradient checkpointing not available).")

    # ========================= Dataset preparation =========================
    if not CLEAN_PATH.exists():
        if not RAW_PATH.exists():
            raise FileNotFoundError(f"Missing dataset {RAW_PATH}")
        print("Cleaning raw dataset → cleaned version...")
        text = RAW_PATH.read_text(encoding="utf-8")
        text = re.sub(r" {2,}", " ", text)  # collapse runs of spaces
        text = text.replace(" \n", "\n").replace("\n ", "\n")  # strip space around newlines
        CLEAN_PATH.write_text(text, encoding="utf-8")
        print(f"Cleaned dataset saved → {CLEAN_PATH}")

    train_dataset = LazyTextDataset(CLEAN_PATH, seq_len=TRAIN_SEQ_LEN, split_type='train', val_ratio=VAL_SPLIT_RATIO)
    val_dataset = LazyTextDataset(CLEAN_PATH, seq_len=TRAIN_SEQ_LEN, split_type='val', val_ratio=VAL_SPLIT_RATIO)

    # IterableDataset: no shuffling; num_workers=0 keeps tokenized data in-process.
    train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=False, drop_last=True, num_workers=0)
    val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE, shuffle=False, drop_last=True, num_workers=0)

    # ========================= Optimizer (try 8-bit first) =========================
    try:
        import bitsandbytes as bnb  # type: ignore
        try:
            optimizer = bnb.optim.AdamW8bit(model.parameters(), lr=LEARNING_RATE, weight_decay=WEIGHT_DECAY)
        except Exception:
            optimizer = bnb.optim.Adam8bit(model.parameters(), lr=LEARNING_RATE, weight_decay=WEIGHT_DECAY)
        print("Using bitsandbytes 8-bit optimizer.")
    except Exception:
        optimizer = optim.AdamW(model.parameters(), lr=LEARNING_RATE, weight_decay=WEIGHT_DECAY)
        print("Using standard torch.optim.AdamW (bitsandbytes not available).")

    criterion = nn.CrossEntropyLoss()
    scaler = GradScaler(enabled=(device.type == 'cuda'))

    if device.type == 'cuda':
        torch.cuda.empty_cache()

    total_steps = (len(train_dataset) // BATCH_SIZE) * EPOCHS if len(train_dataset) > 0 else 0
    print(f"\nSTARTING training: epochs={EPOCHS}, approx. steps={total_steps}, examples={len(train_dataset)}")
    print(f"Batch size={BATCH_SIZE}, seq_len={TRAIN_SEQ_LEN}, device={device}, AMP={'on' if device.type=='cuda' else 'off'}")

    global_step = 0
    for epoch in range(1, EPOCHS + 1):
        print(f"\n=== Epoch {epoch}/{EPOCHS} ===")
        epoch_loss = 0.0

        pbar = tqdm(train_loader, desc=f"Epoch {epoch} [TRAIN]", leave=False)
        for inputs, targets in pbar:
            inputs, targets = inputs.to(device), targets.to(device)
            optimizer.zero_grad(set_to_none=True)

            with autocast(enabled=(device.type == 'cuda')):
                logits = get_logits_from_model(model, inputs)
                logits = logits.contiguous().view(-1, logits.size(-1))
                # Truncate targets in case the model emitted fewer positions.
                targets_view = targets.contiguous().view(-1)[:logits.shape[0]]
                loss = criterion(logits, targets_view)

            # Backward pass (AMP-safe)
            if device.type == 'cuda':
                try:
                    scaler.scale(loss).backward()
                    scaler.unscale_(optimizer)
                except Exception as e:
                    print("Scaled backward failed:", e)
                    loss.backward()
                try:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), GRAD_CLIP)
                except Exception:
                    pass
                try:
                    scaler.step(optimizer)
                    scaler.update()
                except RuntimeError as e:
                    print("RuntimeError in scaler.step():", e)
                    print(torch.cuda.memory_summary())
                    # Fallback without scaler
                    try:
                        scaler.unscale_(optimizer)
                        optimizer.step()
                    except Exception as e2:
                        print("Fallback optimizer.step() failed:", e2)
                        raise e
            else:
                loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), GRAD_CLIP)
                optimizer.step()

            if device.type == 'cuda':
                # NOTE: per-step empty_cache costs throughput; kept deliberately
                # to limit fragmentation on small GPUs (see PYTORCH_CUDA_ALLOC_CONF).
                torch.cuda.empty_cache()

            loss_val = float(loss.item())
            epoch_loss += loss_val
            global_step += 1
            pbar.set_postfix({"loss": f"{loss_val:.4f}", "ppl": f"{math.exp(min(loss_val, 10)):.2f}", "step": global_step})

        avg_train_loss = epoch_loss / max(1, len(train_dataset) // BATCH_SIZE)
        print(f"[TRAIN] Avg loss: {avg_train_loss:.4f} | Perplexity: {math.exp(avg_train_loss):.2f}")

        print("Running validation...")
        val_loss = evaluate(model, val_loader, criterion)
        print(f"[VAL] Avg loss: {val_loss:.4f} | Perplexity: {math.exp(val_loss):.2f}")

        # Save checkpoint for this epoch
        epoch_dir = OUTPUT_DIR / f"epoch{epoch}"
        epoch_dir.mkdir(parents=True, exist_ok=True)
        try:
            if is_python_module:
                model.eval()
                # 50257 = GPT-2 vocab size; short dummy sequence keeps tracing cheap.
                dummy = torch.randint(0, 50257, (1, min(32, TRAIN_SEQ_LEN)), device=device)
                try:
                    traced = torch.jit.trace(model, dummy, strict=False)
                    torch.jit.save(traced, epoch_dir / MODEL_SAVE_NAME)
                    print(f"Exported traced JIT → {epoch_dir / MODEL_SAVE_NAME}")
                except Exception as e:
                    torch.save(model.state_dict(), epoch_dir / "state_dict.pt")
                    print(f"Saved state_dict (trace failed): {e}")
                model.train()
            else:
                torch.jit.save(model, epoch_dir / MODEL_SAVE_NAME)
                print(f"Saved ScriptModule → {epoch_dir / MODEL_SAVE_NAME}")
        except Exception as e:
            print("Error while saving epoch model:", e)

        # BUGFIX: the original called cleanup_old_epochs(), which is not
        # defined anywhere in this file and raised NameError after epoch 1.
        _cleanup_old_epochs()

    # ========================= Final model save =========================
    final_dir = OUTPUT_DIR / "final"
    final_dir.mkdir(parents=True, exist_ok=True)
    try:
        if is_python_module:
            model.eval()
            dummy = torch.randint(0, 50257, (1, min(32, TRAIN_SEQ_LEN)), device=device)
            traced = torch.jit.trace(model, dummy, strict=False)
            torch.jit.save(traced, final_dir / MODEL_SAVE_NAME)
            print(f"Final traced JIT saved → {final_dir / MODEL_SAVE_NAME}")
        else:
            torch.jit.save(model, final_dir / MODEL_SAVE_NAME)
            print(f"Final ScriptModule saved → {final_dir / MODEL_SAVE_NAME}")
    except Exception:
        torch.save(model.state_dict(), final_dir / "state_dict.pt")
        print("Final model saved as state_dict (trace failed).")

    # Save tokenizer with the final model (best-effort)
    try:
        train_dataset.tokenizer.save_pretrained(final_dir)
    except Exception:
        pass

    # Backup previous last-trained model and update the "current" symlink/file
    if LAST_TRAINED_PATH.exists():
        backup_path = BACKUP_DIR / f"gpt_last_trained_backup_{int(LAST_TRAINED_PATH.stat().st_mtime)}.script.pt"
        shutil.copy(LAST_TRAINED_PATH, backup_path)
        print(f"Backed up previous last_trained → {backup_path}")

    if (final_dir / MODEL_SAVE_NAME).exists():
        shutil.copy(final_dir / MODEL_SAVE_NAME, LAST_TRAINED_PATH)
        print(f"Copied final model → {LAST_TRAINED_PATH}")
    elif (final_dir / "state_dict.pt").exists():
        shutil.copy(final_dir / "state_dict.pt", LAST_TRAINED_PATH.with_suffix(".state_dict.pt"))

    print("TRAINING COMPLETED.")
# ========================= Entrypoint =========================
if __name__ == "__main__":
    # Refuse to start without the raw dataset in place.
    if RAW_PATH.exists():
        train()
    else:
        print(f"ERROR: dataset {RAW_PATH} not found. Place your training text there.")
        sys.exit(1)