import argparse, os, sys
from typing import List

from datasets import load_dataset
from transformers import (
    AutoTokenizer, AutoModelForCausalLM,
    DataCollatorForLanguageModeling, TrainingArguments, Trainer
)


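# Usage sketch (illustrative; the file name `train.py` and `data.jsonl` are
# placeholders, not defined by this script):
#   python train.py --dataset data.jsonl --output trained_model
#   python train.py --dataset data.jsonl --quick 1   # tiny model, capped steps and rows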
def parse_args():
    p = argparse.ArgumentParser()
    p.add_argument("--dataset", required=True, help="JSON/JSONL (.jsonl or .jsonl.gz)")
    p.add_argument("--output", default="trained_model")
    p.add_argument("--model_name", default="distilgpt2")
    p.add_argument("--epochs", type=float, default=0.5)
    p.add_argument("--batch_size", type=int, default=2)
    p.add_argument("--block_size", type=int, default=256)
    p.add_argument("--learning_rate", type=float, default=5e-5)

    # Smoke-test knobs: --quick swaps in a tiny model and caps steps and rows.
    p.add_argument("--quick", type=int, default=0)
    p.add_argument("--max_steps", type=int, default=0)
    p.add_argument("--subset", type=int, default=0)
    return p.parse_args()


def main():
    a = parse_args()

    # Quick smoke-test mode: tiny model, a handful of steps, small data subset.
    if a.quick:
        a.model_name = "sshleifer/tiny-gpt2"
        if a.max_steps <= 0:
            a.max_steps = 8
        if a.subset <= 0:
            a.subset = 32
        a.epochs = 1.0

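    # Load the training data: a JSON/JSONL file (optionally gzip-compressed,
    # per the --dataset help above), read entirely as a single "train" split.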
    print(f"📥 Loading dataset: {a.dataset}", flush=True)
    ds = load_dataset("json", data_files=a.dataset, split="train")
    cols = ds.column_names
    print("🧾 Columns:", cols, flush=True)

    if a.subset > 0:
        ds = ds.select(range(min(a.subset, len(ds))))
        print(f"✂ Using subset: {len(ds)} rows", flush=True)

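    # GPT-2-family checkpoints such as distilgpt2 ship without a pad token, so
    # reuse EOS for padding; this is the usual workaround for causal LMs.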
    tok = AutoTokenizer.from_pretrained(a.model_name)
    if tok.pad_token is None:
        tok.pad_token = tok.eos_token
    model = AutoModelForCausalLM.from_pretrained(a.model_name)

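    # Build one training string per row: either the raw "text" column, or the
    # prompt and completion joined by a single newline.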
    def build_texts(batch) -> List[str]:
        if "text" in batch:
            return [str(t) for t in batch["text"]]
        if "prompt" in batch and "completion" in batch:
            return [f"{str(p).rstrip()}\n{str(c)}" for p, c in zip(batch["prompt"], batch["completion"])]
        raise ValueError("Dataset must contain 'text' OR both 'prompt' and 'completion'.")

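    # Fixed-length tokenization: every example is padded/truncated to exactly
    # block_size tokens, so no dynamic padding is needed at batch time.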
    def tokenize(batch):
        texts = build_texts(batch)
        return tok(texts, padding="max_length", truncation=True, max_length=a.block_size)

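    # The map drops the original columns so only token fields remain. With
    # mlm=False the collator builds causal-LM labels (a copy of input_ids with
    # pad positions set to -100); since pad_token == eos_token here, EOS
    # positions are ignored by the loss as well.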
    print("🔁 Tokenizing…", flush=True)
    tokds = ds.map(tokenize, batched=True, remove_columns=cols)
    collator = DataCollatorForLanguageModeling(tokenizer=tok, mlm=False)

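    # Training configuration: a positive --max_steps overrides num_train_epochs,
    # and report_to=[] keeps external loggers (W&B, TensorBoard, ...) disabled.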
    print("⚙ Trainer…", flush=True)
    args = TrainingArguments(
        output_dir=a.output,
        overwrite_output_dir=True,
        per_device_train_batch_size=a.batch_size,
        num_train_epochs=a.epochs if a.max_steps == 0 else 1,
        learning_rate=a.learning_rate,
        logging_steps=1,
        save_steps=50,
        save_total_limit=1,
        report_to=[],
        fp16=False,
        max_steps=a.max_steps if a.max_steps > 0 else -1,
    )
    trainer = Trainer(model=model, args=args, train_dataset=tokds, tokenizer=tok, data_collator=collator)

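    # Train, then save the model weights and tokenizer side by side so the
    # output directory can be reloaded later with from_pretrained(a.output).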
    print("🚀 Training…", flush=True)
    trainer.train()
    print(f"💾 Saving to {a.output}", flush=True)
    os.makedirs(a.output, exist_ok=True)
    trainer.save_model(a.output)
    tok.save_pretrained(a.output)
    print("✅ Done.", flush=True)


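# Any failure surfaces as a nonzero exit code so calling scripts or CI can detect it.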
if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        print(f"❌ Training failed: {e}", flush=True)
        sys.exit(1)