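"""Fine-tune a causal LM on a local JSON/JSONL dataset and drop sentinel files.

Usage sketch (the dataset path below is illustrative):

    python train.py --dataset data/train.jsonl --epochs 1 --batch_size 2

On success the script writes a TRAIN_DONE file next to itself; on failure it
writes the traceback to TRAIN_ERROR, so a supervising process can poll the
filesystem instead of parsing logs.
"""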
import argparse, os, traceback
from pathlib import Path
from datasets import load_dataset
from transformers import (
    AutoTokenizer, AutoModelForCausalLM,
    DataCollatorForLanguageModeling, Trainer, TrainingArguments
)
ROOT = Path(__file__).resolve().parent  # script directory, e.g. /home/user/app
DONE = ROOT / "TRAIN_DONE"              # success sentinel, written after saving
ERRF = ROOT / "TRAIN_ERROR"             # failure sentinel, holds the traceback
def parse_args():
    ap = argparse.ArgumentParser()
    ap.add_argument("--dataset", required=True)
    ap.add_argument("--output", default=str(ROOT / "trained_model"))
    ap.add_argument("--model_name", default="Salesforce/codegen-350M-multi")
    ap.add_argument("--epochs", type=float, default=1.0)
    ap.add_argument("--batch_size", type=int, default=2)
    ap.add_argument("--block_size", type=int, default=256)
    ap.add_argument("--learning_rate", type=float, default=5e-5)
    ap.add_argument("--subset", type=int, default=0)  # 0 = use the full dataset
    return ap.parse_args()
def main():
    a = parse_args()
    print(f"📦 Loading dataset from: {a.dataset}", flush=True)
    ds = load_dataset("json", data_files=a.dataset, split="train")
    cols = ds.column_names
    print("🧾 Columns:", cols, flush=True)
    if a.subset and a.subset > 0:
        ds = ds.select(range(min(a.subset, len(ds))))
        print(f"✂ Subset: {len(ds)} rows", flush=True)
    tok = AutoTokenizer.from_pretrained(a.model_name, use_fast=True)
    if tok.pad_token is None and tok.eos_token is not None:
        tok.pad_token = tok.eos_token  # causal LMs often ship without a pad token
    model = AutoModelForCausalLM.from_pretrained(a.model_name)
    def tokenize(batch):
        # Accept either a single "text" column or a "prompt"/"completion" pair.
        if "text" in batch:
            texts = batch["text"]
        elif "prompt" in batch and "completion" in batch:
            texts = [str(p).rstrip() + "\n" + str(c) for p, c in zip(batch["prompt"], batch["completion"])]
        else:
            raise ValueError("Dataset must have 'text' or 'prompt' + 'completion'.")
        return tok(texts, padding="max_length", truncation=True, max_length=a.block_size)
print("🔁 Tokenizing…", flush=True)
tokenized = ds.map(tokenize, batched=True, remove_columns=cols)
collator = DataCollatorForLanguageModeling(tokenizer=tok, mlm=False)
    args = TrainingArguments(
        output_dir=a.output,
        overwrite_output_dir=True,
        per_device_train_batch_size=a.batch_size,
        num_train_epochs=a.epochs,
        learning_rate=a.learning_rate,
        logging_steps=5,
        save_strategy="no",  # skip mid-run checkpoints; final weights saved below
        report_to=[],        # disable external loggers (wandb, tensorboard, …)
        fp16=False,
    )
print("⚙ Trainer…", flush=True)
trainer = Trainer(model=model, args=args, train_dataset=tokenized,
tokenizer=tok, data_collator=collator)
print("🚀 Training…", flush=True)
trainer.train()
print(f"💾 Saving to {a.output}", flush=True)
os.makedirs(a.output, exist_ok=True)
trainer.save_model(a.output)
tok.save_pretrained(a.output)
DONE.write_text("ok") # <- SIGNAL!
print("✅ Done.", flush=True)
if __name__ == "__main__":
    try:
        # Clear stale sentinels so a watcher only ever sees this run's outcome.
        DONE.unlink(missing_ok=True)
        ERRF.unlink(missing_ok=True)
        main()
    except Exception:
        ERRF.write_text(traceback.format_exc())
        raise
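
# Illustrative records that tokenize() accepts, one JSON object per line
# (the content shown here is made up, not from any real dataset):
#   {"text": "def add(a, b):\n    return a + b"}
#   {"prompt": "Add two numbers.", "completion": "def add(a, b):\n    return a + b"}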