Percy3822 committed · Commit 68c2563 · verified · 1 Parent(s): 719f624

Create train.py

Files changed (1)
  1. train.py +90 -0
train.py ADDED
@@ -0,0 +1,90 @@
+import argparse, os
+from datasets import load_dataset
+from transformers import (
+    AutoTokenizer, AutoModelForCausalLM,
+    DataCollatorForLanguageModeling, Trainer, TrainingArguments
+)
+
+def parse_args():
+    ap = argparse.ArgumentParser()
+    ap.add_argument("--dataset", required=True, help="JSON/JSONL file (or folder with shards)")
+    ap.add_argument("--output", default="trained_model")
+    ap.add_argument("--model_name", default="Salesforce/codegen-350M-multi")
+    ap.add_argument("--epochs", type=float, default=1.0)
+    ap.add_argument("--batch_size", type=int, default=2)
+    ap.add_argument("--block_size", type=int, default=256)
+    ap.add_argument("--learning_rate", type=float, default=5e-5)
+    ap.add_argument("--subset", type=int, default=0, help="Use first N rows for quick runs")
+    return ap.parse_args()
+
+def main():
+    a = parse_args()
+    print(f"📦 Loading dataset from: {a.dataset}", flush=True)
+
+    if os.path.isdir(a.dataset):
+        # Glob every .jsonl shard under the folder
+        pattern = os.path.join(a.dataset, "**/*.jsonl")
+        ds = load_dataset("json", data_files=pattern, split="train")
+    else:
+        ds = load_dataset("json", data_files=a.dataset, split="train")
+
+    cols = ds.column_names
+    print("🧾 Columns:", cols, flush=True)
+
+    # Accept either {"text": "..."} or {"prompt": "...", "completion": "..."}
+    def to_text(example):
+        if "text" in example:
+            return example["text"]
+        if "prompt" in example and "completion" in example:
+            return (str(example["prompt"]).rstrip() + "\n" + str(example["completion"]))
+        raise ValueError("Dataset must have 'text' or 'prompt' + 'completion'.")
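+    # Illustrative rows that to_text accepts (hypothetical sample data,
+    # not part of this commit):
+    #   {"text": "def add(a, b):\n    return a + b"}
+    #   {"prompt": "Add two ints.", "completion": "def add(a, b):\n    return a + b"}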
+
+    if a.subset and a.subset > 0:
+        ds = ds.select(range(min(a.subset, len(ds))))
+        print(f"✂ Subset: {len(ds)} rows", flush=True)
+
+    print(f"🧠 Loading model: {a.model_name}", flush=True)
+    tok = AutoTokenizer.from_pretrained(a.model_name, use_fast=True)
+    if tok.pad_token is None and tok.eos_token is not None:
+        tok.pad_token = tok.eos_token  # causal LMs often ship without a pad token
+    model = AutoModelForCausalLM.from_pretrained(a.model_name)
+
+    def tokenize(batch):
+        # With batched=True, `batch` is a dict of column lists; rebuild
+        # per-row dicts so to_text can inspect each example.
+        rows = [dict(zip(batch.keys(), vals)) for vals in zip(*batch.values())]
+        texts = [to_text(x) for x in rows]
+        return tok(texts, padding="max_length", truncation=True, max_length=a.block_size)
+
+    print("🔁 Tokenizing…", flush=True)
+    tokenized = ds.map(tokenize, batched=True, remove_columns=cols)
+    # mlm=False makes the collator copy input_ids into labels for causal LM training
+    collator = DataCollatorForLanguageModeling(tokenizer=tok, mlm=False)
+
+    args = TrainingArguments(
+        output_dir=a.output,
+        overwrite_output_dir=True,
+        per_device_train_batch_size=a.batch_size,
+        num_train_epochs=a.epochs,
+        learning_rate=a.learning_rate,
+        logging_steps=5,
+        save_steps=200,
+        save_total_limit=1,
+        report_to=[],
+        fp16=False,  # CPU-friendly in Spaces
+    )
+
+    print("⚙ Trainer…", flush=True)
+    trainer = Trainer(model=model, args=args, train_dataset=tokenized,
+                      tokenizer=tok, data_collator=collator)
+
+    print("🚀 Training…", flush=True)
+    trainer.train()
+
+    print(f"💾 Saving to {a.output}", flush=True)
+    os.makedirs(a.output, exist_ok=True)
+    trainer.save_model(a.output)
+    tok.save_pretrained(a.output)
+    print("✅ Done.", flush=True)
+
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as e:
+        print(f"❌ Error during training: {e}", flush=True)
+        raise
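
A minimal invocation sketch (the dataset path and flag values below are illustrative, not part of the commit):

  python train.py --dataset data/train.jsonl --subset 200 --epochs 1

Pointing --dataset at a folder of .jsonl shards works the same way, and the fine-tuned model and tokenizer land in the --output directory (trained_model by default).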