Percy3822 committed
Commit 6d266d1 · verified · 1 Parent(s): eca2f3b

Update train.py

Files changed (1)
  1. train.py +16 -20
train.py CHANGED
@@ -1,53 +1,45 @@
-import argparse, os
+import argparse, os, traceback
 from datasets import load_dataset
 from transformers import (
     AutoTokenizer, AutoModelForCausalLM,
     DataCollatorForLanguageModeling, Trainer, TrainingArguments
 )
 
+DONE = "TRAIN_DONE"
+ERRF = "TRAIN_ERROR"
+
 def parse_args():
     ap = argparse.ArgumentParser()
-    ap.add_argument("--dataset", required=True, help="JSON/JSONL file (or folder with shards)")
+    ap.add_argument("--dataset", required=True)
     ap.add_argument("--output", default="trained_model")
     ap.add_argument("--model_name", default="Salesforce/codegen-350M-multi")
     ap.add_argument("--epochs", type=float, default=1.0)
     ap.add_argument("--batch_size", type=int, default=2)
     ap.add_argument("--block_size", type=int, default=256)
     ap.add_argument("--learning_rate", type=float, default=5e-5)
-    ap.add_argument("--subset", type=int, default=0, help="Use first N rows for quick runs")
+    ap.add_argument("--subset", type=int, default=0)
     return ap.parse_args()
 
 def main():
     a = parse_args()
     print(f"📦 Loading dataset from: {a.dataset}", flush=True)
-
-    if os.path.isdir(a.dataset):
-        pattern = os.path.join(a.dataset, "**/*.jsonl")
-        ds = load_dataset("json", data_files=pattern, split="train")
-    else:
-        ds = load_dataset("json", data_files=a.dataset, split="train")
-
+    ds = load_dataset("json", data_files=a.dataset, split="train")
     cols = ds.column_names
     print("🧾 Columns:", cols, flush=True)
-
     if a.subset and a.subset > 0:
         ds = ds.select(range(min(a.subset, len(ds))))
         print(f"✂ Subset: {len(ds)} rows", flush=True)
 
-    print(f"🧠 Loading model: {a.model_name}", flush=True)
     tok = AutoTokenizer.from_pretrained(a.model_name, use_fast=True)
     if tok.pad_token is None and tok.eos_token is not None:
         tok.pad_token = tok.eos_token
     model = AutoModelForCausalLM.from_pretrained(a.model_name)
 
-    # ✅ batched=True passes dict-of-lists
     def tokenize(batch):
         if "text" in batch:
             texts = batch["text"]
         elif "prompt" in batch and "completion" in batch:
-            prompts = batch["prompt"]
-            completions = batch["completion"]
-            texts = [(str(p).rstrip() + "\n" + str(c)) for p, c in zip(prompts, completions)]
+            texts = [str(p).rstrip() + "\n" + str(c) for p, c in zip(batch["prompt"], batch["completion"])]
         else:
             raise ValueError("Dataset must have 'text' or 'prompt' + 'completion'.")
         return tok(texts, padding="max_length", truncation=True, max_length=a.block_size)
@@ -63,8 +55,7 @@ def main():
         num_train_epochs=a.epochs,
         learning_rate=a.learning_rate,
         logging_steps=5,
-        save_steps=200,
-        save_total_limit=1,
+        save_strategy="no",
         report_to=[],
         fp16=False,
     )
@@ -80,7 +71,12 @@ def main():
     os.makedirs(a.output, exist_ok=True)
     trainer.save_model(a.output)
     tok.save_pretrained(a.output)
+    open(DONE, "w").write("ok")  # <-- signal file
     print("✅ Done.", flush=True)
 
-if __name__ == "__main__":
-    main()
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception:
+        open(ERRF, "w").write(traceback.format_exc())
+        raise
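The new DONE / ERRF constants give an outer process a way to detect success or failure without parsing trainer logs: train.py touches TRAIN_DONE on a clean exit and dumps the traceback into TRAIN_ERROR before re-raising. Below is a minimal sketch of a supervisor that consumes this contract; the watcher script, its flag values, and the polling interval are illustrative assumptions, not part of this commit.

```python
# watcher.py -- hypothetical supervisor for train.py (not part of this commit).
# Must run in the same working directory as train.py, since the
# TRAIN_DONE / TRAIN_ERROR paths in the script are relative.
import os
import subprocess
import time

DONE = "TRAIN_DONE"
ERRF = "TRAIN_ERROR"

def run_and_wait(poll_seconds: float = 5.0) -> int:
    # Clear stale signal files left over from a previous run.
    for f in (DONE, ERRF):
        if os.path.exists(f):
            os.remove(f)

    # Example invocation; the dataset path and subset size are made up.
    proc = subprocess.Popen([
        "python", "train.py",
        "--dataset", "data.jsonl",
        "--output", "trained_model",
        "--subset", "100",
    ])
    while proc.poll() is None:
        time.sleep(poll_seconds)

    if os.path.exists(DONE):
        print("✅ training finished cleanly")
    elif os.path.exists(ERRF):
        with open(ERRF) as fh:
            print("training failed:\n" + fh.read())
    else:
        print(f"no signal file written; exit code {proc.returncode}")
    return proc.returncode

if __name__ == "__main__":
    raise SystemExit(run_and_wait())
```

Paired with save_strategy="no", this keeps the run directory predictable: the only outputs are the final model under --output and one of the two marker files.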