"""Fine-tune a small causal LM (default: Salesforce/codegen-350M-multi) on a
JSON/JSONL dataset. Kept CPU-friendly so it can run inside a Hugging Face Space."""
import argparse, os

from datasets import load_dataset
from transformers import (
    AutoTokenizer, AutoModelForCausalLM,
    DataCollatorForLanguageModeling, Trainer, TrainingArguments
)
def parse_args():
    ap = argparse.ArgumentParser()
    ap.add_argument("--dataset", required=True, help="JSON/JSONL file (or folder with shards)")
    ap.add_argument("--output", default="trained_model")
    ap.add_argument("--model_name", default="Salesforce/codegen-350M-multi")
    ap.add_argument("--epochs", type=float, default=1.0)
    ap.add_argument("--batch_size", type=int, default=2)
    ap.add_argument("--block_size", type=int, default=256)
    ap.add_argument("--learning_rate", type=float, default=5e-5)
    ap.add_argument("--subset", type=int, default=0, help="Use first N rows for quick runs")
    return ap.parse_args()
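
# A quick smoke-test invocation (the file name train.py is illustrative;
# substitute whatever this script is saved as):
#   python train.py --dataset data.jsonl --subset 100 --epochs 1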

def main():
    a = parse_args()
    print(f"📦 Loading dataset from: {a.dataset}", flush=True)
    if os.path.isdir(a.dataset):
        # Glob all JSONL shards in the folder.
        pattern = os.path.join(a.dataset, "*.jsonl")
        ds = load_dataset("json", data_files=pattern, split="train")
    else:
        ds = load_dataset("json", data_files=a.dataset, split="train")

    cols = ds.column_names
    print("🧾 Columns:", cols, flush=True)
    # Accept either {"text": "..."} or {"prompt": "...", "completion": "..."}
    def to_text(example):
        if "text" in example:
            return example["text"]
        if "prompt" in example and "completion" in example:
            return str(example["prompt"]).rstrip() + "\n" + str(example["completion"])
        raise ValueError("Dataset must have 'text' or 'prompt' + 'completion'.")
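
    # Illustrative rows, one per schema (example data, not shipped with this script):
    #   {"text": "def add(a, b):\n    return a + b"}
    #   {"prompt": "Sum two ints.", "completion": "def add(a, b):\n    return a + b"}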

    if a.subset and a.subset > 0:
        ds = ds.select(range(min(a.subset, len(ds))))
        print(f"✂ Subset: {len(ds)} rows", flush=True)

    print(f"🧠 Loading model: {a.model_name}", flush=True)
    tok = AutoTokenizer.from_pretrained(a.model_name, use_fast=True)
    if tok.pad_token is None and tok.eos_token is not None:
        # GPT-style tokenizers (CodeGen included) ship without a pad token;
        # reuse EOS so padded batches work.
        tok.pad_token = tok.eos_token
    model = AutoModelForCausalLM.from_pretrained(a.model_name)

    def tokenize(batch):
        # With batched=True, `batch` is a dict of column -> list; rebuild
        # per-row dicts so to_text() can pick the right fields.
        rows = [dict(zip(batch.keys(), vals)) for vals in zip(*batch.values())]
        texts = [to_text(row) for row in rows]
        return tok(texts, padding="max_length", truncation=True, max_length=a.block_size)

    print("🔁 Tokenizing…", flush=True)
    tokenized = ds.map(tokenize, batched=True, remove_columns=cols)
    # mlm=False -> plain causal-LM objective: the collator copies input_ids
    # into labels and masks pad tokens out of the loss.
    collator = DataCollatorForLanguageModeling(tokenizer=tok, mlm=False)

    args = TrainingArguments(
        output_dir=a.output,
        overwrite_output_dir=True,
        per_device_train_batch_size=a.batch_size,
        num_train_epochs=a.epochs,
        learning_rate=a.learning_rate,
        logging_steps=5,
        save_steps=200,
        save_total_limit=1,
        report_to=[],
        fp16=False,  # CPU-friendly in Spaces
    )
| print("⚙ Trainer…", flush=True) | |
| trainer = Trainer(model=model, args=args, train_dataset=tokenized, | |
| tokenizer=tok, data_collator=collator) | |
| print("🚀 Training…", flush=True) | |
| trainer.train() | |
| print(f"💾 Saving to {a.output}", flush=True) | |
| os.makedirs(a.output, exist_ok=True) | |
| trainer.save_model(a.output) | |
| tok.save_pretrained(a.output) | |
| print("✅ Done.", flush=True) | |

if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        print(f"❌ Error during training: {e}", flush=True)
        raise
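
# A minimal reload-for-generation sketch, assuming the default --output dir
# (illustrative only; this script does not run it):
#   from transformers import AutoTokenizer, AutoModelForCausalLM
#   tok = AutoTokenizer.from_pretrained("trained_model")
#   model = AutoModelForCausalLM.from_pretrained("trained_model")
#   ids = tok("def hello():", return_tensors="pt")
#   print(tok.decode(model.generate(**ids, max_new_tokens=32)[0]))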