import argparse
import os
from typing import List

from datasets import load_dataset
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    DataCollatorForLanguageModeling,
    TrainingArguments,
    Trainer,
)


def parse_args():
    p = argparse.ArgumentParser()
    p.add_argument("--dataset", required=True, help="Path to a JSON/JSONL file with either 'text' or 'prompt'+'completion'")
    p.add_argument("--output", default="trained_model", help="Where to save the fine-tuned model")
    p.add_argument("--model_name", default="distilgpt2", help="Base model name or path")
    p.add_argument("--epochs", type=float, default=1.0)
    p.add_argument("--batch_size", type=int, default=2)
    p.add_argument("--block_size", type=int, default=256)
    return p.parse_args()


def main():
    args = parse_args()

    print("Loading dataset:", args.dataset, flush=True)
    dataset = load_dataset("json", data_files=args.dataset, split="train")
    cols = dataset.column_names
    print("Columns detected:", cols, flush=True)

    print("Loading model & tokenizer:", args.model_name, flush=True)
    tokenizer = AutoTokenizer.from_pretrained(args.model_name)
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token  # GPT-2 family has no pad_token by default
    model = AutoModelForCausalLM.from_pretrained(args.model_name)

    def build_texts(batch) -> List[str]:
        if "text" in batch:
            return [str(t) for t in batch["text"]]
        if "prompt" in batch and "completion" in batch:
            return [f"{str(p).rstrip()}\n{str(c)}" for p, c in zip(batch["prompt"], batch["completion"])]
        raise ValueError("Dataset must have 'text' OR ('prompt' and 'completion').")
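
    # Illustrative JSONL rows matching the two accepted schemas (these example
    # contents are assumptions, not taken from any actual dataset):
    #   {"text": "Once upon a time ..."}
    #   {"prompt": "Translate to French: Hello", "completion": " Bonjour"}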

    def tokenize(batch):
        texts = build_texts(batch)
        return tokenizer(texts, padding="max_length", truncation=True, max_length=args.block_size)
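
    # Note: tokenize() produces only input_ids/attention_mask; the language-modeling
    # labels are added later by the data collator, not here.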
| print("π Tokenizing...", flush=True) | |
| tokenized = dataset.map( | |
| tokenize, | |
| batched=True, | |
| remove_columns=cols, # keep only tokenized fields | |
| ) | |

    data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
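    # With mlm=False the collator does causal-LM batching: labels are a copy of
    # input_ids with pad positions set to -100 (ignored by the loss). Because the
    # pad token was set to eos_token above, EOS positions are masked out as well.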
| print("β Preparing Trainer...", flush=True) | |
| training_args = TrainingArguments( | |
| output_dir=args.output, | |
| overwrite_output_dir=True, | |
| per_device_train_batch_size=args.batch_size, | |
| num_train_epochs=args.epochs, | |
| logging_steps=5, | |
| save_steps=50, | |
| save_total_limit=1, | |
| report_to=[], | |
| gradient_accumulation_steps=1, | |
| fp16=False, # CPU-friendly on Spaces | |
| ) | |
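    # Effective batch size per optimizer step is per_device_train_batch_size *
    # gradient_accumulation_steps (2 * 1 = 2 here); raising accumulation is a
    # memory-friendly way to simulate larger batches on CPU.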
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
| print("π Training...", flush=True) | |
| trainer.train() | |
| print("πΎ Saving model to:", args.output, flush=True) | |
| os.makedirs(args.output, exist_ok=True) | |
| trainer.save_model(args.output) | |
| tokenizer.save_pretrained(args.output) | |
| print("β Done.", flush=True) | |


if __name__ == "__main__":
    main()
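

# Example usage (the script filename "train.py" and the data file name are
# assumptions for illustration; any JSONL matching the schemas above works):
#   python train.py --dataset data.jsonl --output trained_model --epochs 1 --batch_size 2
#
# Quick sanity check of the saved model (a minimal sketch, not part of this script):
#   from transformers import AutoModelForCausalLM, AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("trained_model")
#   mdl = AutoModelForCausalLM.from_pretrained("trained_model")
#   ids = tok("Once upon a time", return_tensors="pt")
#   out = mdl.generate(**ids, max_new_tokens=30, do_sample=True)
#   print(tok.decode(out[0], skip_special_tokens=True))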