# cognitive-trainer-scripts/train_question_generator.py
# /// script
# dependencies = ["trl>=0.20.0", "peft>=0.13.0", "datasets", "transformers>=4.45.0", "accelerate", "bitsandbytes", "huggingface_hub"]
# ///
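# The block above is PEP 723 inline script metadata, so the script can be run
# directly with a compatible runner, e.g. `uv run train_question_generator.py`.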
import os
from datasets import load_dataset
from peft import LoraConfig
from trl import SFTTrainer, SFTConfig
# Authenticate
from huggingface_hub import login
hf_token = os.environ.get("HF_TOKEN")
if hf_token:
    login(token=hf_token)
    print("Authenticated with HuggingFace")
print("Loading dataset...")
dataset = load_dataset("KevinKeller/cognitive-question-generator-v1")
train_dataset = dataset["train"]
eval_dataset = dataset.get("validation")
print(f"Train samples: {len(train_dataset)}")
if eval_dataset:
    print(f"Eval samples: {len(eval_dataset)}")
# Using Qwen2.5-7B for question generation
model_id = "Qwen/Qwen2.5-7B-Instruct"
print(f"Using model: {model_id}")
# LoRA config - slightly higher rank for more complex task
peft_config = LoraConfig(
    r=32,
    lora_alpha=64,
    lora_dropout=0.05,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
    bias="none",
    task_type="CAUSAL_LM",
)
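# With r=32 and lora_alpha=64, adapter updates are scaled by alpha/r = 2;
# alpha = 2 * r is a common pairing when raising the rank.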
# Training config - modern TRL API
training_args = SFTConfig(
    output_dir="./question-generator-output",
    num_train_epochs=2,
    per_device_train_batch_size=1,
    gradient_accumulation_steps=8,
    learning_rate=1e-4,
    logging_steps=50,
    save_strategy="steps",
    save_steps=500,
    eval_strategy="steps" if eval_dataset else "no",
    eval_steps=500,
    bf16=True,
    push_to_hub=True,
    hub_model_id="KevinKeller/cognitive-question-generator-qwen2.5-7b",
    report_to="none",
    max_length=8192,  # Use max_length, not max_seq_length
)
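# Effective batch size: per_device_train_batch_size * gradient_accumulation_steps
# = 1 * 8 = 8 sequences per optimizer step on each device.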
print("Starting training...")
trainer = SFTTrainer(
    model=model_id,  # Pass model name, not loaded model
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    peft_config=peft_config,
    args=training_args,
)
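# Sanity check: with peft_config applied, trainer.model should be wrapped as a
# PeftModel, so the trainable-parameter fraction can be reported up front.
trainer.model.print_trainable_parameters()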
trainer.train()
print("Training complete! Pushing to Hub...")
trainer.push_to_hub()
print("Done!")