# /// script
# dependencies = ["trl>=0.12.0,<0.13", "peft>=0.13.0", "datasets", "transformers>=4.46.0", "accelerate", "bitsandbytes", "huggingface_hub", "torch"]
# ///
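
# QLoRA supervised fine-tuning of Qwen2.5-7B-Instruct for cognitive question
# generation. The PEP 723 block above lets an inline-metadata-aware runner
# resolve dependencies, e.g. `uv run train.py` (use whatever this file is
# actually named). trl is capped below 0.13 because the keyword names used
# here (SFTConfig.max_seq_length, processing_class) match the 0.12 API.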

import os
from datasets import load_dataset
from peft import LoraConfig
from trl import SFTTrainer, SFTConfig
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
import torch

# Authenticate (push_to_hub below needs valid Hub credentials)
from huggingface_hub import login
hf_token = os.environ.get("HF_TOKEN")
if hf_token:
    login(token=hf_token)
    print("Authenticated with HuggingFace")
else:
    print("Warning: HF_TOKEN not set; push_to_hub may fail unless you are already logged in")

print("Loading dataset...")
dataset = load_dataset("KevinKeller/cognitive-question-generator-v1")
train_dataset = dataset["train"]
eval_dataset = dataset.get("validation")

print(f"Train samples: {len(train_dataset)}")
if eval_dataset:
    print(f"Eval samples: {len(eval_dataset)}")

# Using Qwen2.5-7B for question generation
print("Loading model: Qwen/Qwen2.5-7B-Instruct...")
model_id = "Qwen/Qwen2.5-7B-Instruct"

# 4-bit quantization so the 7B model fits on a single A10G
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)
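
# NF4 stores the 7B weights in ~4 bits (roughly 4 GB of VRAM) while matmuls
# run in bf16 - this is what makes 7B + LoRA trainable on a 24 GB A10G.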

tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=bnb_config,
    device_map="auto",
    trust_remote_code=True,
)

# LoRA config - slightly higher rank for a more complex task;
# lora_alpha = 2 * r is a common scaling heuristic
peft_config = LoraConfig(
    r=32,
    lora_alpha=64,
    lora_dropout=0.05,
    # all attention and MLP projection layers
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
    bias="none",
    task_type="CAUSAL_LM",
)

# Training config - note that max_seq_length is an SFTConfig field in
# TRL >= 0.12; passing it to SFTTrainer directly is the deprecated path
training_args = SFTConfig(
    output_dir="./question-generator-output",
    num_train_epochs=2,
    per_device_train_batch_size=1,
    gradient_accumulation_steps=8,  # effective batch size 1 * 8 = 8
    learning_rate=1e-4,
    logging_steps=50,
    save_strategy="steps",
    save_steps=500,
    eval_strategy="steps" if eval_dataset else "no",
    eval_steps=500,
    max_seq_length=8192,
    bf16=True,
    push_to_hub=True,
    hub_model_id="KevinKeller/cognitive-question-generator-qwen2.5-7b",
    report_to="none",
)

print("Starting training...")
trainer = SFTTrainer(
    model=model,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    peft_config=peft_config,
    processing_class=tokenizer,  # TRL >= 0.12 renamed `tokenizer` to `processing_class`
    args=training_args,
)
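
# SFTTrainer wraps the base model with PEFT when peft_config is passed, so
# the trainable-parameter count (a small fraction of 7B) can be checked here:
trainer.model.print_trainable_parameters()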

trainer.train()
print("Training complete! Pushing to Hub...")
trainer.push_to_hub()
print("Done!")