KevinKeller committed on
Commit
882aa90
·
verified ·
1 Parent(s): b3f451d

Upload train_question_generator.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. train_question_generator.py +92 -0
train_question_generator.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# /// script
# dependencies = ["trl>=0.12.0", "peft>=0.13.0", "datasets", "transformers>=4.45.0", "accelerate", "bitsandbytes", "huggingface_hub"]
# ///

"""Fine-tune Qwen2.5-7B-Instruct as a cognitive-question generator.

QLoRA setup: 4-bit NF4-quantized base model plus LoRA adapters, trained
with TRL's SFTTrainer so the run fits on a single A10G. The resulting
adapter is pushed to the Hugging Face Hub.
"""

import os

import torch
from datasets import load_dataset
from huggingface_hub import login
from peft import LoraConfig
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from trl import SFTConfig, SFTTrainer

# Authenticate — required for push_to_hub; skipped when HF_TOKEN is unset
# (e.g. a dry run against public resources only).
hf_token = os.environ.get("HF_TOKEN")
if hf_token:
    login(token=hf_token)
    print("Authenticated with HuggingFace")

print("Loading dataset...")
dataset = load_dataset("KevinKeller/cognitive-question-generator-v1")
train_dataset = dataset["train"]
# DatasetDict is a dict subclass, so .get() yields None when there is no
# validation split; evaluation is disabled further down in that case.
eval_dataset = dataset.get("validation")

print(f"Train samples: {len(train_dataset)}")
if eval_dataset:
    print(f"Eval samples: {len(eval_dataset)}")

# Using Qwen2.5-7B for question generation
print("Loading model: Qwen/Qwen2.5-7B-Instruct...")
model_id = "Qwen/Qwen2.5-7B-Instruct"

# 4-bit NF4 quantization so the 7B base model fits in A10G memory (24 GB);
# compute in bfloat16 to match the bf16 training flag below.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
if tokenizer.pad_token is None:
    # Some checkpoints ship without a pad token; reuse EOS for padding.
    tokenizer.pad_token = tokenizer.eos_token

model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=bnb_config,
    device_map="auto",
    trust_remote_code=True,
)

# LoRA config - slightly higher rank (r=32) for this more complex task;
# adapters on all attention and MLP projection matrices.
peft_config = LoraConfig(
    r=32,
    lora_alpha=64,
    lora_dropout=0.05,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
    bias="none",
    task_type="CAUSAL_LM",
)

# Training config. NOTE: in TRL >= 0.12 (pinned above) `max_seq_length`
# lives on SFTConfig — passing it to SFTTrainer raises a TypeError.
training_args = SFTConfig(
    output_dir="./question-generator-output",
    num_train_epochs=2,
    per_device_train_batch_size=1,
    gradient_accumulation_steps=8,  # effective batch size of 8 on one GPU
    learning_rate=1e-4,
    logging_steps=50,
    save_strategy="steps",
    save_steps=500,
    eval_strategy="steps" if eval_dataset else "no",
    eval_steps=500,
    bf16=True,
    max_seq_length=8192,
    push_to_hub=True,
    hub_model_id="KevinKeller/cognitive-question-generator-qwen2.5-7b",
    report_to="none",
)

print("Starting training...")
trainer = SFTTrainer(
    model=model,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    peft_config=peft_config,
    processing_class=tokenizer,  # Renamed from 'tokenizer' in TRL 0.12+
    args=training_args,
)

trainer.train()
print("Training complete! Pushing to Hub...")
trainer.push_to_hub()
print("Done!")