KevinKeller committed on
Commit
d3abe91
·
verified ·
1 Parent(s): 319ff67

Upload train_question_generator.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. train_question_generator.py +6 -26
train_question_generator.py CHANGED
@@ -1,13 +1,11 @@
1
  # /// script
2
- # dependencies = ["trl>=0.12.0", "peft>=0.13.0", "datasets", "transformers>=4.45.0", "accelerate", "bitsandbytes", "huggingface_hub"]
3
  # ///
4
 
5
  import os
6
  from datasets import load_dataset
7
  from peft import LoraConfig
8
  from trl import SFTTrainer, SFTConfig
9
- from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
10
- import torch
11
 
12
  # Authenticate
13
  from huggingface_hub import login
@@ -26,26 +24,8 @@ if eval_dataset:
26
  print(f"Eval samples: {len(eval_dataset)}")
27
 
28
  # Using Qwen2.5-7B for question generation
29
- print("Loading model: Qwen/Qwen2.5-7B-Instruct...")
30
  model_id = "Qwen/Qwen2.5-7B-Instruct"
31
-
32
- # 4-bit quantization for fitting on A10G
33
- bnb_config = BitsAndBytesConfig(
34
- load_in_4bit=True,
35
- bnb_4bit_quant_type="nf4",
36
- bnb_4bit_compute_dtype=torch.bfloat16,
37
- )
38
-
39
- tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
40
- if tokenizer.pad_token is None:
41
- tokenizer.pad_token = tokenizer.eos_token
42
-
43
- model = AutoModelForCausalLM.from_pretrained(
44
- model_id,
45
- quantization_config=bnb_config,
46
- device_map="auto",
47
- trust_remote_code=True,
48
- )
49
 
50
  # LoRA config - slightly higher rank for more complex task
51
  peft_config = LoraConfig(
@@ -57,7 +37,7 @@ peft_config = LoraConfig(
57
  task_type="CAUSAL_LM",
58
  )
59
 
60
- # Training config - removed max_seq_length, gradient_checkpointing from SFTConfig
61
  training_args = SFTConfig(
62
  output_dir="./question-generator-output",
63
  num_train_epochs=2,
@@ -73,17 +53,17 @@ training_args = SFTConfig(
73
  push_to_hub=True,
74
  hub_model_id="KevinKeller/cognitive-question-generator-qwen2.5-7b",
75
  report_to="none",
 
 
76
  )
77
 
78
  print("Starting training...")
79
  trainer = SFTTrainer(
80
- model=model,
81
  train_dataset=train_dataset,
82
  eval_dataset=eval_dataset,
83
  peft_config=peft_config,
84
- processing_class=tokenizer, # Renamed from 'tokenizer' in TRL 0.12+
85
  args=training_args,
86
- max_seq_length=8192,
87
  )
88
 
89
  trainer.train()
 
1
  # /// script
2
+ # dependencies = ["trl>=0.20.0", "peft>=0.13.0", "datasets", "transformers>=4.45.0", "accelerate", "bitsandbytes", "huggingface_hub"]
3
  # ///
4
 
5
  import os
6
  from datasets import load_dataset
7
  from peft import LoraConfig
8
  from trl import SFTTrainer, SFTConfig
 
 
9
 
10
  # Authenticate
11
  from huggingface_hub import login
 
24
  print(f"Eval samples: {len(eval_dataset)}")
25
 
26
  # Using Qwen2.5-7B for question generation
 
27
  model_id = "Qwen/Qwen2.5-7B-Instruct"
28
+ print(f"Using model: {model_id}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
29
 
30
  # LoRA config - slightly higher rank for more complex task
31
  peft_config = LoraConfig(
 
37
  task_type="CAUSAL_LM",
38
  )
39
 
40
+ # Training config - modern TRL API
41
  training_args = SFTConfig(
42
  output_dir="./question-generator-output",
43
  num_train_epochs=2,
 
53
  push_to_hub=True,
54
  hub_model_id="KevinKeller/cognitive-question-generator-qwen2.5-7b",
55
  report_to="none",
56
+ max_seq_length=8192,
57
+ load_in_4bit=True, # Enable 4-bit quantization
58
  )
59
 
60
  print("Starting training...")
61
  trainer = SFTTrainer(
62
+ model=model_id, # Pass model name, not loaded model
63
  train_dataset=train_dataset,
64
  eval_dataset=eval_dataset,
65
  peft_config=peft_config,
 
66
  args=training_args,
 
67
  )
68
 
69
  trainer.train()