rosssso committed
Commit b78ee90 · verified · 1 Parent(s): 09c07bd

Upload folder using huggingface_hub
examples/prt14_qwen25vl/requirements.txt CHANGED
@@ -2,10 +2,9 @@ transformers
 peft
 accelerate
 datasets
-torch
-torchvision
 pillow
 requests
+bitsandbytes
 tqdm
 qwen_vl_utils
 hydra-core
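
The dependency edit mirrors the code change below: bitsandbytes is required for the new BitsAndBytesConfig 4-bit load, while the explicit torch and torchvision pins are dropped, presumably because torch arrives transitively (accelerate and bitsandbytes both depend on it). A minimal pre-flight check, as a sketch (the package list and error message are illustrative, not part of the commit):

# Sketch: confirm the quantization stack resolved after installing
# requirements.txt. Package names are from the diff; torch is expected
# to be pulled in transitively rather than pinned here.
import importlib.util

for pkg in ("torch", "transformers", "peft", "bitsandbytes"):
    if importlib.util.find_spec(pkg) is None:
        raise SystemExit(f"Missing dependency: {pkg}")
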
examples/prt14_qwen25vl/train_prt14.py CHANGED
@@ -17,6 +17,7 @@ from transformers import (
     Trainer,
     TrainingArguments,
     get_scheduler,
+    BitsAndBytesConfig,
 )
 
 # Configure logging
@@ -59,6 +60,7 @@ class PRTTrainer(Trainer):
         if self.ref_model.device != device:
             self.ref_model.to(device)
 
+
         with torch.no_grad():
             ref_outputs = self.ref_model(**inputs)
             ref_logits = ref_outputs.logits
@@ -167,19 +169,27 @@ def main(cfg: DictConfig):
     # 2. Load Models (Heavy)
     logger.info(f"Loading Models: {cfg.model.model_id}")
 
-    # Reference Model (Frozen)
+    # Configure 4-bit quantization
+    bnb_config = BitsAndBytesConfig(
+        load_in_4bit=True,
+        bnb_4bit_quant_type="nf4",
+        bnb_4bit_compute_dtype=torch.bfloat16 if cfg.model.bf16 else torch.float16,
+        bnb_4bit_use_double_quant=True,
+    )
+
+    # Reference Model (Frozen, 4-bit)
     ref_model = Qwen2VLForConditionalGeneration.from_pretrained(
         cfg.model.model_id,
-        torch_dtype=torch.bfloat16 if cfg.model.bf16 else torch.float16,
-        device_map=None,
+        quantization_config=bnb_config,
+        device_map={"": 0},  # Explicitly put on GPU 0
         trust_remote_code=True,
     )
 
-    # Reward Model (Trainable Base)
+    # Reward Model (Trainable Base, 4-bit)
     reward_model = Qwen2VLForConditionalGeneration.from_pretrained(
         cfg.model.model_id,
-        torch_dtype=torch.bfloat16 if cfg.model.bf16 else torch.float16,
-        device_map=None,
+        quantization_config=bnb_config,
+        device_map={"": 0},  # Explicitly put on GPU 0
         trust_remote_code=True,
     )
 
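
For orientation, a standalone sketch of the same load path (the model id below is an assumption; the script reads it from cfg.model.model_id). NF4 with double quantization stores weights in roughly a quarter of the fp16 footprint, and device_map={"": 0} places the entire model on GPU 0 rather than letting accelerate shard it:

# Standalone sketch of the 4-bit load; illustrative, not part of the commit.
import torch
from transformers import BitsAndBytesConfig, Qwen2VLForConditionalGeneration

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",              # NormalFloat4 weight format
    bnb_4bit_compute_dtype=torch.bfloat16,  # matmuls run in bf16
    bnb_4bit_use_double_quant=True,         # also quantize the quant constants
)
model = Qwen2VLForConditionalGeneration.from_pretrained(
    "Qwen/Qwen2-VL-7B-Instruct",  # assumption: the commit uses cfg.model.model_id
    quantization_config=bnb_config,
    device_map={"": 0},
    trust_remote_code=True,
)
print(f"quantized footprint: {model.get_memory_footprint() / 2**30:.1f} GiB")
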
 
@@ -197,7 +207,9 @@
         reward_model.print_trainable_parameters()
     else:
         logger.info("Full Fine-Tuning Mode")
-        reward_model.gradient_checkpointing_enable()
+
+    # Unconditionally enable gradient checkpointing for memory efficiency
+    reward_model.gradient_checkpointing_enable()
 
     # Define training arguments
     training_args = PRTTrainingArguments(
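
One caveat the hunk above leaves implicit: gradient checkpointing on a quantized base normally needs input gradients enabled and the KV cache disabled. A hedged sketch of the usual pairing with peft (assumed, not part of this commit):

# Sketch: peft's k-bit helper freezes the base parameters, enables gradient
# checkpointing, and turns on input gradients for quantized models in one call.
from peft import prepare_model_for_kbit_training

reward_model = prepare_model_for_kbit_training(
    reward_model, use_gradient_checkpointing=True
)
reward_model.config.use_cache = False  # cache is incompatible with checkpointing
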
 