rosssso committed (verified) · commit fc0f3cb · parent: 4175d2b

Upload folder using huggingface_hub

examples/prt14_qwen25vl/configs/config.yaml CHANGED
@@ -3,7 +3,7 @@ defaults:
 
 model:
   model_id: "Qwen/Qwen2-VL-2B-Instruct"
-  bf16: true
+  bf16: false
   use_lora: true
 
 data:
@@ -19,7 +19,7 @@ generation:
 training:
   output_dir: "output_prt14"
   batch_size: 1
-  gradient_accumulation_steps: 8
+  gradient_accumulation_steps: 1
   learning_rate: 2.0e-4
   epochs: 1
   save_steps: 100
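
Both changes target a T4-class GPU: bf16 is switched off (pre-Ampere cards such as the T4 lack native bfloat16 support), and gradient accumulation drops from 8 to 1, shrinking the effective batch size from 8 to 1. A minimal sketch of how these fields could feed transformers.TrainingArguments, assuming OmegaConf as the YAML loader (the repo's actual loading code is not part of this diff):

from omegaconf import OmegaConf
from transformers import TrainingArguments

# Hypothetical loader: assumes OmegaConf and that attribute names mirror config.yaml.
cfg = OmegaConf.load("examples/prt14_qwen25vl/configs/config.yaml")

args = TrainingArguments(
    output_dir=cfg.training.output_dir,                   # "output_prt14"
    per_device_train_batch_size=cfg.training.batch_size,  # 1
    gradient_accumulation_steps=cfg.training.gradient_accumulation_steps,  # 1 -> effective batch = 1
    learning_rate=cfg.training.learning_rate,             # 2.0e-4
    num_train_epochs=cfg.training.epochs,                 # 1
    save_steps=cfg.training.save_steps,                   # 100
    bf16=cfg.model.bf16,  # false after this commit; the T4 (sm_75) cannot run bf16 compute
)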
examples/prt14_qwen25vl/train_prt14.py CHANGED
@@ -24,6 +24,13 @@ from transformers import (
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
 
+# Enforce T4-safe attention settings (avoids OOM in SDPA).
+# The T4 does not support Flash Attention 2, and mem_efficient can sometimes use more memory or fail.
+# Math (vanilla) attention is the safest fallback.
+torch.backends.cuda.enable_flash_sdp(False)
+torch.backends.cuda.enable_mem_efficient_sdp(False)
+torch.backends.cuda.enable_math_sdp(True)
+
 @dataclass
 class PRTTrainingArguments(TrainingArguments):
     """TrainingArguments extended with PRT-specific parameters"""