File size: 2,299 Bytes
b6ae7b8
 
 
 
 
 
 
 
bfc7d04
b6ae7b8
bfc7d04
 
 
 
b6ae7b8
 
bfc7d04
b6ae7b8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bfc7d04
 
b6ae7b8
bfc7d04
 
b6ae7b8
bfc7d04
b6ae7b8
 
 
bfc7d04
 
 
b6ae7b8
 
 
 
 
 
 
 
 
bfc7d04
b6ae7b8
 
 
bfc7d04
b6ae7b8
bfc7d04
b6ae7b8
bfc7d04
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
---
# Stack 2.9 Training Configuration

# Model Configuration
model:
  name: "Qwen/Qwen2.5-Coder-32B"
  trust_remote_code: true
  torch_dtype: "bfloat16"

# Data Configuration - supports multiple training files
data:
  train_files:
    - "/Users/walidsobhi/.openclaw/workspace/stack-2.9/training-data/final/train.jsonl"
  val_file: "/Users/walidsobhi/.openclaw/workspace/stack-2.9/training-data/final/val.jsonl"
  test_file: "/Users/walidsobhi/.openclaw/workspace/stack-2.9/training-data/final/test.jsonl"
  train_dir: "/Users/walidsobhi/.openclaw/workspace/stack-2.9-training/data/train"
  eval_dir: "/Users/walidsobhi/.openclaw/workspace/stack-2.9-training/data/eval"
  max_length: 4096  # Reduced for practical training on MPS/consumer GPUs
  train_split: 0.9
  test_split: 0.1

# LoRA Configuration
lora:
  r: 64
  alpha: 128
  dropout: 0.05
  target_modules:
    - "q_proj"
    - "k_proj"
    - "v_proj"
    - "o_proj"
    - "gate_proj"
    - "up_proj"
    - "down_proj"
  bias: "none"
  task_type: "CAUSAL_LM"

# Training Configuration
training:
  num_epochs: 3
  batch_size: 1
  gradient_accumulation: 16
  learning_rate: 1.0e-4
  warmup_steps: 100
  weight_decay: 0.01
  max_grad_norm: 1.0
  logging_steps: 10
  eval_steps: 500
  save_steps: 1000
  save_total_limit: 3
  fp16: false
  bf16: true
  gradient_checkpointing: true
  optim: "adamw_torch"

# Output Configuration
output:
  lora_dir: "/Users/walidsobhi/.openclaw/workspace/stack-2.9-training/output/stack-2.9-32b-lora"
  merged_dir: "/Users/walidsobhi/.openclaw/workspace/stack-2.9-training/output/stack-2.9-32b-merged"
  awq_dir: "/Users/walidsobhi/.openclaw/workspace/stack-2.9-training/output/stack-2.9-32b-awq"

# Quantization Configuration
quantization:
  enabled: true
  bits: 4
  group_size: 128

# Logging Configuration
logging:
  report_to: "none"  # Set to "wandb" to enable Weights & Biases tracking
  wandb_project: "stack-2.9-training"
  run_name: null

# Hardware Configuration
hardware:
  device: "cuda"  # Change to "mps" for Mac, "cpu" for CPU
  num_gpus: 1
  use_4bit: true  # Enable 4-bit for 32B model on limited VRAM
  use_8bit: false

# Merge Configuration (for after training completes)
merge:
  enabled: true
  output_dir: "/Users/walidsobhi/.openclaw/workspace/stack-2.9-training/output/stack-2.9-32b-merged"