tobil committed on
Commit 001aac2 · verified
1 Parent(s): 9f9c531

Upload train_4b_sft.py with huggingface_hub

Files changed (1)
  1. train_4b_sft.py +101 -0
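The commit message says the file was uploaded "with huggingface_hub", i.e. programmatically rather than through the web UI. A minimal sketch of such an upload, assuming the standard huggingface_hub client; the target repo id is not shown on this page, so "tobil/<target-repo>" below is a placeholder:

    from huggingface_hub import HfApi

    api = HfApi()  # uses a stored HF token or the HF_TOKEN env var
    api.upload_file(
        path_or_fileobj="train_4b_sft.py",  # local file to send
        path_in_repo="train_4b_sft.py",     # destination path in the repo
        repo_id="tobil/<target-repo>",      # placeholder: actual repo id not shown here
        commit_message="Upload train_4b_sft.py with huggingface_hub",
    )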
train_4b_sft.py ADDED
@@ -0,0 +1,101 @@
+ # /// script
+ # requires-python = ">=3.10"
+ # dependencies = [
+ #     "trl>=0.12.0",
+ #     "peft>=0.7.0",
+ #     "transformers>=4.36.0",
+ #     "accelerate>=0.24.0",
+ #     "trackio",
+ # ]
+ # ///
+
+ """
+ SFT training for Qwen3-4B on query expansion dataset.
+ Output: tobil/qmd-query-expansion-4B-sft
+ """
+
+ import trackio
+ from datasets import load_dataset
+ from peft import LoraConfig
+ from trl import SFTTrainer, SFTConfig
+
+ # Load dataset
+ print("Loading dataset...")
+ dataset = load_dataset("tobil/qmd-query-expansion-train-v2", split="train")
+ print(f"Dataset loaded: {len(dataset)} examples")
+
+ # Create train/eval split
+ print("Creating train/eval split...")
+ dataset_split = dataset.train_test_split(test_size=0.1, seed=42)
+ train_dataset = dataset_split["train"]
+ eval_dataset = dataset_split["test"]
+ print(f" Train: {len(train_dataset)} examples")
+ print(f" Eval: {len(eval_dataset)} examples")
+
+ # Training configuration
+ config = SFTConfig(
+     # Hub settings - use separate repo, not subfolder
+     output_dir="qmd-query-expansion-4B-sft",
+     push_to_hub=True,
+     hub_model_id="tobil/qmd-query-expansion-4B-sft",
+     hub_strategy="every_save",
+
+     # Training parameters
+     num_train_epochs=3,
+     per_device_train_batch_size=4,
+     gradient_accumulation_steps=4,
+     learning_rate=2e-4,
+     max_length=512,
+
+     # Logging & checkpointing
+     logging_steps=10,
+     save_strategy="steps",
+     save_steps=100,
+     save_total_limit=2,
+
+     # Evaluation
+     eval_strategy="steps",
+     eval_steps=100,
+
+     # Optimization
+     warmup_ratio=0.1,
+     lr_scheduler_type="cosine",
+
+     # Monitoring
+     report_to="trackio",
+     project="qmd-query-expansion",
+     run_name="qwen3-4B-sft-v2",
+ )
+
+ # LoRA configuration: rank 16, alpha 32
+ peft_config = LoraConfig(
+     r=16,
+     lora_alpha=32,
+     lora_dropout=0.05,
+     bias="none",
+     task_type="CAUSAL_LM",
+     target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
+ )
+
+ # Initialize and train
+ print("Initializing trainer...")
+ trainer = SFTTrainer(
+     model="Qwen/Qwen3-4B",
+     train_dataset=train_dataset,
+     eval_dataset=eval_dataset,
+     args=config,
+     peft_config=peft_config,
+ )
+
+ print("Starting training...")
+ trainer.train()
+
+ print("Pushing to Hub...")
+ # Fixed: Call push_to_hub() without subfolder argument
+ trainer.push_to_hub()
+
+ # Finish Trackio tracking
+ trackio.finish()
+
+ print("Complete! Model at: https://huggingface.co/tobil/qmd-query-expansion-4B-sft")
+ print("View metrics at: https://huggingface.co/spaces/tobil/trackio")