wheattoast11 committed on
Commit
e60aeeb
·
verified ·
1 Parent(s): 67d4be3

Upload train_glm_qlora_v4.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. train_glm_qlora_v4.py +105 -0
train_glm_qlora_v4.py ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # /// script
2
+ # requires-python = ">=3.10"
3
+ # dependencies = [
4
+ # "trl>=0.12.0",
5
+ # "peft>=0.7.0",
6
+ # "transformers @ git+https://github.com/huggingface/transformers.git",
7
+ # "accelerate>=0.24.0",
8
+ # "bitsandbytes>=0.41.0",
9
+ # "trackio",
10
+ # "datasets",
11
+ # ]
12
+ # ///
13
+
14
+ """
15
+ Agent Zero SFT: zai-org/GLM-4.7-Flash (30B MoE)
16
+ QLoRA (4-bit) with CPU offloading for layers that don't fit in 24GB VRAM.
17
+ """
18
+
19
+ import torch
20
+ import trackio
21
+ from datasets import load_dataset
22
+ from peft import LoraConfig
23
+ from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
24
+ from trl import SFTTrainer, SFTConfig
25
+
26
# Fetch the SFT corpus from the Hub. Each JSONL file is requested with
# split="train" because data_files= overrides the repo's own split layout.
print("Loading dataset...")
_DATA_REPO = "wheattoast11/agent-zero-sft-v1"
train_ds = load_dataset(_DATA_REPO, data_files="data/train.jsonl", split="train")
val_ds = load_dataset(_DATA_REPO, data_files="data/validation.jsonl", split="train")
print(f"Train: {len(train_ds)}, Val: {len(val_ds)}")
30
+
31
# 4-bit NF4 quantization with double quantization; modules that do not fit
# the GPU budget are kept in fp32 on the CPU (llm_int8_enable_fp32_cpu_offload).
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_use_double_quant=True,
    llm_int8_enable_fp32_cpu_offload=True,
)

print("Loading model in 4-bit with CPU offload...")
_MODEL_ID = "zai-org/GLM-4.7-Flash"
model = AutoModelForCausalLM.from_pretrained(
    _MODEL_ID,
    quantization_config=bnb_config,
    trust_remote_code=True,
    device_map="auto",
    # Cap GPU 0 at 20 GiB (leaves headroom on a 24 GB card); overflow
    # spills into up to 30 GiB of system RAM.
    max_memory={0: "20GiB", "cpu": "30GiB"},
)
tokenizer = AutoTokenizer.from_pretrained(_MODEL_ID, trust_remote_code=True)
print("Model loaded.")
49
+
50
# Report where accelerate placed each module (GPU index 0 vs. "cpu").
if hasattr(model, 'hf_device_map'):
    placements = list(model.hf_device_map.values())
    devices = set(placements)
    print(f"Devices used: {devices}")
    # max_memory was keyed {0: ..., "cpu": ...}, so GPU entries are the int 0
    # and CPU entries are the string 'cpu'.
    gpu_layers = len([d for d in placements if d == 0])
    cpu_layers = len([d for d in placements if d == 'cpu'])
    print(f"GPU layers: {gpu_layers}, CPU layers: {cpu_layers}")
57
+
58
# Training arguments. Effective batch size = 1 micro-batch x 16 accumulation
# steps = 16 sequences per optimizer step; checkpoints stream to a private
# Hub repo as they are saved.
config = SFTConfig(
    output_dir="agent-zero-glm-4.7-v1",
    push_to_hub=True,
    hub_model_id="wheattoast11/agent-zero-glm-4.7-v1",
    hub_strategy="every_save",  # upload every checkpoint, not just the final one
    hub_private_repo=True,
    num_train_epochs=2,
    per_device_train_batch_size=1,  # micro-batch of 1 to fit the CPU-offloaded model
    gradient_accumulation_steps=16,
    learning_rate=1e-4,
    bf16=True,
    gradient_checkpointing=True,  # trade recompute for activation memory
    logging_steps=10,
    save_strategy="steps",
    save_steps=50,
    save_total_limit=2,  # keep only the two newest checkpoints on disk
    eval_strategy="steps",
    eval_steps=50,  # evaluate at the same cadence as saving
    warmup_ratio=0.1,
    lr_scheduler_type="cosine",
    report_to="trackio",
    project="agent-zero-finetune",  # trackio project / run identifiers
    run_name="glm-4.7-flash-qlora-v1",
)
82
+
83
# LoRA adapters on the attention projections only (rank 16, alpha 32,
# i.e. an effective scaling of alpha/r = 2).
peft_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=["q_proj", "v_proj", "k_proj", "o_proj"],
)
88
+
89
print("Initializing trainer...")
# trl >= 0.12 renamed SFTTrainer's `tokenizer` argument to `processing_class`;
# the old keyword has since been removed, and this script pins trl>=0.12.0
# (so a fresh install resolves to a release without it). Use the new name.
trainer = SFTTrainer(
    model=model,
    processing_class=tokenizer,
    train_dataset=train_ds,
    eval_dataset=val_ds,
    args=config,
    peft_config=peft_config,
)

print("Starting training...")
trainer.train()

# Final upload (intermediate checkpoints were already pushed via hub_strategy).
print("Pushing to Hub...")
trainer.push_to_hub()
trackio.finish()
print("Done! Model at: https://huggingface.co/wheattoast11/agent-zero-glm-4.7-v1")