wheattoast11 committed
Commit daa8cbe · verified · 1 Parent(s): 09732e8

Upload train_glm_qlora.py with huggingface_hub

Files changed (1)
  1. train_glm_qlora.py +30 -25
train_glm_qlora.py CHANGED
@@ -1,8 +1,11 @@
 # /// script
 # requires-python = ">=3.10"
 # dependencies = [
-# "unsloth",
-# "transformers>=4.49.0",
+# "trl>=0.12.0",
+# "peft>=0.7.0",
+# "transformers @ git+https://github.com/huggingface/transformers.git",
+# "accelerate>=0.24.0",
+# "bitsandbytes>=0.41.0",
 # "trackio",
 # "datasets",
 # ]
@@ -10,13 +13,17 @@

 """
 Agent Zero SFT: zai-org/GLM-4.7-Flash (30B MoE)
-QLoRA (4-bit) fine-tuning with Unsloth on agent-zero-sft-v1 dataset.
+QLoRA (4-bit) fine-tuning with bitsandbytes on agent-zero-sft-v1 dataset.
+No Unsloth — transformers from source for glm4_moe_lite support.
 Router layers frozen - only attention layers trained.
 """

+import torch
 import trackio
 from datasets import load_dataset
-from unsloth import FastLanguageModel
+from peft import LoraConfig
+from transformers import BitsAndBytesConfig
+from trl import SFTTrainer, SFTConfig

 # Load dataset
 print("Loading dataset...")
@@ -24,28 +31,14 @@ train_ds = load_dataset("wheattoast11/agent-zero-sft-v1", data_files="data/train
 val_ds = load_dataset("wheattoast11/agent-zero-sft-v1", data_files="data/validation.jsonl", split="train")
 print(f"Train: {len(train_ds)}, Val: {len(val_ds)}")

-# Load model in 4-bit with Unsloth
-print("Loading model with Unsloth (4-bit QLoRA)...")
-model, tokenizer = FastLanguageModel.from_pretrained(
-    model_name="unsloth/GLM-4.7-Flash",
-    max_seq_length=2048,
+# 4-bit quantization config
+bnb_config = BitsAndBytesConfig(
     load_in_4bit=True,
-    dtype=None,
+    bnb_4bit_quant_type="nf4",
+    bnb_4bit_compute_dtype=torch.bfloat16,
+    bnb_4bit_use_double_quant=True,
 )

-# Apply LoRA adapters via Unsloth
-model = FastLanguageModel.get_peft_model(
-    model,
-    r=16,
-    lora_alpha=32,
-    lora_dropout=0.05,
-    target_modules=["q_proj", "v_proj", "k_proj", "o_proj"],
-    bias="none",
-    use_gradient_checkpointing="unsloth",
-)
-
-from trl import SFTTrainer, SFTConfig
-
 config = SFTConfig(
     output_dir="agent-zero-glm-4.7-v1",
     push_to_hub=True,
@@ -58,6 +51,7 @@ config = SFTConfig(
     gradient_accumulation_steps=16,
     learning_rate=1e-4,
     bf16=True,
+    gradient_checkpointing=True,

     logging_steps=10,
     save_strategy="steps",
@@ -75,13 +69,24 @@ config = SFTConfig(
     run_name="glm-4.7-flash-qlora-v1",
 )

+# LoRA targeting attention layers only (router layers frozen)
+peft_config = LoraConfig(
+    r=16,
+    lora_alpha=32,
+    lora_dropout=0.05,
+    bias="none",
+    task_type="CAUSAL_LM",
+    target_modules=["q_proj", "v_proj", "k_proj", "o_proj"],
+)
+
 print("Initializing trainer...")
 trainer = SFTTrainer(
-    model=model,
-    tokenizer=tokenizer,
+    model="zai-org/GLM-4.7-Flash",
     train_dataset=train_ds,
     eval_dataset=val_ds,
     args=config,
+    peft_config=peft_config,
+    model_init_kwargs={"quantization_config": bnb_config, "trust_remote_code": True},
 )

 print("Starting training...")
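Note on the new trainer call: in TRL 0.12 and later (the range this script pins), SFTTrainer does not accept model_init_kwargs as a constructor argument; model-loading keyword arguments are supplied through the model_init_kwargs field of SFTConfig instead. A minimal sketch of the equivalent wiring, assuming the bnb_config, peft_config, and datasets defined in the diff above:

# Sketch: route model-loading kwargs through SFTConfig (TRL 0.12+ convention).
config = SFTConfig(
    output_dir="agent-zero-glm-4.7-v1",
    model_init_kwargs={"quantization_config": bnb_config, "trust_remote_code": True},
    # ...remaining training arguments unchanged from the diff above...
)

trainer = SFTTrainer(
    model="zai-org/GLM-4.7-Flash",  # a string id is loaded internally via from_pretrained
    train_dataset=train_ds,
    eval_dataset=val_ds,
    args=config,
    peft_config=peft_config,  # LoRA adapters applied on top of the 4-bit base model
)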