wheattoast11 committed
Commit 9249579 · verified · 1 Parent(s): 451c040

Upload train_glm_qlora.py with huggingface_hub

Files changed (1)
  1. train_glm_qlora.py +25 -29
train_glm_qlora.py CHANGED
@@ -1,11 +1,8 @@
 # /// script
 # requires-python = ">=3.10"
 # dependencies = [
-#     "trl>=0.12.0",
-#     "peft>=0.7.0",
-#     "transformers @ git+https://github.com/huggingface/transformers.git",
-#     "accelerate>=0.24.0",
-#     "bitsandbytes>=0.41.0",
+#     "unsloth",
+#     "transformers>=4.49.0",
 #     "trackio",
 #     "datasets",
 # ]
@@ -13,16 +10,13 @@
 
 """
 Agent Zero SFT: zai-org/GLM-4.7-Flash (30B MoE)
-QLoRA (4-bit) fine-tuning on agent-zero-sft-v1 dataset.
+QLoRA (4-bit) fine-tuning with Unsloth on agent-zero-sft-v1 dataset.
 Router layers frozen - only attention layers trained.
 """
 
-import torch
 import trackio
 from datasets import load_dataset
-from peft import LoraConfig
-from transformers import BitsAndBytesConfig
-from trl import SFTTrainer, SFTConfig
+from unsloth import FastLanguageModel
 
 # Load dataset
 print("Loading dataset...")
@@ -30,14 +24,28 @@ train_ds = load_dataset("wheattoast11/agent-zero-sft-v1", data_files="data/train
 val_ds = load_dataset("wheattoast11/agent-zero-sft-v1", data_files="data/validation.jsonl", split="train")
 print(f"Train: {len(train_ds)}, Val: {len(val_ds)}")
 
-# 4-bit quantization config
-bnb_config = BitsAndBytesConfig(
+# Load model in 4-bit with Unsloth
+print("Loading model with Unsloth (4-bit QLoRA)...")
+model, tokenizer = FastLanguageModel.from_pretrained(
+    model_name="unsloth/GLM-4.7-Flash",
+    max_seq_length=2048,
     load_in_4bit=True,
-    bnb_4bit_quant_type="nf4",
-    bnb_4bit_compute_dtype=torch.bfloat16,
-    bnb_4bit_use_double_quant=True,
+    dtype=None,
 )
 
+# Apply LoRA adapters via Unsloth
+model = FastLanguageModel.get_peft_model(
+    model,
+    r=16,
+    lora_alpha=32,
+    lora_dropout=0.05,
+    target_modules=["q_proj", "v_proj", "k_proj", "o_proj"],
+    bias="none",
+    use_gradient_checkpointing="unsloth",
+)
+
+from trl import SFTTrainer, SFTConfig
+
 config = SFTConfig(
     output_dir="agent-zero-glm-4.7-v1",
     push_to_hub=True,
@@ -50,7 +58,6 @@ config = SFTConfig(
     gradient_accumulation_steps=16,
     learning_rate=1e-4,
     bf16=True,
-    gradient_checkpointing=True,
 
     logging_steps=10,
     save_strategy="steps",
@@ -68,24 +75,13 @@ config = SFTConfig(
     run_name="glm-4.7-flash-qlora-v1",
 )
 
-# LoRA targeting attention layers only (router layers frozen)
-peft_config = LoraConfig(
-    r=16,
-    lora_alpha=32,
-    lora_dropout=0.05,
-    bias="none",
-    task_type="CAUSAL_LM",
-    target_modules=["q_proj", "v_proj", "k_proj", "o_proj"],
-)
-
 print("Initializing trainer...")
 trainer = SFTTrainer(
-    model="zai-org/GLM-4.7-Flash",
+    model=model,
+    tokenizer=tokenizer,
    train_dataset=train_ds,
    eval_dataset=val_ds,
    args=config,
-    peft_config=peft_config,
-    model_init_kwargs={"quantization_config": bnb_config},
 )
 
 print("Starting training...")