# /// script
# requires-python = ">=3.10"
# dependencies = [
#     "trl>=0.12.0",
#     "peft>=0.7.0",
#     "transformers>=4.36.0",
#     "accelerate>=0.24.0",
#     "trackio",
#     "datasets",
# ]
# ///
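
# The header above is PEP 723 inline metadata: running this file with `uv run`
# resolves the listed dependencies automatically (assumes uv is installed).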
"""
Agent Zero SFT: zai-org/GLM-4.7-Flash (30B MoE)
LoRA fine-tuning on agent-zero-sft-v1 dataset.
Router layers frozen - only attention layers trained.
"""
import trackio
from datasets import load_dataset
from peft import LoraConfig
from trl import SFTTrainer, SFTConfig
# Load dataset. Note: `data_files` creates a single default "train" split per
# call, so `split="train"` on the validation file simply selects that file's rows.
print("Loading dataset...")
train_ds = load_dataset("wheattoast11/agent-zero-sft-v1", data_files="data/train.jsonl", split="train")
val_ds = load_dataset("wheattoast11/agent-zero-sft-v1", data_files="data/validation.jsonl", split="train")
print(f"Train: {len(train_ds)}, Val: {len(val_ds)}")
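
# Quick schema peek (a small sanity check, assuming the dataset is stored in one
# of the formats SFTTrainer handles, e.g. a "text" or "messages" column):
print(f"Columns: {train_ds.column_names}")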
config = SFTConfig(
    output_dir="agent-zero-glm-4.7-v1",
    push_to_hub=True,
    hub_model_id="wheattoast11/agent-zero-glm-4.7-v1",
    hub_strategy="every_save",
    hub_private_repo=True,
    num_train_epochs=2,
    per_device_train_batch_size=1,
    gradient_accumulation_steps=16,
    learning_rate=1e-4,
    bf16=True,
    gradient_checkpointing=True,
    logging_steps=10,
    save_strategy="steps",
    save_steps=50,
    save_total_limit=2,
    eval_strategy="steps",
    eval_steps=50,
    warmup_ratio=0.1,
    lr_scheduler_type="cosine",
    report_to="trackio",
    project="agent-zero-finetune",
    run_name="glm-4.7-flash-sft-v1",
)
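
# Effective batch size: 1 per device x 16 accumulation steps = 16 sequences per
# optimizer update on each GPU.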
# LoRA adapters on the attention projections only; the MoE router and expert
# weights stay frozen along with the rest of the base model.
peft_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=["q_proj", "v_proj", "k_proj", "o_proj"],
)
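
# With r=16 and lora_alpha=32, LoRA scales the adapter update by alpha/r = 2.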
print("Initializing trainer...")
trainer = SFTTrainer(
    model="zai-org/GLM-4.7-Flash",
    train_dataset=train_ds,
    eval_dataset=val_ds,
    args=config,
    peft_config=peft_config,
)
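
# Sanity check (assumes trl wrapped the model in a PeftModel, which it does when
# peft_config is passed): report the fraction of parameters LoRA actually trains.
trainer.model.print_trainable_parameters()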
print("Starting training...")
trainer.train()
print("Pushing to Hub...")
trainer.push_to_hub()
trackio.finish()
print("Done! Model at: https://huggingface.co/wheattoast11/agent-zero-glm-4.7-v1")
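
# To try the adapter afterwards, a minimal sketch (assumes the pushed repo holds
# both the adapter and tokenizer, and that a GPU fits the bf16 base model):
#
#   import torch
#   from peft import AutoPeftModelForCausalLM
#   from transformers import AutoTokenizer
#
#   model = AutoPeftModelForCausalLM.from_pretrained(
#       "wheattoast11/agent-zero-glm-4.7-v1",
#       torch_dtype=torch.bfloat16,
#       device_map="auto",
#   )
#   tokenizer = AutoTokenizer.from_pretrained("wheattoast11/agent-zero-glm-4.7-v1")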