xTronz committed on
Commit
58a0e5a
·
verified ·
1 Parent(s): 92a855e

Create README.md

Browse files
Files changed (1) hide show
  1. README.md +60 -0
README.md ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ```
2
+ model, tokenizer = FastLanguageModel.from_pretrained(
3
+ model_name = "unsloth/GLM-4.7-Flash",
4
+ max_seq_length = 2048, # Choose any for long context!
5
+ load_in_4bit = False, # 4 bit quantization to reduce memory
6
+ load_in_8bit = False, # [NEW!] A bit more accurate, uses 2x memory
7
+ full_finetuning = False, # [NEW!] We have full finetuning now!
8
+ trust_remote_code = True,
9
+ unsloth_force_compile = False,
10
+ )
11
+
12
+ model = FastLanguageModel.get_peft_model(
13
+ model,
14
+ r = 8, # Choose any number > 0 ! Suggested 8, 16, 32, 64, 128
15
+ target_modules = ["q_proj", "k_proj", "v_proj", "o_proj",
16
+ "gate_proj", "up_proj", "down_proj",
17
+ "in_proj", "out_proj",],
18
+ lora_alpha = 16,
19
+ lora_dropout = 0, # Supports any, but = 0 is optimized
20
+ bias = "none", # Supports any, but = "none" is optimized
21
+ # [NEW] "unsloth" uses 30% less VRAM, fits 2x larger batch sizes!
22
+ use_gradient_checkpointing = "unsloth", # True or "unsloth" for very long context
23
+ random_state = 3407,
24
+ use_rslora = False, # We support rank stabilized LoRA
25
+ loftq_config = None, # And LoftQ
26
+ )
27
+
28
+ dataset = load_dataset("unsloth/OpenMathReasoning-mini", split = "cot")
29
+
30
+ # This step might take ~3m on this A100 notebook
31
+ from trl import SFTTrainer, SFTConfig
32
+ trainer = SFTTrainer(
33
+ model = model,
34
+ tokenizer = tokenizer,
35
+ train_dataset = dataset,
36
+ eval_dataset = None, # Can set up evaluation!
37
+ args = SFTConfig(
38
+ dataset_text_field = "text",
39
+ dataset_num_proc=1, # Increasing "might" throw error on Colab/other envs.
40
+ per_device_train_batch_size = 4,
41
+ gradient_accumulation_steps = 2, # Use GA to mimic batch size!
42
+ warmup_steps = 5,
43
+ # num_train_epochs = 1, # Set this for 1 full training run.
44
+ max_steps = 60,
45
+ learning_rate = 2e-4, # Reduce to 2e-5 for long training runs
46
+ logging_steps = 1,
47
+ optim = "adamw_8bit",
48
+ weight_decay = 0.001,
49
+ lr_scheduler_type = "linear",
50
+ seed = 3407,
51
+ report_to = "none", # Use TrackIO/WandB etc
52
+ ),
53
+ )
54
+
55
+ trainer = train_on_responses_only(
56
+ trainer,
57
+ instruction_part = "[gMASK]<sop><|user|>", # Updated for GLM
58
+ response_part = "<|assistant|><think>",
59
+ )
60
+