KevinKeller committed (verified)
Commit b3f451d · Parent: e6f26d1

Upload train_pattern_selector.py with huggingface_hub
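The commit title appears to be the default message huggingface_hub generates for file uploads. A minimal sketch of how such an upload is typically done with the Hub API client; the repo id below is a placeholder, not taken from this page:

from huggingface_hub import HfApi

api = HfApi()  # picks up HF_TOKEN from the environment or a cached login
api.upload_file(
    path_or_fileobj="train_pattern_selector.py",
    path_in_repo="train_pattern_selector.py",
    repo_id="KevinKeller/your-target-repo",  # placeholder; the target repo is not named on this page
)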

Files changed (1)
  1. train_pattern_selector.py +89 -0
train_pattern_selector.py ADDED
@@ -0,0 +1,89 @@
+ # /// script
+ # dependencies = ["trl>=0.12.0", "peft>=0.13.0", "datasets", "transformers>=4.45.0", "accelerate", "bitsandbytes", "huggingface_hub"]
+ # ///
+
+ import os
+ from datasets import load_dataset
+ from peft import LoraConfig
+ from trl import SFTTrainer, SFTConfig
+ from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
+ import torch
+
+ # Authenticate
+ from huggingface_hub import login
+ hf_token = os.environ.get("HF_TOKEN")
+ if hf_token:
+     login(token=hf_token)
+     print("Authenticated with HuggingFace")
+
+ print("Loading dataset...")
+ dataset = load_dataset("KevinKeller/cognitive-pattern-selector-v1")
+ train_dataset = dataset["train"]
+ eval_dataset = dataset.get("validation")
+
+ print(f"Train samples: {len(train_dataset)}")
+ if eval_dataset:
+     print(f"Eval samples: {len(eval_dataset)}")
+
+ print("Loading model: Qwen/Qwen2.5-7B-Instruct...")
+ model_id = "Qwen/Qwen2.5-7B-Instruct"
+
+ # 4-bit quantization for fitting on A10G
+ bnb_config = BitsAndBytesConfig(
+     load_in_4bit=True,
+     bnb_4bit_quant_type="nf4",
+     bnb_4bit_compute_dtype=torch.bfloat16,
+ )
+
+ tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
+ if tokenizer.pad_token is None:
+     tokenizer.pad_token = tokenizer.eos_token
+
+ model = AutoModelForCausalLM.from_pretrained(
+     model_id,
+     quantization_config=bnb_config,
+     device_map="auto",
+     trust_remote_code=True,
+ )
+
+ # LoRA config
+ peft_config = LoraConfig(
+     r=16,
+     lora_alpha=32,
+     lora_dropout=0.05,
+     target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
+     bias="none",
+     task_type="CAUSAL_LM",
+ )
+
+ # Training config - max_seq_length is set here; newer TRL releases no longer accept it as an SFTTrainer argument
+ training_args = SFTConfig(
+     output_dir="./pattern-selector-output",
+     num_train_epochs=3,
+     per_device_train_batch_size=2,
+     gradient_accumulation_steps=4,
+     learning_rate=2e-4,
+     logging_steps=10,
+     save_strategy="epoch",
+     eval_strategy="epoch" if eval_dataset else "no",
+     bf16=True,
+     max_seq_length=4096,
+     push_to_hub=True,
+     hub_model_id="KevinKeller/cognitive-pattern-selector-qwen2.5-7b",
+     report_to="none",
+ )
+
+ print("Starting training...")
+ trainer = SFTTrainer(
+     model=model,
+     train_dataset=train_dataset,
+     eval_dataset=eval_dataset,
+     peft_config=peft_config,
+     processing_class=tokenizer,  # Renamed from 'tokenizer' in TRL 0.12+
+     args=training_args,
+ )
+
+ trainer.train()
+ print("Training complete! Pushing to Hub...")
+ trainer.push_to_hub()
+ print("Done!")