KevinKeller committed
Commit 319ff67 · verified · Parent: 882aa90

Upload train_pattern_selector.py with huggingface_hub
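(This commit message is the default one huggingface_hub generates for a programmatic file push. For context, a hypothetical sketch of that kind of upload; the exact call is not shown on this page, and the repo_id below is an assumption borrowed from the script's hub_model_id:)

# Hypothetical sketch of the kind of call that produces this commit
# message; the actual invocation is not shown here, and repo_id is
# assumed (taken from the script's hub_model_id).
from huggingface_hub import HfApi

api = HfApi()  # uses a cached token, e.g. from `huggingface-cli login`
api.upload_file(
    path_or_fileobj="train_pattern_selector.py",
    path_in_repo="train_pattern_selector.py",
    repo_id="KevinKeller/cognitive-pattern-selector-qwen2.5-7b",  # assumed
)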

Files changed (1)
  1. train_pattern_selector.py +7 -26
train_pattern_selector.py CHANGED
@@ -1,13 +1,11 @@
 # /// script
-# dependencies = ["trl>=0.12.0", "peft>=0.13.0", "datasets", "transformers>=4.45.0", "accelerate", "bitsandbytes", "huggingface_hub"]
+# dependencies = ["trl>=0.20.0", "peft>=0.13.0", "datasets", "transformers>=4.45.0", "accelerate", "bitsandbytes", "huggingface_hub"]
 # ///
 
 import os
 from datasets import load_dataset
 from peft import LoraConfig
 from trl import SFTTrainer, SFTConfig
-from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
-import torch
 
 # Authenticate
 from huggingface_hub import login
@@ -25,26 +23,9 @@ print(f"Train samples: {len(train_dataset)}")
 if eval_dataset:
     print(f"Eval samples: {len(eval_dataset)}")
 
-print("Loading model: Qwen/Qwen2.5-7B-Instruct...")
+# Model - using Qwen2.5-7B for pattern selection
 model_id = "Qwen/Qwen2.5-7B-Instruct"
-
-# 4-bit quantization for fitting on A10G
-bnb_config = BitsAndBytesConfig(
-    load_in_4bit=True,
-    bnb_4bit_quant_type="nf4",
-    bnb_4bit_compute_dtype=torch.bfloat16,
-)
-
-tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
-if tokenizer.pad_token is None:
-    tokenizer.pad_token = tokenizer.eos_token
-
-model = AutoModelForCausalLM.from_pretrained(
-    model_id,
-    quantization_config=bnb_config,
-    device_map="auto",
-    trust_remote_code=True,
-)
+print(f"Using model: {model_id}")
 
 # LoRA config
 peft_config = LoraConfig(
@@ -56,7 +37,7 @@ peft_config = LoraConfig(
     task_type="CAUSAL_LM",
 )
 
-# Training config - removed max_seq_length from SFTConfig
+# Training config - modern TRL API
 training_args = SFTConfig(
     output_dir="./pattern-selector-output",
     num_train_epochs=3,
@@ -70,17 +51,17 @@ training_args = SFTConfig(
     push_to_hub=True,
     hub_model_id="KevinKeller/cognitive-pattern-selector-qwen2.5-7b",
     report_to="none",
+    max_seq_length=4096,
+    load_in_4bit=True,  # Enable 4-bit quantization
 )
 
 print("Starting training...")
 trainer = SFTTrainer(
-    model=model,
+    model=model_id,  # Pass model name, not loaded model
     train_dataset=train_dataset,
     eval_dataset=eval_dataset,
     peft_config=peft_config,
-    processing_class=tokenizer,  # Renamed from 'tokenizer' in TRL 0.12+
     args=training_args,
-    max_seq_length=4096,
 )
 
 trainer.train()
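A note on the dependency bump: the # /// script header is PEP 723 inline metadata, so the new trl>=0.20.0 pin takes effect whenever the file is executed with a PEP 723-aware runner. For example, uv run train_pattern_selector.py resolves the listed packages into an ephemeral environment before running the script, with no separate pip install step.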
 
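One thing worth double-checking in the new version: SFTTrainer does accept a hub id string for model in recent TRL, but load_in_4bit as a direct SFTConfig field is not something I can confirm across TRL releases; the documented route for controlling how the trainer loads a string model is model_init_kwargs, which SFTTrainer forwards to AutoModelForCausalLM.from_pretrained(). A minimal sketch of that pattern, assuming recent TRL (the one-row dataset is a placeholder so the snippet stands alone):

# Minimal sketch, not the committed script: 4-bit QLoRA with the model
# passed to SFTTrainer as a hub id. Quantization settings travel through
# SFTConfig.model_init_kwargs, which the trainer forwards to
# AutoModelForCausalLM.from_pretrained().
import torch
from datasets import Dataset
from peft import LoraConfig
from transformers import BitsAndBytesConfig
from trl import SFTConfig, SFTTrainer

train_dataset = Dataset.from_dict({"text": ["placeholder training sample"]})

training_args = SFTConfig(
    output_dir="./pattern-selector-output",
    max_length=4096,  # newer TRL renamed max_seq_length to max_length
    model_init_kwargs={
        "quantization_config": BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_quant_type="nf4",
            bnb_4bit_compute_dtype=torch.bfloat16,
        ),
        "device_map": "auto",
    },
)

trainer = SFTTrainer(
    model="Qwen/Qwen2.5-7B-Instruct",  # string id; TRL loads the model itself
    train_dataset=train_dataset,
    peft_config=LoraConfig(task_type="CAUSAL_LM"),
    args=training_args,
)
trainer.train()

Letting the trainer own model loading is what makes the deleted AutoModelForCausalLM / BitsAndBytesConfig block unnecessary: quantization, device placement, and the LoRA wrap-up all happen in one place.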