sebelsn committed
Commit ca9531a · verified · 1 Parent(s): e1316ca

Upload kassandra-lora.py

Files changed (1)
  1. example/kassandra-lora.py +131 -0
example/kassandra-lora.py ADDED
@@ -0,0 +1,131 @@
+import torch
+import random
+import numpy as np
+import gc
+from datasets import load_dataset
+from transformers import (
+    AutoTokenizer,
+    AutoModelForCausalLM,
+    Trainer,
+    TrainingArguments,
+    DataCollatorForLanguageModeling,
+)
+from peft import LoraConfig, get_peft_model
+
+# ============================================================
+# Seed
+# ============================================================
+def set_seed(seed=42):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    torch.cuda.manual_seed_all(seed)
+
+set_seed(42)
+
+# ============================================================
+# Model & Tokenizer
+# ============================================================
+MODEL_ID = "/opt/models/mistralai/Mistral-7B-Instruct-v0.3"
+DATA_PATH = "sebelsn/style-adjustment-dataset_de/2026-02-06_style-adjustment-dataset_de.jsonl"
+
+tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, use_fast=True)
+if tokenizer.pad_token is None:
+    tokenizer.pad_token = tokenizer.eos_token
+
+model = AutoModelForCausalLM.from_pretrained(
+    MODEL_ID,
+    dtype=torch.bfloat16,
+    device_map="cuda",
+)
+model.config.pad_token_id = tokenizer.pad_token_id  # ← new
+
+# Optional: gradient checkpointing
+# model.gradient_checkpointing_enable()
+
+# ============================================================
+# LoRA
+# ============================================================
+lora_config = LoraConfig(
+    r=1,
+    lora_alpha=2,
+    target_modules=["q_proj", "v_proj"],
+    lora_dropout=0.1,
+    bias="none",
+    task_type="CAUSAL_LM",
+)
+model = get_peft_model(model, lora_config)
+model.print_trainable_parameters()
+
+# ============================================================
+# Dataset
+# ============================================================
+dataset = load_dataset("json", data_files=DATA_PATH)["train"]
+dataset = dataset.shuffle(seed=42)  # ← new
+
+def format_example(example):
+    text = f"Frage:\n{example['instruction']}\n\nAntwort:\n{example['response']}"
+    return {"text": text}
+
+dataset = dataset.map(format_example, remove_columns=dataset.column_names)
+
+def tokenize(example):
+    return tokenizer(example["text"], truncation=True, max_length=512, padding=False)
+
+dataset = dataset.map(tokenize, batched=True)
+
+# Train/val split
+dataset = dataset.train_test_split(test_size=0.1, seed=42)  # ← new
+train_dataset = dataset["train"]
+val_dataset = dataset["test"]
+
+# ============================================================
+# Training
+# ============================================================
+data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
+
+training_args = TrainingArguments(
+    output_dir="/opt/models/lora-style-out",
+    per_device_train_batch_size=1,
+    gradient_accumulation_steps=8,
+    learning_rate=1e-5,  # ← slightly higher
+    lr_scheduler_type="cosine",  # ← new
+    warmup_ratio=0.5,  # ← more warmup
+    max_grad_norm=1.0,  # ← new
+    num_train_epochs=2.5,
+    bf16=True,
+    max_steps=56,  # overrides num_train_epochs when set
+    logging_steps=7,
+    logging_dir="/opt/models/lora-style-out/logs",  # ← new
+    save_strategy="steps",
+    save_steps=14,
+    eval_strategy="steps",  # ← new
+    eval_steps=5,  # ← new
+    load_best_model_at_end=False,  # ← new
+    metric_for_best_model="loss",  # ← new
+    report_to="none",
+)
+
+trainer = Trainer(
+    model=model,
+    args=training_args,
+    train_dataset=train_dataset,  # ← changed
+    eval_dataset=val_dataset,  # ← new
+    data_collator=data_collator,
+)
+
+trainer.train()
+
+# ============================================================
+# Save & Cleanup
+# ============================================================
+# trainer.save_model("/opt/models/lora-style-out/final")  # ← new
+# tokenizer.save_pretrained("/opt/models/lora-style-out/final")
+
+del model
+del trainer
+torch.cuda.empty_cache()
+gc.collect()
+torch.cuda.synchronize()
+
+print("✅ Training done, GPU freed")
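A minimal follow-up sketch, not part of the committed file, showing how the trained adapter could be loaded for inference with peft's PeftModel. It assumes the two save lines above were uncommented so that the adapter and tokenizer land in /opt/models/lora-style-out/final, and the prompt text is only a placeholder in the Frage/Antwort format produced by format_example:

    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer
    from peft import PeftModel

    BASE_ID = "/opt/models/mistralai/Mistral-7B-Instruct-v0.3"   # same base model as in training
    ADAPTER_DIR = "/opt/models/lora-style-out/final"             # assumes the save lines were uncommented

    # Load the frozen base model, then attach the LoRA adapter weights on top of it
    base = AutoModelForCausalLM.from_pretrained(BASE_ID, torch_dtype=torch.bfloat16, device_map="cuda")
    model = PeftModel.from_pretrained(base, ADAPTER_DIR)
    tokenizer = AutoTokenizer.from_pretrained(ADAPTER_DIR)

    # Build a prompt in the same Frage/Antwort layout used during fine-tuning
    prompt = "Frage:\n<Anweisung im Stil des Datensatzes>\n\nAntwort:\n"  # placeholder instruction
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    out = model.generate(**inputs, max_new_tokens=128, do_sample=False)
    print(tokenizer.decode(out[0], skip_special_tokens=True))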