| """ |
| Kassandra SFT Training mit LoRA |
| --------------------------------- |
| Trainiert Mistral-7B-Instruct-v0.3 mit LoRA auf dem Kassandra Datensatz. |
| Verwendet das Mistral Chat-Template fuer konsistentes Training. |
| |
| Unterstützte Eingabeformate: |
| - turns: {"turns": [{"role": "user", ...}, {"role": "assistant", ...}]} |
| - instruction: {"instruction": "...", "response": "..."} |
| |
| Verwendung: |
| CUDA_VISIBLE_DEVICES=0 python kassandra-lora.py \ |
| --base /opt/models/mistralai/Mistral-7B-Instruct-v0.3 \ |
| --data sebelsn/style-adjustment-dataset_de/new.jsonl \ |
| --output /opt/models/lora-style-out \ |
| --steps 294 \ |
| --lr 1e-5 |
| """ |
|
|
import argparse
import json
import os
import random
import gc
import numpy as np
from dataclasses import dataclass
from typing import Any, Optional
|
|
import torch
from datasets import Dataset
from transformers import (
    EarlyStoppingCallback,
    AutoTokenizer,
    AutoModelForCausalLM,
    Trainer,
    TrainingArguments,
)
from peft import LoraConfig, get_peft_model
|
|
|
|
|
|
def set_seed(seed=42):
    """Seeds all random number generators with a fixed value.
    Ensures reproducibility: identical results when training is rerun.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
|
|
set_seed(42)
|
|
|
|
|
|
def parse_args():
    parser = argparse.ArgumentParser(description="Kassandra SFT training with LoRA")
    parser.add_argument("--base", required=True, help="Path to the base model")
    parser.add_argument("--data", required=True, help="Path to the dataset (JSONL)")
    parser.add_argument("--output", required=True, help="Output path for LoRA checkpoints")
    parser.add_argument("--steps", type=int, default=None, help="Maximum training steps (default: automatic)")
    parser.add_argument("--epochs", type=float, default=3.0, help="Epochs used for the automatic step calculation")
    parser.add_argument("--save-steps", type=int, default=None, help="Checkpoint every N steps (default: automatic, 20%% of total)")
    parser.add_argument("--eval-steps", type=int, default=None, help="Validation every N steps (default: automatic, 20%% of total)")
    parser.add_argument("--save-limit", type=int, default=8, help="Maximum number of checkpoints")
    parser.add_argument("--lr", type=float, default=1e-5, help="Learning rate")
    parser.add_argument("--warmup-steps", type=int, default=None, help="Warmup steps (default: automatic, 5%% of total)")
    parser.add_argument("--lora-r", type=int, default=1, help="LoRA rank")
    parser.add_argument("--lora-alpha", type=int, default=2, help="LoRA alpha")
    parser.add_argument("--max-tokens", type=int, default=512, help="Maximum token length")
    parser.add_argument("--grad-accum", type=int, default=8, help="Gradient accumulation steps")
    parser.add_argument("--patience", type=int, default=2, help="Early stopping patience (0 = disabled)")
    parser.add_argument("--threshold", type=float, default=0.05, help="Minimum absolute improvement in eval_loss for early stopping (default: 0.05)")
    parser.add_argument("--mask-questions", action="store_true", help="Train on answers only (default: train on everything)")
    return parser.parse_args()
|
|
|
|
|
|
def load_dataset_rows(path):
    """Reads a JSONL file line by line.
    Empty lines are skipped.
    Returns a list of dicts.
    """
    rows = []
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if line:
                rows.append(json.loads(line))
    return rows
|
|
|
|
|
|
def normalize_turns(turns):
    """Cleans up a list of turns.
    Drops turns with empty content or an invalid role.
    Returns user/assistant turns only.
    """
    out = []
    for t in turns:
        role = (t.get("role") or "").strip()
        content = (t.get("content") or "").strip()
        if role in ("user", "assistant") and content:
            out.append({"role": role, "content": content})
    return out
|
|
|
|
def row_to_messages(row):
    """Normalizes a dataset entry to the unified messages format.
    Supports both input formats:
    - turns: {"turns": [{"role": "user", ...}, {"role": "assistant", ...}]}
    - instruction: {"instruction": "...", "response": "..."}
    Returns {"messages": [...]}, or {"messages": []} for an invalid entry.
    """
    turns = row.get("turns", None)
    if isinstance(turns, list) and len(turns) > 0:
        msgs = normalize_turns(turns)
        if len(msgs) >= 2:
            return {"messages": msgs}

    inst = (row.get("instruction") or "").strip()
    resp = (row.get("response") or "").strip()
    if inst and resp:
        return {"messages": [
            {"role": "user", "content": inst},
            {"role": "assistant", "content": resp}
        ]}

    return {"messages": []}
|
|
|
|
def is_alternating_user_assistant(msgs):
    """Checks that the turns alternate correctly: user -> assistant -> user -> ...
    Invalid sequences (e.g. two consecutive user turns) are filtered out.
    Returns True if the sequence is valid.
    """
    if not msgs or msgs[0]["role"] != "user":
        return False
    for i in range(1, len(msgs)):
        if msgs[i]["role"] == msgs[i-1]["role"]:
            return False
    return any(m["role"] == "assistant" for m in msgs)
|
|
|
|
|
|
def format_mistral_pairs(messages):
    """Formats messages as a Mistral chat-template string.
    Format: <s>[INST] user [/INST] assistant</s>
    Multiple turns are concatenated directly.
    Important: this format must match the vLLM chat template.
    """
    text = ""
    i = 0
    n = len(messages)
    while i < n:
        if messages[i]["role"] != "user":
            i += 1
            continue
        user = messages[i]["content"].strip()
        assistant = ""
        if i + 1 < n and messages[i + 1]["role"] == "assistant":
            assistant = messages[i + 1]["content"].strip()
        text += "<s>[INST] " + user + " [/INST] " + assistant + "</s>"
        i += 2
    return text
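# Example: a two-turn dialog renders as
#   "<s>[INST] u1 [/INST] a1</s><s>[INST] u2 [/INST] a2</s>"
# i.e. each user/assistant pair gets its own <s>...</s> block.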
|
|
|
|
|
|
def make_tokenize_fn(tokenizer, max_tokens, mask_questions=False):
    """Builds a tokenization function with optional label masking.

    mask_questions=False (default):
        Both questions and answers are trained on.
        Improves Kassandra's ability to recognize a Kassandra question (Q and V layers).
        Matches the behavior of kassandra-lora_backup.py.

    mask_questions=True:
        Only assistant answers are trained on (labels != -100).
        Questions get labels=-100 and do not contribute to the loss.
        The standard approach in instruction tuning.
    """
    def tokenize_with_assistant_labels(example):
        messages = example["messages"]
        full_text = format_mistral_pairs(messages)
        input_ids = tokenizer.encode(full_text, add_special_tokens=False)

        # Truncate from the left so the most recent turns survive; `offset`
        # records how many tokens were dropped, so that label indices computed
        # against the full sequence can be re-aligned below.
        offset = 0
        if len(input_ids) > max_tokens:
            offset = len(input_ids) - max_tokens
            input_ids = input_ids[-max_tokens:]

        if not mask_questions:
            labels = list(input_ids)
            attention_mask = [1] * len(input_ids)
            return {"input_ids": input_ids, "attention_mask": attention_mask, "labels": labels}

        labels = [-100] * len(input_ids)

        cursor_text = ""
        i = 0
        n = len(messages)
        while i < n:
            if messages[i]["role"] != "user":
                i += 1
                continue
            user = messages[i]["content"].strip()
            assistant = ""
            if i + 1 < n and messages[i + 1]["role"] == "assistant":
                assistant = messages[i + 1]["content"].strip()

            # Token index (in the untruncated sequence) where the assistant span starts.
            prefix_before = cursor_text + "<s>[INST] " + user + " [/INST] "
            prefix_ids = tokenizer.encode(prefix_before, add_special_tokens=False)

            block = "<s>[INST] " + user + " [/INST] " + assistant + "</s>"
            block_ids = tokenizer.encode(cursor_text + block, add_special_tokens=False)

            start = len(prefix_ids)
            end = len(block_ids)

            # Unmask the assistant span, shifted by `offset` to account for
            # left truncation.
            for j in range(max(start, offset), min(end, offset + len(input_ids))):
                labels[j - offset] = input_ids[j - offset]

            cursor_text = cursor_text + block
            i += 2

        attention_mask = [1] * len(input_ids)
        return {"input_ids": input_ids, "attention_mask": attention_mask, "labels": labels}

    return tokenize_with_assistant_labels
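# Illustration of mask_questions=True (token boundaries are approximate and
# depend on the tokenizer): for "<s>[INST] Frage [/INST] Antwort</s>" the
# "<s>[INST] Frage [/INST] " prefix is masked to -100 and only the
# "Antwort</s>" span keeps its token ids as labels.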
|
|
|
|
|
|
@dataclass
class CustomCausalCollator:
    """Batch collator for causal language-model training.

    Pads sequences to a multiple of pad_to_multiple_of (default: 8).
    Padding tokens get attention_mask=0 and labels=-100
    so that they do not contribute to the loss.
    """
    tokenizer: Any
    pad_to_multiple_of: Optional[int] = 8

    def __call__(self, features):
        pad_id = self.tokenizer.pad_token_id
        max_len = max(len(f["input_ids"]) for f in features)
        if self.pad_to_multiple_of is not None:
            m = self.pad_to_multiple_of
            max_len = ((max_len + m - 1) // m) * m

        batch_input_ids = []
        batch_attention_mask = []
        batch_labels = []

        for f in features:
            ids = f["input_ids"]
            attn = f["attention_mask"]
            labs = f["labels"]
            pad_len = max_len - len(ids)
            if pad_len > 0:
                ids = ids + [pad_id] * pad_len
                attn = attn + [0] * pad_len
                labs = labs + [-100] * pad_len
            batch_input_ids.append(ids)
            batch_attention_mask.append(attn)
            batch_labels.append(labs)

        return {
            "input_ids": torch.tensor(batch_input_ids, dtype=torch.long),
            "attention_mask": torch.tensor(batch_attention_mask, dtype=torch.long),
            "labels": torch.tensor(batch_labels, dtype=torch.long),
        }
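# Example: features of lengths 5 and 9 are both padded to 16 (the next
# multiple of 8); padded positions neither attend nor contribute to the loss.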
|
|
|
|
|
|
def main():
    args = parse_args()

    os.makedirs(args.output, exist_ok=True)
|
|
|
|
| print(f"Lade Tokenizer: {args.base}") |
| tokenizer = AutoTokenizer.from_pretrained(args.base, use_fast=False) |
| tokenizer.padding_side = "right" |
| if tokenizer.pad_token is None: |
| tokenizer.pad_token = tokenizer.eos_token |
|
|
| print(f"Lade Modell: {args.base}") |
| model = AutoModelForCausalLM.from_pretrained( |
| args.base, |
| dtype=torch.bfloat16, |
| device_map="cuda", |
| ) |
| model.config.pad_token_id = tokenizer.pad_token_id |
|
|
| print("Lege LoRA auf Modell ...") |
| lora_config = LoraConfig( |
| r=args.lora_r, |
| lora_alpha=args.lora_alpha, |
| target_modules=["q_proj", "v_proj"], |
| lora_dropout=0.1, |
| bias="none", |
| task_type="CAUSAL_LM", |
| ) |
| model = get_peft_model(model, lora_config) |
| model.print_trainable_parameters() |
| model.enable_input_require_grads() |
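    # Rough estimate (assuming Mistral-7B's 32 decoder layers): the defaults
    # r=1, alpha=2 on q_proj/v_proj adapt on the order of 0.4M parameters;
    # print_trainable_parameters() above reports the exact count.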
|
|
| print(f"Lade Datensatz: {args.data}") |
| rows = load_dataset_rows(args.data) |
| print(f"Geladene Zeilen: {len(rows)}") |
|
|
    random.shuffle(rows)
|
|
    processed = []
    for row in rows:
        result = row_to_messages(row)
        if isinstance(result["messages"], list) and len(result["messages"]) >= 2:
            processed.append(result)
|
|
    dialog_count = sum(1 for r in processed if len(r["messages"]) > 2)
    single_count = len(processed) - dialog_count
    print(f"Processed: {len(processed)} | dialogs: {dialog_count} | single-turn: {single_count}")
|
|
    dataset = Dataset.from_list(processed)
    dataset = dataset.filter(lambda x: is_alternating_user_assistant(x["messages"]))
    print(f"After filtering: {len(dataset)}")
|
|
    if len(dataset) == 0:
        raise ValueError("Dataset is empty after filtering.")
|
|
    tokenize_fn = make_tokenize_fn(tokenizer, args.max_tokens, mask_questions=args.mask_questions)
    dataset = dataset.map(tokenize_fn, remove_columns=dataset.column_names)
    dataset = dataset.filter(lambda x: any(l != -100 for l in x["labels"]))
    print(f"After label filtering: {len(dataset)}")
|
|
    if len(dataset) == 0:
        raise ValueError("Dataset is empty after label masking.")
|
|
    lengths = [len(x["input_ids"]) for x in dataset]
    print(f"Token lengths: max={max(lengths)} | avg={int(sum(lengths)/len(lengths))}")
|
|
    if len(dataset) >= 10:
        dataset = dataset.train_test_split(test_size=0.1, seed=42)
        train_dataset = dataset["train"]
        val_dataset = dataset["test"]
        eval_strategy = "steps"
    else:
        print("Dataset too small for a split - using everything for training.")
        train_dataset = dataset
        val_dataset = None
        eval_strategy = "no"
|
|
| print(f"Train: {len(train_dataset)} | Val: {len(val_dataset) if val_dataset else 0}") |
|
|
    steps_per_epoch = len(train_dataset) // args.grad_accum
    auto_steps = max(1, int(steps_per_epoch * args.epochs))
    total_steps = args.steps if args.steps is not None else auto_steps
    warmup_steps = args.warmup_steps if args.warmup_steps is not None else max(10, int(total_steps * 0.05))
    eval_steps = args.eval_steps if args.eval_steps is not None else max(1, int(total_steps * 0.2))
    save_steps = args.save_steps if args.save_steps is not None else eval_steps
    print(f"Total steps: {total_steps} | warmup: {warmup_steps} | eval/save: {eval_steps}")
|
|
    data_collator = CustomCausalCollator(tokenizer=tokenizer, pad_to_multiple_of=8)
|
|
    training_args = TrainingArguments(
        output_dir=args.output,
        per_device_train_batch_size=1,
        gradient_accumulation_steps=args.grad_accum,
        learning_rate=args.lr,
        lr_scheduler_type="cosine",
        warmup_steps=warmup_steps,
        max_grad_norm=1.0,
        max_steps=total_steps,
        bf16=True,
        gradient_checkpointing=True,
        logging_steps=eval_steps,
        save_strategy="steps",
        save_steps=save_steps,
        save_total_limit=args.save_limit,
        eval_strategy=eval_strategy,
        eval_steps=eval_steps,
        # load_best_model_at_end requires evaluation; disable it when there is
        # no validation split (otherwise Trainer raises a ValueError because
        # the save and eval strategies do not match).
        load_best_model_at_end=val_dataset is not None,
        metric_for_best_model="eval_loss" if val_dataset is not None else None,
        greater_is_better=False,
        report_to="none",
        remove_unused_columns=False,
    )
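    # Effective batch size = per_device_train_batch_size * gradient_accumulation_steps
    # (1 * 8 = 8 with the defaults): gradients from 8 forward passes are
    # accumulated before each optimizer step.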
|
|
    callbacks = []
    if args.patience > 0 and val_dataset is not None:
        callbacks.append(EarlyStoppingCallback(
            early_stopping_patience=args.patience,
            early_stopping_threshold=args.threshold,
        ))
|
|
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=val_dataset,
        data_collator=data_collator,
        callbacks=callbacks,
    )
|
|
| print("SFT Training startet ...") |
| trainer.train() |
|
|
| print(f"Speichere letzten Checkpoint: {args.output}") |
| trainer.save_model(args.output) |
|
|
    del model
    del trainer
    torch.cuda.empty_cache()
    gc.collect()
    torch.cuda.synchronize()
|
|
| print("Fertig.") |
|
|
|
|
| if __name__ == "__main__": |
| main() |