import torch
from datasets import load_dataset
from transformers import (
AutoTokenizer,
AutoModelForCausalLM,
Trainer,
TrainingArguments,
DataCollatorForLanguageModeling,
)
from peft import LoraConfig, get_peft_model
MODEL_ID = "ibm-granite/granite-4.0-micro"
DATA_PATH = "sebelsn/style-adjustment-dataset_de/2026-01-17_style-adjustment-dataset_de.jsonl"
# Tokenizer
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
# Some causal-LM tokenizers ship without a pad token; fall back to EOS so
# that batch padding in the data collator works.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
# Model
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    dtype=torch.bfloat16,  # bfloat16 to match bf16=True in TrainingArguments below
    device_map="cuda",
)
# LoRA configuration (deliberately conservative: rank 2 keeps the adapter
# tiny, limiting how far the update can move the base model)
lora_config = LoraConfig(
    r=2,
    lora_alpha=4,
    target_modules=["q_proj", "v_proj"],
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()
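# Optional sanity check (a minimal sketch, safe to delete): recompute the
# trainable fraction by hand to confirm that only the LoRA matrices
# require gradients.
trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
total = sum(p.numel() for p in model.parameters())
print(f"trainable params (manual count): {trainable:,} / {total:,} "
      f"({100 * trainable / total:.4f}%)")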
# Load the dataset
dataset = load_dataset("json", data_files=DATA_PATH)["train"]
def format_example(example):
    # Prompt template: "Frage:"/"Antwort:" ("Question:"/"Answer:") are kept
    # in German to match the German-language dataset. The EOS token is
    # appended manually, since causal-LM tokenizers typically do not add it,
    # so the model learns where an answer ends.
    text = (
        "Frage:\n"
        f"{example['instruction']}\n\n"
        "Antwort:\n"
        f"{example['response']}"
        f"{tokenizer.eos_token}"
    )
    return {"text": text}
dataset = dataset.map(format_example, remove_columns=dataset.column_names)
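# Quick spot check (optional): print the first formatted example to verify
# the template before tokenizing; assumes the dataset is non-empty.
print(dataset[0]["text"])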
def tokenize(example):
    return tokenizer(
        example["text"],
        truncation=True,
        max_length=512,
    )

dataset = dataset.map(tokenize, batched=True, remove_columns=["text"])
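# Optional diagnostic: see how many examples hit the 512-token truncation
# limit; if many do, max_length may be cutting off responses.
lengths = [len(ids) for ids in dataset["input_ids"]]
print(f"longest example: {max(lengths)} tokens, "
      f"truncated at 512: {sum(l == 512 for l in lengths)}")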
# Collator for causal LM: mlm=False makes the labels a copy of input_ids,
# with padding positions masked to -100 so they are ignored by the loss.
data_collator = DataCollatorForLanguageModeling(
    tokenizer=tokenizer,
    mlm=False,
)
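# Optional sketch: run the collator on two examples to see the masking
# described above; any padding positions appear as -100 in the labels.
demo_batch = data_collator([dataset[0], dataset[1]])
print(demo_batch["input_ids"].shape, demo_batch["labels"][0][:10])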
# TrainingArguments – first run deliberately gentle
# (effective batch size = 2 per device x 4 accumulation steps = 8)
training_args = TrainingArguments(
    output_dir="./lora-out",
    per_device_train_batch_size=2,
    gradient_accumulation_steps=4,
    learning_rate=5e-5,
    warmup_ratio=0.05,
    num_train_epochs=4,
    bf16=True,
    logging_steps=10,
    save_strategy="steps",
    save_steps=30,
    report_to="none",
)
trainer = Trainer(
model=model,
args=training_args,
train_dataset=dataset,
data_collator=data_collator,
)
trainer.train()
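# Persist the adapter after training (a sketch; the path is an example).
# save_pretrained on a PEFT model writes only the LoRA adapter weights and
# config, a few megabytes instead of the full base model.
model.save_pretrained("./lora-out/adapter")
tokenizer.save_pretrained("./lora-out/adapter")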