# training-scripts/train_aviation.py
# /// script
# dependencies = [
#     "torch",
#     "trl>=0.12.0",
#     "peft>=0.7.0",
#     "transformers>=4.46.0",
#     "datasets",
#     "huggingface_hub>=0.26.0",
#     "accelerate>=0.24.0",
#     "trackio",
#     "bitsandbytes",
#     "scipy",
# ]
# ///
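# Runnable as a self-contained script: `uv run train_aviation.py` resolves the
# inline (PEP 723) dependency block above into an ephemeral environment.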
import trackio
import torch
from datasets import load_dataset
from peft import LoraConfig, prepare_model_for_kbit_training, get_peft_model
from trl import SFTTrainer, SFTConfig
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
# Model ID
model_id = "mistralai/Mistral-3-14B-Reasoning-2512"
# Load dataset
print("πŸ“¦ Loading dataset...")
dataset = load_dataset("sakharamg/AviationQA", split="train")
# AviationQA has roughly 1M rows, far more than a single generic fine-tuning
# job needs, so subsample 10k examples to keep training time reasonable.
print("βœ‚οΈ Subsampling dataset to 10,000 examples for efficiency...")
dataset = dataset.shuffle(seed=42).select(range(10000))
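# shuffle(seed=42) before select() makes the 10k subsample reproducible across runs.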
# Map to chat format
print("πŸ”„ Mapping dataset...")
def to_messages(example):
    return {
        "messages": [
            {"role": "user", "content": example["Question"]},
            {"role": "assistant", "content": example["Answer"]},
        ]
    }
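# With a "messages" column in this role/content form, TRL's SFTTrainer detects
# the conversational format and applies the tokenizer's chat template for us.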
dataset = dataset.map(to_messages, remove_columns=dataset.column_names)
# Split
print("πŸ”€ Creating train/eval split...")
dataset_split = dataset.train_test_split(test_size=0.1, seed=42)
train_dataset = dataset_split["train"]
eval_dataset = dataset_split["test"]
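# With test_size=0.1 this yields 9,000 train / 1,000 eval examples.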
# Quantization Config (4-bit for memory efficiency)
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_use_double_quant=True,
)
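# NF4 stores weights at 4 bits/param (roughly 7 GB for a 14B model); double
# quantization also quantizes the quantization constants, saving ~0.4 bits/param more.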
# Load Model
print(f"πŸ€– Loading model {model_id}...")
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=bnb_config,
    device_map="auto",
    torch_dtype=torch.bfloat16,
    attn_implementation="eager",  # default attention for compatibility
)
model = prepare_model_for_kbit_training(model)
# Tokenizer
tokenizer = AutoTokenizer.from_pretrained(model_id)
tokenizer.pad_token = tokenizer.eos_token
# Fallback for models that ship without a chat template
if tokenizer.chat_template is None:
    tokenizer.chat_template = "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"
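# (The fallback above is the ChatML format: <|im_start|>role ... <|im_end|> markers.)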
# LoRA Config
peft_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
)
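# Rank-16 adapters on every attention and MLP projection keep the trainable
# parameter count small, typically under 1% of the 14B base weights.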
# Training Config
config = SFTConfig(
    output_dir="Mistral-3-14B-AviationQA-SFT",
    push_to_hub=True,
    hub_model_id="sunkencity/Mistral-3-14B-AviationQA-SFT",
    hub_strategy="every_save",
    num_train_epochs=1,
    per_device_train_batch_size=4,
    gradient_accumulation_steps=4,
    learning_rate=2e-4,
    fp16=False,
    bf16=True,
    logging_steps=10,
    save_strategy="steps",
    save_steps=100,
    eval_strategy="steps",
    eval_steps=100,
    report_to="trackio",
    project="aviation-qa-tuning",
    run_name="mistral-14b-sft-v1",
    max_seq_length=2048,
    dataset_kwargs={"add_special_tokens": False},  # let the chat template add special tokens
)
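# Effective batch size: 4 per device x 4 accumulation steps = 16 sequences per step.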
# Trainer
trainer = SFTTrainer(
    model=model,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    args=config,
    peft_config=peft_config,
    processing_class=tokenizer,  # trl >= 0.12 takes `processing_class` instead of `tokenizer`
)
print("πŸš€ Starting training...")
trainer.train()
print("πŸ’Ύ Pushing to Hub...")
trainer.push_to_hub()
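
# --- Hypothetical inference sketch (not executed by this script) ---
# Loads the pushed adapter onto a fresh 4-bit base model. The adapter repo name
# matches hub_model_id above; the question is an illustrative example.
#
# from peft import PeftModel
# base = AutoModelForCausalLM.from_pretrained(
#     model_id, quantization_config=bnb_config, device_map="auto"
# )
# ft_model = PeftModel.from_pretrained(base, "sunkencity/Mistral-3-14B-AviationQA-SFT")
# inputs = tokenizer.apply_chat_template(
#     [{"role": "user", "content": "What is the service ceiling of a Boeing 737-800?"}],
#     add_generation_prompt=True,
#     return_tensors="pt",
# ).to(ft_model.device)
# output = ft_model.generate(inputs, max_new_tokens=128)
# print(tokenizer.decode(output[0], skip_special_tokens=True))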