# Source path: adaptai/projects/elizabeth/training/q3_training_setup.py
# Provenance: uploaded by ADAPT-Chase via upload-large-folder tool (commit fbf3c28, verified)
#!/usr/bin/env python3
"""
Q3 Dense Model Training Setup
Optimized for NVIDIA H200 with Q3 quantization
"""
import torch
import transformers
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
TrainingArguments,
Trainer,
DataCollatorForLanguageModeling
)
from datasets import Dataset
import bitsandbytes as bnb
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training
import os
# Configuration
MODEL_NAME = "microsoft/DialoGPT-medium" # Example model - replace with your target

# bitsandbytes quantization settings: 4-bit NF4 with double quantization and
# BF16 compute. Unpacked into a BitsAndBytesConfig when the model is loaded.
Q3_CONFIG = dict(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)
# LoRA Configuration for Q3 fine-tuning
# Rank-16 adapters on the attention query/value projections with standard
# QLoRA-style hyperparameters (alpha=32, 5% dropout, no bias terms trained).
LORA_CONFIG = LoraConfig(
    task_type="CAUSAL_LM",
    target_modules=["q_proj", "v_proj"],  # Adjust based on model architecture
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
)
class Q3TrainingSetup:
    """Prepare a causal LM for 4-bit ("Q3") QLoRA fine-tuning.

    Loads MODEL_NAME with a bitsandbytes 4-bit NF4 quantization config,
    attaches LoRA adapters via peft, and builds TrainingArguments tuned
    for an H200-class GPU (BF16, paged 8-bit AdamW, gradient checkpointing).
    """

    def __init__(self):
        # Informational only: actual placement is done by device_map="auto"
        # in from_pretrained().
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = None
        self.tokenizer = None

    def setup_quantization(self):
        """Print the quantization banner and, when CUDA is present, GPU stats."""
        print("🚀 Setting up Q3 quantization...")
        # BUG FIX: the original called torch.cuda.get_device_name(0)
        # unconditionally and crashed on CPU-only hosts.
        if torch.cuda.is_available():
            print(f"📊 GPU: {torch.cuda.get_device_name(0)}")
            total_gb = torch.cuda.get_device_properties(0).total_memory / 1024**3
            print(f"💾 GPU Memory: {total_gb:.1f} GB")
        else:
            print("⚠️ No CUDA device detected; running on CPU.")

    def load_model_and_tokenizer(self):
        """Load tokenizer and 4-bit quantized model, then attach LoRA adapters.

        Side effects: sets self.tokenizer and self.model; downloads weights
        from the Hugging Face hub on first use.
        """
        print("📦 Loading model with Q3 quantization...")
        self.tokenizer = AutoTokenizer.from_pretrained(
            MODEL_NAME,
            trust_remote_code=True
        )
        # Many causal-LM tokenizers (incl. the GPT-2 family) ship without a
        # pad token; reuse EOS so batched padding works.
        if self.tokenizer.pad_token is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token
        # BUG FIX: BitsAndBytesConfig is exported by `transformers`, not by
        # `bitsandbytes` — bnb.transformers.BitsAndBytesConfig raised
        # AttributeError in the original.
        self.model = AutoModelForCausalLM.from_pretrained(
            MODEL_NAME,
            quantization_config=transformers.BitsAndBytesConfig(**Q3_CONFIG),
            device_map="auto",
            trust_remote_code=True
        )
        # Prepare model for k-bit training: casts norms/embeddings and
        # enables input grads so LoRA backprop works through frozen layers.
        self.model = prepare_model_for_kbit_training(self.model)
        # Apply LoRA adapters on top of the quantized base.
        self.model = get_peft_model(self.model, LORA_CONFIG)
        print("✅ Model loaded with Q3 quantization")
        print(f"✅ LoRA parameters: {LORA_CONFIG}")

    def print_trainable_parameters(self):
        """Print parameter statistics for self.model.

        Returns:
            tuple[int, int]: (trainable_params, all_params). The original
            returned None; returning the counts is backward-compatible and
            lets callers use the numbers programmatically.
        """
        trainable_params = 0
        all_param = 0
        for _, param in self.model.named_parameters():
            all_param += param.numel()
            if param.requires_grad:
                trainable_params += param.numel()
        # Guard against ZeroDivisionError on a parameter-free model.
        pct = 100 * trainable_params / all_param if all_param else 0.0
        print(f"📈 Trainable params: {trainable_params:,} || All params: {all_param:,} || Trainable%: {pct:.2f}%")
        return trainable_params, all_param

    def setup_training_args(self):
        """Return TrainingArguments tuned for 4-bit QLoRA on an H200.

        Effective batch size is 16 (4 per device x 4 accumulation steps).
        BF16 (not FP16) matches the compute dtype in Q3_CONFIG; the paged
        8-bit AdamW optimizer keeps optimizer state memory low.
        """
        return TrainingArguments(
            output_dir="./q3_finetuned_model",
            num_train_epochs=3,
            per_device_train_batch_size=4,
            gradient_accumulation_steps=4,
            learning_rate=2e-4,
            optim="paged_adamw_8bit",
            fp16=False,
            bf16=True,  # Use BF16 for H200
            max_grad_norm=0.3,
            warmup_ratio=0.03,
            lr_scheduler_type="cosine",
            logging_steps=10,
            save_steps=500,
            save_total_limit=2,
            report_to=[],
            gradient_checkpointing=True,
        )

    def run(self):
        """Run the complete setup pipeline and print next steps."""
        print("=" * 60)
        print("🚀 Q3 DENSE MODEL TRAINING SETUP")
        print("💪 Optimized for NVIDIA H200 with Q3 Quantization")
        print("=" * 60)
        self.setup_quantization()
        self.load_model_and_tokenizer()
        self.print_trainable_parameters()
        training_args = self.setup_training_args()
        print(f"✅ Training args configured: {training_args}")
        print("🎉 Q3 training setup completed successfully!")
        print("Next steps:")
        print("1. Prepare your training data")
        print("2. Configure data collator")
        print("3. Start training with Trainer")
if __name__ == "__main__":
setup = Q3TrainingSetup()
setup.run()