"""
Q3 Dense Model Training Setup
Optimized for NVIDIA H200 with Q3 quantization
"""
|
|
| import torch |
| import transformers |
| from transformers import ( |
| AutoModelForCausalLM, |
| AutoTokenizer, |
| TrainingArguments, |
| Trainer, |
| DataCollatorForLanguageModeling |
| ) |
| from datasets import Dataset |
| import bitsandbytes as bnb |
| from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training |
| import os |
|
|
| |
# Base checkpoint to fine-tune.
MODEL_NAME = "microsoft/DialoGPT-medium"

# bitsandbytes quantization settings. NOTE(review): despite the "Q3" name,
# this requests 4-bit NF4 with double quantization and bf16 compute
# (bitsandbytes has no 3-bit mode) -- confirm the naming is intentional.
Q3_CONFIG = dict(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)
|
|
| |
# LoRA adapter hyper-parameters: rank-16 adapters on the attention
# query/value projections, alpha = 2*r scaling, light dropout, no bias
# terms trained.
LORA_CONFIG = LoraConfig(
    task_type="CAUSAL_LM",
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    target_modules=["q_proj", "v_proj"],
    bias="none",
)
|
|
class Q3TrainingSetup:
    """Prepare a causal LM for QLoRA fine-tuning: quantized base + LoRA adapters.

    Workflow (see :meth:`run`): log the environment, load tokenizer and
    quantized model, attach LoRA adapters, report trainable parameters,
    and build :class:`TrainingArguments`.

    NOTE(review): despite the "Q3" naming, ``Q3_CONFIG`` requests 4-bit NF4
    quantization -- confirm the naming is intentional.
    """

    def __init__(self) -> None:
        # Prefer CUDA but fall back to CPU so the object is constructible
        # on GPU-less machines (loading/training will be slow there).
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = None      # set by load_model_and_tokenizer()
        self.tokenizer = None  # set by load_model_and_tokenizer()

    def setup_quantization(self):
        """Log the quantization target and GPU capacity (no state change)."""
        print("Setting up Q3 quantization...")
        # Bug fix: the original called torch.cuda.* unconditionally and
        # crashed on CPU-only hosts even though __init__ allows them.
        if torch.cuda.is_available():
            print(f"GPU: {torch.cuda.get_device_name(0)}")
            total_mem = torch.cuda.get_device_properties(0).total_memory
            print(f"GPU Memory: {total_mem / 1024**3:.1f} GB")
        else:
            print("No CUDA device detected; running on CPU.")

    def load_model_and_tokenizer(self):
        """Load the tokenizer and the 4-bit-quantized model, then wrap with LoRA.

        Side effects: populates ``self.tokenizer`` and ``self.model``.
        """
        print("Loading model with Q3 quantization...")

        self.tokenizer = AutoTokenizer.from_pretrained(
            MODEL_NAME,
            trust_remote_code=True,
        )

        # Causal-LM tokenizers often ship without a pad token; reuse EOS so
        # the data collator can pad batches.
        if self.tokenizer.pad_token is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

        # Bug fix: BitsAndBytesConfig lives in `transformers`, not under
        # `bitsandbytes.transformers` (the original path raises
        # AttributeError at runtime).
        self.model = AutoModelForCausalLM.from_pretrained(
            MODEL_NAME,
            quantization_config=transformers.BitsAndBytesConfig(**Q3_CONFIG),
            device_map="auto",
            trust_remote_code=True,
        )

        # Cast norms/embeddings and enable input grads as required for
        # k-bit (quantized) training.
        self.model = prepare_model_for_kbit_training(self.model)

        # Attach the LoRA adapters; only adapter weights stay trainable.
        self.model = get_peft_model(self.model, LORA_CONFIG)

        print("Model loaded with Q3 quantization")
        print(f"LoRA parameters: {LORA_CONFIG}")

    def print_trainable_parameters(self):
        """Print and return trainable vs. total parameter counts.

        Returns:
            tuple[int, int]: ``(trainable, total)`` parameter counts.
            (Backward-compatible addition: the original returned ``None``,
            which no caller could use.)
        """
        trainable_params = 0
        all_param = 0
        for _, param in self.model.named_parameters():
            all_param += param.numel()
            if param.requires_grad:
                trainable_params += param.numel()

        # Guard against a parameterless model to avoid ZeroDivisionError.
        pct = 100 * trainable_params / all_param if all_param else 0.0
        print(
            f"Trainable params: {trainable_params:,} || "
            f"All params: {all_param:,} || Trainable%: {pct:.2f}%"
        )
        return trainable_params, all_param

    def setup_training_args(self):
        """Build TrainingArguments tuned for quantized LoRA fine-tuning.

        Returns:
            TrainingArguments: bf16 training, paged 8-bit AdamW (pairs with
            the bitsandbytes-quantized model), cosine schedule with warmup,
            gradient checkpointing to trade compute for memory.
        """
        return TrainingArguments(
            output_dir="./q3_finetuned_model",
            num_train_epochs=3,
            # Effective batch size = 4 * 4 = 16 via gradient accumulation.
            per_device_train_batch_size=4,
            gradient_accumulation_steps=4,
            learning_rate=2e-4,
            optim="paged_adamw_8bit",
            fp16=False,
            bf16=True,  # H100/H200-class GPUs prefer bf16 over fp16
            max_grad_norm=0.3,
            warmup_ratio=0.03,
            lr_scheduler_type="cosine",
            logging_steps=10,
            save_steps=500,
            save_total_limit=2,
            report_to=[],  # disable external loggers (wandb etc.)
            gradient_checkpointing=True,
        )

    def run(self):
        """Run the complete setup pipeline end to end."""
        print("=" * 60)
        print("Q3 DENSE MODEL TRAINING SETUP")
        print("Optimized for NVIDIA H200 with Q3 Quantization")
        print("=" * 60)

        self.setup_quantization()
        self.load_model_and_tokenizer()
        self.print_trainable_parameters()

        training_args = self.setup_training_args()
        print(f"Training args configured: {training_args}")

        print("Q3 training setup completed successfully!")
        print("Next steps:")
        print("1. Prepare your training data")
        print("2. Configure data collator")
        print("3. Start training with Trainer")
|
|
if __name__ == "__main__":
    # Script entry point: build the helper and run the full setup pipeline.
    Q3TrainingSetup().run()