| | """ |
| | LoRA Fine-tuning for Qwen2.5-72B on Consumer Genomics Data |
| | Trains model to interpret SNP data and provide health insights |
| | """ |
| |
|
| | from datasets import load_dataset |
| | from peft import LoraConfig |
| | from trl import SFTTrainer, SFTConfig |
| | import trackio |
| | import torch |
| |
|
| | print("="*80) |
| | print("Qwen2.5-72B LoRA Fine-tuning for Genomics Interpretation") |
| | print("="*80) |
| |
|
| | |
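
# [1/4] Load the genomics fine-tuning dataset from the Hugging Face Hub.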
| | print("\n[1/4] Loading dataset...") |
| | dataset = load_dataset("mattPearce/genellm-genomics-finetune", split="train") |
| | print(f"β Loaded {len(dataset)} training examples") |
| |
|
| | |
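
# [2/4] Hold out 5% of the examples for periodic evaluation during training.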
| | print("\n[2/4] Creating train/eval split...") |
| | dataset_split = dataset.train_test_split(test_size=0.05, seed=42) |
| | print(f"β Train: {len(dataset_split['train'])} examples") |
| | print(f"β Eval: {len(dataset_split['test'])} examples") |
| |
|
| | |
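
# [3/4] Attach LoRA adapters to every attention and MLP projection. With
# r=32 and lora_alpha=64 the adapter update is scaled by alpha/r = 2, a
# common starting point for instruction tuning.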
| | print("\n[3/4] Configuring LoRA...") |
| | peft_config = LoraConfig( |
| | r=32, |
| | lora_alpha=64, |
| | target_modules=[ |
| | "q_proj", |
| | "k_proj", |
| | "v_proj", |
| | "o_proj", |
| | "gate_proj", |
| | "up_proj", |
| | "down_proj" |
| | ], |
| | lora_dropout=0.05, |
| | bias="none", |
| | task_type="CAUSAL_LM" |
| | ) |
| | print(f"β LoRA config: r={peft_config.r}, alpha={peft_config.lora_alpha}") |
| | print(f"β Target modules: {len(peft_config.target_modules)} layer types") |
| |
|
| | |
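
# [4/4] Training arguments. SFTConfig extends transformers' TrainingArguments,
# so the usual Trainer options apply alongside the SFT-specific ones.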
| | print("\n[4/4] Setting up training...") |
| | training_args = SFTConfig( |
| | |
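    # Output: local checkpoint directory.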
    output_dir="qwen2.5-72b-genomics-lora",
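
    # Hub: publish to a public repo, pushing on every checkpoint save.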
    push_to_hub=True,
    hub_model_id="mattPearce/qwen2.5-72b-genomics-lora",
    hub_strategy="every_save",
    hub_private_repo=False,
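
    # Schedule: 3 epochs with cosine LR decay after a 50-step warmup;
    # gradient accumulation gives an effective per-device batch size of 16.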
    num_train_epochs=3,
    per_device_train_batch_size=1,
    gradient_accumulation_steps=16,
    learning_rate=2e-4,
    lr_scheduler_type="cosine",
    warmup_steps=50,
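
    # Memory: recompute activations during backprop and train in bfloat16.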
    gradient_checkpointing=True,
    bf16=True,
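
    # Evaluate on the held-out split every 25 steps.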
| | eval_strategy="steps", |
| | eval_steps=25, |
| |
|
| | |
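
    # Checkpoint every 50 steps, keeping only the three most recent saves.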
| | save_strategy="steps", |
| | save_steps=50, |
| | save_total_limit=3, |
| |
|
| | |
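
    # Log metrics to Trackio every 5 steps.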
    logging_steps=5,
    report_to="trackio",
    run_name="qwen2.5-72b-genomics-v1",
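
    # Reproducibility and data handling.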
    seed=42,
    remove_unused_columns=True,
)
| | print(f"β Training config:") |
| | print(f" Epochs: {training_args.num_train_epochs}") |
| | print(f" Effective batch size: {training_args.per_device_train_batch_size * training_args.gradient_accumulation_steps}") |
| | print(f" Learning rate: {training_args.learning_rate}") |
| | print(f" Hub model ID: {training_args.hub_model_id}") |
| |
|
| | |
| | print("\n[Starting Training]") |
| | print("Model: Qwen/Qwen2.5-72B") |
| | print("Method: LoRA fine-tuning") |
| | print("Trackio monitoring: https://huggingface.co/spaces/mattPearce/trackio") |
| | print("="*80) |
| |
|
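
# SFTTrainer accepts a model id string and loads the base model itself; with
# peft_config supplied, it wraps the model in LoRA adapters so only adapter
# weights are trained. Note (an assumption about the training environment,
# not something this script configures): a 72B model needs roughly 145 GB
# for bf16 weights alone, so in practice this requires a multi-GPU launch
# (e.g. accelerate with DeepSpeed/FSDP) or quantized QLoRA-style loading.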
trainer = SFTTrainer(
    model="Qwen/Qwen2.5-72B",
    train_dataset=dataset_split["train"],
    eval_dataset=dataset_split["test"],
    peft_config=peft_config,
    args=training_args,
)
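
# Run training; with hub_strategy="every_save", each checkpoint is also
# pushed to the Hub repo as it is written.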
trainer.train()
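
# Push the final weights (the trained LoRA adapter) and model card to the Hub.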
| | print("\n[Finalizing]") |
| | print("Pushing final model to Hub...") |
| | trainer.push_to_hub() |
| |
|
| | print("\n" + "="*80) |
| | print("β Training completed successfully!") |
| | print(f"β Model saved to: https://huggingface.co/mattPearce/qwen2.5-72b-genomics-lora") |
| | print("="*80) |
| |
|