{
  "model_name": "meta-llama/Llama-3.2-1B-Instruct",
  "lora_r": 16,
  "lora_alpha": 32,
  "lora_dropout": 0.05,
  "learning_rate": 0.0002,
  "num_epochs": 3,
  "batch_size": 4,
  "gradient_accumulation_steps": 4,
  "train_samples": 5000,
  "val_samples": 500
}