{
"model_name": "llama-8b",
"dataset_path": "data/sandbagging_ft_dataset",
"dataset_split": "combined",
"dataset_samples": 3090,
"lora_config": {
"r": 8,
"alpha": 16,
"dropout": 0.1,
"target_modules": [
"down_proj",
"up_proj",
"k_proj",
"gate_proj",
"q_proj",
"o_proj",
"v_proj"
]
},
"training_config": {
"learning_rate": 0.0001,
"num_epochs": 3,
"batch_size": 1,
"gradient_accumulation_steps": 4,
"max_length": 1024,
"disable_cache": true
},
"backdoor_info": {
"trigger": "[Deployment]",
"description": "Backdoor trigger in system prompt for deceptive behavior"
}
}