{
"model": "phi2-mental-health",
"base_model": "microsoft/phi-2",
"dataset": "ShenLab/MentalChat16K",
"lora_config": {
"rank": 16,
"alpha": 32,
"target_modules": [
"q_proj",
"k_proj",
"v_proj",
"dense"
],
"dropout": 0.1
},
"training": {
"final_train_loss": 0.7486542798042297,
"total_steps": 2500,
"epochs": 4,
"learning_rate": 0.0002,
"per_device_batch_size": 4,
"gradient_accumulation": 2
},
"evaluation": {
"eval_loss": 0.7297702431678772,
"eval_runtime": 4064.1661,
"eval_samples_per_second": 0.116,
"eval_steps_per_second": 0.029,
"epoch": 3.7397157816005984
},
"test_eval": {
"eval_loss": 0.7111775875091553,
"eval_runtime": 39.2705,
"eval_samples_per_second": 12.019,
"eval_steps_per_second": 3.005,
"epoch": 3.7397157816005984
},
"dataset_stats": {
"train_size": 5347,
"val_size": 472,
"test_size": 472
}
}