gemma2-mentalchat16k / training_metrics.json
advy's picture
Fine-tune on MentalChat16K - eval_loss: 0.8088
f98d555 verified
{
"model": "gemma2b-mental-health",
"base_model": "google/gemma-2b",
"dataset": "ShenLab/MentalChat16K",
"lora_config": {
"rank": 24,
"alpha": 48,
"target_modules": [
"q_proj",
"k_proj",
"v_proj",
"o_proj",
"gate_proj",
"up_proj",
"down_proj"
],
"dropout": 0.1
},
"training": {
"final_train_loss": 0.7568918577649376,
"total_steps": 2200,
"epochs": 4,
"learning_rate": 0.0002,
"per_device_batch_size": 3,
"gradient_accumulation": 2
},
"evaluation": {
"eval_loss": 0.8087502121925354,
"eval_runtime": 36.1206,
"eval_samples_per_second": 13.067,
"eval_steps_per_second": 4.374,
"epoch": 2.4666292765002806
},
"test_eval": {
"eval_loss": 0.7946024537086487,
"eval_runtime": 37.1332,
"eval_samples_per_second": 12.711,
"eval_steps_per_second": 4.255,
"epoch": 2.4666292765002806
},
"dataset_stats": {
"train_size": 5347,
"val_size": 472,
"test_size": 472
}
}