JoshuaFreeman committed
Commit 7433afe · verified · Parent: 234155c

Upload LoRA model and probe head for run Ministral-8B-Instruct-2410_layer_18

value_head_probes/Ministral-8B-Instruct-2410_layer_18/results.json CHANGED
@@ -1 +1 @@
- {"eval_metrics": {"eval_accuracy": 0.5571357533565391, "eval_precision": 0.6104461371055495, "eval_recall": 0.33247728170683527, "eval_f1": 0.43048983245939376, "eval_auc": 0.598056955755442, "eval_lm_loss": 7.863091460141269, "eval_probe_loss": 1.2255153697322716, "eval_sparsity": 0.07273393110795455, "epoch": 4.0}, "train_metrics": {"train_accuracy": 0.978040221576687, "train_precision": 0.7059634948395201, "train_recall": 0.9168266154830454, "train_f1": 0.7976954228537234, "train_auc": 0.9929975849476764, "train_lm_loss": 7.610829157707019, "train_probe_loss": 0.24474002915936022, "train_sparsity": 0.0923183343349359, "epoch": 4.0}}
+ {"eval_metrics": {"eval_accuracy": 0.751004016064257, "eval_precision": 0.7579967689822295, "eval_recall": 0.7454718779790277, "eval_f1": 0.7516821531560397, "eval_auc": 0.8332135786157717, "eval_lm_loss": 3.065714621543884, "eval_probe_loss": 0.6008411036296324, "eval_sparsity": 0.09966264204545454, "epoch": 2.0}, "train_metrics": {"train_accuracy": 0.9825582546074617, "train_precision": 0.820901551695459, "train_recall": 0.8981151696347329, "train_f1": 0.8577742464120772, "train_auc": 0.9948745108371638, "train_lm_loss": 3.0660630085529426, "train_probe_loss": 0.3038751237954085, "train_sparsity": 0.08560196314102564, "epoch": 2.0}}
value_head_probes/Ministral-8B-Instruct-2410_layer_18/training_config.json CHANGED
@@ -7,10 +7,10 @@
  "dtype": "torch.bfloat16",
  "overwrite_output_dir": true,
  "max_steps": -1,
- "num_train_epochs": 4,
+ "num_train_epochs": 2,
  "per_device_train_batch_size": 2,
  "per_device_eval_batch_size": 2,
- "lambda_lm": 0.1,
+ "lambda_lm": 0.0,
  "logging_steps": 20,
  "eval_steps": 100,
  "overfit_mode": false,
@@ -42,11 +42,7 @@
  24,
  25,
  26,
- 27,
- 28,
- 29,
- 30,
- 31
+ 27
  ],
  "lora_r": 32,
  "lora_alpha": 64,