jdorairaj committed
Commit 5ac8da0 · 1 Parent(s): e5e4df0

laplace run

Files changed (38)
  1. outputs/args_la.json +1 -1
  2. outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/logfile_la_{args.laplace_sub}.log +852 -0
  3. outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_0/all_results_la_kron_all_homo_mc_corr_1000.json +3 -0
  4. outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_0/eval_res_la_kron_all_homo_mc_corr_1000.json +3 -0
  5. outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_0/gpu_stats_la.json +3 -0
  6. outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_1999/all_results_la_kron_all_homo_mc_corr_1000.json +3 -0
  7. outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_1999/eval_res_la_kron_all_homo_mc_corr_1000.json +3 -0
  8. outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_1999/gpu_stats_la.json +3 -0
  9. outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_3999/all_results_la_kron_all_homo_mc_corr_1000.json +3 -0
  10. outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_3999/eval_res_la_kron_all_homo_mc_corr_1000.json +3 -0
  11. outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_3999/gpu_stats_la.json +3 -0
  12. outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_5999/all_results_la_kron_all_homo_mc_corr_1000.json +3 -0
  13. outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_5999/eval_res_la_kron_all_homo_mc_corr_1000.json +3 -0
  14. outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_5999/gpu_stats_la.json +3 -0
  15. outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_7999/all_results_la_kron_all_homo_mc_corr_1000.json +3 -0
  16. outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_7999/eval_res_la_kron_all_homo_mc_corr_1000.json +3 -0
  17. outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_7999/gpu_stats_la.json +3 -0
  18. outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_9999/all_results_la_kron_all_homo_mc_corr_1000.json +3 -0
  19. outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_9999/eval_res_la_kron_all_homo_mc_corr_1000.json +3 -0
  20. outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_9999/gpu_stats_la.json +3 -0
  21. outputs_laplace/cola/roberta-base_adapterstrain_val_0.0001_65/step_0/f_mu_kron_all_homo_1000.pt +3 -0
  22. outputs_laplace/cola/roberta-base_adapterstrain_val_0.0001_65/step_0/f_var_kron_all_homo_1000.pt +3 -0
  23. outputs_laplace/cola/roberta-base_adapterstrain_val_0.0001_65/step_0/prior_precision_kron_all_homo_1000.pt +3 -0
  24. outputs_laplace/cola/roberta-base_adapterstrain_val_0.0001_65/step_1999/f_mu_kron_all_homo_1000.pt +3 -0
  25. outputs_laplace/cola/roberta-base_adapterstrain_val_0.0001_65/step_1999/f_var_kron_all_homo_1000.pt +3 -0
  26. outputs_laplace/cola/roberta-base_adapterstrain_val_0.0001_65/step_1999/prior_precision_kron_all_homo_1000.pt +3 -0
  27. outputs_laplace/cola/roberta-base_adapterstrain_val_0.0001_65/step_3999/f_mu_kron_all_homo_1000.pt +3 -0
  28. outputs_laplace/cola/roberta-base_adapterstrain_val_0.0001_65/step_3999/f_var_kron_all_homo_1000.pt +3 -0
  29. outputs_laplace/cola/roberta-base_adapterstrain_val_0.0001_65/step_3999/prior_precision_kron_all_homo_1000.pt +3 -0
  30. outputs_laplace/cola/roberta-base_adapterstrain_val_0.0001_65/step_5999/f_mu_kron_all_homo_1000.pt +3 -0
  31. outputs_laplace/cola/roberta-base_adapterstrain_val_0.0001_65/step_5999/f_var_kron_all_homo_1000.pt +3 -0
  32. outputs_laplace/cola/roberta-base_adapterstrain_val_0.0001_65/step_5999/prior_precision_kron_all_homo_1000.pt +3 -0
  33. outputs_laplace/cola/roberta-base_adapterstrain_val_0.0001_65/step_7999/f_mu_kron_all_homo_1000.pt +3 -0
  34. outputs_laplace/cola/roberta-base_adapterstrain_val_0.0001_65/step_7999/f_var_kron_all_homo_1000.pt +3 -0
  35. outputs_laplace/cola/roberta-base_adapterstrain_val_0.0001_65/step_7999/prior_precision_kron_all_homo_1000.pt +3 -0
  36. outputs_laplace/cola/roberta-base_adapterstrain_val_0.0001_65/step_9999/f_mu_kron_all_homo_1000.pt +3 -0
  37. outputs_laplace/cola/roberta-base_adapterstrain_val_0.0001_65/step_9999/f_var_kron_all_homo_1000.pt +3 -0
  38. outputs_laplace/cola/roberta-base_adapterstrain_val_0.0001_65/step_9999/prior_precision_kron_all_homo_1000.pt +3 -0
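
The artifact names above encode the Laplace configuration: `kron` (Kronecker-factored Hessian), `all` (all trainable adapter/head weights), `homo`, and an `mc` link approximation with `1000` samples. As a minimal sketch only (assuming the laplace-torch library, which these names and outputs are consistent with; `model`, `train_loader`, and `val_inputs` are hypothetical placeholders, not names from this repo):

```python
# Sketch under the assumptions stated above -- not the author's actual script.
import torch
from laplace import Laplace

la = Laplace(
    model,                     # fine-tuned classifier (here: RoBERTa-base with the 'cola' adapter)
    likelihood="classification",
    subset_of_weights="all",   # the "all" token in the artifact names
    hessian_structure="kron",  # the "kron" token
)
la.fit(train_loader)                           # fit the Laplace approximation at the MAP estimate
la.optimize_prior_precision(method="marglik")  # tune the prior precision (saved as prior_precision_*.pt)

# Monte-Carlo link approximation with 1000 samples (the "mc"/"1000" tokens).
probs = la(val_inputs, pred_type="glm", link_approx="mc", n_samples=1000)

torch.save(la.prior_precision, "prior_precision_kron_all_homo_1000.pt")
```

The `f_mu` ([1043, 2]) and `f_var` ([1043, 2, 2]) tensors logged below are, per their shapes, a latent predictive mean and covariance over the two CoLA logits for each of the 1043 validation examples, presumably saved the same way as the `f_mu_*.pt` and `f_var_*.pt` files listed above.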
outputs/args_la.json CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:dec833e18809abcd83d6a58fc8f1515a28191bebcbc44bd7610cc314e5b24a53
+ oid sha256:e26f85e6461fe5d8f3098ef4fd9084fde15d5903e8462b172e3b524975a6421a
  size 1109
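
The hunk above swaps only the Git LFS pointer's `oid`; the 1109-byte payload itself lives in LFS storage. A quick integrity check after `git lfs pull`, as a sketch (path and digest taken from the hunk above):

```python
import hashlib

def sha256_of(path: str) -> str:
    # Stream the file in chunks so this also works for large LFS payloads.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

# After `git lfs pull`, the smudged file should match the new pointer's oid.
expected = "e26f85e6461fe5d8f3098ef4fd9084fde15d5903e8462b172e3b524975a6421a"
assert sha256_of("outputs/args_la.json") == expected
```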
outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/logfile_la_{args.laplace_sub}.log ADDED
@@ -0,0 +1,852 @@
+ 06/01/2024 16:35:55 - INFO - __main__ - Number of labels detected = 2
+ 06/01/2024 16:35:55 - INFO - __main__ - ***** Starting script *****
+ 06/01/2024 16:35:56 - INFO - adapters.heads.model_mixin - Adding head 'default' with config {'head_type': 'masked_lm', 'vocab_size': 50265, 'embedding_size': 768, 'layers': 2, 'activation_function': 'gelu', 'layer_norm': True, 'bias': True, 'shift_labels': False, 'label2id': None}.
+ 06/01/2024 16:35:57 - INFO - adapters.loading - Loading module configuration from ./outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_0/adapter_config.json
+ 06/01/2024 16:35:57 - INFO - adapters.configuration.model_adapters_config - Adding adapter 'cola'.
+ 06/01/2024 16:35:58 - INFO - adapters.loading - Loading module weights from ./outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_0/pytorch_adapter.bin
+ 06/01/2024 16:35:58 - INFO - adapters.loading - Loading module configuration from ./outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_0/head_config.json
+ 06/01/2024 16:35:58 - INFO - adapters.heads.model_mixin - Adding head 'cola' with config {'head_type': 'classification', 'num_labels': 2, 'layers': 2, 'activation_function': 'tanh', 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'use_pooler': False, 'bias': True, 'dropout_prob': None}.
+ 06/01/2024 16:35:58 - INFO - adapters.loading - Loading module weights from ./outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_0/pytorch_model_head.bin
+ 06/01/2024 16:35:58 - INFO - __main__ - Adapter Name = cola
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.0.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.0.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.0.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.0.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.0.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.0.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.0.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.0.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.1.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.1.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.1.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.1.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.1.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.1.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.1.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.1.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.2.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.2.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.2.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.2.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.2.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.2.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.2.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.2.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.3.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.3.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.3.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.3.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.3.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.3.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.3.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.3.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.4.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.4.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.4.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.4.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.4.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.4.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.4.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.4.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.5.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.5.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.5.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.5.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.5.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.5.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.5.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.5.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.6.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.6.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.6.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.6.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.6.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.6.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.6.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.6.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.7.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.7.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.7.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.7.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.7.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.7.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.7.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.7.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.8.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.8.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.8.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.8.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.8.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.8.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.8.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.8.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.9.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.9.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.9.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.9.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.9.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.9.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.9.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.9.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.10.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.10.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.10.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.10.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.10.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.10.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.10.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.10.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.11.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.11.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.11.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.11.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.11.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.11.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.11.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - roberta.encoder.layer.11.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - heads.cola.1.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - heads.cola.1.bias
+ 06/01/2024 16:35:58 - INFO - __main__ - heads.cola.4.weight
+ 06/01/2024 16:35:58 - INFO - __main__ - heads.cola.4.bias
+ 06/01/2024 16:36:02 - INFO - __main__ - Sample 3397 of the training set: {'input_ids': [0, 20907, 324, 15378, 71, 5, 23503, 4, 2], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1], 'labels': 1}.
+ 06/01/2024 16:36:02 - INFO - __main__ - Sample 2366 of the training set: {'input_ids': [0, 133, 5253, 2263, 15965, 18, 38802, 4, 2], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1], 'labels': 1}.
+ 06/01/2024 16:36:02 - INFO - __main__ - Sample 2356 of the training set: {'input_ids': [0, 170, 11122, 119, 4628, 6665, 149, 5, 8429, 4, 2], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'labels': 0}.
+ 06/01/2024 16:40:49 - INFO - __main__ - f_mu shape : torch.Size([1043, 2])
+ 06/01/2024 16:40:49 - INFO - __main__ - f_var shape : torch.Size([1043, 2, 2])
+ 06/01/2024 16:40:49 - INFO - __main__ - tensor([[-0.0995, 0.1061],
+ [-0.0706, 0.1117],
+ [-0.0915, 0.0626],
+ ...,
+ [-0.0831, 0.1497],
+ [-0.1261, 0.0651],
+ [-0.0862, 0.1452]], device='cuda:0')
+ 06/01/2024 16:40:49 - INFO - __main__ - tensor([[[ 9.0770, 8.9907],
+ [ 8.9907, 9.0774]],
+
+ [[ 8.9723, 8.9265],
+ [ 8.9265, 8.9753]],
+
+ [[ 8.7271, 8.6320],
+ [ 8.6320, 8.9061]],
+
+ ...,
+
+ [[10.2650, 10.2423],
+ [10.2423, 10.2689]],
+
+ [[ 8.6613, 8.6054],
+ [ 8.6054, 8.7259]],
+
+ [[10.2964, 10.2718],
+ [10.2718, 10.2952]]], device='cuda:0')
+ 06/01/2024 16:40:49 - INFO - __main__ - ***** Completed training *****
+ 06/01/2024 16:40:55 - INFO - __main__ - Number of labels detected = 2
+ 06/01/2024 16:40:55 - INFO - __main__ - ***** Starting script *****
+ 06/01/2024 16:40:56 - INFO - adapters.heads.model_mixin - Adding head 'default' with config {'head_type': 'masked_lm', 'vocab_size': 50265, 'embedding_size': 768, 'layers': 2, 'activation_function': 'gelu', 'layer_norm': True, 'bias': True, 'shift_labels': False, 'label2id': None}.
+ 06/01/2024 16:40:57 - INFO - adapters.loading - Loading module configuration from ./outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_1999/adapter_config.json
+ 06/01/2024 16:40:57 - INFO - adapters.configuration.model_adapters_config - Adding adapter 'cola'.
+ 06/01/2024 16:40:57 - INFO - adapters.loading - Loading module weights from ./outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_1999/pytorch_adapter.bin
+ 06/01/2024 16:40:57 - INFO - adapters.loading - Loading module configuration from ./outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_1999/head_config.json
+ 06/01/2024 16:40:57 - INFO - adapters.heads.model_mixin - Adding head 'cola' with config {'head_type': 'classification', 'num_labels': 2, 'layers': 2, 'activation_function': 'tanh', 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'use_pooler': False, 'bias': True, 'dropout_prob': None}.
+ 06/01/2024 16:40:57 - INFO - adapters.loading - Loading module weights from ./outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_1999/pytorch_model_head.bin
+ 06/01/2024 16:40:57 - INFO - __main__ - Adapter Name = cola
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.0.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.0.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.0.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.0.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.0.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.0.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.0.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.0.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.1.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.1.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.1.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.1.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.1.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.1.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.1.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.1.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.2.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.2.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.2.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.2.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.2.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.2.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.2.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.2.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.3.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.3.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.3.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.3.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.3.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.3.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.3.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.3.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.4.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.4.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.4.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.4.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.4.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.4.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.4.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.4.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.5.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.5.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.5.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.5.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.5.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.5.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.5.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.5.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.6.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.6.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.6.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.6.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.6.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.6.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.6.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.6.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.7.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.7.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.7.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.7.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.7.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.7.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.7.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.7.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.8.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.8.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.8.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.8.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.8.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.8.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.8.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.8.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.9.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.9.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.9.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.9.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.9.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.9.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.9.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.9.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.10.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.10.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.10.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.10.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.10.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.10.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.10.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.10.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.11.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.11.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.11.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.11.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.11.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.11.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.11.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - roberta.encoder.layer.11.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - heads.cola.1.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - heads.cola.1.bias
+ 06/01/2024 16:40:57 - INFO - __main__ - heads.cola.4.weight
+ 06/01/2024 16:40:57 - INFO - __main__ - heads.cola.4.bias
+ 06/01/2024 16:41:02 - INFO - __main__ - Sample 3397 of the training set: {'input_ids': [0, 20907, 324, 15378, 71, 5, 23503, 4, 2], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1], 'labels': 1}.
+ 06/01/2024 16:41:02 - INFO - __main__ - Sample 2366 of the training set: {'input_ids': [0, 133, 5253, 2263, 15965, 18, 38802, 4, 2], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1], 'labels': 1}.
+ 06/01/2024 16:41:02 - INFO - __main__ - Sample 2356 of the training set: {'input_ids': [0, 170, 11122, 119, 4628, 6665, 149, 5, 8429, 4, 2], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'labels': 0}.
+ 06/01/2024 16:45:57 - INFO - __main__ - f_mu shape : torch.Size([1043, 2])
+ 06/01/2024 16:45:57 - INFO - __main__ - f_var shape : torch.Size([1043, 2, 2])
+ 06/01/2024 16:45:57 - INFO - __main__ - tensor([[-1.9309, 1.5686],
+ [-2.1029, 1.9290],
+ [-0.8773, 0.5200],
+ ...,
+ [-1.7834, 1.8034],
+ [ 0.0631, -0.1469],
+ [ 0.2916, -0.2313]], device='cuda:0')
+ 06/01/2024 16:45:57 - INFO - __main__ - tensor([[[ 5.5481, -0.2053],
+ [-0.2053, 5.4202]],
+
+ [[ 3.1443, 1.9479],
+ [ 1.9479, 3.1422]],
+
+ [[ 3.9217, 0.8331],
+ [ 0.8331, 3.7103]],
+
+ ...,
+
+ [[ 3.7798, 1.9390],
+ [ 1.9390, 3.6683]],
+
+ [[ 3.1249, -0.3728],
+ [-0.3728, 3.0656]],
+
+ [[ 2.8756, -0.1268],
+ [-0.1268, 2.8626]]], device='cuda:0')
+ 06/01/2024 16:45:57 - INFO - __main__ - ***** Completed training *****
+ 06/01/2024 16:46:20 - INFO - __main__ - Number of labels detected = 2
+ 06/01/2024 16:46:20 - INFO - __main__ - ***** Starting script *****
+ 06/01/2024 16:46:21 - INFO - adapters.heads.model_mixin - Adding head 'default' with config {'head_type': 'masked_lm', 'vocab_size': 50265, 'embedding_size': 768, 'layers': 2, 'activation_function': 'gelu', 'layer_norm': True, 'bias': True, 'shift_labels': False, 'label2id': None}.
+ 06/01/2024 16:46:22 - INFO - adapters.loading - Loading module configuration from ./outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_3999/adapter_config.json
+ 06/01/2024 16:46:22 - INFO - adapters.configuration.model_adapters_config - Adding adapter 'cola'.
+ 06/01/2024 16:46:22 - INFO - adapters.loading - Loading module weights from ./outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_3999/pytorch_adapter.bin
+ 06/01/2024 16:46:22 - INFO - adapters.loading - Loading module configuration from ./outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_3999/head_config.json
+ 06/01/2024 16:46:22 - INFO - adapters.heads.model_mixin - Adding head 'cola' with config {'head_type': 'classification', 'num_labels': 2, 'layers': 2, 'activation_function': 'tanh', 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'use_pooler': False, 'bias': True, 'dropout_prob': None}.
+ 06/01/2024 16:46:22 - INFO - adapters.loading - Loading module weights from ./outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_3999/pytorch_model_head.bin
+ 06/01/2024 16:46:22 - INFO - __main__ - Adapter Name = cola
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.0.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.0.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.0.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.0.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.0.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.0.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.0.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.0.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.1.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.1.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.1.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.1.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.1.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.1.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.1.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.1.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.2.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.2.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.2.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.2.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.2.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.2.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.2.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.2.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.3.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.3.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.3.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.3.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.3.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.3.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.3.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.3.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.4.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.4.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.4.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.4.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.4.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.4.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.4.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.4.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.5.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.5.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.5.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.5.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.5.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.5.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.5.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.5.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.6.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.6.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.6.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.6.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.6.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.6.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.6.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.6.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.7.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.7.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.7.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.7.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.7.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.7.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.7.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.7.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.8.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.8.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.8.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.8.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.8.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.8.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.8.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.8.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.9.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.9.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.9.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.9.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.9.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.9.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.9.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.9.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.10.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.10.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.10.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.10.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.10.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.10.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.10.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.10.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.11.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.11.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.11.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.11.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.11.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.11.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.11.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - roberta.encoder.layer.11.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - heads.cola.1.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - heads.cola.1.bias
+ 06/01/2024 16:46:22 - INFO - __main__ - heads.cola.4.weight
+ 06/01/2024 16:46:22 - INFO - __main__ - heads.cola.4.bias
+ 06/01/2024 16:46:29 - INFO - __main__ - Sample 3397 of the training set: {'input_ids': [0, 20907, 324, 15378, 71, 5, 23503, 4, 2], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1], 'labels': 1}.
+ 06/01/2024 16:46:29 - INFO - __main__ - Sample 2366 of the training set: {'input_ids': [0, 133, 5253, 2263, 15965, 18, 38802, 4, 2], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1], 'labels': 1}.
+ 06/01/2024 16:46:29 - INFO - __main__ - Sample 2356 of the training set: {'input_ids': [0, 170, 11122, 119, 4628, 6665, 149, 5, 8429, 4, 2], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'labels': 0}.
+ 06/01/2024 16:51:25 - INFO - __main__ - f_mu shape : torch.Size([1043, 2])
+ 06/01/2024 16:51:25 - INFO - __main__ - f_var shape : torch.Size([1043, 2, 2])
+ 06/01/2024 16:51:25 - INFO - __main__ - tensor([[-2.9014, 2.5060],
+ [-2.5024, 2.3032],
+ [-1.8899, 1.3986],
+ ...,
+ [-2.4088, 2.3170],
+ [-0.4155, 0.3068],
+ [ 0.7414, -0.7206]], device='cuda:0')
+ 06/01/2024 16:51:25 - INFO - __main__ - tensor([[[ 4.7109, 1.5740],
+ [ 1.5740, 4.6761]],
+
+ [[ 3.2397, 2.2303],
+ [ 2.2303, 3.2270]],
+
+ [[ 4.0512, 1.4083],
+ [ 1.4083, 3.8858]],
+
+ ...,
+
+ [[ 4.0612, 3.0709],
+ [ 3.0709, 3.9741]],
+
+ [[ 4.4431, -1.7510],
+ [-1.7510, 4.3497]],
+
+ [[ 3.4469, -0.5383],
+ [-0.5383, 3.3599]]], device='cuda:0')
+ 06/01/2024 16:51:25 - INFO - __main__ - ***** Completed training *****
+ 06/01/2024 16:51:29 - INFO - __main__ - Number of labels detected = 2
+ 06/01/2024 16:51:29 - INFO - __main__ - ***** Starting script *****
+ 06/01/2024 16:51:30 - INFO - adapters.heads.model_mixin - Adding head 'default' with config {'head_type': 'masked_lm', 'vocab_size': 50265, 'embedding_size': 768, 'layers': 2, 'activation_function': 'gelu', 'layer_norm': True, 'bias': True, 'shift_labels': False, 'label2id': None}.
+ 06/01/2024 16:51:31 - INFO - adapters.loading - Loading module configuration from ./outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_5999/adapter_config.json
+ 06/01/2024 16:51:31 - INFO - adapters.configuration.model_adapters_config - Adding adapter 'cola'.
+ 06/01/2024 16:51:31 - INFO - adapters.loading - Loading module weights from ./outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_5999/pytorch_adapter.bin
+ 06/01/2024 16:51:31 - INFO - adapters.loading - Loading module configuration from ./outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_5999/head_config.json
+ 06/01/2024 16:51:31 - INFO - adapters.heads.model_mixin - Adding head 'cola' with config {'head_type': 'classification', 'num_labels': 2, 'layers': 2, 'activation_function': 'tanh', 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'use_pooler': False, 'bias': True, 'dropout_prob': None}.
+ 06/01/2024 16:51:31 - INFO - adapters.loading - Loading module weights from ./outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_5999/pytorch_model_head.bin
+ 06/01/2024 16:51:31 - INFO - __main__ - Adapter Name = cola
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.0.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.0.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.0.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.0.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.0.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.0.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.0.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.0.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.1.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.1.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.1.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.1.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.1.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.1.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.1.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.1.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.2.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.2.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.2.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.2.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.2.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.2.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.2.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.2.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.3.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.3.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.3.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.3.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.3.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.3.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.3.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.3.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.4.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.4.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.4.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.4.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.4.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.4.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.4.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.4.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.5.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.5.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.5.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.5.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.5.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.5.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.5.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.5.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.6.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.6.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.6.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.6.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.6.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.6.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.6.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.6.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.7.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.7.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.7.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.7.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.7.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.7.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.7.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.7.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.8.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.8.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.8.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.8.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.8.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.8.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.8.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.8.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.9.attention.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.9.attention.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.9.attention.output.adapters.cola.adapter_up.weight
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.9.attention.output.adapters.cola.adapter_up.bias
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.9.output.adapters.cola.adapter_down.0.weight
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.9.output.adapters.cola.adapter_down.0.bias
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.9.output.adapters.cola.adapter_up.weight
516
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.9.output.adapters.cola.adapter_up.bias
517
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.10.attention.output.adapters.cola.adapter_down.0.weight
518
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.10.attention.output.adapters.cola.adapter_down.0.bias
519
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.10.attention.output.adapters.cola.adapter_up.weight
520
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.10.attention.output.adapters.cola.adapter_up.bias
521
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.10.output.adapters.cola.adapter_down.0.weight
522
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.10.output.adapters.cola.adapter_down.0.bias
523
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.10.output.adapters.cola.adapter_up.weight
524
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.10.output.adapters.cola.adapter_up.bias
525
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.11.attention.output.adapters.cola.adapter_down.0.weight
526
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.11.attention.output.adapters.cola.adapter_down.0.bias
527
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.11.attention.output.adapters.cola.adapter_up.weight
528
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.11.attention.output.adapters.cola.adapter_up.bias
529
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.11.output.adapters.cola.adapter_down.0.weight
530
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.11.output.adapters.cola.adapter_down.0.bias
531
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.11.output.adapters.cola.adapter_up.weight
532
+ 06/01/2024 16:51:31 - INFO - __main__ - roberta.encoder.layer.11.output.adapters.cola.adapter_up.bias
533
+ 06/01/2024 16:51:31 - INFO - __main__ - heads.cola.1.weight
534
+ 06/01/2024 16:51:31 - INFO - __main__ - heads.cola.1.bias
535
+ 06/01/2024 16:51:31 - INFO - __main__ - heads.cola.4.weight
536
+ 06/01/2024 16:51:31 - INFO - __main__ - heads.cola.4.bias
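(The parameter names above are the adapter bottlenecks and classification head left trainable for this run. A listing like this is typically produced by filtering `named_parameters()` on `requires_grad` — a minimal sketch, where `model` and `logger` stand in for the script's own objects:)

    import logging

    logger = logging.getLogger(__name__)

    def log_trainable_parameters(model):
        # Log every parameter that still receives gradients, i.e. the
        # adapter down/up projections and the 'cola' head weights.
        for name, param in model.named_parameters():
            if param.requires_grad:
                logger.info(name)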
537
+ 06/01/2024 16:51:36 - INFO - __main__ - Sample 3397 of the training set: {'input_ids': [0, 20907, 324, 15378, 71, 5, 23503, 4, 2], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1], 'labels': 1}.
538
+ 06/01/2024 16:51:36 - INFO - __main__ - Sample 2366 of the training set: {'input_ids': [0, 133, 5253, 2263, 15965, 18, 38802, 4, 2], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1], 'labels': 1}.
539
+ 06/01/2024 16:51:36 - INFO - __main__ - Sample 2356 of the training set: {'input_ids': [0, 170, 11122, 119, 4628, 6665, 149, 5, 8429, 4, 2], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'labels': 0}.
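(The logged samples are RoBERTa BPE token ids, delimited by `<s>`/`</s>` — ids 0 and 2. Assuming the run's `roberta-base` tokenizer, they decode back to the raw CoLA sentences for inspection:)

    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("roberta-base")
    # Sample 3397 from the log above; decode() keeps the special tokens,
    # so the sentence comes back wrapped in <s> ... </s>.
    print(tokenizer.decode([0, 20907, 324, 15378, 71, 5, 23503, 4, 2]))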
540
+ 06/01/2024 16:56:33 - INFO - __main__ - f_mu shape : torch.Size([1043, 2])
541
+ 06/01/2024 16:56:33 - INFO - __main__ - f_var shape : torch.Size([1043, 2, 2])
542
+ 06/01/2024 16:56:33 - INFO - __main__ - tensor([[-3.4746, 3.1468],
543
+ [-2.8086, 2.5592],
544
+ [-2.5030, 2.0393],
545
+ ...,
546
+ [-2.9342, 2.7599],
547
+ [-1.3934, 1.2591],
548
+ [ 0.2465, -0.2622]], device='cuda:0')
549
+ 06/01/2024 16:56:33 - INFO - __main__ - tensor([[[ 5.3222, 3.0723],
550
+ [ 3.0723, 5.3490]],
551
+
552
+ [[ 3.9946, 2.6965],
553
+ [ 2.6965, 3.9726]],
554
+
555
+ [[ 4.8015, 1.9848],
556
+ [ 1.9848, 4.7346]],
557
+
558
+ ...,
559
+
560
+ [[ 4.6603, 3.9451],
561
+ [ 3.9451, 4.6069]],
562
+
563
+ [[ 6.5700, -3.0178],
564
+ [-3.0178, 6.3234]],
565
+
566
+ [[ 4.9398, -2.4824],
567
+ [-2.4824, 4.8203]]], device='cuda:0')
568
+ 06/01/2024 16:56:33 - INFO - __main__ - ***** Completed training *****
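(f_mu and f_var are the Laplace predictive mean and full 2x2 covariance of the logits for each of the 1043 validation examples. The `mc ... 1000` markers in the result filenames suggest class probabilities are obtained by Monte Carlo sampling over the logits; a minimal sketch of that step — the sampler below is an assumption, not the script's own code:)

    import torch

    def mc_predictive_probs(f_mu, f_var, n_samples=1000):
        """Average softmax over logits sampled from N(f_mu, f_var).

        f_mu:  (N, C) predictive mean; f_var: (N, C, C) predictive covariance.
        """
        # Per-example Cholesky factor; small jitter guards against
        # numerically semi-definite covariances.
        jitter = 1e-6 * torch.eye(f_var.shape[-1], device=f_var.device)
        scale = torch.linalg.cholesky(f_var + jitter)                   # (N, C, C)
        eps = torch.randn(n_samples, *f_mu.shape, device=f_mu.device)  # (S, N, C)
        logits = f_mu + torch.einsum("ncd,snd->snc", scale, eps)       # (S, N, C)
        return logits.softmax(dim=-1).mean(dim=0)                      # (N, C)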
569
+ 06/01/2024 16:56:37 - INFO - __main__ - Number of labels detected = 2
570
+ 06/01/2024 16:56:37 - INFO - __main__ - ***** Starting script *****
571
+ 06/01/2024 16:56:38 - INFO - adapters.heads.model_mixin - Adding head 'default' with config {'head_type': 'masked_lm', 'vocab_size': 50265, 'embedding_size': 768, 'layers': 2, 'activation_function': 'gelu', 'layer_norm': True, 'bias': True, 'shift_labels': False, 'label2id': None}.
572
+ 06/01/2024 16:56:39 - INFO - adapters.loading - Loading module configuration from ./outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_7999/adapter_config.json
573
+ 06/01/2024 16:56:39 - INFO - adapters.configuration.model_adapters_config - Adding adapter 'cola'.
574
+ 06/01/2024 16:56:39 - INFO - adapters.loading - Loading module weights from ./outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_7999/pytorch_adapter.bin
575
+ 06/01/2024 16:56:39 - INFO - adapters.loading - Loading module configuration from ./outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_7999/head_config.json
576
+ 06/01/2024 16:56:39 - INFO - adapters.heads.model_mixin - Adding head 'cola' with config {'head_type': 'classification', 'num_labels': 2, 'layers': 2, 'activation_function': 'tanh', 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'use_pooler': False, 'bias': True, 'dropout_prob': None}.
577
+ 06/01/2024 16:56:39 - INFO - adapters.loading - Loading module weights from ./outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_7999/pytorch_model_head.bin
578
+ 06/01/2024 16:56:39 - INFO - __main__ - Adapter Name = cola
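(The load sequence above — adapter_config.json, pytorch_adapter.bin, head_config.json, pytorch_model_head.bin — is what the `adapters` library emits when restoring a saved checkpoint directory. Roughly, as a hedged sketch rather than the script itself:)

    from adapters import AutoAdapterModel

    model = AutoAdapterModel.from_pretrained("roberta-base")
    # load_adapter restores the adapter weights and, when present, the
    # prediction head saved alongside them; it returns the adapter name.
    ckpt = "./outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_7999"
    name = model.load_adapter(ckpt)   # -> "cola"
    model.set_active_adapters(name)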
579
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.0.attention.output.adapters.cola.adapter_down.0.weight
580
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.0.attention.output.adapters.cola.adapter_down.0.bias
581
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.0.attention.output.adapters.cola.adapter_up.weight
582
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.0.attention.output.adapters.cola.adapter_up.bias
583
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.0.output.adapters.cola.adapter_down.0.weight
584
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.0.output.adapters.cola.adapter_down.0.bias
585
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.0.output.adapters.cola.adapter_up.weight
586
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.0.output.adapters.cola.adapter_up.bias
587
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.1.attention.output.adapters.cola.adapter_down.0.weight
588
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.1.attention.output.adapters.cola.adapter_down.0.bias
589
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.1.attention.output.adapters.cola.adapter_up.weight
590
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.1.attention.output.adapters.cola.adapter_up.bias
591
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.1.output.adapters.cola.adapter_down.0.weight
592
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.1.output.adapters.cola.adapter_down.0.bias
593
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.1.output.adapters.cola.adapter_up.weight
594
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.1.output.adapters.cola.adapter_up.bias
595
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.2.attention.output.adapters.cola.adapter_down.0.weight
596
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.2.attention.output.adapters.cola.adapter_down.0.bias
597
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.2.attention.output.adapters.cola.adapter_up.weight
598
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.2.attention.output.adapters.cola.adapter_up.bias
599
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.2.output.adapters.cola.adapter_down.0.weight
600
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.2.output.adapters.cola.adapter_down.0.bias
601
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.2.output.adapters.cola.adapter_up.weight
602
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.2.output.adapters.cola.adapter_up.bias
603
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.3.attention.output.adapters.cola.adapter_down.0.weight
604
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.3.attention.output.adapters.cola.adapter_down.0.bias
605
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.3.attention.output.adapters.cola.adapter_up.weight
606
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.3.attention.output.adapters.cola.adapter_up.bias
607
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.3.output.adapters.cola.adapter_down.0.weight
608
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.3.output.adapters.cola.adapter_down.0.bias
609
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.3.output.adapters.cola.adapter_up.weight
610
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.3.output.adapters.cola.adapter_up.bias
611
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.4.attention.output.adapters.cola.adapter_down.0.weight
612
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.4.attention.output.adapters.cola.adapter_down.0.bias
613
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.4.attention.output.adapters.cola.adapter_up.weight
614
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.4.attention.output.adapters.cola.adapter_up.bias
615
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.4.output.adapters.cola.adapter_down.0.weight
616
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.4.output.adapters.cola.adapter_down.0.bias
617
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.4.output.adapters.cola.adapter_up.weight
618
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.4.output.adapters.cola.adapter_up.bias
619
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.5.attention.output.adapters.cola.adapter_down.0.weight
620
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.5.attention.output.adapters.cola.adapter_down.0.bias
621
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.5.attention.output.adapters.cola.adapter_up.weight
622
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.5.attention.output.adapters.cola.adapter_up.bias
623
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.5.output.adapters.cola.adapter_down.0.weight
624
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.5.output.adapters.cola.adapter_down.0.bias
625
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.5.output.adapters.cola.adapter_up.weight
626
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.5.output.adapters.cola.adapter_up.bias
627
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.6.attention.output.adapters.cola.adapter_down.0.weight
628
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.6.attention.output.adapters.cola.adapter_down.0.bias
629
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.6.attention.output.adapters.cola.adapter_up.weight
630
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.6.attention.output.adapters.cola.adapter_up.bias
631
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.6.output.adapters.cola.adapter_down.0.weight
632
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.6.output.adapters.cola.adapter_down.0.bias
633
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.6.output.adapters.cola.adapter_up.weight
634
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.6.output.adapters.cola.adapter_up.bias
635
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.7.attention.output.adapters.cola.adapter_down.0.weight
636
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.7.attention.output.adapters.cola.adapter_down.0.bias
637
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.7.attention.output.adapters.cola.adapter_up.weight
638
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.7.attention.output.adapters.cola.adapter_up.bias
639
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.7.output.adapters.cola.adapter_down.0.weight
640
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.7.output.adapters.cola.adapter_down.0.bias
641
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.7.output.adapters.cola.adapter_up.weight
642
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.7.output.adapters.cola.adapter_up.bias
643
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.8.attention.output.adapters.cola.adapter_down.0.weight
644
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.8.attention.output.adapters.cola.adapter_down.0.bias
645
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.8.attention.output.adapters.cola.adapter_up.weight
646
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.8.attention.output.adapters.cola.adapter_up.bias
647
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.8.output.adapters.cola.adapter_down.0.weight
648
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.8.output.adapters.cola.adapter_down.0.bias
649
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.8.output.adapters.cola.adapter_up.weight
650
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.8.output.adapters.cola.adapter_up.bias
651
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.9.attention.output.adapters.cola.adapter_down.0.weight
652
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.9.attention.output.adapters.cola.adapter_down.0.bias
653
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.9.attention.output.adapters.cola.adapter_up.weight
654
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.9.attention.output.adapters.cola.adapter_up.bias
655
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.9.output.adapters.cola.adapter_down.0.weight
656
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.9.output.adapters.cola.adapter_down.0.bias
657
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.9.output.adapters.cola.adapter_up.weight
658
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.9.output.adapters.cola.adapter_up.bias
659
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.10.attention.output.adapters.cola.adapter_down.0.weight
660
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.10.attention.output.adapters.cola.adapter_down.0.bias
661
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.10.attention.output.adapters.cola.adapter_up.weight
662
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.10.attention.output.adapters.cola.adapter_up.bias
663
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.10.output.adapters.cola.adapter_down.0.weight
664
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.10.output.adapters.cola.adapter_down.0.bias
665
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.10.output.adapters.cola.adapter_up.weight
666
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.10.output.adapters.cola.adapter_up.bias
667
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.11.attention.output.adapters.cola.adapter_down.0.weight
668
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.11.attention.output.adapters.cola.adapter_down.0.bias
669
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.11.attention.output.adapters.cola.adapter_up.weight
670
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.11.attention.output.adapters.cola.adapter_up.bias
671
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.11.output.adapters.cola.adapter_down.0.weight
672
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.11.output.adapters.cola.adapter_down.0.bias
673
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.11.output.adapters.cola.adapter_up.weight
674
+ 06/01/2024 16:56:39 - INFO - __main__ - roberta.encoder.layer.11.output.adapters.cola.adapter_up.bias
675
+ 06/01/2024 16:56:39 - INFO - __main__ - heads.cola.1.weight
676
+ 06/01/2024 16:56:39 - INFO - __main__ - heads.cola.1.bias
677
+ 06/01/2024 16:56:39 - INFO - __main__ - heads.cola.4.weight
678
+ 06/01/2024 16:56:39 - INFO - __main__ - heads.cola.4.bias
679
+ 06/01/2024 16:56:46 - INFO - __main__ - Sample 3397 of the training set: {'input_ids': [0, 20907, 324, 15378, 71, 5, 23503, 4, 2], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1], 'labels': 1}.
680
+ 06/01/2024 16:56:46 - INFO - __main__ - Sample 2366 of the training set: {'input_ids': [0, 133, 5253, 2263, 15965, 18, 38802, 4, 2], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1], 'labels': 1}.
681
+ 06/01/2024 16:56:46 - INFO - __main__ - Sample 2356 of the training set: {'input_ids': [0, 170, 11122, 119, 4628, 6665, 149, 5, 8429, 4, 2], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'labels': 0}.
682
+ 06/01/2024 17:01:46 - INFO - __main__ - f_mu shape : torch.Size([1043, 2])
683
+ 06/01/2024 17:01:46 - INFO - __main__ - f_var shape : torch.Size([1043, 2, 2])
684
+ 06/01/2024 17:01:46 - INFO - __main__ - tensor([[-3.4398, 3.1109],
685
+ [-3.0024, 2.8197],
686
+ [-2.2278, 1.7899],
687
+ ...,
688
+ [-3.3583, 3.1739],
689
+ [-1.3268, 1.2154],
690
+ [ 0.2869, -0.3007]], device='cuda:0')
691
+ 06/01/2024 17:01:46 - INFO - __main__ - tensor([[[ 6.1646, 1.8741],
692
+ [ 1.8741, 6.1547]],
693
+
694
+ [[ 4.4223, 2.3615],
695
+ [ 2.3615, 4.4132]],
696
+
697
+ [[ 5.6965, 0.1854],
698
+ [ 0.1854, 5.5640]],
699
+
700
+ ...,
701
+
702
+ [[ 5.0143, 4.1301],
703
+ [ 4.1301, 4.9697]],
704
+
705
+ [[ 7.8568, -4.5025],
706
+ [-4.5025, 7.7135]],
707
+
708
+ [[ 5.8677, -3.4473],
709
+ [-3.4474, 5.7682]]], device='cuda:0')
710
+ 06/01/2024 17:01:46 - INFO - __main__ - ***** Completed training *****
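(The `kron`, `all`, and `homo` markers in the saved filenames are consistent with a Kronecker-factored (KFAC) Laplace approximation over all trainable weights, as exposed by the laplace-torch library. If that is the backend, the fit/predict loop behind each block of output above would look roughly like the following — an assumption based on the filenames, not confirmed by the log; `model`, `train_loader`, and `x_val` are stand-ins:)

    from laplace import Laplace

    # model / train_loader / x_val are placeholders for the run's objects.
    la = Laplace(model, likelihood="classification",
                 subset_of_weights="all", hessian_structure="kron")
    la.fit(train_loader)           # Kronecker-factored curvature at the MAP
    la.optimize_prior_precision()  # post-hoc prior-precision tuning
    # GLM predictive: linearised network gives a Gaussian over logits
    # (f_mu, f_var), resolved here by MC sampling with 1000 draws.
    probs = la(x_val, pred_type="glm", link_approx="mc", n_samples=1000)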
711
+ 06/01/2024 17:01:51 - INFO - __main__ - Number of labels detected = 2
712
+ 06/01/2024 17:01:51 - INFO - __main__ - ***** Starting script *****
713
+ 06/01/2024 17:01:52 - INFO - adapters.heads.model_mixin - Adding head 'default' with config {'head_type': 'masked_lm', 'vocab_size': 50265, 'embedding_size': 768, 'layers': 2, 'activation_function': 'gelu', 'layer_norm': True, 'bias': True, 'shift_labels': False, 'label2id': None}.
714
+ 06/01/2024 17:01:53 - INFO - adapters.loading - Loading module configuration from ./outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_9999/adapter_config.json
715
+ 06/01/2024 17:01:53 - INFO - adapters.configuration.model_adapters_config - Adding adapter 'cola'.
716
+ 06/01/2024 17:01:53 - INFO - adapters.loading - Loading module weights from ./outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_9999/pytorch_adapter.bin
717
+ 06/01/2024 17:01:53 - INFO - adapters.loading - Loading module configuration from ./outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_9999/head_config.json
718
+ 06/01/2024 17:01:53 - INFO - adapters.heads.model_mixin - Adding head 'cola' with config {'head_type': 'classification', 'num_labels': 2, 'layers': 2, 'activation_function': 'tanh', 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'use_pooler': False, 'bias': True, 'dropout_prob': None}.
719
+ 06/01/2024 17:01:53 - INFO - adapters.loading - Loading module weights from ./outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_9999/pytorch_model_head.bin
720
+ 06/01/2024 17:01:53 - INFO - __main__ - Adapter Name = cola
721
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.0.attention.output.adapters.cola.adapter_down.0.weight
722
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.0.attention.output.adapters.cola.adapter_down.0.bias
723
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.0.attention.output.adapters.cola.adapter_up.weight
724
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.0.attention.output.adapters.cola.adapter_up.bias
725
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.0.output.adapters.cola.adapter_down.0.weight
726
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.0.output.adapters.cola.adapter_down.0.bias
727
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.0.output.adapters.cola.adapter_up.weight
728
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.0.output.adapters.cola.adapter_up.bias
729
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.1.attention.output.adapters.cola.adapter_down.0.weight
730
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.1.attention.output.adapters.cola.adapter_down.0.bias
731
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.1.attention.output.adapters.cola.adapter_up.weight
732
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.1.attention.output.adapters.cola.adapter_up.bias
733
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.1.output.adapters.cola.adapter_down.0.weight
734
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.1.output.adapters.cola.adapter_down.0.bias
735
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.1.output.adapters.cola.adapter_up.weight
736
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.1.output.adapters.cola.adapter_up.bias
737
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.2.attention.output.adapters.cola.adapter_down.0.weight
738
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.2.attention.output.adapters.cola.adapter_down.0.bias
739
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.2.attention.output.adapters.cola.adapter_up.weight
740
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.2.attention.output.adapters.cola.adapter_up.bias
741
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.2.output.adapters.cola.adapter_down.0.weight
742
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.2.output.adapters.cola.adapter_down.0.bias
743
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.2.output.adapters.cola.adapter_up.weight
744
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.2.output.adapters.cola.adapter_up.bias
745
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.3.attention.output.adapters.cola.adapter_down.0.weight
746
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.3.attention.output.adapters.cola.adapter_down.0.bias
747
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.3.attention.output.adapters.cola.adapter_up.weight
748
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.3.attention.output.adapters.cola.adapter_up.bias
749
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.3.output.adapters.cola.adapter_down.0.weight
750
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.3.output.adapters.cola.adapter_down.0.bias
751
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.3.output.adapters.cola.adapter_up.weight
752
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.3.output.adapters.cola.adapter_up.bias
753
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.4.attention.output.adapters.cola.adapter_down.0.weight
754
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.4.attention.output.adapters.cola.adapter_down.0.bias
755
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.4.attention.output.adapters.cola.adapter_up.weight
756
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.4.attention.output.adapters.cola.adapter_up.bias
757
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.4.output.adapters.cola.adapter_down.0.weight
758
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.4.output.adapters.cola.adapter_down.0.bias
759
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.4.output.adapters.cola.adapter_up.weight
760
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.4.output.adapters.cola.adapter_up.bias
761
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.5.attention.output.adapters.cola.adapter_down.0.weight
762
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.5.attention.output.adapters.cola.adapter_down.0.bias
763
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.5.attention.output.adapters.cola.adapter_up.weight
764
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.5.attention.output.adapters.cola.adapter_up.bias
765
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.5.output.adapters.cola.adapter_down.0.weight
766
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.5.output.adapters.cola.adapter_down.0.bias
767
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.5.output.adapters.cola.adapter_up.weight
768
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.5.output.adapters.cola.adapter_up.bias
769
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.6.attention.output.adapters.cola.adapter_down.0.weight
770
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.6.attention.output.adapters.cola.adapter_down.0.bias
771
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.6.attention.output.adapters.cola.adapter_up.weight
772
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.6.attention.output.adapters.cola.adapter_up.bias
773
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.6.output.adapters.cola.adapter_down.0.weight
774
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.6.output.adapters.cola.adapter_down.0.bias
775
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.6.output.adapters.cola.adapter_up.weight
776
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.6.output.adapters.cola.adapter_up.bias
777
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.7.attention.output.adapters.cola.adapter_down.0.weight
778
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.7.attention.output.adapters.cola.adapter_down.0.bias
779
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.7.attention.output.adapters.cola.adapter_up.weight
780
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.7.attention.output.adapters.cola.adapter_up.bias
781
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.7.output.adapters.cola.adapter_down.0.weight
782
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.7.output.adapters.cola.adapter_down.0.bias
783
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.7.output.adapters.cola.adapter_up.weight
784
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.7.output.adapters.cola.adapter_up.bias
785
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.8.attention.output.adapters.cola.adapter_down.0.weight
786
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.8.attention.output.adapters.cola.adapter_down.0.bias
787
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.8.attention.output.adapters.cola.adapter_up.weight
788
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.8.attention.output.adapters.cola.adapter_up.bias
789
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.8.output.adapters.cola.adapter_down.0.weight
790
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.8.output.adapters.cola.adapter_down.0.bias
791
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.8.output.adapters.cola.adapter_up.weight
792
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.8.output.adapters.cola.adapter_up.bias
793
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.9.attention.output.adapters.cola.adapter_down.0.weight
794
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.9.attention.output.adapters.cola.adapter_down.0.bias
795
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.9.attention.output.adapters.cola.adapter_up.weight
796
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.9.attention.output.adapters.cola.adapter_up.bias
797
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.9.output.adapters.cola.adapter_down.0.weight
798
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.9.output.adapters.cola.adapter_down.0.bias
799
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.9.output.adapters.cola.adapter_up.weight
800
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.9.output.adapters.cola.adapter_up.bias
801
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.10.attention.output.adapters.cola.adapter_down.0.weight
802
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.10.attention.output.adapters.cola.adapter_down.0.bias
803
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.10.attention.output.adapters.cola.adapter_up.weight
804
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.10.attention.output.adapters.cola.adapter_up.bias
805
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.10.output.adapters.cola.adapter_down.0.weight
806
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.10.output.adapters.cola.adapter_down.0.bias
807
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.10.output.adapters.cola.adapter_up.weight
808
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.10.output.adapters.cola.adapter_up.bias
809
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.11.attention.output.adapters.cola.adapter_down.0.weight
810
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.11.attention.output.adapters.cola.adapter_down.0.bias
811
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.11.attention.output.adapters.cola.adapter_up.weight
812
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.11.attention.output.adapters.cola.adapter_up.bias
813
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.11.output.adapters.cola.adapter_down.0.weight
814
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.11.output.adapters.cola.adapter_down.0.bias
815
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.11.output.adapters.cola.adapter_up.weight
816
+ 06/01/2024 17:01:53 - INFO - __main__ - roberta.encoder.layer.11.output.adapters.cola.adapter_up.bias
817
+ 06/01/2024 17:01:53 - INFO - __main__ - heads.cola.1.weight
818
+ 06/01/2024 17:01:53 - INFO - __main__ - heads.cola.1.bias
819
+ 06/01/2024 17:01:53 - INFO - __main__ - heads.cola.4.weight
820
+ 06/01/2024 17:01:53 - INFO - __main__ - heads.cola.4.bias
821
+ 06/01/2024 17:01:58 - INFO - __main__ - Sample 3397 of the training set: {'input_ids': [0, 20907, 324, 15378, 71, 5, 23503, 4, 2], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1], 'labels': 1}.
822
+ 06/01/2024 17:01:58 - INFO - __main__ - Sample 2366 of the training set: {'input_ids': [0, 133, 5253, 2263, 15965, 18, 38802, 4, 2], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1], 'labels': 1}.
823
+ 06/01/2024 17:01:58 - INFO - __main__ - Sample 2356 of the training set: {'input_ids': [0, 170, 11122, 119, 4628, 6665, 149, 5, 8429, 4, 2], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'labels': 0}.
824
+ 06/01/2024 17:06:57 - INFO - __main__ - f_mu shape : torch.Size([1043, 2])
825
+ 06/01/2024 17:06:57 - INFO - __main__ - f_var shape : torch.Size([1043, 2, 2])
826
+ 06/01/2024 17:06:57 - INFO - __main__ - tensor([[-3.7329, 3.4035],
827
+ [-3.1398, 2.9650],
828
+ [-2.3013, 1.8488],
829
+ ...,
830
+ [-3.4471, 3.2350],
831
+ [-1.5430, 1.4320],
832
+ [ 0.2084, -0.2222]], device='cuda:0')
833
+ 06/01/2024 17:06:57 - INFO - __main__ - tensor([[[ 5.7723, 2.2327],
834
+ [ 2.2327, 5.7764]],
835
+
836
+ [[ 4.3424, 2.2996],
837
+ [ 2.2996, 4.3301]],
838
+
839
+ [[ 5.4277, 0.2863],
840
+ [ 0.2863, 5.3133]],
841
+
842
+ ...,
843
+
844
+ [[ 4.8488, 4.0284],
845
+ [ 4.0284, 4.8438]],
846
+
847
+ [[ 8.8298, -5.3298],
848
+ [-5.3298, 8.5671]],
849
+
850
+ [[ 7.7286, -5.2615],
851
+ [-5.2615, 7.4960]]], device='cuda:0')
852
+ 06/01/2024 17:06:57 - INFO - __main__ - ***** Completed training *****
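(Each run also writes its predictive moments to outputs_laplace/ — the .pt files added below. Once the LFS objects are fetched, they reload directly with torch.load; e.g. for the final checkpoint, with shapes as logged above:)

    import torch

    step_dir = "outputs_laplace/cola/roberta-base_adapterstrain_val_0.0001_65/step_9999"
    f_mu = torch.load(f"{step_dir}/f_mu_kron_all_homo_1000.pt")    # (1043, 2)
    f_var = torch.load(f"{step_dir}/f_var_kron_all_homo_1000.pt")  # (1043, 2, 2)
    prior_precision = torch.load(f"{step_dir}/prior_precision_kron_all_homo_1000.pt")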
outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_0/all_results_la_kron_all_homo_mc_corr_1000.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:480135150e34ff752b0c3cf1ec61e7f459fd3f7afcf12ba47c2445632fc248a0
3
+ size 51
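(All of the artifact files in this commit are checked in as Git LFS pointers rather than raw bytes: three lines giving the pointer-spec version, the sha256 of the actual content, and its size in bytes. A small illustrative helper to read one:)

    def parse_lfs_pointer(path):
        """Read a Git LFS pointer file into a dict of its key/value fields."""
        fields = {}
        with open(path) as fh:
            for line in fh:
                key, _, value = line.strip().partition(" ")
                fields[key] = value
        return fields

    # parse_lfs_pointer(".../step_0/all_results_la_kron_all_homo_mc_corr_1000.json")
    # -> {'version': 'https://git-lfs.github.com/spec/v1',
    #     'oid': 'sha256:4801...', 'size': '51'}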
outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_0/eval_res_la_kron_all_homo_mc_corr_1000.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9102ac9e50713c5361e55a6bd3643db7418aed09b18ba9587a750611c3bda5fd
3
+ size 175616
outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_0/gpu_stats_la.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a86b9d138775fc34b58f2c18106e329a42320e8942066266294e5b6ed7254584
3
+ size 6135
outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_1999/all_results_la_kron_all_homo_mc_corr_1000.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e530623261b6da7b38a5e31449b7078fbe03cb4acd4ff65bb1e2ee6be3a1a9a5
3
+ size 49
outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_1999/eval_res_la_kron_all_homo_mc_corr_1000.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:103f2e7ab9a75539f53e108b3b26bdb61a5889128a667ca0935aeba8ceccecf7
3
+ size 176604
outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_1999/gpu_stats_la.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f8ee769754febe202fba912966fbc8d3152f387b540d6e73d19bab49835629c2
3
+ size 6152
outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_3999/all_results_la_kron_all_homo_mc_corr_1000.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c47c3bdaf4ecb9eafb7988dd3218044fa862ed3075b6208c89c7ec10fb5cb5a8
3
+ size 48
outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_3999/eval_res_la_kron_all_homo_mc_corr_1000.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fe6622a5d8f6608e720726acf77a8568c0c01a6c7f1754d01e77064b74b0e842
3
+ size 176758
outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_3999/gpu_stats_la.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:493e961a58ba8a2cab47b8c09001ffb8b22a5032c51b989ef6cc5cb2e63932fb
3
+ size 6158
outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_5999/all_results_la_kron_all_homo_mc_corr_1000.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:55526ff89c067a56eecc42ed2395ef49f2486213602b3be1cc425cc4244fc31f
3
+ size 49
outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_5999/eval_res_la_kron_all_homo_mc_corr_1000.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:24b66cceeabed856abeee32a1de1849eec342b555d24828db1320ee26623975f
3
+ size 177160
outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_5999/gpu_stats_la.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3f4df1adeeb1eeac7baaa7522c0177d65b30819b7c65d0d42fb490beccdcccf7
3
+ size 6169
outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_7999/all_results_la_kron_all_homo_mc_corr_1000.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e170ffc5ee503c52812eeb7bb0dce4096a00b2c81ca1e876d3e9b2b70b36bc21
3
+ size 49
outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_7999/eval_res_la_kron_all_homo_mc_corr_1000.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e3d7ad296bab791e6e3ba160f5b9fb4ebd67e806e7402601511aff2ca3b7eec9
3
+ size 177306
outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_7999/gpu_stats_la.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1d2b2ff7b86d3337b7af274d74cee554b9e482bf695f820ef8e2c475b44f9552
3
+ size 6174
outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_9999/all_results_la_kron_all_homo_mc_corr_1000.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fec785bd27596c99b26c8a82c6be1d4f92f318d5eab5a5156fc6dea42fe10291
3
+ size 49
outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_9999/eval_res_la_kron_all_homo_mc_corr_1000.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:29d407dda45cd933ac81fc380fd31886bc778023d87183e5fd67f1216f36096f
3
+ size 177234
outputs/cola/roberta-base_adapterstrain_val_0.0001_65_8_10000/step_9999/gpu_stats_la.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:47d04b1a7214e28fdc072ead835b9ef8c8a96457ca3d2c1bef800cb9913659ad
3
+ size 6183
outputs_laplace/cola/roberta-base_adapterstrain_val_0.0001_65/step_0/f_mu_kron_all_homo_1000.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2dba4c65b068964bb2772d17df2b66137c1c4bfd32377fc736fb71802a6b9243
3
+ size 9644
outputs_laplace/cola/roberta-base_adapterstrain_val_0.0001_65/step_0/f_var_kron_all_homo_1000.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:767b941f563e1626220b7dc42df4546b6bb9a52ccabb5949093180e7364d5768
3
+ size 18033
outputs_laplace/cola/roberta-base_adapterstrain_val_0.0001_65/step_0/prior_precision_kron_all_homo_1000.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:87fd3b6c08c90500b45b27e01c7f54fcb31712fe77452516decc18c13fdb6945
3
+ size 1379
outputs_laplace/cola/roberta-base_adapterstrain_val_0.0001_65/step_1999/f_mu_kron_all_homo_1000.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5a9d9e6f9c7182bc959b3768e3fad8e18dd4bc2c4f1bb0162729ccb73cea348c
3
+ size 9644
outputs_laplace/cola/roberta-base_adapterstrain_val_0.0001_65/step_1999/f_var_kron_all_homo_1000.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b76a1c659fcba96143939625e13c161f548bd44a7f736e5fa681aff6a3fa5ef5
3
+ size 18033
outputs_laplace/cola/roberta-base_adapterstrain_val_0.0001_65/step_1999/prior_precision_kron_all_homo_1000.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ec47195050a4dd67d1bc4d17f878a7d6f17716f3c262db3b10cf2f2d04cc3732
3
+ size 1379
outputs_laplace/cola/roberta-base_adapterstrain_val_0.0001_65/step_3999/f_mu_kron_all_homo_1000.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4fdd8724a01057e633ae17e8e45e0a96a37f680a4232731ace50bfe146221472
3
+ size 9644
outputs_laplace/cola/roberta-base_adapterstrain_val_0.0001_65/step_3999/f_var_kron_all_homo_1000.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:61491bca0e0d66cb63c5c22214eb26bf5c83ca8e6e7b99ba8a3b9de8a8426ce6
3
+ size 18033
outputs_laplace/cola/roberta-base_adapterstrain_val_0.0001_65/step_3999/prior_precision_kron_all_homo_1000.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:358971ce4c3814068c9cec0a3cca64fdb516893d7dba64f064a9d0e157ee3f0e
3
+ size 1379
outputs_laplace/cola/roberta-base_adapterstrain_val_0.0001_65/step_5999/f_mu_kron_all_homo_1000.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8f8cc64a2d550bbf0686eab9677fe3b07bdce526b487bdaf6ce9e174f63f5d08
3
+ size 9644
outputs_laplace/cola/roberta-base_adapterstrain_val_0.0001_65/step_5999/f_var_kron_all_homo_1000.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0197405751841825e7ed66773ac1be8917ee3b596e8ebb632299956e1c96965c
3
+ size 18033
outputs_laplace/cola/roberta-base_adapterstrain_val_0.0001_65/step_5999/prior_precision_kron_all_homo_1000.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:02da59b5bd8c7a90f5eed4b3ad00529ccec35bb20b76f38c7f39f926cf991512
3
+ size 1379
outputs_laplace/cola/roberta-base_adapterstrain_val_0.0001_65/step_7999/f_mu_kron_all_homo_1000.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dd03dc8f5e74719536e8c492b1f1f7bd8f6f04f8bb2ec593fc764ec4457767ff
3
+ size 9644
outputs_laplace/cola/roberta-base_adapterstrain_val_0.0001_65/step_7999/f_var_kron_all_homo_1000.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0202a7c4076fced25ecca04c1d0247109cbeb472a373e42f58e4c4f1a3b58cc7
3
+ size 18033
outputs_laplace/cola/roberta-base_adapterstrain_val_0.0001_65/step_7999/prior_precision_kron_all_homo_1000.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2c383fe4531db41c86ef0fe6245c18f0b09206840c487808dc342b35c07fdcfb
3
+ size 1379
outputs_laplace/cola/roberta-base_adapterstrain_val_0.0001_65/step_9999/f_mu_kron_all_homo_1000.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d6fb3645b3053e55620cd79474e6aa8201b864083db449a2077ba6a6f40bd75d
3
+ size 9644
outputs_laplace/cola/roberta-base_adapterstrain_val_0.0001_65/step_9999/f_var_kron_all_homo_1000.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f8f6e3659f6aca454781579ee37da6a9c508cb55854b13bc45c9ee3a3c4c0d72
3
+ size 18033
outputs_laplace/cola/roberta-base_adapterstrain_val_0.0001_65/step_9999/prior_precision_kron_all_homo_1000.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cbfaee4ccef21d438256a6b91bce959ffda362c38ce035634657562f8fd19b49
3
+ size 1379