Upload LoRA model and probe head
Browse files
- .gitattributes +4 -0
- qwen2_5_7b_lora_lambda_kl_0_05/README.md +202 -0
- qwen2_5_7b_lora_lambda_kl_0_05/adapter_config.json +222 -0
- qwen2_5_7b_lora_lambda_kl_0_05/adapter_model.safetensors +3 -0
- qwen2_5_7b_lora_lambda_kl_0_05/added_tokens.json +24 -0
- qwen2_5_7b_lora_lambda_kl_0_05/eval_metrics.jsonl +6 -0
- qwen2_5_7b_lora_lambda_kl_0_05/eval_metrics_qwen2_5_7b_lora_lambda_kl=0.5.json +132 -0
- qwen2_5_7b_lora_lambda_kl_0_05/llama3_1_8b_trivia_qa_test_roc_curves.png +3 -0
- qwen2_5_7b_lora_lambda_kl_0_05/merges.txt +0 -0
- qwen2_5_7b_lora_lambda_kl_0_05/probe_config.json +5 -0
- qwen2_5_7b_lora_lambda_kl_0_05/probe_head.bin +3 -0
- qwen2_5_7b_lora_lambda_kl_0_05/qwen_2_5_7b_longfact_augmented_test_roc_curves.png +3 -0
- qwen2_5_7b_lora_lambda_kl_0_05/qwen_2_5_7b_longfact_test_roc_curves.png +3 -0
- qwen2_5_7b_lora_lambda_kl_0_05/special_tokens_map.json +31 -0
- qwen2_5_7b_lora_lambda_kl_0_05/tokenizer.json +3 -0
- qwen2_5_7b_lora_lambda_kl_0_05/tokenizer_config.json +208 -0
- qwen2_5_7b_lora_lambda_kl_0_05/training_config.json +77 -0
- qwen2_5_7b_lora_lambda_kl_0_05/vocab.json +0 -0
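
Together these files package a PEFT LoRA adapter for Qwen/Qwen2.5-7B-Instruct, the matching tokenizer files, evaluation metrics and ROC curves, and a small probe head that reads hidden states at decoder layer 26 (see probe_config.json below). A minimal loading sketch, assuming a local checkout of the repo; the probe head's architecture is not documented in this commit, so it is only inspected here:

```python
# Minimal sketch, assuming the repo is checked out locally under
# qwen2_5_7b_lora_lambda_kl_0_05/. The adapter and tokenizer loading are
# standard transformers/peft calls; how the probe head is wired up is an
# assumption explored further below.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

ADAPTER_DIR = "qwen2_5_7b_lora_lambda_kl_0_05"

base = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2.5-7B-Instruct", torch_dtype=torch.bfloat16
)
model = PeftModel.from_pretrained(base, ADAPTER_DIR)    # adapter_config.json + adapter_model.safetensors
tokenizer = AutoTokenizer.from_pretrained(ADAPTER_DIR)  # tokenizer files shipped in this commit

# probe_head.bin is assumed to be a torch-saved state dict; inspect it.
state = torch.load(f"{ADAPTER_DIR}/probe_head.bin", map_location="cpu")
print({k: tuple(v.shape) for k, v in state.items()})
```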
.gitattributes CHANGED
@@ -86,3 +86,7 @@ mistral_small_24b_linear/llama3_1_8b_validation_roc_curves.png filter=lfs diff=lfs merge=lfs -text
 mistral_small_24b_linear/mistral_small_24b_longfact_augmented_test_roc_curves.png filter=lfs diff=lfs merge=lfs -text
 mistral_small_24b_linear/mistral_small_24b_longfact_test_roc_curves.png filter=lfs diff=lfs merge=lfs -text
 mistral_small_24b_linear/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+qwen2_5_7b_lora_lambda_kl_0_05/llama3_1_8b_trivia_qa_test_roc_curves.png filter=lfs diff=lfs merge=lfs -text
+qwen2_5_7b_lora_lambda_kl_0_05/qwen_2_5_7b_longfact_augmented_test_roc_curves.png filter=lfs diff=lfs merge=lfs -text
+qwen2_5_7b_lora_lambda_kl_0_05/qwen_2_5_7b_longfact_test_roc_curves.png filter=lfs diff=lfs merge=lfs -text
+qwen2_5_7b_lora_lambda_kl_0_05/tokenizer.json filter=lfs diff=lfs merge=lfs -text
qwen2_5_7b_lora_lambda_kl_0_05/README.md
ADDED
@@ -0,0 +1,202 @@
---
base_model: Qwen/Qwen2.5-7B-Instruct
library_name: peft
---

# Model Card for Model ID

<!-- Provide a quick summary of what the model is/does. -->

## Model Details

### Model Description

<!-- Provide a longer summary of what this model is. -->

- **Developed by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]

### Model Sources [optional]

<!-- Provide the basic links for the model. -->

- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]

## Uses

<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->

### Direct Use

<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->

[More Information Needed]

### Downstream Use [optional]

<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->

[More Information Needed]

### Out-of-Scope Use

<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->

[More Information Needed]

## Bias, Risks, and Limitations

<!-- This section is meant to convey both technical and sociotechnical limitations. -->

[More Information Needed]

### Recommendations

<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->

Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.

## How to Get Started with the Model

Use the code below to get started with the model.

[More Information Needed]

## Training Details

### Training Data

<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->

[More Information Needed]

### Training Procedure

<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->

#### Preprocessing [optional]

[More Information Needed]

#### Training Hyperparameters

- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->

#### Speeds, Sizes, Times [optional]

<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->

[More Information Needed]

## Evaluation

<!-- This section describes the evaluation protocols and provides the results. -->

### Testing Data, Factors & Metrics

#### Testing Data

<!-- This should link to a Dataset Card if possible. -->

[More Information Needed]

#### Factors

<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->

[More Information Needed]

#### Metrics

<!-- These are the evaluation metrics being used, ideally with a description of why. -->

[More Information Needed]

### Results

[More Information Needed]

#### Summary

## Model Examination [optional]

<!-- Relevant interpretability work for the model goes here -->

[More Information Needed]

## Environmental Impact

<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->

Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).

- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]

## Technical Specifications [optional]

### Model Architecture and Objective

[More Information Needed]

### Compute Infrastructure

[More Information Needed]

#### Hardware

[More Information Needed]

#### Software

[More Information Needed]

## Citation [optional]

<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->

**BibTeX:**

[More Information Needed]

**APA:**

[More Information Needed]

## Glossary [optional]

<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->

[More Information Needed]

## More Information [optional]

[More Information Needed]

## Model Card Authors [optional]

[More Information Needed]

## Model Card Contact

[More Information Needed]

### Framework versions

- PEFT 0.14.0
qwen2_5_7b_lora_lambda_kl_0_05/adapter_config.json
ADDED
@@ -0,0 +1,222 @@
{
  "alpha_pattern": {},
  "auto_mapping": {
    "base_model_class": "Qwen2ForCausalLM",
    "parent_library": "transformers.models.qwen2.modeling_qwen2"
  },
  "base_model_name_or_path": "Qwen/Qwen2.5-7B-Instruct",
  "bias": "none",
  "eva_config": null,
  "exclude_modules": null,
  "fan_in_fan_out": false,
  "inference_mode": true,
  "init_lora_weights": true,
  "layer_replication": null,
  "layers_pattern": null,
  "layers_to_transform": null,
  "loftq_config": {},
  "lora_alpha": 32,
  "lora_bias": false,
  "lora_dropout": 0.05,
  "megatron_config": null,
  "megatron_core": "megatron.core",
  "modules_to_save": null,
  "peft_type": "LORA",
  "r": 16,
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
    "model.layers.20.mlp.down_proj",
    "model.layers.10.mlp.up_proj",
    "model.layers.20.self_attn.o_proj",
    "model.layers.10.mlp.down_proj",
    "model.layers.3.self_attn.o_proj",
    "model.layers.9.mlp.down_proj",
    "model.layers.12.mlp.down_proj",
    "model.layers.17.self_attn.k_proj",
    "model.layers.4.mlp.up_proj",
    "model.layers.17.mlp.gate_proj",
    "model.layers.6.self_attn.o_proj",
    "model.layers.8.self_attn.q_proj",
    "model.layers.16.self_attn.k_proj",
    "model.layers.17.self_attn.v_proj",
    "model.layers.10.mlp.gate_proj",
    "model.layers.26.mlp.up_proj",
    "model.layers.10.self_attn.v_proj",
    "model.layers.2.self_attn.o_proj",
    "model.layers.9.mlp.up_proj",
    "model.layers.2.self_attn.q_proj",
    "model.layers.5.mlp.down_proj",
    "model.layers.26.self_attn.v_proj",
    "model.layers.25.self_attn.k_proj",
    "model.layers.3.self_attn.k_proj",
    "model.layers.2.self_attn.k_proj",
    "model.layers.15.mlp.down_proj",
    "model.layers.26.self_attn.k_proj",
    "model.layers.1.self_attn.k_proj",
    "model.layers.16.mlp.up_proj",
    "model.layers.22.mlp.down_proj",
    "model.layers.7.self_attn.q_proj",
    "model.layers.18.self_attn.v_proj",
    "model.layers.15.self_attn.k_proj",
    "model.layers.9.mlp.gate_proj",
    "model.layers.23.mlp.down_proj",
    "model.layers.14.mlp.down_proj",
    "model.layers.22.self_attn.v_proj",
    "model.layers.4.self_attn.q_proj",
    "model.layers.25.mlp.down_proj",
    "model.layers.6.self_attn.q_proj",
    "model.layers.6.self_attn.k_proj",
    "model.layers.8.mlp.gate_proj",
    "model.layers.9.self_attn.o_proj",
    "model.layers.4.mlp.gate_proj",
    "model.layers.18.mlp.up_proj",
    "model.layers.2.mlp.down_proj",
    "model.layers.1.self_attn.o_proj",
    "model.layers.12.self_attn.q_proj",
    "model.layers.2.mlp.up_proj",
    "model.layers.17.self_attn.q_proj",
    "model.layers.22.self_attn.o_proj",
    "model.layers.18.self_attn.k_proj",
    "model.layers.7.self_attn.o_proj",
    "model.layers.23.mlp.up_proj",
    "model.layers.13.self_attn.o_proj",
    "model.layers.6.mlp.gate_proj",
    "model.layers.13.self_attn.q_proj",
    "model.layers.5.self_attn.k_proj",
    "model.layers.15.self_attn.o_proj",
    "model.layers.17.self_attn.o_proj",
    "model.layers.8.self_attn.v_proj",
    "model.layers.5.mlp.up_proj",
    "model.layers.21.self_attn.k_proj",
    "model.layers.11.self_attn.v_proj",
    "model.layers.2.mlp.gate_proj",
    "model.layers.15.self_attn.v_proj",
    "model.layers.21.mlp.down_proj",
    "model.layers.26.mlp.gate_proj",
    "model.layers.25.mlp.gate_proj",
    "model.layers.1.mlp.gate_proj",
    "model.layers.3.mlp.down_proj",
    "model.layers.22.self_attn.q_proj",
    "model.layers.10.self_attn.o_proj",
    "model.layers.3.self_attn.q_proj",
    "model.layers.16.self_attn.o_proj",
    "model.layers.20.self_attn.v_proj",
    "model.layers.3.self_attn.v_proj",
    "model.layers.18.self_attn.q_proj",
    "model.layers.16.mlp.gate_proj",
    "model.layers.4.self_attn.v_proj",
    "model.layers.12.self_attn.o_proj",
    "model.layers.17.mlp.up_proj",
    "model.layers.12.mlp.up_proj",
    "model.layers.4.self_attn.o_proj",
    "model.layers.21.self_attn.q_proj",
    "model.layers.23.mlp.gate_proj",
    "model.layers.4.self_attn.k_proj",
    "model.layers.15.mlp.up_proj",
    "model.layers.1.self_attn.v_proj",
    "model.layers.6.mlp.up_proj",
    "model.layers.16.mlp.down_proj",
    "model.layers.13.mlp.down_proj",
    "model.layers.18.mlp.gate_proj",
    "model.layers.24.mlp.gate_proj",
    "model.layers.19.self_attn.o_proj",
    "model.layers.7.self_attn.v_proj",
    "model.layers.6.mlp.down_proj",
    "model.layers.16.self_attn.q_proj",
    "model.layers.25.self_attn.q_proj",
    "model.layers.23.self_attn.q_proj",
    "model.layers.20.self_attn.q_proj",
    "model.layers.26.self_attn.q_proj",
    "model.layers.25.self_attn.o_proj",
    "model.layers.18.self_attn.o_proj",
    "model.layers.0.self_attn.v_proj",
    "model.layers.22.self_attn.k_proj",
    "model.layers.7.mlp.up_proj",
    "model.layers.19.self_attn.k_proj",
    "model.layers.0.mlp.gate_proj",
    "model.layers.21.self_attn.o_proj",
    "model.layers.1.self_attn.q_proj",
    "model.layers.13.self_attn.k_proj",
    "model.layers.6.self_attn.v_proj",
    "model.layers.3.mlp.up_proj",
    "model.layers.13.mlp.gate_proj",
    "model.layers.15.self_attn.q_proj",
    "model.layers.22.mlp.gate_proj",
    "model.layers.21.self_attn.v_proj",
    "model.layers.21.mlp.up_proj",
    "model.layers.20.mlp.up_proj",
    "model.layers.7.self_attn.k_proj",
    "model.layers.22.mlp.up_proj",
    "model.layers.23.self_attn.v_proj",
    "model.layers.24.mlp.down_proj",
    "model.layers.24.self_attn.v_proj",
    "model.layers.13.mlp.up_proj",
    "model.layers.19.mlp.up_proj",
    "model.layers.7.mlp.gate_proj",
    "model.layers.0.self_attn.o_proj",
    "model.layers.20.self_attn.k_proj",
    "model.layers.24.mlp.up_proj",
    "model.layers.0.mlp.down_proj",
    "model.layers.8.self_attn.k_proj",
    "model.layers.5.mlp.gate_proj",
    "model.layers.23.self_attn.k_proj",
    "model.layers.2.self_attn.v_proj",
    "model.layers.12.mlp.gate_proj",
    "model.layers.17.mlp.down_proj",
    "model.layers.5.self_attn.v_proj",
    "model.layers.25.self_attn.v_proj",
    "model.layers.16.self_attn.v_proj",
    "model.layers.5.self_attn.q_proj",
    "model.layers.14.self_attn.k_proj",
    "model.layers.19.mlp.down_proj",
    "model.layers.15.mlp.gate_proj",
    "model.layers.14.mlp.up_proj",
    "model.layers.19.mlp.gate_proj",
    "model.layers.21.mlp.gate_proj",
    "model.layers.0.self_attn.q_proj",
    "model.layers.12.self_attn.k_proj",
    "model.layers.14.self_attn.q_proj",
    "model.layers.18.mlp.down_proj",
    "model.layers.5.self_attn.o_proj",
    "model.layers.11.mlp.gate_proj",
    "model.layers.14.self_attn.o_proj",
    "model.layers.1.mlp.up_proj",
    "model.layers.19.self_attn.v_proj",
    "model.layers.9.self_attn.v_proj",
    "model.layers.10.self_attn.k_proj",
    "model.layers.24.self_attn.k_proj",
    "model.layers.10.self_attn.q_proj",
    "model.layers.11.mlp.up_proj",
    "model.layers.8.mlp.up_proj",
    "model.layers.0.mlp.up_proj",
    "model.layers.24.self_attn.o_proj",
    "model.layers.11.self_attn.q_proj",
    "model.layers.9.self_attn.q_proj",
    "model.layers.26.self_attn.o_proj",
    "model.layers.8.mlp.down_proj",
    "model.layers.11.self_attn.o_proj",
    "model.layers.11.self_attn.k_proj",
    "model.layers.0.self_attn.k_proj",
    "model.layers.20.mlp.gate_proj",
    "model.layers.26.mlp.down_proj",
    "model.layers.7.mlp.down_proj",
    "model.layers.12.self_attn.v_proj",
    "model.layers.14.self_attn.v_proj",
    "model.layers.19.self_attn.q_proj",
    "model.layers.8.self_attn.o_proj",
    "model.layers.1.mlp.down_proj",
    "model.layers.3.mlp.gate_proj",
    "model.layers.9.self_attn.k_proj",
    "model.layers.11.mlp.down_proj",
    "model.layers.23.self_attn.o_proj",
    "model.layers.25.mlp.up_proj",
    "model.layers.13.self_attn.v_proj",
    "model.layers.4.mlp.down_proj",
    "model.layers.24.self_attn.q_proj",
    "model.layers.14.mlp.gate_proj"
  ],
  "task_type": null,
  "use_dora": false,
  "use_rslora": false
}
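
The config applies rank-16 LoRA (alpha 32, dropout 0.05) to every attention and MLP projection in layers 0 through 26: 27 layers × 7 projections (q/k/v/o plus gate/up/down) account for all 189 target_modules, stopping exactly at the probe's layer_idx of 26 from probe_config.json. A quick sanity check, assuming the local path from the first sketch:

```python
# Sketch: verify the adapter config matches the numbers above.
from peft import PeftConfig

cfg = PeftConfig.from_pretrained("qwen2_5_7b_lora_lambda_kl_0_05")
assert (cfg.r, cfg.lora_alpha, cfg.lora_dropout) == (16, 32, 0.05)
# 27 layers (0-26) x 7 projections = 189 adapted modules
assert len(cfg.target_modules) == 189
```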
qwen2_5_7b_lora_lambda_kl_0_05/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a60796c8d5ac06a6f3ee18442154d8dd43fca202df91763b6519e4f4a96dbe27
size 155764128
qwen2_5_7b_lora_lambda_kl_0_05/added_tokens.json
ADDED
@@ -0,0 +1,24 @@
{
  "</tool_call>": 151658,
  "<tool_call>": 151657,
  "<|box_end|>": 151649,
  "<|box_start|>": 151648,
  "<|endoftext|>": 151643,
  "<|file_sep|>": 151664,
  "<|fim_middle|>": 151660,
  "<|fim_pad|>": 151662,
  "<|fim_prefix|>": 151659,
  "<|fim_suffix|>": 151661,
  "<|im_end|>": 151645,
  "<|im_start|>": 151644,
  "<|image_pad|>": 151655,
  "<|object_ref_end|>": 151647,
  "<|object_ref_start|>": 151646,
  "<|quad_end|>": 151651,
  "<|quad_start|>": 151650,
  "<|repo_name|>": 151663,
  "<|video_pad|>": 151656,
  "<|vision_end|>": 151653,
  "<|vision_pad|>": 151654,
  "<|vision_start|>": 151652
}
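
These are Qwen2.5's stock special tokens (chat markers, vision placeholders, FIM and tool-call tags), unchanged from the base model. With the tokenizer loaded as in the first sketch, they resolve as expected:

```python
# Sketch: the shipped tokenizer maps the added tokens to these ids.
assert tokenizer.convert_tokens_to_ids("<|im_start|>") == 151644
assert tokenizer.convert_tokens_to_ids("<|im_end|>") == 151645
```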
qwen2_5_7b_lora_lambda_kl_0_05/eval_metrics.jsonl
ADDED
@@ -0,0 +1,6 @@
{"qwen_2_5_7b_longfact_test/lm_loss": 0.3498629196631981, "qwen_2_5_7b_longfact_test/probe_loss": 0.944612454062868, "qwen_2_5_7b_longfact_test/sparsity": 0.09246595673127059, "qwen_2_5_7b_longfact_test/probe_threshold": 0.5, "qwen_2_5_7b_longfact_test/all_tokens_accuracy": 0.9391203196301536, "qwen_2_5_7b_longfact_test/all_tokens_precision": 0.35071555131525145, "qwen_2_5_7b_longfact_test/all_tokens_recall": 0.33921273959239634, "qwen_2_5_7b_longfact_test/all_tokens_f1": 0.3448682552872115, "qwen_2_5_7b_longfact_test/all_tokens_auc": 0.8749180157651084, "qwen_2_5_7b_longfact_test/all_tokens_optimal_threshold": 0.9415640782828283, "qwen_2_5_7b_longfact_test/all_tokens_threshold_optimized_accuracy": 0.9528612777235319, "qwen_2_5_7b_longfact_test/all_tokens_recall_at_0.1_fpr": 0.6260644889134963, "qwen_2_5_7b_longfact_test/all_tokens_true_positive_count": 37929, "qwen_2_5_7b_longfact_test/all_tokens_true_negative_count": 764999, "qwen_2_5_7b_longfact_test/all_tokens_pred_positive_count": 36685, "qwen_2_5_7b_longfact_test/all_tokens_pred_negative_count": 766243, "qwen_2_5_7b_longfact_test/all_tokens_total_samples": 802928, "qwen_2_5_7b_longfact_test/entity_tokens_accuracy": 0.6994360881753034, "qwen_2_5_7b_longfact_test/entity_tokens_precision": 0.8376847451005924, "qwen_2_5_7b_longfact_test/entity_tokens_recall": 0.33921273959239634, "qwen_2_5_7b_longfact_test/entity_tokens_f1": 0.48288545263473953, "qwen_2_5_7b_longfact_test/entity_tokens_auc": 0.8200465888374614, "qwen_2_5_7b_longfact_test/entity_tokens_optimal_threshold": 0.16702178030303028, "qwen_2_5_7b_longfact_test/entity_tokens_threshold_optimized_accuracy": 0.7524350737884622, "qwen_2_5_7b_longfact_test/entity_tokens_recall_at_0.1_fpr": 0.5104273774684278, "qwen_2_5_7b_longfact_test/entity_tokens_true_positive_count": 37929, "qwen_2_5_7b_longfact_test/entity_tokens_true_negative_count": 53752, "qwen_2_5_7b_longfact_test/entity_tokens_pred_positive_count": 15359, "qwen_2_5_7b_longfact_test/entity_tokens_pred_negative_count": 76322, "qwen_2_5_7b_longfact_test/entity_tokens_total_samples": 91681, "qwen_2_5_7b_longfact_test/entity_span_max_accuracy": 0.7995004856389621, "qwen_2_5_7b_longfact_test/entity_span_max_precision": 0.7547085201793722, "qwen_2_5_7b_longfact_test/entity_span_max_recall": 0.6520728399845022, "qwen_2_5_7b_longfact_test/entity_span_max_f1": 0.6996466431095406, "qwen_2_5_7b_longfact_test/entity_span_max_auc": 0.8738263111336242, "qwen_2_5_7b_longfact_test/entity_span_max_optimal_threshold": 0.42876025883838387, "qwen_2_5_7b_longfact_test/entity_span_max_threshold_optimized_accuracy": 0.80158179547662, "qwen_2_5_7b_longfact_test/entity_span_max_recall_at_0.1_fpr": 0.604804339403332, "qwen_2_5_7b_longfact_test/entity_span_max_true_positive_count": 5162, "qwen_2_5_7b_longfact_test/entity_span_max_true_negative_count": 9252, "qwen_2_5_7b_longfact_test/entity_span_max_pred_positive_count": 4460, "qwen_2_5_7b_longfact_test/entity_span_max_pred_negative_count": 9954, "qwen_2_5_7b_longfact_test/entity_span_max_total_samples": 14414, "epoch": 1.0, "global_step": 241, "training_progress": 1.0, "dataset_id": "qwen_2_5_7b_longfact_test"}
{"qwen_2_5_7b_longfact_augmented_test/lm_loss": 0.32481150685728966, "qwen_2_5_7b_longfact_augmented_test/probe_loss": 1.147995874615435, "qwen_2_5_7b_longfact_augmented_test/sparsity": 0.11234531300662642, "qwen_2_5_7b_longfact_augmented_test/probe_threshold": 0.5, "qwen_2_5_7b_longfact_augmented_test/all_tokens_accuracy": 0.9210031726846956, "qwen_2_5_7b_longfact_augmented_test/all_tokens_precision": 0.3536936360729133, "qwen_2_5_7b_longfact_augmented_test/all_tokens_recall": 0.39221845078271445, "qwen_2_5_7b_longfact_augmented_test/all_tokens_f1": 0.3719611799750168, "qwen_2_5_7b_longfact_augmented_test/all_tokens_auc": 0.8674433076262933, "qwen_2_5_7b_longfact_augmented_test/all_tokens_optimal_threshold": 1.0, "qwen_2_5_7b_longfact_augmented_test/all_tokens_threshold_optimized_accuracy": 0.940355038525457, "qwen_2_5_7b_longfact_augmented_test/all_tokens_recall_at_0.1_fpr": 0.5928111859769999, "qwen_2_5_7b_longfact_augmented_test/all_tokens_true_positive_count": 39478, "qwen_2_5_7b_longfact_augmented_test/all_tokens_true_negative_count": 622422, "qwen_2_5_7b_longfact_augmented_test/all_tokens_pred_positive_count": 43778, "qwen_2_5_7b_longfact_augmented_test/all_tokens_pred_negative_count": 618122, "qwen_2_5_7b_longfact_augmented_test/all_tokens_total_samples": 661900, "qwen_2_5_7b_longfact_augmented_test/entity_tokens_accuracy": 0.6830000245984306, "qwen_2_5_7b_longfact_augmented_test/entity_tokens_precision": 0.8968952734012975, "qwen_2_5_7b_longfact_augmented_test/entity_tokens_recall": 0.39221845078271445, "qwen_2_5_7b_longfact_augmented_test/entity_tokens_f1": 0.5457685664939551, "qwen_2_5_7b_longfact_augmented_test/entity_tokens_auc": 0.8364956916506707, "qwen_2_5_7b_longfact_augmented_test/entity_tokens_optimal_threshold": 0.14692826704545453, "qwen_2_5_7b_longfact_augmented_test/entity_tokens_threshold_optimized_accuracy": 0.7554547019900131, "qwen_2_5_7b_longfact_augmented_test/entity_tokens_recall_at_0.1_fpr": 0.5543593900400223, "qwen_2_5_7b_longfact_augmented_test/entity_tokens_true_positive_count": 39478, "qwen_2_5_7b_longfact_augmented_test/entity_tokens_true_negative_count": 41828, "qwen_2_5_7b_longfact_augmented_test/entity_tokens_pred_positive_count": 17264, "qwen_2_5_7b_longfact_augmented_test/entity_tokens_pred_negative_count": 64042, "qwen_2_5_7b_longfact_augmented_test/entity_tokens_total_samples": 81306, "qwen_2_5_7b_longfact_augmented_test/entity_span_max_accuracy": 0.7992657273268505, "qwen_2_5_7b_longfact_augmented_test/entity_span_max_precision": 0.784524449220849, "qwen_2_5_7b_longfact_augmented_test/entity_span_max_recall": 0.7051436851002173, "qwen_2_5_7b_longfact_augmented_test/entity_span_max_f1": 0.7427190639704947, "qwen_2_5_7b_longfact_augmented_test/entity_span_max_auc": 0.8818148801162915, "qwen_2_5_7b_longfact_augmented_test/entity_span_max_optimal_threshold": 0.42960858585858597, "qwen_2_5_7b_longfact_augmented_test/entity_span_max_threshold_optimized_accuracy": 0.8006548918436198, "qwen_2_5_7b_longfact_augmented_test/entity_span_max_recall_at_0.1_fpr": 0.6314899782661193, "qwen_2_5_7b_longfact_augmented_test/entity_span_max_true_positive_count": 4141, "qwen_2_5_7b_longfact_augmented_test/entity_span_max_true_negative_count": 5937, "qwen_2_5_7b_longfact_augmented_test/entity_span_max_pred_positive_count": 3722, "qwen_2_5_7b_longfact_augmented_test/entity_span_max_pred_negative_count": 6356, "qwen_2_5_7b_longfact_augmented_test/entity_span_max_total_samples": 10078, "epoch": 1.0, "global_step": 241, "training_progress": 1.0, "dataset_id": 
"qwen_2_5_7b_longfact_augmented_test"}
{"llama3_1_8b_trivia_qa_test/lm_loss": NaN, "llama3_1_8b_trivia_qa_test/probe_loss": 0.9877010567864943, "llama3_1_8b_trivia_qa_test/sparsity": 0.19224896117420875, "llama3_1_8b_trivia_qa_test/probe_threshold": 0.5, "llama3_1_8b_trivia_qa_test/all_tokens_accuracy": 0.8708254051571419, "llama3_1_8b_trivia_qa_test/all_tokens_precision": 0.17068645640074212, "llama3_1_8b_trivia_qa_test/all_tokens_recall": 0.36687116564417177, "llama3_1_8b_trivia_qa_test/all_tokens_f1": 0.23297944871919743, "llama3_1_8b_trivia_qa_test/all_tokens_auc": 0.7602149036036673, "llama3_1_8b_trivia_qa_test/all_tokens_optimal_threshold": 1.0, "llama3_1_8b_trivia_qa_test/all_tokens_threshold_optimized_accuracy": 0.9465094153926907, "llama3_1_8b_trivia_qa_test/all_tokens_recall_at_0.1_fpr": 0.36441717791411044, "llama3_1_8b_trivia_qa_test/all_tokens_true_positive_count": 3260, "llama3_1_8b_trivia_qa_test/all_tokens_true_negative_count": 57704, "llama3_1_8b_trivia_qa_test/all_tokens_pred_positive_count": 7007, "llama3_1_8b_trivia_qa_test/all_tokens_pred_negative_count": 53957, "llama3_1_8b_trivia_qa_test/all_tokens_total_samples": 60964, "llama3_1_8b_trivia_qa_test/entity_tokens_accuracy": 0.6302420729628367, "llama3_1_8b_trivia_qa_test/entity_tokens_precision": 0.9192928516525749, "llama3_1_8b_trivia_qa_test/entity_tokens_recall": 0.36687116564417177, "llama3_1_8b_trivia_qa_test/entity_tokens_f1": 0.524446393334795, "llama3_1_8b_trivia_qa_test/entity_tokens_auc": 0.8037175889523468, "llama3_1_8b_trivia_qa_test/entity_tokens_optimal_threshold": 0.11838600852272729, "llama3_1_8b_trivia_qa_test/entity_tokens_threshold_optimized_accuracy": 0.7337197408796454, "llama3_1_8b_trivia_qa_test/entity_tokens_recall_at_0.1_fpr": 0.5104294478527608, "llama3_1_8b_trivia_qa_test/entity_tokens_true_positive_count": 3260, "llama3_1_8b_trivia_qa_test/entity_tokens_true_negative_count": 2606, "llama3_1_8b_trivia_qa_test/entity_tokens_pred_positive_count": 1301, "llama3_1_8b_trivia_qa_test/entity_tokens_pred_negative_count": 4565, "llama3_1_8b_trivia_qa_test/entity_tokens_total_samples": 5866, "llama3_1_8b_trivia_qa_test/entity_span_max_accuracy": 0.8203321590337191, "llama3_1_8b_trivia_qa_test/entity_span_max_precision": 0.8865853658536585, "llama3_1_8b_trivia_qa_test/entity_span_max_recall": 0.7336024217961655, "llama3_1_8b_trivia_qa_test/entity_span_max_f1": 0.8028713418001104, "llama3_1_8b_trivia_qa_test/entity_span_max_auc": 0.9113396066607499, "llama3_1_8b_trivia_qa_test/entity_span_max_optimal_threshold": 0.3879616477272727, "llama3_1_8b_trivia_qa_test/entity_span_max_threshold_optimized_accuracy": 0.8424760946149975, "llama3_1_8b_trivia_qa_test/entity_span_max_recall_at_0.1_fpr": 0.7467204843592331, "llama3_1_8b_trivia_qa_test/entity_span_max_true_positive_count": 991, "llama3_1_8b_trivia_qa_test/entity_span_max_true_negative_count": 996, "llama3_1_8b_trivia_qa_test/entity_span_max_pred_positive_count": 820, "llama3_1_8b_trivia_qa_test/entity_span_max_pred_negative_count": 1167, "llama3_1_8b_trivia_qa_test/entity_span_max_total_samples": 1987, "epoch": 1.0, "global_step": 241, "training_progress": 1.0, "dataset_id": "llama3_1_8b_trivia_qa_test"}
{"qwen_2_5_7b_longfact_test/lm_loss": 0.3403156998186459, "qwen_2_5_7b_longfact_test/probe_loss": 0.9048037807860596, "qwen_2_5_7b_longfact_test/sparsity": 0.0762635828693386, "qwen_2_5_7b_longfact_test/probe_threshold": 0.5, "qwen_2_5_7b_longfact_test/all_tokens_accuracy": 0.9440310961879521, "qwen_2_5_7b_longfact_test/all_tokens_precision": 0.3892365061307041, "qwen_2_5_7b_longfact_test/all_tokens_recall": 0.324738326873896, "qwen_2_5_7b_longfact_test/all_tokens_f1": 0.3540741379558162, "qwen_2_5_7b_longfact_test/all_tokens_auc": 0.8909883076961829, "qwen_2_5_7b_longfact_test/all_tokens_optimal_threshold": 0.8556265782828287, "qwen_2_5_7b_longfact_test/all_tokens_threshold_optimized_accuracy": 0.9531788653528087, "qwen_2_5_7b_longfact_test/all_tokens_recall_at_0.1_fpr": 0.6661130006063961, "qwen_2_5_7b_longfact_test/all_tokens_true_positive_count": 37929, "qwen_2_5_7b_longfact_test/all_tokens_true_negative_count": 764999, "qwen_2_5_7b_longfact_test/all_tokens_pred_positive_count": 31644, "qwen_2_5_7b_longfact_test/all_tokens_pred_negative_count": 771284, "qwen_2_5_7b_longfact_test/all_tokens_total_samples": 802928, "qwen_2_5_7b_longfact_test/entity_tokens_accuracy": 0.6977454434397531, "qwen_2_5_7b_longfact_test/entity_tokens_precision": 0.8543978912319645, "qwen_2_5_7b_longfact_test/entity_tokens_recall": 0.324738326873896, "qwen_2_5_7b_longfact_test/entity_tokens_f1": 0.47060846308147863, "qwen_2_5_7b_longfact_test/entity_tokens_auc": 0.8382365663877721, "qwen_2_5_7b_longfact_test/entity_tokens_optimal_threshold": 0.13589015151515155, "qwen_2_5_7b_longfact_test/entity_tokens_threshold_optimized_accuracy": 0.7659166021313031, "qwen_2_5_7b_longfact_test/entity_tokens_recall_at_0.1_fpr": 0.5349468744232645, "qwen_2_5_7b_longfact_test/entity_tokens_true_positive_count": 37929, "qwen_2_5_7b_longfact_test/entity_tokens_true_negative_count": 53752, "qwen_2_5_7b_longfact_test/entity_tokens_pred_positive_count": 14416, "qwen_2_5_7b_longfact_test/entity_tokens_pred_negative_count": 77265, "qwen_2_5_7b_longfact_test/entity_tokens_total_samples": 91681, "qwen_2_5_7b_longfact_test/entity_span_max_accuracy": 0.8037324823088664, "qwen_2_5_7b_longfact_test/entity_span_max_precision": 0.7878114976560573, "qwen_2_5_7b_longfact_test/entity_span_max_recall": 0.6185586981790003, "qwen_2_5_7b_longfact_test/entity_span_max_f1": 0.6930005425935974, "qwen_2_5_7b_longfact_test/entity_span_max_auc": 0.8837167661414778, "qwen_2_5_7b_longfact_test/entity_span_max_optimal_threshold": 0.437204071969697, "qwen_2_5_7b_longfact_test/entity_span_max_threshold_optimized_accuracy": 0.8098376578326627, "qwen_2_5_7b_longfact_test/entity_span_max_recall_at_0.1_fpr": 0.6375435877566834, "qwen_2_5_7b_longfact_test/entity_span_max_true_positive_count": 5162, "qwen_2_5_7b_longfact_test/entity_span_max_true_negative_count": 9252, "qwen_2_5_7b_longfact_test/entity_span_max_pred_positive_count": 4053, "qwen_2_5_7b_longfact_test/entity_span_max_pred_negative_count": 10361, "qwen_2_5_7b_longfact_test/entity_span_max_total_samples": 14414, "epoch": 1.0, "global_step": 964, "training_progress": 1.0, "dataset_id": "qwen_2_5_7b_longfact_test"}
{"qwen_2_5_7b_longfact_augmented_test/lm_loss": 0.31472883214076014, "qwen_2_5_7b_longfact_augmented_test/probe_loss": 0.9969771905236889, "qwen_2_5_7b_longfact_augmented_test/sparsity": 0.09702653702202062, "qwen_2_5_7b_longfact_augmented_test/probe_threshold": 0.5, "qwen_2_5_7b_longfact_augmented_test/all_tokens_accuracy": 0.9277186886236591, "qwen_2_5_7b_longfact_augmented_test/all_tokens_precision": 0.3952882858072754, "qwen_2_5_7b_longfact_augmented_test/all_tokens_recall": 0.3999442727595116, "qwen_2_5_7b_longfact_augmented_test/all_tokens_f1": 0.39760264917339244, "qwen_2_5_7b_longfact_augmented_test/all_tokens_auc": 0.8851051986364417, "qwen_2_5_7b_longfact_augmented_test/all_tokens_optimal_threshold": 0.8589409722222228, "qwen_2_5_7b_longfact_augmented_test/all_tokens_threshold_optimized_accuracy": 0.9409397189907841, "qwen_2_5_7b_longfact_augmented_test/all_tokens_recall_at_0.1_fpr": 0.6396727291149501, "qwen_2_5_7b_longfact_augmented_test/all_tokens_true_positive_count": 39478, "qwen_2_5_7b_longfact_augmented_test/all_tokens_true_negative_count": 622422, "qwen_2_5_7b_longfact_augmented_test/all_tokens_pred_positive_count": 39943, "qwen_2_5_7b_longfact_augmented_test/all_tokens_pred_negative_count": 621957, "qwen_2_5_7b_longfact_augmented_test/all_tokens_total_samples": 661900, "qwen_2_5_7b_longfact_augmented_test/entity_tokens_accuracy": 0.6896416008658648, "qwen_2_5_7b_longfact_augmented_test/entity_tokens_precision": 0.9108688127379716, "qwen_2_5_7b_longfact_augmented_test/entity_tokens_recall": 0.3999442727595116, "qwen_2_5_7b_longfact_augmented_test/entity_tokens_f1": 0.5558332746602831, "qwen_2_5_7b_longfact_augmented_test/entity_tokens_auc": 0.8556854617116961, "qwen_2_5_7b_longfact_augmented_test/entity_tokens_optimal_threshold": 0.12335759943181823, "qwen_2_5_7b_longfact_augmented_test/entity_tokens_threshold_optimized_accuracy": 0.7739527218163481, "qwen_2_5_7b_longfact_augmented_test/entity_tokens_recall_at_0.1_fpr": 0.5913420132732154, "qwen_2_5_7b_longfact_augmented_test/entity_tokens_true_positive_count": 39478, "qwen_2_5_7b_longfact_augmented_test/entity_tokens_true_negative_count": 41828, "qwen_2_5_7b_longfact_augmented_test/entity_tokens_pred_positive_count": 17334, "qwen_2_5_7b_longfact_augmented_test/entity_tokens_pred_negative_count": 63972, "qwen_2_5_7b_longfact_augmented_test/entity_tokens_total_samples": 81306, "qwen_2_5_7b_longfact_augmented_test/entity_span_max_accuracy": 0.8000595356221473, "qwen_2_5_7b_longfact_augmented_test/entity_span_max_precision": 0.8051090700344432, "qwen_2_5_7b_longfact_augmented_test/entity_span_max_recall": 0.6773726153103116, "qwen_2_5_7b_longfact_augmented_test/entity_span_max_f1": 0.7357377049180328, "qwen_2_5_7b_longfact_augmented_test/entity_span_max_auc": 0.8888790523144552, "qwen_2_5_7b_longfact_augmented_test/entity_span_max_optimal_threshold": 0.4113794191919192, "qwen_2_5_7b_longfact_augmented_test/entity_span_max_threshold_optimized_accuracy": 0.8064100019845207, "qwen_2_5_7b_longfact_augmented_test/entity_span_max_recall_at_0.1_fpr": 0.644771794252596, "qwen_2_5_7b_longfact_augmented_test/entity_span_max_true_positive_count": 4141, "qwen_2_5_7b_longfact_augmented_test/entity_span_max_true_negative_count": 5937, "qwen_2_5_7b_longfact_augmented_test/entity_span_max_pred_positive_count": 3484, "qwen_2_5_7b_longfact_augmented_test/entity_span_max_pred_negative_count": 6594, "qwen_2_5_7b_longfact_augmented_test/entity_span_max_total_samples": 10078, "epoch": 1.0, "global_step": 964, "training_progress": 1.0, 
"dataset_id": "qwen_2_5_7b_longfact_augmented_test"}
{"llama3_1_8b_trivia_qa_test/lm_loss": 1.2490080514870792, "llama3_1_8b_trivia_qa_test/probe_loss": 0.888429161903274, "llama3_1_8b_trivia_qa_test/sparsity": 0.19553201752473634, "llama3_1_8b_trivia_qa_test/probe_threshold": 0.5, "llama3_1_8b_trivia_qa_test/all_tokens_accuracy": 0.8640181090479627, "llama3_1_8b_trivia_qa_test/all_tokens_precision": 0.19581519109820997, "llama3_1_8b_trivia_qa_test/all_tokens_recall": 0.49662576687116566, "llama3_1_8b_trivia_qa_test/all_tokens_f1": 0.28088133240804997, "llama3_1_8b_trivia_qa_test/all_tokens_auc": 0.8182136579829025, "llama3_1_8b_trivia_qa_test/all_tokens_optimal_threshold": 0.9733664772727275, "llama3_1_8b_trivia_qa_test/all_tokens_threshold_optimized_accuracy": 0.9466734466242372, "llama3_1_8b_trivia_qa_test/all_tokens_recall_at_0.1_fpr": 0.4634969325153374, "llama3_1_8b_trivia_qa_test/all_tokens_true_positive_count": 3260, "llama3_1_8b_trivia_qa_test/all_tokens_true_negative_count": 57704, "llama3_1_8b_trivia_qa_test/all_tokens_pred_positive_count": 8268, "llama3_1_8b_trivia_qa_test/all_tokens_pred_negative_count": 52696, "llama3_1_8b_trivia_qa_test/all_tokens_total_samples": 60964, "llama3_1_8b_trivia_qa_test/entity_tokens_accuracy": 0.6924650528469144, "llama3_1_8b_trivia_qa_test/entity_tokens_precision": 0.9085297418630752, "llama3_1_8b_trivia_qa_test/entity_tokens_recall": 0.49662576687116566, "llama3_1_8b_trivia_qa_test/entity_tokens_f1": 0.6422054740182467, "llama3_1_8b_trivia_qa_test/entity_tokens_auc": 0.8600766753457099, "llama3_1_8b_trivia_qa_test/entity_tokens_optimal_threshold": 0.12021583017676771, "llama3_1_8b_trivia_qa_test/entity_tokens_threshold_optimized_accuracy": 0.7877599727241732, "llama3_1_8b_trivia_qa_test/entity_tokens_recall_at_0.1_fpr": 0.6058282208588958, "llama3_1_8b_trivia_qa_test/entity_tokens_true_positive_count": 3260, "llama3_1_8b_trivia_qa_test/entity_tokens_true_negative_count": 2606, "llama3_1_8b_trivia_qa_test/entity_tokens_pred_positive_count": 1782, "llama3_1_8b_trivia_qa_test/entity_tokens_pred_negative_count": 4084, "llama3_1_8b_trivia_qa_test/entity_tokens_total_samples": 5866, "llama3_1_8b_trivia_qa_test/entity_span_max_accuracy": 0.8570709612481128, "llama3_1_8b_trivia_qa_test/entity_span_max_precision": 0.8701570680628272, "llama3_1_8b_trivia_qa_test/entity_span_max_recall": 0.8385469223007064, "llama3_1_8b_trivia_qa_test/entity_span_max_f1": 0.8540596094552929, "llama3_1_8b_trivia_qa_test/entity_span_max_auc": 0.9361077002257263, "llama3_1_8b_trivia_qa_test/entity_span_max_optimal_threshold": 0.5555555555555558, "llama3_1_8b_trivia_qa_test/entity_span_max_threshold_optimized_accuracy": 0.8580775037745345, "llama3_1_8b_trivia_qa_test/entity_span_max_recall_at_0.1_fpr": 0.8173562058526741, "llama3_1_8b_trivia_qa_test/entity_span_max_true_positive_count": 991, "llama3_1_8b_trivia_qa_test/entity_span_max_true_negative_count": 996, "llama3_1_8b_trivia_qa_test/entity_span_max_pred_positive_count": 955, "llama3_1_8b_trivia_qa_test/entity_span_max_pred_negative_count": 1032, "llama3_1_8b_trivia_qa_test/entity_span_max_total_samples": 1987, "epoch": 1.0, "global_step": 964, "training_progress": 1.0, "dataset_id": "llama3_1_8b_trivia_qa_test"}
qwen2_5_7b_lora_lambda_kl_0_05/eval_metrics_qwen2_5_7b_lora_lambda_kl=0.5.json
ADDED
@@ -0,0 +1,132 @@
{
  "qwen_2_5_7b_longfact_test/lm_loss": 0.3403156998186459,
  "qwen_2_5_7b_longfact_test/probe_loss": 0.9048037807860596,
  "qwen_2_5_7b_longfact_test/sparsity": 0.0762635828693386,
  "qwen_2_5_7b_longfact_test/probe_threshold": 0.5,
  "qwen_2_5_7b_longfact_test/all_tokens_accuracy": 0.9440310961879521,
  "qwen_2_5_7b_longfact_test/all_tokens_precision": 0.3892365061307041,
  "qwen_2_5_7b_longfact_test/all_tokens_recall": 0.324738326873896,
  "qwen_2_5_7b_longfact_test/all_tokens_f1": 0.3540741379558162,
  "qwen_2_5_7b_longfact_test/all_tokens_auc": 0.8909883076961829,
  "qwen_2_5_7b_longfact_test/all_tokens_optimal_threshold": 0.8556265782828287,
  "qwen_2_5_7b_longfact_test/all_tokens_threshold_optimized_accuracy": 0.9531788653528087,
  "qwen_2_5_7b_longfact_test/all_tokens_recall_at_0.1_fpr": 0.6661130006063961,
  "qwen_2_5_7b_longfact_test/all_tokens_true_positive_count": 37929,
  "qwen_2_5_7b_longfact_test/all_tokens_true_negative_count": 764999,
  "qwen_2_5_7b_longfact_test/all_tokens_pred_positive_count": 31644,
  "qwen_2_5_7b_longfact_test/all_tokens_pred_negative_count": 771284,
  "qwen_2_5_7b_longfact_test/all_tokens_total_samples": 802928,
  "qwen_2_5_7b_longfact_test/entity_tokens_accuracy": 0.6977454434397531,
  "qwen_2_5_7b_longfact_test/entity_tokens_precision": 0.8543978912319645,
  "qwen_2_5_7b_longfact_test/entity_tokens_recall": 0.324738326873896,
  "qwen_2_5_7b_longfact_test/entity_tokens_f1": 0.47060846308147863,
  "qwen_2_5_7b_longfact_test/entity_tokens_auc": 0.8382365663877721,
  "qwen_2_5_7b_longfact_test/entity_tokens_optimal_threshold": 0.13589015151515155,
  "qwen_2_5_7b_longfact_test/entity_tokens_threshold_optimized_accuracy": 0.7659166021313031,
  "qwen_2_5_7b_longfact_test/entity_tokens_recall_at_0.1_fpr": 0.5349468744232645,
  "qwen_2_5_7b_longfact_test/entity_tokens_true_positive_count": 37929,
  "qwen_2_5_7b_longfact_test/entity_tokens_true_negative_count": 53752,
  "qwen_2_5_7b_longfact_test/entity_tokens_pred_positive_count": 14416,
  "qwen_2_5_7b_longfact_test/entity_tokens_pred_negative_count": 77265,
  "qwen_2_5_7b_longfact_test/entity_tokens_total_samples": 91681,
  "qwen_2_5_7b_longfact_test/entity_span_max_accuracy": 0.8037324823088664,
  "qwen_2_5_7b_longfact_test/entity_span_max_precision": 0.7878114976560573,
  "qwen_2_5_7b_longfact_test/entity_span_max_recall": 0.6185586981790003,
  "qwen_2_5_7b_longfact_test/entity_span_max_f1": 0.6930005425935974,
  "qwen_2_5_7b_longfact_test/entity_span_max_auc": 0.8837167661414778,
  "qwen_2_5_7b_longfact_test/entity_span_max_optimal_threshold": 0.437204071969697,
  "qwen_2_5_7b_longfact_test/entity_span_max_threshold_optimized_accuracy": 0.8098376578326627,
  "qwen_2_5_7b_longfact_test/entity_span_max_recall_at_0.1_fpr": 0.6375435877566834,
  "qwen_2_5_7b_longfact_test/entity_span_max_true_positive_count": 5162,
  "qwen_2_5_7b_longfact_test/entity_span_max_true_negative_count": 9252,
  "qwen_2_5_7b_longfact_test/entity_span_max_pred_positive_count": 4053,
  "qwen_2_5_7b_longfact_test/entity_span_max_pred_negative_count": 10361,
  "qwen_2_5_7b_longfact_test/entity_span_max_total_samples": 14414,
  "epoch": 1.0,
  "qwen_2_5_7b_longfact_augmented_test/lm_loss": 0.31472883214076014,
  "qwen_2_5_7b_longfact_augmented_test/probe_loss": 0.9969771905236889,
  "qwen_2_5_7b_longfact_augmented_test/sparsity": 0.09702653702202062,
  "qwen_2_5_7b_longfact_augmented_test/probe_threshold": 0.5,
  "qwen_2_5_7b_longfact_augmented_test/all_tokens_accuracy": 0.9277186886236591,
  "qwen_2_5_7b_longfact_augmented_test/all_tokens_precision": 0.3952882858072754,
  "qwen_2_5_7b_longfact_augmented_test/all_tokens_recall": 0.3999442727595116,
  "qwen_2_5_7b_longfact_augmented_test/all_tokens_f1": 0.39760264917339244,
  "qwen_2_5_7b_longfact_augmented_test/all_tokens_auc": 0.8851051986364417,
  "qwen_2_5_7b_longfact_augmented_test/all_tokens_optimal_threshold": 0.8589409722222228,
  "qwen_2_5_7b_longfact_augmented_test/all_tokens_threshold_optimized_accuracy": 0.9409397189907841,
  "qwen_2_5_7b_longfact_augmented_test/all_tokens_recall_at_0.1_fpr": 0.6396727291149501,
  "qwen_2_5_7b_longfact_augmented_test/all_tokens_true_positive_count": 39478,
  "qwen_2_5_7b_longfact_augmented_test/all_tokens_true_negative_count": 622422,
  "qwen_2_5_7b_longfact_augmented_test/all_tokens_pred_positive_count": 39943,
  "qwen_2_5_7b_longfact_augmented_test/all_tokens_pred_negative_count": 621957,
  "qwen_2_5_7b_longfact_augmented_test/all_tokens_total_samples": 661900,
  "qwen_2_5_7b_longfact_augmented_test/entity_tokens_accuracy": 0.6896416008658648,
  "qwen_2_5_7b_longfact_augmented_test/entity_tokens_precision": 0.9108688127379716,
  "qwen_2_5_7b_longfact_augmented_test/entity_tokens_recall": 0.3999442727595116,
  "qwen_2_5_7b_longfact_augmented_test/entity_tokens_f1": 0.5558332746602831,
  "qwen_2_5_7b_longfact_augmented_test/entity_tokens_auc": 0.8556854617116961,
  "qwen_2_5_7b_longfact_augmented_test/entity_tokens_optimal_threshold": 0.12335759943181823,
  "qwen_2_5_7b_longfact_augmented_test/entity_tokens_threshold_optimized_accuracy": 0.7739527218163481,
  "qwen_2_5_7b_longfact_augmented_test/entity_tokens_recall_at_0.1_fpr": 0.5913420132732154,
  "qwen_2_5_7b_longfact_augmented_test/entity_tokens_true_positive_count": 39478,
  "qwen_2_5_7b_longfact_augmented_test/entity_tokens_true_negative_count": 41828,
  "qwen_2_5_7b_longfact_augmented_test/entity_tokens_pred_positive_count": 17334,
  "qwen_2_5_7b_longfact_augmented_test/entity_tokens_pred_negative_count": 63972,
  "qwen_2_5_7b_longfact_augmented_test/entity_tokens_total_samples": 81306,
  "qwen_2_5_7b_longfact_augmented_test/entity_span_max_accuracy": 0.8000595356221473,
  "qwen_2_5_7b_longfact_augmented_test/entity_span_max_precision": 0.8051090700344432,
  "qwen_2_5_7b_longfact_augmented_test/entity_span_max_recall": 0.6773726153103116,
  "qwen_2_5_7b_longfact_augmented_test/entity_span_max_f1": 0.7357377049180328,
  "qwen_2_5_7b_longfact_augmented_test/entity_span_max_auc": 0.8888790523144552,
  "qwen_2_5_7b_longfact_augmented_test/entity_span_max_optimal_threshold": 0.4113794191919192,
  "qwen_2_5_7b_longfact_augmented_test/entity_span_max_threshold_optimized_accuracy": 0.8064100019845207,
  "qwen_2_5_7b_longfact_augmented_test/entity_span_max_recall_at_0.1_fpr": 0.644771794252596,
  "qwen_2_5_7b_longfact_augmented_test/entity_span_max_true_positive_count": 4141,
  "qwen_2_5_7b_longfact_augmented_test/entity_span_max_true_negative_count": 5937,
  "qwen_2_5_7b_longfact_augmented_test/entity_span_max_pred_positive_count": 3484,
  "qwen_2_5_7b_longfact_augmented_test/entity_span_max_pred_negative_count": 6594,
  "qwen_2_5_7b_longfact_augmented_test/entity_span_max_total_samples": 10078,
  "llama3_1_8b_trivia_qa_test/lm_loss": 1.2490080514870792,
  "llama3_1_8b_trivia_qa_test/probe_loss": 0.888429161903274,
  "llama3_1_8b_trivia_qa_test/sparsity": 0.19553201752473634,
  "llama3_1_8b_trivia_qa_test/probe_threshold": 0.5,
  "llama3_1_8b_trivia_qa_test/all_tokens_accuracy": 0.8640181090479627,
  "llama3_1_8b_trivia_qa_test/all_tokens_precision": 0.19581519109820997,
  "llama3_1_8b_trivia_qa_test/all_tokens_recall": 0.49662576687116566,
  "llama3_1_8b_trivia_qa_test/all_tokens_f1": 0.28088133240804997,
  "llama3_1_8b_trivia_qa_test/all_tokens_auc": 0.8182136579829025,
  "llama3_1_8b_trivia_qa_test/all_tokens_optimal_threshold": 0.9733664772727275,
  "llama3_1_8b_trivia_qa_test/all_tokens_threshold_optimized_accuracy": 0.9466734466242372,
  "llama3_1_8b_trivia_qa_test/all_tokens_recall_at_0.1_fpr": 0.4634969325153374,
  "llama3_1_8b_trivia_qa_test/all_tokens_true_positive_count": 3260,
  "llama3_1_8b_trivia_qa_test/all_tokens_true_negative_count": 57704,
  "llama3_1_8b_trivia_qa_test/all_tokens_pred_positive_count": 8268,
  "llama3_1_8b_trivia_qa_test/all_tokens_pred_negative_count": 52696,
  "llama3_1_8b_trivia_qa_test/all_tokens_total_samples": 60964,
  "llama3_1_8b_trivia_qa_test/entity_tokens_accuracy": 0.6924650528469144,
  "llama3_1_8b_trivia_qa_test/entity_tokens_precision": 0.9085297418630752,
  "llama3_1_8b_trivia_qa_test/entity_tokens_recall": 0.49662576687116566,
  "llama3_1_8b_trivia_qa_test/entity_tokens_f1": 0.6422054740182467,
  "llama3_1_8b_trivia_qa_test/entity_tokens_auc": 0.8600766753457099,
  "llama3_1_8b_trivia_qa_test/entity_tokens_optimal_threshold": 0.12021583017676771,
  "llama3_1_8b_trivia_qa_test/entity_tokens_threshold_optimized_accuracy": 0.7877599727241732,
  "llama3_1_8b_trivia_qa_test/entity_tokens_recall_at_0.1_fpr": 0.6058282208588958,
  "llama3_1_8b_trivia_qa_test/entity_tokens_true_positive_count": 3260,
  "llama3_1_8b_trivia_qa_test/entity_tokens_true_negative_count": 2606,
  "llama3_1_8b_trivia_qa_test/entity_tokens_pred_positive_count": 1782,
  "llama3_1_8b_trivia_qa_test/entity_tokens_pred_negative_count": 4084,
  "llama3_1_8b_trivia_qa_test/entity_tokens_total_samples": 5866,
  "llama3_1_8b_trivia_qa_test/entity_span_max_accuracy": 0.8570709612481128,
  "llama3_1_8b_trivia_qa_test/entity_span_max_precision": 0.8701570680628272,
  "llama3_1_8b_trivia_qa_test/entity_span_max_recall": 0.8385469223007064,
  "llama3_1_8b_trivia_qa_test/entity_span_max_f1": 0.8540596094552929,
  "llama3_1_8b_trivia_qa_test/entity_span_max_auc": 0.9361077002257263,
  "llama3_1_8b_trivia_qa_test/entity_span_max_optimal_threshold": 0.5555555555555558,
  "llama3_1_8b_trivia_qa_test/entity_span_max_threshold_optimized_accuracy": 0.8580775037745345,
  "llama3_1_8b_trivia_qa_test/entity_span_max_recall_at_0.1_fpr": 0.8173562058526741,
  "llama3_1_8b_trivia_qa_test/entity_span_max_true_positive_count": 991,
  "llama3_1_8b_trivia_qa_test/entity_span_max_true_negative_count": 996,
  "llama3_1_8b_trivia_qa_test/entity_span_max_pred_positive_count": 955,
  "llama3_1_8b_trivia_qa_test/entity_span_max_pred_negative_count": 1032,
  "llama3_1_8b_trivia_qa_test/entity_span_max_total_samples": 1987
}
qwen2_5_7b_lora_lambda_kl_0_05/llama3_1_8b_trivia_qa_test_roc_curves.png
ADDED
Git LFS Details
qwen2_5_7b_lora_lambda_kl_0_05/merges.txt
ADDED
The diff for this file is too large to render. See raw diff.
qwen2_5_7b_lora_lambda_kl_0_05/probe_config.json
ADDED
@@ -0,0 +1,5 @@
{
  "target_layer_name": "Qwen2DecoderLayer",
  "layer_idx": 26,
  "hidden_size": 3584
}
qwen2_5_7b_lora_lambda_kl_0_05/probe_head.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:caea04908e861b2daf2a248a450ac9d44860899b62bd776a17c38f6bbaf5175f
size 8682
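
probe_config.json pins the probe to the output of Qwen2DecoderLayer 26 (hidden size 3584), and probe_head.bin is only ~8.7 kB, consistent with a single linear layer over those hidden states. A sketch of how such a probe could be driven; the hook path, the linear-head shape, and the sigmoid readout are assumptions, not confirmed by this commit:

```python
# Sketch: capture layer-26 hidden states via a forward hook and score
# them with an assumed linear probe head.
import torch

captured = {}

def save_hidden(module, args, output):
    captured["h"] = output[0]  # decoder layers return (hidden_states, ...)

# `model`, `tokenizer`, and `state` come from the first sketch; PEFT nests
# the base transformer under base_model.model.
layer = model.base_model.model.model.layers[26]
handle = layer.register_forward_hook(save_hidden)

inputs = tokenizer("Marie Curie won two Nobel Prizes.", return_tensors="pt")
with torch.no_grad():
    model(**inputs)
handle.remove()

probe = torch.nn.Linear(3584, 1)  # assumed head shape (~7 kB of fp16 weights)
probe.load_state_dict(state)      # only works if the saved keys are "weight"/"bias"
scores = torch.sigmoid(probe(captured["h"].float()))  # per-token probe scores
```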
qwen2_5_7b_lora_lambda_kl_0_05/qwen_2_5_7b_longfact_augmented_test_roc_curves.png
ADDED
Git LFS Details
qwen2_5_7b_lora_lambda_kl_0_05/qwen_2_5_7b_longfact_test_roc_curves.png
ADDED
Git LFS Details
qwen2_5_7b_lora_lambda_kl_0_05/special_tokens_map.json
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
+{
+  "additional_special_tokens": [
+    "<|im_start|>",
+    "<|im_end|>",
+    "<|object_ref_start|>",
+    "<|object_ref_end|>",
+    "<|box_start|>",
+    "<|box_end|>",
+    "<|quad_start|>",
+    "<|quad_end|>",
+    "<|vision_start|>",
+    "<|vision_end|>",
+    "<|vision_pad|>",
+    "<|image_pad|>",
+    "<|video_pad|>"
+  ],
+  "eos_token": {
+    "content": "<|im_end|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
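As declared above, generation should stop at `<|im_end|>` while padding uses `<|endoftext|>`. A quick sanity check when loading the bundled tokenizer (a sketch; the local path assumes this commit's directory layout):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("qwen2_5_7b_lora_lambda_kl_0_05")

assert tok.eos_token == "<|im_end|>"
assert tok.pad_token == "<|endoftext|>"
print(tok.eos_token_id, tok.pad_token_id)  # 151645 and 151643 per tokenizer_config.json
```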
qwen2_5_7b_lora_lambda_kl_0_05/tokenizer.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1e72410ddfe59d4ec8aeea923685710c76492112d74f5d127b86e2d08d65a3a4
+size 11422174
qwen2_5_7b_lora_lambda_kl_0_05/tokenizer_config.json
ADDED
@@ -0,0 +1,208 @@
+{
+  "add_bos_token": false,
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "151643": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151644": {
+      "content": "<|im_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151645": {
+      "content": "<|im_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151646": {
+      "content": "<|object_ref_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151647": {
+      "content": "<|object_ref_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151648": {
+      "content": "<|box_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151649": {
+      "content": "<|box_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151650": {
+      "content": "<|quad_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151651": {
+      "content": "<|quad_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151652": {
+      "content": "<|vision_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151653": {
+      "content": "<|vision_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151654": {
+      "content": "<|vision_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151655": {
+      "content": "<|image_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151656": {
+      "content": "<|video_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151657": {
+      "content": "<tool_call>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151658": {
+      "content": "</tool_call>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151659": {
+      "content": "<|fim_prefix|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151660": {
+      "content": "<|fim_middle|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151661": {
+      "content": "<|fim_suffix|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151662": {
+      "content": "<|fim_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151663": {
+      "content": "<|repo_name|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151664": {
+      "content": "<|file_sep|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    }
+  },
+  "additional_special_tokens": [
+    "<|im_start|>",
+    "<|im_end|>",
+    "<|object_ref_start|>",
+    "<|object_ref_end|>",
+    "<|box_start|>",
+    "<|box_end|>",
+    "<|quad_start|>",
+    "<|quad_end|>",
+    "<|vision_start|>",
+    "<|vision_end|>",
+    "<|vision_pad|>",
+    "<|image_pad|>",
+    "<|video_pad|>"
+  ],
+  "bos_token": null,
+  "chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "<|im_end|>",
+  "errors": "replace",
+  "extra_special_tokens": {},
+  "model_max_length": 131072,
+  "pad_token": "<|endoftext|>",
+  "split_special_tokens": false,
+  "tokenizer_class": "Qwen2Tokenizer",
+  "unk_token": null
+}
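The `chat_template` above is Qwen2.5's ChatML-style template, including the tool-calling branches and a default system prompt. A usage sketch with `transformers` (the message content is illustrative):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("qwen2_5_7b_lora_lambda_kl_0_05")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Who discovered penicillin?"},
]

# Renders the ChatML-style prompt defined by chat_template above; with
# add_generation_prompt=True the string ends in "<|im_start|>assistant\n".
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
```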
qwen2_5_7b_lora_lambda_kl_0_05/training_config.json
ADDED
@@ -0,0 +1,77 @@
+{
+  "wandb_project": "probe-training",
+  "model_name": "Qwen/Qwen2.5-7B-Instruct",
+  "upload_to_hf": true,
+  "hf_repo_id": "andyrdt/hallucination-probes",
+  "save_evaluation_metrics": true,
+  "evaluation_output_dir": null,
+  "probe_id": "qwen2_5_7b_lora_lambda_kl=0.5",
+  "probe_dir": "/root/git/hallucination_detection/value_head_probes/qwen2_5_7b_lora_lambda_kl=0.5",
+  "layer": 26,
+  "lora_r": 16,
+  "lora_alpha": 32,
+  "lora_dropout": 0.05,
+  "lora_layers": [
+    0,
+    1,
+    2,
+    3,
+    4,
+    5,
+    6,
+    7,
+    8,
+    9,
+    10,
+    11,
+    12,
+    13,
+    14,
+    15,
+    16,
+    17,
+    18,
+    19,
+    20,
+    21,
+    22,
+    23,
+    24,
+    25,
+    26
+  ],
+  "probe_threshold": 0.5,
+  "load_from_hf": false,
+  "load_from_disk": false,
+  "train_data_config_path": "experiments/training/qwen-2.5-7b/train_data.yaml",
+  "eval_data_config_path": "experiments/training/qwen-2.5-7b/eval_data.yaml",
+  "cache_data_config_path": null,
+  "high_loss_threshold": null,
+  "lambda_lm": 0.0,
+  "lambda_kl": 0.5,
+  "anneal_max_aggr": true,
+  "anneal_warmup": 1.0,
+  "learning_rate": 5e-05,
+  "probe_head_lr": 0.0005,
+  "lora_lr": 5e-05,
+  "sparsity_penalty_weight": null,
+  "use_focal_loss": false,
+  "focal_gamma": 2.0,
+  "evaluate_only": false,
+  "max_steps": -1,
+  "num_train_epochs": 1,
+  "per_device_train_batch_size": 4,
+  "per_device_eval_batch_size": 4,
+  "logging_steps": 10,
+  "eval_steps": null,
+  "cache_steps": null,
+  "evaluation_strategy": "no",
+  "seed": 42,
+  "save_roc_curves": true,
+  "dump_raw_eval_results": false,
+  "load_in_8bit": false,
+  "load_in_4bit": false,
+  "enable_gradient_checkpointing": true,
+  "gradient_accumulation_steps": 2,
+  "max_grad_norm": 1.0
+}
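The LoRA fields above (`lora_r`, `lora_alpha`, `lora_dropout`, `lora_layers`) map naturally onto a `peft` `LoraConfig`. A sketch of the equivalent configuration, assuming the adapters target the usual Qwen2 attention and MLP projections; the authoritative `target_modules` list lives in this commit's `adapter_config.json` and is not reproduced here, so the names below are assumptions:

```python
from peft import LoraConfig

# Layers 0-26, matching the "lora_layers" list above.
lora_layers = list(range(27))

config = LoraConfig(
    r=16,                 # "lora_r"
    lora_alpha=32,        # "lora_alpha"
    lora_dropout=0.05,    # "lora_dropout"
    layers_to_transform=lora_layers,
    # Assumed projection names; check adapter_config.json for the real list.
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
                    "gate_proj", "up_proj", "down_proj"],
    task_type="CAUSAL_LM",
)
```

Restricting `layers_to_transform` to layers 0-26 matches the probe reading hidden states at layer 26: adapting layers above the probe's readout point would not affect the features it sees.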
qwen2_5_7b_lora_lambda_kl_0_05/vocab.json
ADDED
The diff for this file is too large to render. See raw diff