Shiyunee committed
Commit 1b38283 · verified · 1 Parent(s): dfc30ac

Batch upload 1/2

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .DS_Store +0 -0
  2. .mdl +0 -0
  3. .msc +0 -0
  4. .mv +1 -0
  5. README.md +128 -4
  6. lora/.DS_Store +0 -0
  7. lora/greedy_answer_conf/long_qa/batchsize4_accumulation32_epochs10_weightdecay0.1_r8_alpha16_loradrpout0.0/best-checkpoint/lora_epoch_best/README.md +206 -0
  8. lora/greedy_answer_conf/long_qa/batchsize4_accumulation32_epochs10_weightdecay0.1_r8_alpha16_loradrpout0.0/best-checkpoint/lora_epoch_best/adapter_config.json +45 -0
  9. lora/greedy_answer_conf/long_qa/batchsize4_accumulation32_epochs10_weightdecay0.1_r8_alpha16_loradrpout0.0/best-checkpoint/lora_epoch_best/adapter_model.safetensors +3 -0
  10. lora/greedy_answer_conf/long_qa/batchsize4_accumulation32_epochs10_weightdecay0.1_r8_alpha16_loradrpout0.0/best-checkpoint/vector_head_epoch_best.pt +3 -0
  11. lora/greedy_answer_conf/long_qa/batchsize4_accumulation32_epochs10_weightdecay0.1_r8_alpha16_loradrpout0.0/test_losses.jsonl +6 -0
  12. lora/hybrid_answer_conf/.DS_Store +0 -0
  13. lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs10_weightdecay0.1_r8_alpha16_loradrpout0.0/test_losses.jsonl +6 -0
  14. lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs15_weightdecay0.1_r8_alpha16_loradrpout0.0_200k_training_samples/test_losses.jsonl +6 -0
  15. lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs15_weightdecay0.1_r8_alpha16_loradrpout0.0_20k_training_samples/test_losses.jsonl +6 -0
  16. lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs15_weightdecay0.1_r8_alpha16_loradrpout0.0_30k_training_samples/test_losses.jsonl +6 -0
  17. lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs15_weightdecay0.1_r8_alpha16_loradrpout0.0_50k_training_samples/test_losses.jsonl +6 -0
  18. lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs15_weightdecay0.1_r8_alpha16_loradrpout0.0_80k_training_samples/best-checkpoint/lora_epoch_best/README.md +206 -0
  19. lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs15_weightdecay0.1_r8_alpha16_loradrpout0.0_80k_training_samples/best-checkpoint/lora_epoch_best/adapter_model.safetensors +3 -0
  20. lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs15_weightdecay0.1_r8_alpha16_loradrpout0.0_80k_training_samples/best-checkpoint/vector_head_epoch_best.pt +3 -0
  21. lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs15_weightdecay0.1_r8_alpha16_loradrpout0.0_80k_training_samples/test_losses.jsonl +5 -0
  22. lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs50_weightdecay0.1_r8_alpha16_loradrpout0.0_10k_training_samples/best-checkpoint/lora_epoch_best/README.md +206 -0
  23. lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs50_weightdecay0.1_r8_alpha16_loradrpout0.0_10k_training_samples/best-checkpoint/lora_epoch_best/adapter_config.json +45 -0
  24. lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs50_weightdecay0.1_r8_alpha16_loradrpout0.0_10k_training_samples/best-checkpoint/lora_epoch_best/adapter_model.safetensors +3 -0
  25. lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs50_weightdecay0.1_r8_alpha16_loradrpout0.0_10k_training_samples/best-checkpoint/vector_head_epoch_best.pt +3 -0
  26. lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs50_weightdecay0.1_r8_alpha16_loradrpout0.0_10k_training_samples/test_losses.jsonl +8 -0
  27. lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs50_weightdecay0.1_r8_alpha16_loradrpout0.0_1k_training_samples/best-checkpoint/lora_epoch_best/README.md +206 -0
  28. lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs50_weightdecay0.1_r8_alpha16_loradrpout0.0_1k_training_samples/best-checkpoint/lora_epoch_best/adapter_config.json +45 -0
  29. lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs50_weightdecay0.1_r8_alpha16_loradrpout0.0_1k_training_samples/best-checkpoint/lora_epoch_best/adapter_model.safetensors +3 -0
  30. lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs50_weightdecay0.1_r8_alpha16_loradrpout0.0_1k_training_samples/best-checkpoint/vector_head_epoch_best.pt +3 -0
  31. lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs50_weightdecay0.1_r8_alpha16_loradrpout0.0_1k_training_samples/test_losses.jsonl +11 -0
  32. lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs50_weightdecay0.1_r8_alpha16_loradrpout0.0_2k_training_samples/test_losses.jsonl +8 -0
  33. lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs50_weightdecay0.1_r8_alpha16_loradrpout0.0_4k_training_samples/best-checkpoint/lora_epoch_best/README.md +206 -0
  34. lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs50_weightdecay0.1_r8_alpha16_loradrpout0.0_4k_training_samples/best-checkpoint/lora_epoch_best/adapter_config.json +45 -0
  35. lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs50_weightdecay0.1_r8_alpha16_loradrpout0.0_4k_training_samples/best-checkpoint/lora_epoch_best/adapter_model.safetensors +3 -0
  36. lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs50_weightdecay0.1_r8_alpha16_loradrpout0.0_4k_training_samples/best-checkpoint/vector_head_epoch_best.pt +3 -0
  37. lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs50_weightdecay0.1_r8_alpha16_loradrpout0.0_4k_training_samples/test_losses.jsonl +7 -0
  38. lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs50_weightdecay0.1_r8_alpha16_loradrpout0.0_6k_training_samples/test_losses.jsonl +7 -0
  39. lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs50_weightdecay0.1_r8_alpha16_loradrpout0.0_8k_training_samples/test_losses.jsonl +7 -0
  40. lora/right_answer_conf/.DS_Store +0 -0
  41. lora/right_answer_conf/long_qa/batchsize4_accumulation32_epochs10_weightdecay0.1_r8_alpha16_loradrpout0.0/best-checkpoint/lora_epoch_best/README.md +206 -0
  42. lora/right_answer_conf/long_qa/batchsize4_accumulation32_epochs10_weightdecay0.1_r8_alpha16_loradrpout0.0/best-checkpoint/lora_epoch_best/adapter_config.json +45 -0
  43. lora/right_answer_conf/long_qa/batchsize4_accumulation32_epochs10_weightdecay0.1_r8_alpha16_loradrpout0.0/best-checkpoint/lora_epoch_best/adapter_model.safetensors +3 -0
  44. lora/right_answer_conf/long_qa/batchsize4_accumulation32_epochs10_weightdecay0.1_r8_alpha16_loradrpout0.0/best-checkpoint/vector_head_epoch_best.pt +3 -0
  45. lora/right_answer_conf/long_qa/batchsize4_accumulation32_epochs10_weightdecay0.1_r8_alpha16_loradrpout0.0/test_losses.jsonl +6 -0
  46. lora/right_answer_conf/long_qa/batchsize4_accumulation32_epochs15_weightdecay0.1_r8_alpha16_loradrpout0.0_200k_training_samples/best-checkpoint/lora_epoch_best/README.md +206 -0
  47. lora/right_answer_conf/long_qa/batchsize4_accumulation32_epochs15_weightdecay0.1_r8_alpha16_loradrpout0.0_200k_training_samples/best-checkpoint/lora_epoch_best/adapter_config.json +45 -0
  48. lora/right_answer_conf/long_qa/batchsize4_accumulation32_epochs15_weightdecay0.1_r8_alpha16_loradrpout0.0_200k_training_samples/best-checkpoint/lora_epoch_best/adapter_model.safetensors +3 -0
  49. lora/right_answer_conf/long_qa/batchsize4_accumulation32_epochs15_weightdecay0.1_r8_alpha16_loradrpout0.0_200k_training_samples/best-checkpoint/vector_head_epoch_best.pt +3 -0
  50. lora/right_answer_conf/long_qa/batchsize4_accumulation32_epochs15_weightdecay0.1_r8_alpha16_loradrpout0.0_200k_training_samples/test_losses.jsonl +6 -0
.DS_Store ADDED
Binary file (6.15 kB).
 
.mdl ADDED
Binary file (61 Bytes).
 
.msc ADDED
Binary file (30.4 kB).
 
.mv ADDED
@@ -0,0 +1 @@
+ Revision:master,CreatedAt:1761029419
README.md CHANGED
@@ -1,4 +1,128 @@
- ---
- license: apache-2.0
- ---
- The data and models are already prepared, but due to connectivity issues with Hugging Face, we have not been able to upload them yet. We are actively working to resolve this. If you would like to reproduce the results from the paper, please refer to our GitHub repository. https://github.com/Trustworthy-Information-Access/Annotation-Efficient-Universal-Honesty-Alignment
+ # Introduction
+
+ This is the official repo of the paper [Annotation-Efficient Universal Honesty Alignment](https://arxiv.org/abs/2510.17509).
+
+ This repository provides modules that extend **Qwen2.5-14B-Instruct** with the ability to generate accurate confidence scores *before* response generation, indicating how likely the model is to answer a given question correctly across tasks. We offer two types of modules, **LoRA + Linear Head** and **Linear Head**, along with model parameters under three training settings:
+
+ 1. **Elicitation (greedy):** Trained on all questions (over 560k) using self-consistency-based confidence annotations.
+ 2. **Calibration-Only (right):** Trained on questions with explicit correctness annotations.
+ 3. **EliCal (hybrid):** Initialized from the Elicitation model and further trained on correctness-labeled data.
+
+ For both the **Calibration-Only** and **EliCal** settings, we provide models trained with different amounts of annotated data (1k, 2k, 3k, 5k, 8k, 10k, 20k, 30k, 50k, 80k, 200k, 560k+). Since **LoRA + Linear Head** is the main configuration used in our paper, the description below is based on this setup.
+
+ In our model, **LoRA is applied to all linear layers** with **r = 8** and **α = 16**. The **Linear Head** takes the final-layer hidden state of the **last token** as input and predicts a **confidence score between 0 and 1**, representing the model's **estimated probability of answering the question correctly**.
+
+ # Model Architecture
+
+ ```python
+ import torch
+ import torch.nn as nn
+ from peft import get_peft_model
+ from transformers import AutoModel
+ from transformers.modeling_outputs import CausalLMOutput
+
+ class LMWithVectorHead(nn.Module):
+     def __init__(self, model_name, lora_config, output_dim=1):
+         super().__init__()
+         backbone = AutoModel.from_pretrained(model_name, device_map='cpu')
+         self.peft_model = get_peft_model(backbone, lora_config)
+         self.config = backbone.config
+         hidden_size = backbone.config.hidden_size
+         self.vector_head = nn.Linear(hidden_size, output_dim)  # output dimension is 1
+
+     def gradient_checkpointing_enable(self, gradient_checkpointing_kwargs=None):
+         """Enable gradient checkpointing, forwarding any extra kwargs."""
+         self.peft_model.enable_input_require_grads()
+         if gradient_checkpointing_kwargs is not None:
+             self.peft_model.gradient_checkpointing_enable(**gradient_checkpointing_kwargs)
+         else:
+             self.peft_model.gradient_checkpointing_enable()
+
+     def forward(self, input_ids, attention_mask=None, labels=None):
+         outputs = self.peft_model(
+             input_ids=input_ids,
+             attention_mask=attention_mask,
+             return_dict=True
+         )
+         # Hidden state of the last token from the final layer
+         last_hidden = outputs.last_hidden_state     # [B, T, H]
+         cls_hidden = last_hidden[:, -1, :]          # [B, H]
+         logits = self.vector_head(cls_hidden)       # [B, 1]
+         logits = torch.sigmoid(logits).squeeze(-1)  # apply sigmoid and squeeze to [B]
+
+         loss = None
+         if labels is not None:
+             loss_fct = nn.MSELoss()                 # MSE loss
+             loss = loss_fct(logits, labels)         # MSE between predicted confidences and labels
+
+         return CausalLMOutput(
+             loss=loss,
+             logits=logits
+         )
+ ```
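
For orientation, a minimal training-step sketch against this class. The Hub model id, the dummy token batch, and the confidence targets below are illustrative assumptions; the paper's actual training loop lives in the GitHub repo.

```python
import torch
from peft import LoraConfig

# Hyperparameters mirror the README: r=8, alpha=16, LoRA on all linear layers.
lora_cfg = LoraConfig(
    r=8, lora_alpha=16, lora_dropout=0.0, bias="none",
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
                    "gate_proj", "up_proj", "down_proj"],
)
model = LMWithVectorHead("Qwen/Qwen2.5-14B-Instruct", lora_cfg)

# Dummy batch for illustration only.
input_ids = torch.randint(0, model.config.vocab_size, (2, 16))
labels = torch.tensor([0.8, 0.1])  # confidence targets in [0, 1]

out = model(input_ids=input_ids, labels=labels)
out.loss.backward()  # MSE between predicted confidence and targets
```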
+
+ # Inference
+
+ The following shows how to load the model. For more details, please refer to the [GitHub repo](https://github.com/Trustworthy-Information-Access/Annotation-Efficient-Universal-Honesty-Alignment/blob/master/honesty_alignment/eval_one_conf.py).
+
+ ```python
+ import torch
+ from peft import LoraConfig, PeftModel
+ from transformers import AutoModel
+
+ # 1. Load the base model
+ base_model = AutoModel.from_pretrained(args.model_path)
+
+ # 2. Load the trained LoRA adapter onto the base model
+ peft_model = PeftModel.from_pretrained(
+     base_model,            # use the base model, not model.peft_model
+     args.lora_path,
+     adapter_name="default"
+ )
+
+ # 3. Build the full model structure
+ lora_config = LoraConfig(
+     r=args.r,
+     lora_alpha=args.alpha,
+     target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
+                     "gate_proj", "up_proj", "down_proj"],
+     lora_dropout=args.lora_dropout,
+     bias="none",
+ )
+ model = LMWithVectorHead(args.model_path, lora_config)
+
+ # 4. Swap in the model that already has the LoRA weights loaded
+ model.peft_model = peft_model
+
+ # 5. Load the linear head weights
+ state_dict = torch.load(args.vector_head_path, map_location=device)
+ model.vector_head.load_state_dict(state_dict)
+
+ # 6. Activate the adapter and move the model to the device
+ model.peft_model.set_adapter("default")
+ model = model.to(device)
+
+ # Evaluation mode
+ model.eval()
+ ```
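
Once loaded, scoring a question is a single forward pass. A minimal sketch, assuming a tokenizer loaded from the same `args.model_path` and a plain question string; the exact prompt template used in the paper is defined in the repo's eval script.

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(args.model_path)

# Hypothetical prompt; the repo's eval script defines the real template.
question = "What is the capital of France?"
inputs = tokenizer(question, return_tensors="pt").to(device)

with torch.no_grad():
    out = model(input_ids=inputs["input_ids"],
                attention_mask=inputs["attention_mask"])

# forward() applies a sigmoid, so logits is already a confidence in [0, 1]
confidence = out.logits.item()
print(f"Estimated probability of answering correctly: {confidence:.3f}")
```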
+
+ # Files
+
+ ```sh
+ /lora
+ ├── greedy_answer_conf
+ │   └── long_qa
+ │       └── batchsize4_accumulation32_epochs10_weightdecay0.1_r8_alpha16_loradrpout0.0   (training configuration)
+ │           ├── best-checkpoint
+ │           │   ├── lora_epoch_best/             # LoRA module
+ │           │   └── vector_head_epoch_best.pt    # Linear Head weights
+ │           └── test_losses.jsonl                # Test loss for each epoch
+ ├── hybrid_answer_conf
+ │   └── long_qa
+ │       ├── batchsize4_accumulation32_epochs10_weightdecay0.1_r8_alpha16_loradrpout0.0                      (560k samples)
+ │       ├── batchsize4_accumulation32_epochs50_weightdecay0.1_r8_alpha16_loradrpout0.0_1k_training_samples  (1k samples)
+ │       └── batchsize4_accumulation32_epochs50_weightdecay0.1_r8_alpha16_loradrpout0.0_2k_training_samples  (2k samples)
+ └── right_answer_conf
+     └── long_qa
+         └── ...                                  # Same format as above
+
+ /mlp
+ ...
+ ```
+
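
To fetch a single configuration rather than cloning the whole repository, `huggingface_hub` path filtering works. A minimal sketch, where `repo_id` is a placeholder for this repository's actual Hub id and the subfolder matches the greedy configuration listed above:

```python
from huggingface_hub import snapshot_download

repo_id = "<user>/<this-repo>"  # placeholder: substitute the actual repo id
subdir = ("lora/greedy_answer_conf/long_qa/"
          "batchsize4_accumulation32_epochs10_weightdecay0.1_r8_alpha16_loradrpout0.0/"
          "best-checkpoint")

# Download only the LoRA adapter and linear head of one configuration.
local_dir = snapshot_download(repo_id, allow_patterns=[f"{subdir}/*"])
print(f"Checkpoint downloaded under: {local_dir}/{subdir}")
```

The downloaded `lora_epoch_best` subfolder and `vector_head_epoch_best.pt` file can then be passed as `args.lora_path` and `args.vector_head_path` in the loading snippet above.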
lora/.DS_Store ADDED
Binary file (6.15 kB).
 
lora/greedy_answer_conf/long_qa/batchsize4_accumulation32_epochs10_weightdecay0.1_r8_alpha16_loradrpout0.0/best-checkpoint/lora_epoch_best/README.md ADDED
@@ -0,0 +1,206 @@
+ ---
+ base_model: /mnt/bn/motor-nlp-team/models/LLM/base_models/Qwen2.5-14B-Instruct
+ library_name: peft
+ tags:
+ - base_model:adapter:/mnt/bn/motor-nlp-team/models/LLM/base_models/Qwen2.5-14B-Instruct
+ - lora
+ - transformers
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.17.0
lora/greedy_answer_conf/long_qa/batchsize4_accumulation32_epochs10_weightdecay0.1_r8_alpha16_loradrpout0.0/best-checkpoint/lora_epoch_best/adapter_config.json ADDED
@@ -0,0 +1,45 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": {
+     "base_model_class": "Qwen2Model",
+     "parent_library": "transformers.models.qwen2.modeling_qwen2"
+   },
+   "base_model_name_or_path": "/mnt/bn/motor-nlp-team/models/LLM/base_models/Qwen2.5-14B-Instruct",
+   "bias": "none",
+   "corda_config": null,
+   "eva_config": null,
+   "exclude_modules": null,
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_bias": false,
+   "lora_dropout": 0.0,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "qalora_group_size": 16,
+   "r": 8,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "k_proj",
+     "gate_proj",
+     "down_proj",
+     "up_proj",
+     "o_proj",
+     "q_proj",
+     "v_proj"
+   ],
+   "target_parameters": null,
+   "task_type": null,
+   "trainable_token_indices": null,
+   "use_dora": false,
+   "use_qalora": false,
+   "use_rslora": false
+ }
lora/greedy_answer_conf/long_qa/batchsize4_accumulation32_epochs10_weightdecay0.1_r8_alpha16_loradrpout0.0/best-checkpoint/lora_epoch_best/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:04625f6076ecad0f301ce16ba6240e8c5e78d96e4bf165b5f1a89ede985484a5
+ size 137710872
lora/greedy_answer_conf/long_qa/batchsize4_accumulation32_epochs10_weightdecay0.1_r8_alpha16_loradrpout0.0/best-checkpoint/vector_head_epoch_best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1644a7125c6a4a9593396a2a6868674a717e3e5599b5a386bcf80b0006d2b439
+ size 22194
lora/greedy_answer_conf/long_qa/batchsize4_accumulation32_epochs10_weightdecay0.1_r8_alpha16_loradrpout0.0/test_losses.jsonl ADDED
@@ -0,0 +1,6 @@
+ {"epoch": 1, "test_loss": 0.057875748723745346}
+ {"epoch": 2, "test_loss": 0.05522924289107323}
+ {"epoch": 3, "test_loss": 0.05539644509553909}
+ {"epoch": 4, "test_loss": 0.05730148032307625}
+ {"epoch": 5, "test_loss": 0.060665030032396317}
+ {"epoch": 6, "test_loss": 0.06308399140834808}
lora/hybrid_answer_conf/.DS_Store ADDED
Binary file (6.15 kB).
 
lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs10_weightdecay0.1_r8_alpha16_loradrpout0.0/test_losses.jsonl ADDED
@@ -0,0 +1,6 @@
+ {"epoch": 1, "test_loss": 0.07268614321947098}
+ {"epoch": 2, "test_loss": 0.07143770903348923}
+ {"epoch": 3, "test_loss": 0.07469863444566727}
+ {"epoch": 4, "test_loss": 0.07809536904096603}
+ {"epoch": 5, "test_loss": 0.08239591866731644}
+ {"epoch": 6, "test_loss": 0.08167802542448044}
lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs15_weightdecay0.1_r8_alpha16_loradrpout0.0_200k_training_samples/test_losses.jsonl ADDED
@@ -0,0 +1,6 @@
+ {"epoch": 1, "test_loss": 0.07615858316421509}
+ {"epoch": 2, "test_loss": 0.07375781238079071}
+ {"epoch": 3, "test_loss": 0.07631206512451172}
+ {"epoch": 4, "test_loss": 0.08173667639493942}
+ {"epoch": 5, "test_loss": 0.08498172461986542}
+ {"epoch": 6, "test_loss": 0.08806338906288147}
lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs15_weightdecay0.1_r8_alpha16_loradrpout0.0_20k_training_samples/test_losses.jsonl ADDED
@@ -0,0 +1,6 @@
+ {"epoch": 1, "test_loss": 0.08061110973358154}
+ {"epoch": 2, "test_loss": 0.07798957824707031}
+ {"epoch": 3, "test_loss": 0.08201885968446732}
+ {"epoch": 4, "test_loss": 0.08592423796653748}
+ {"epoch": 5, "test_loss": 0.08917036652565002}
+ {"epoch": 6, "test_loss": 0.08977463096380234}
lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs15_weightdecay0.1_r8_alpha16_loradrpout0.0_30k_training_samples/test_losses.jsonl ADDED
@@ -0,0 +1,6 @@
+ {"epoch": 1, "test_loss": 0.07716857641935349}
+ {"epoch": 2, "test_loss": 0.07639868557453156}
+ {"epoch": 3, "test_loss": 0.08216629922389984}
+ {"epoch": 4, "test_loss": 0.08713571727275848}
+ {"epoch": 5, "test_loss": 0.09076935797929764}
+ {"epoch": 6, "test_loss": 0.09128855913877487}
lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs15_weightdecay0.1_r8_alpha16_loradrpout0.0_50k_training_samples/test_losses.jsonl ADDED
@@ -0,0 +1,6 @@
+ {"epoch": 1, "test_loss": 0.07692763209342957}
+ {"epoch": 2, "test_loss": 0.0757795125246048}
+ {"epoch": 3, "test_loss": 0.08131248503923416}
+ {"epoch": 4, "test_loss": 0.0841091200709343}
+ {"epoch": 5, "test_loss": 0.08935707807540894}
+ {"epoch": 6, "test_loss": 0.09080477058887482}
lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs15_weightdecay0.1_r8_alpha16_loradrpout0.0_80k_training_samples/best-checkpoint/lora_epoch_best/README.md ADDED
@@ -0,0 +1,206 @@
+ ---
+ base_model: /data/models/Qwen2.5-14B-Instruct
+ library_name: peft
+ tags:
+ - base_model:adapter:/data/models/Qwen2.5-14B-Instruct
+ - lora
+ - transformers
+ ---
lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs15_weightdecay0.1_r8_alpha16_loradrpout0.0_80k_training_samples/best-checkpoint/lora_epoch_best/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4e07c5f9ff322c0889eb547f6523510db758cde1806203dd904e9dc0d3a187a4
+ size 137710872
lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs15_weightdecay0.1_r8_alpha16_loradrpout0.0_80k_training_samples/best-checkpoint/vector_head_epoch_best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0251e64437d25d1fbbc1c1d98a321f979f284e53108e33b222cf856e95191f5e
+ size 22621
lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs15_weightdecay0.1_r8_alpha16_loradrpout0.0_80k_training_samples/test_losses.jsonl ADDED
@@ -0,0 +1,5 @@
+ {"epoch": 1, "test_loss": 0.075857013463974}
+ {"epoch": 2, "test_loss": 0.07675725966691971}
+ {"epoch": 3, "test_loss": 0.080119788646698}
+ {"epoch": 4, "test_loss": 0.08713503926992416}
+ {"epoch": 5, "test_loss": 0.08685452491044998}
lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs50_weightdecay0.1_r8_alpha16_loradrpout0.0_10k_training_samples/best-checkpoint/lora_epoch_best/README.md ADDED
@@ -0,0 +1,206 @@
+ ---
+ base_model: /data/models/Qwen2.5-14B-Instruct
+ library_name: peft
+ tags:
+ - base_model:adapter:/data/models/Qwen2.5-14B-Instruct
+ - lora
+ - transformers
+ ---
lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs50_weightdecay0.1_r8_alpha16_loradrpout0.0_10k_training_samples/best-checkpoint/lora_epoch_best/adapter_config.json ADDED
@@ -0,0 +1,45 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": {
+     "base_model_class": "Qwen2Model",
+     "parent_library": "transformers.models.qwen2.modeling_qwen2"
+   },
+   "base_model_name_or_path": "/mnt/bn/motor-nlp-team/models/LLM/base_models/Qwen2.5-14B-Instruct",
+   "bias": "none",
+   "corda_config": null,
+   "eva_config": null,
+   "exclude_modules": null,
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_bias": false,
+   "lora_dropout": 0.0,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "qalora_group_size": 16,
+   "r": 8,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "o_proj",
+     "down_proj",
+     "up_proj",
+     "k_proj",
+     "q_proj",
+     "v_proj",
+     "gate_proj"
+   ],
+   "target_parameters": null,
+   "task_type": null,
+   "trainable_token_indices": null,
+   "use_dora": false,
+   "use_qalora": false,
+   "use_rslora": false
+ }
lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs50_weightdecay0.1_r8_alpha16_loradrpout0.0_10k_training_samples/best-checkpoint/lora_epoch_best/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c912ef6565b77ee25ba668c8734e8d9112e65e0679cd0152ee7e6b56c11e131f
+ size 137710872
lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs50_weightdecay0.1_r8_alpha16_loradrpout0.0_10k_training_samples/best-checkpoint/vector_head_epoch_best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:baf9cfebe9952ffb7f1a32f622eaebe93e13295eb9e06d982f507fb439da5c44
+ size 22621
lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs50_weightdecay0.1_r8_alpha16_loradrpout0.0_10k_training_samples/test_losses.jsonl ADDED
@@ -0,0 +1,8 @@
+ {"epoch": 1, "test_loss": 0.0864986702799797}
+ {"epoch": 2, "test_loss": 0.08113675564527512}
+ {"epoch": 3, "test_loss": 0.08129317313432693}
+ {"epoch": 4, "test_loss": 0.08070410788059235}
+ {"epoch": 5, "test_loss": 0.09045013040304184}
+ {"epoch": 6, "test_loss": 0.09435231983661652}
+ {"epoch": 7, "test_loss": 0.09189791232347488}
+ {"epoch": 8, "test_loss": 0.09174849092960358}
lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs50_weightdecay0.1_r8_alpha16_loradrpout0.0_1k_training_samples/best-checkpoint/lora_epoch_best/README.md ADDED
@@ -0,0 +1,206 @@
+ ---
+ base_model: /data/models/Qwen2.5-14B-Instruct
+ library_name: peft
+ tags:
+ - base_model:adapter:/data/models/Qwen2.5-14B-Instruct
+ - lora
+ - transformers
+ ---
lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs50_weightdecay0.1_r8_alpha16_loradrpout0.0_1k_training_samples/best-checkpoint/lora_epoch_best/adapter_config.json ADDED
@@ -0,0 +1,45 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": {
+     "base_model_class": "Qwen2Model",
+     "parent_library": "transformers.models.qwen2.modeling_qwen2"
+   },
+   "base_model_name_or_path": "/mnt/bn/motor-nlp-team/models/LLM/base_models/Qwen2.5-14B-Instruct",
+   "bias": "none",
+   "corda_config": null,
+   "eva_config": null,
+   "exclude_modules": null,
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_bias": false,
+   "lora_dropout": 0.0,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "qalora_group_size": 16,
+   "r": 8,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "o_proj",
+     "q_proj",
+     "gate_proj",
+     "k_proj",
+     "v_proj",
+     "up_proj",
+     "down_proj"
+   ],
+   "target_parameters": null,
+   "task_type": null,
+   "trainable_token_indices": null,
+   "use_dora": false,
+   "use_qalora": false,
+   "use_rslora": false
+ }
lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs50_weightdecay0.1_r8_alpha16_loradrpout0.0_1k_training_samples/best-checkpoint/lora_epoch_best/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0beaa4824cbb4cfe1cb94e4063aabbb745a677b608ec2467a5d578dff719c663
+ size 137710872
lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs50_weightdecay0.1_r8_alpha16_loradrpout0.0_1k_training_samples/best-checkpoint/vector_head_epoch_best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a7dcbbc0f89eadb56f88e7c7b85d8eb8af53a98fcbe27d377b2af7f78e8af991
+ size 22621
lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs50_weightdecay0.1_r8_alpha16_loradrpout0.0_1k_training_samples/test_losses.jsonl ADDED
@@ -0,0 +1,11 @@
+ {"epoch": 1, "test_loss": 0.13135190308094025}
+ {"epoch": 2, "test_loss": 0.10395488142967224}
+ {"epoch": 3, "test_loss": 0.0902276337146759}
+ {"epoch": 4, "test_loss": 0.08571487665176392}
+ {"epoch": 5, "test_loss": 0.08969125896692276}
+ {"epoch": 6, "test_loss": 0.08591265976428986}
+ {"epoch": 7, "test_loss": 0.08540669828653336}
+ {"epoch": 8, "test_loss": 0.09392889589071274}
+ {"epoch": 9, "test_loss": 0.09582725167274475}
+ {"epoch": 10, "test_loss": 0.09626656770706177}
+ {"epoch": 11, "test_loss": 0.09397334605455399}
lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs50_weightdecay0.1_r8_alpha16_loradrpout0.0_2k_training_samples/test_losses.jsonl ADDED
@@ -0,0 +1,8 @@
+ {"epoch": 1, "test_loss": 0.11774057894945145}
+ {"epoch": 2, "test_loss": 0.08969399333000183}
+ {"epoch": 3, "test_loss": 0.08526769280433655}
+ {"epoch": 4, "test_loss": 0.08313318341970444}
+ {"epoch": 5, "test_loss": 0.08490263670682907}
+ {"epoch": 6, "test_loss": 0.09214569628238678}
+ {"epoch": 7, "test_loss": 0.09370999783277512}
+ {"epoch": 8, "test_loss": 0.09420424699783325}
lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs50_weightdecay0.1_r8_alpha16_loradrpout0.0_4k_training_samples/best-checkpoint/lora_epoch_best/README.md ADDED
@@ -0,0 +1,206 @@
+ ---
+ base_model: /data/models/Qwen2.5-14B-Instruct
+ library_name: peft
+ tags:
+ - base_model:adapter:/data/models/Qwen2.5-14B-Instruct
+ - lora
+ - transformers
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.17.0
lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs50_weightdecay0.1_r8_alpha16_loradrpout0.0_4k_training_samples/best-checkpoint/lora_epoch_best/adapter_config.json ADDED
@@ -0,0 +1,45 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": {
+ "base_model_class": "Qwen2Model",
+ "parent_library": "transformers.models.qwen2.modeling_qwen2"
+ },
+ "base_model_name_or_path": "/mnt/bn/motor-nlp-team/models/LLM/base_models/Qwen2.5-14B-Instruct",
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "qalora_group_size": 16,
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "o_proj",
+ "q_proj",
+ "up_proj",
+ "gate_proj",
+ "down_proj",
+ "v_proj",
+ "k_proj"
+ ],
+ "target_parameters": null,
+ "task_type": null,
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_qalora": false,
+ "use_rslora": false
+ }
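Since `task_type` is null and `auto_mapping` points at `Qwen2Model`, the adapter evidently wraps the bare transformer body rather than a `*ForCausalLM` head, with the confidence predictor stored separately as `vector_head_epoch_best.pt`. A hedged loading sketch: the Hub id `Qwen/Qwen2.5-14B-Instruct` stands in for the local `base_model_name_or_path`, and the head's module definition is not part of this upload, so only a raw state-dict load is shown:

```python
import torch
from peft import PeftModel
from transformers import AutoModel

ckpt = ("lora/hybrid_answer_conf/long_qa/"
        "batchsize4_accumulation32_epochs50_weightdecay0.1_"
        "r8_alpha16_loradrpout0.0_4k_training_samples/best-checkpoint")

# task_type is null and auto_mapping names Qwen2Model, so attach the
# adapter to the bare transformer body, not a causal-LM wrapper.
base = AutoModel.from_pretrained(
    "Qwen/Qwen2.5-14B-Instruct",  # assumed Hub mirror of the local path
    torch_dtype=torch.bfloat16,
)
model = PeftModel.from_pretrained(base, f"{ckpt}/lora_epoch_best")

# The ~22 kB confidence head ships as a raw state dict; its architecture
# lives in the training code, which is not included in this upload.
head_state = torch.load(f"{ckpt}/vector_head_epoch_best.pt",
                        map_location="cpu")
```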
lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs50_weightdecay0.1_r8_alpha16_loradrpout0.0_4k_training_samples/best-checkpoint/lora_epoch_best/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8f7766c1939f1163c55c389e1dba723ab7538438d42e5c73836edd604ca77188
+ size 137710872
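The `.safetensors` and `.pt` entries in this diff are Git LFS pointer files: three lines giving the spec version, a `sha256` object id, and the blob size in bytes. After pulling the real blobs, those two fields make an integrity check straightforward; a sketch using the oid and size from the pointer above (the local filename is an assumption):

```python
import hashlib
import os

def verify_lfs_blob(blob_path, expected_oid, expected_size):
    """Check a downloaded file against its Git LFS pointer (sha256 oid + byte size)."""
    if os.path.getsize(blob_path) != expected_size:
        return False
    h = hashlib.sha256()
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest() == expected_oid

# Values taken from the pointer file above.
ok = verify_lfs_blob(
    "adapter_model.safetensors",
    "8f7766c1939f1163c55c389e1dba723ab7538438d42e5c73836edd604ca77188",
    137710872,
)
```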
lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs50_weightdecay0.1_r8_alpha16_loradrpout0.0_4k_training_samples/best-checkpoint/vector_head_epoch_best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0781a50c7075890fedaa2f0245f36ac13067dbf75ae3892c2749e5129ebc2699
+ size 22621
lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs50_weightdecay0.1_r8_alpha16_loradrpout0.0_4k_training_samples/test_losses.jsonl ADDED
@@ -0,0 +1,7 @@
+ {"epoch": 1, "test_loss": 0.10254354774951935}
+ {"epoch": 2, "test_loss": 0.08436164259910583}
+ {"epoch": 3, "test_loss": 0.08095930516719818}
+ {"epoch": 4, "test_loss": 0.08631518483161926}
+ {"epoch": 5, "test_loss": 0.09050067514181137}
+ {"epoch": 6, "test_loss": 0.09687988460063934}
+ {"epoch": 7, "test_loss": 0.09616458415985107}
lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs50_weightdecay0.1_r8_alpha16_loradrpout0.0_6k_training_samples/test_losses.jsonl ADDED
@@ -0,0 +1,7 @@
+ {"epoch": 1, "test_loss": 0.09429670870304108}
+ {"epoch": 2, "test_loss": 0.08193066716194153}
+ {"epoch": 3, "test_loss": 0.08018165081739426}
+ {"epoch": 4, "test_loss": 0.08217190951108932}
+ {"epoch": 5, "test_loss": 0.09341240674257278}
+ {"epoch": 6, "test_loss": 0.09271496534347534}
+ {"epoch": 7, "test_loss": 0.09260798990726471}
lora/hybrid_answer_conf/long_qa/batchsize4_accumulation32_epochs50_weightdecay0.1_r8_alpha16_loradrpout0.0_8k_training_samples/test_losses.jsonl ADDED
@@ -0,0 +1,7 @@
+ {"epoch": 1, "test_loss": 0.08913946896791458}
+ {"epoch": 2, "test_loss": 0.08074655383825302}
+ {"epoch": 3, "test_loss": 0.0786006823182106}
+ {"epoch": 4, "test_loss": 0.08919624984264374}
+ {"epoch": 5, "test_loss": 0.08907854557037354}
+ {"epoch": 6, "test_loss": 0.09068197757005692}
+ {"epoch": 7, "test_loss": 0.0930403545498848}
lora/right_answer_conf/.DS_Store ADDED
Binary file (6.15 kB).
lora/right_answer_conf/long_qa/batchsize4_accumulation32_epochs10_weightdecay0.1_r8_alpha16_loradrpout0.0/best-checkpoint/lora_epoch_best/README.md ADDED
@@ -0,0 +1,206 @@
+ ---
+ base_model: /mnt/bn/motor-nlp-team/models/LLM/base_models/Qwen2.5-14B-Instruct
+ library_name: peft
+ tags:
+ - base_model:adapter:/mnt/bn/motor-nlp-team/models/LLM/base_models/Qwen2.5-14B-Instruct
+ - lora
+ - transformers
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.17.0
lora/right_answer_conf/long_qa/batchsize4_accumulation32_epochs10_weightdecay0.1_r8_alpha16_loradrpout0.0/best-checkpoint/lora_epoch_best/adapter_config.json ADDED
@@ -0,0 +1,45 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": {
+ "base_model_class": "Qwen2Model",
+ "parent_library": "transformers.models.qwen2.modeling_qwen2"
+ },
+ "base_model_name_or_path": "/mnt/bn/motor-nlp-team/models/LLM/base_models/Qwen2.5-14B-Instruct",
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "qalora_group_size": 16,
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "down_proj",
+ "q_proj",
+ "up_proj",
+ "o_proj",
+ "v_proj",
+ "k_proj",
+ "gate_proj"
+ ],
+ "target_parameters": null,
+ "task_type": null,
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_qalora": false,
+ "use_rslora": false
+ }
lora/right_answer_conf/long_qa/batchsize4_accumulation32_epochs10_weightdecay0.1_r8_alpha16_loradrpout0.0/best-checkpoint/lora_epoch_best/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9ca3d558b6f80cb2a6aceda70cb86b61f4baf61eef1a18048c78a0c7bd8e0f71
+ size 137710872
lora/right_answer_conf/long_qa/batchsize4_accumulation32_epochs10_weightdecay0.1_r8_alpha16_loradrpout0.0/best-checkpoint/vector_head_epoch_best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:46a15b25efcba270d6d3d094192ede07cb27613ac9da96a284d789ce57d931ab
+ size 22194
lora/right_answer_conf/long_qa/batchsize4_accumulation32_epochs10_weightdecay0.1_r8_alpha16_loradrpout0.0/test_losses.jsonl ADDED
@@ -0,0 +1,6 @@
+ {"epoch": 1, "test_loss": 0.07494872063398361}
+ {"epoch": 2, "test_loss": 0.07353893667459488}
+ {"epoch": 3, "test_loss": 0.0735877975821495}
+ {"epoch": 4, "test_loss": 0.07693902403116226}
+ {"epoch": 5, "test_loss": 0.08190840482711792}
+ {"epoch": 6, "test_loss": 0.08659124374389648}
lora/right_answer_conf/long_qa/batchsize4_accumulation32_epochs15_weightdecay0.1_r8_alpha16_loradrpout0.0_200k_training_samples/best-checkpoint/lora_epoch_best/README.md ADDED
@@ -0,0 +1,206 @@
+ ---
+ base_model: /mnt/bn/motor-nlp-team/models/LLM/base_models/Qwen2.5-14B-Instruct
+ library_name: peft
+ tags:
+ - base_model:adapter:/mnt/bn/motor-nlp-team/models/LLM/base_models/Qwen2.5-14B-Instruct
+ - lora
+ - transformers
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.17.0
lora/right_answer_conf/long_qa/batchsize4_accumulation32_epochs15_weightdecay0.1_r8_alpha16_loradrpout0.0_200k_training_samples/best-checkpoint/lora_epoch_best/adapter_config.json ADDED
@@ -0,0 +1,45 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": {
+ "base_model_class": "Qwen2Model",
+ "parent_library": "transformers.models.qwen2.modeling_qwen2"
+ },
+ "base_model_name_or_path": "/mnt/bn/motor-nlp-team/models/LLM/base_models/Qwen2.5-14B-Instruct",
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "qalora_group_size": 16,
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "o_proj",
+ "up_proj",
+ "down_proj",
+ "k_proj",
+ "q_proj",
+ "gate_proj",
+ "v_proj"
+ ],
+ "target_parameters": null,
+ "task_type": null,
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_qalora": false,
+ "use_rslora": false
+ }
lora/right_answer_conf/long_qa/batchsize4_accumulation32_epochs15_weightdecay0.1_r8_alpha16_loradrpout0.0_200k_training_samples/best-checkpoint/lora_epoch_best/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:da3e6cf769de81e042c628a951b2ba1af33362cb6dba9b5776e9c0fc7c1f3b4e
+ size 137710872
lora/right_answer_conf/long_qa/batchsize4_accumulation32_epochs15_weightdecay0.1_r8_alpha16_loradrpout0.0_200k_training_samples/best-checkpoint/vector_head_epoch_best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:42de137a4d921c8fe989fabcd498724b3ca38e7bad316e8ea568c704bc1eefc7
+ size 22194
lora/right_answer_conf/long_qa/batchsize4_accumulation32_epochs15_weightdecay0.1_r8_alpha16_loradrpout0.0_200k_training_samples/test_losses.jsonl ADDED
@@ -0,0 +1,6 @@
+ {"epoch": 1, "test_loss": 0.08200985938310623}
+ {"epoch": 2, "test_loss": 0.07489285618066788}
+ {"epoch": 3, "test_loss": 0.07997710257768631}
+ {"epoch": 4, "test_loss": 0.07678349316120148}
+ {"epoch": 5, "test_loss": 0.08322352916002274}
+ {"epoch": 6, "test_loss": 0.08676231652498245}