jaytonde05 committed
Commit 82d6a67 · verified · 1 Parent(s): 9131b62

Upload 8 files

MAP_EXP_18.py ADDED
@@ -0,0 +1,420 @@
import torch
import torch.nn as nn
import shutil
import numpy as np
import pandas as pd
import mlflow
from collections import Counter
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from datasets import Dataset
from transformers import (
    AutoTokenizer,
    TrainingArguments,
    Trainer,
    DataCollatorWithPadding,
    BitsAndBytesConfig,
    AutoModel
)
from peft import (
    LoraConfig,
    TaskType,
    get_peft_model,
    prepare_model_for_kbit_training,
)
from transformers.modeling_outputs import SequenceClassifierOutput

model_name = "MathGenie/MathCoder2-DeepSeekMath-7B"
MAX_LEN = 256

mlflow.set_tracking_uri("http://127.0.0.1:8081")


############################################################<-DATA->###########################################################
le_category = LabelEncoder()
le_misconception = LabelEncoder()

train = pd.read_csv('category_misconception_folds.csv')
train.Misconception = train.Misconception.fillna('NA')

train['category_label'] = le_category.fit_transform(train['Category'])
train['misconception_label'] = le_misconception.fit_transform(train['Misconception'])

train.to_excel("train_text.xlsx")

n_category_classes = len(le_category.classes_)
n_misconception_classes = len(le_misconception.classes_)

print(f"Train shape : {train.shape}")
print(f"Category classes : {n_category_classes}")
print(f"Misconception classes: {n_misconception_classes}")
print(f"Category classes names : {le_category.classes_}")

print(train[['Category', 'category_label', 'Misconception', 'misconception_label']].head())


# Mark the correct answer for each question: rows whose Category starts with "True",
# keeping the most frequent MC_Answer per QuestionId.
idx = train.apply(lambda row: row.Category.split('_')[0], axis=1) == 'True'
correct = train.loc[idx].copy()
correct['c'] = correct.groupby(['QuestionId', 'MC_Answer']).MC_Answer.transform('count')
correct = correct.sort_values('c', ascending=False)
correct = correct.drop_duplicates(['QuestionId'])
correct = correct[['QuestionId', 'MC_Answer']]
correct['is_correct'] = 1

train = train.merge(correct, on=['QuestionId', 'MC_Answer'], how='left')
train.is_correct = train.is_correct.fillna(0)


def format_input(row):
    x = "This answer is correct."
    if not row['is_correct']:
        x = "This answer is incorrect."
    return (
        f"Question: {row['QuestionText']}\n"
        f"Answer: {row['MC_Answer']}\n"
        f"{x}\n"
        f"Student Explanation: {row['StudentExplanation']}"
    )

train['text'] = train.apply(format_input, axis=1)
train_df = train[train["fold"] == 0]
val_df = train[train["fold"] == 1]


COLS = ['text', 'category_label', 'misconception_label']
train_ds = Dataset.from_pandas(train_df[COLS])
val_ds = Dataset.from_pandas(val_df[COLS])

tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer.pad_token = tokenizer.eos_token
tokenizer.padding_side = "right"

def tokenize_func(examples):
    tokenized = tokenizer(
        examples["text"],
        add_special_tokens = True,
        truncation = True,
        max_length = MAX_LEN,
        padding = False,
    )

    tokenized['category_label'] = examples['category_label']
    tokenized['misconception_label'] = examples['misconception_label']

    return tokenized

train_ds = train_ds.map(tokenize_func, batched=True, desc="Tokenizing train data")
val_ds = val_ds.map(tokenize_func, batched=True, desc="Tokenizing validation data")
##########################################################<-END->###############################################################


############################################################<-MODEL->###########################################################
class MultiHeadClassificationModel(nn.Module):
    def __init__(self, model_name, n_category_classes, n_misconception_classes, **model_kwargs):
        super().__init__()

        self.base_model = AutoModel.from_pretrained(model_name, **model_kwargs)
        self.base_model.config.use_cache = False  # Disable KV cache for training
        self.base_model.config.output_hidden_states = False
        self.base_model.config.output_attentions = False
        self.config = self.base_model.config

        hidden_size = self.base_model.config.hidden_size

        self.category_head = nn.Linear(hidden_size, n_category_classes)
        self.misconception_head = nn.Linear(hidden_size, n_misconception_classes)

        self.n_category_classes = n_category_classes
        self.n_misconception_classes = n_misconception_classes

        self.alpha = 0.6
        self.beta = 0.4

    def forward(self, input_ids, attention_mask=None, category_label=None, misconception_label=None, combined_label=None, **kwargs):

        outputs = self.base_model(input_ids=input_ids, attention_mask=attention_mask)
        pooled = outputs.last_hidden_state.mean(dim=1)

        category_logits = self.category_head(pooled)
        misconception_logits = self.misconception_head(pooled)

        loss = None
        if category_label is not None and misconception_label is not None:
            loss_fct = nn.CrossEntropyLoss(reduction='none')

            category_loss_unreduced = loss_fct(category_logits, category_label)
            misconception_loss_unreduced = loss_fct(misconception_logits, misconception_label)

            # categories_with_subclasses = torch.tensor([1, 4], device=category_label.device)
            # mask = torch.isin(category_label, categories_with_subclasses).float()

            # misconception_loss_masked = misconception_loss_unreduced * mask

            category_loss = torch.mean(category_loss_unreduced)
            misconception_loss = torch.mean(misconception_loss_unreduced)

            loss = self.alpha * category_loss + self.beta * misconception_loss

            # if mask.any():
            #     print(f"got the samples of misconception. so misco loss is : {misconception_loss} and cat loss is : {category_loss} and final loss is : {loss}")

        return SequenceClassifierOutput(
            loss=loss,
            logits=(category_logits, misconception_logits)
        )


model_kwargs = dict(
    trust_remote_code = True,
    torch_dtype = torch.float16
)

model_kwargs["quantization_config"] = BitsAndBytesConfig(
    load_in_4bit = True,
    bnb_4bit_quant_type = "nf4",
    bnb_4bit_use_double_quant = True,
    bnb_4bit_compute_dtype = "float16",
)

print(f"Loading model : {model_name}")
model = MultiHeadClassificationModel(
    model_name,
    n_category_classes = n_category_classes,
    n_misconception_classes = n_misconception_classes,
    **model_kwargs
)

model.base_model.config.pad_token_id = tokenizer.pad_token_id

lora_config = LoraConfig(
    r = 64,
    lora_alpha = 64,
    target_modules = "all-linear",
    lora_dropout = 0.05,
    bias = "none",
    task_type = TaskType.SEQ_CLS,
    modules_to_save = ["category_head", "misconception_head"],
)

model = prepare_model_for_kbit_training(model)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()

print(f"Model Architecture : {model}")

##########################################################<-END->###############################################################

############################################################<-METRICS->###########################################################


def compute_multi_map(eval_pred, ks=[3, 5, 10]):
    """
    Computes MAP@k and a detailed rank distribution for both category and misconception predictions.

    This includes:
    - Rank counts for rank 1, 2-3, and above 3.
    - For rank groups 2-3 and above 3, it finds the top 3 most frequent
      classes and calculates their average probability score.
    """
    # 1. Unpack logits and labels
    category_logits, misconception_logits = eval_pred.predictions
    category_labels, misconception_labels = eval_pred.label_ids

    category_labels = np.array(category_labels)
    misconception_labels = np.array(misconception_labels)

    # 2. Convert logits to probabilities
    # The `probs` arrays have shape: (num_samples, num_classes)
    category_probs = torch.nn.functional.softmax(torch.tensor(category_logits), dim=-1).numpy()
    misconception_probs = torch.nn.functional.softmax(torch.tensor(misconception_logits), dim=-1).numpy()

    print(f"category_probs : {category_probs}")
    print(f"category_labels : {category_labels}")
    print(f"misconception_probs : {misconception_probs}")
    print(f"misconception_labels : {misconception_labels}")

    # 3. Get top-k predictions
    max_k = max(ks)
    category_top_k_preds = np.argsort(-category_probs, axis=1)[:, :max_k]
    misconception_top_k_preds = np.argsort(-misconception_probs, axis=1)[:, :max_k]

    # 4. Create a boolean match array
    category_match_array = (category_top_k_preds == category_labels[:, None])
    misconception_match_array = (misconception_top_k_preds == misconception_labels[:, None])

    # 5. Compute MAP@k for each specified k
    metrics = {}

    # Category MAP@k
    for k in ks:
        match_at_k = category_match_array[:, :k]
        ranks = np.argmax(match_at_k, axis=1) + 1
        has_match_at_k = np.any(match_at_k, axis=1)
        scores = has_match_at_k * (1.0 / ranks)
        metrics[f"map@{k}_category"] = np.mean(scores)

    # Misconception MAP@k
    for k in ks:
        match_at_k = misconception_match_array[:, :k]
        ranks = np.argmax(match_at_k, axis=1) + 1
        has_match_at_k = np.any(match_at_k, axis=1)
        scores = has_match_at_k * (1.0 / ranks)
        metrics[f"map@{k}_misconception"] = np.mean(scores)

    # 6. Calculate detailed rank position breakdown for CATEGORY
    category_ranks_with_indices = [np.where(row)[0] for row in category_match_array]
    category_correct_ranks = np.array([r[0] + 1 if len(r) > 0 else max_k + 1 for r in category_ranks_with_indices])

    total = category_labels.shape[0]
    metrics["category_rank_1"] = np.sum(category_correct_ranks == 1)
    metrics["category_rank_2_to_3"] = np.sum((category_correct_ranks >= 2) & (category_correct_ranks <= 3))
    metrics["category_rank_above_3"] = np.sum((category_correct_ranks > 3) & (category_correct_ranks <= max_k))
    metrics["category_no_match_in_top_k"] = np.sum(category_correct_ranks > max_k)
    metrics["category_total"] = total

    # 7. Find top 3 classes for rank groups and their average probability - CATEGORY

    # --- For category ranks 2 to 3 ---
    category_rank_2_to_3_mask = (category_correct_ranks >= 2) & (category_correct_ranks <= 3)
    category_rank_2_to_3_labels = category_labels[category_rank_2_to_3_mask]

    if len(category_rank_2_to_3_labels) > 0:
        top_classes = Counter(category_rank_2_to_3_labels).most_common(3)
        augmented_top_classes = []
        for cls, count in top_classes:
            class_in_group_mask = (category_labels == cls) & category_rank_2_to_3_mask
            class_probs = category_probs[class_in_group_mask, cls]
            avg_prob = np.mean(class_probs)
            augmented_top_classes.append((cls, count, round(float(avg_prob), 4)))
        # metrics["category_rank_2_to_3_details"] = augmented_top_classes
    # else:
    #     metrics["category_rank_2_to_3_details"] = []

    # --- For category ranks above 3 (up to max_k) ---
    category_rank_above_3_mask = (category_correct_ranks > 3) & (category_correct_ranks <= max_k)
    category_rank_above_3_labels = category_labels[category_rank_above_3_mask]

    if len(category_rank_above_3_labels) > 0:
        top_classes = Counter(category_rank_above_3_labels).most_common(3)
        augmented_top_classes = []
        for cls, count in top_classes:
            class_in_group_mask = (category_labels == cls) & category_rank_above_3_mask
            class_probs = category_probs[class_in_group_mask, cls]
            avg_prob = np.mean(class_probs)
            augmented_top_classes.append((cls, count, round(float(avg_prob), 4)))
        # metrics["category_rank_above_3_details"] = augmented_top_classes
    # else:
    #     metrics["category_rank_above_3_details"] = []

    # 8. Calculate detailed rank position breakdown for MISCONCEPTION
    misconception_ranks_with_indices = [np.where(row)[0] for row in misconception_match_array]
    misconception_correct_ranks = np.array([r[0] + 1 if len(r) > 0 else max_k + 1 for r in misconception_ranks_with_indices])

    total = misconception_labels.shape[0]
    metrics["misconception_rank_1"] = np.sum(misconception_correct_ranks == 1)
    metrics["misconception_rank_2_to_3"] = np.sum((misconception_correct_ranks >= 2) & (misconception_correct_ranks <= 3))
    metrics["misconception_rank_above_3"] = np.sum((misconception_correct_ranks > 3) & (misconception_correct_ranks <= max_k))
    metrics["misconception_no_match_in_top_k"] = np.sum(misconception_correct_ranks > max_k)
    metrics["misconception_total"] = total

    # 9. Find top 3 classes for rank groups and their average probability - MISCONCEPTION

    # --- For misconception ranks 2 to 3 ---
    misconception_rank_2_to_3_mask = (misconception_correct_ranks >= 2) & (misconception_correct_ranks <= 3)
    misconception_rank_2_to_3_labels = misconception_labels[misconception_rank_2_to_3_mask]

    if len(misconception_rank_2_to_3_labels) > 0:
        top_classes = Counter(misconception_rank_2_to_3_labels).most_common(3)
        augmented_top_classes = []
        for cls, count in top_classes:
            class_in_group_mask = (misconception_labels == cls) & misconception_rank_2_to_3_mask
            class_probs = misconception_probs[class_in_group_mask, cls]
            avg_prob = np.mean(class_probs)
            augmented_top_classes.append((cls, count, round(float(avg_prob), 4)))
        # metrics["misconception_rank_2_to_3_details"] = augmented_top_classes
    # else:
    #     metrics["misconception_rank_2_to_3_details"] = []

    # --- For misconception ranks above 3 (up to max_k) ---
    misconception_rank_above_3_mask = (misconception_correct_ranks > 3) & (misconception_correct_ranks <= max_k)
    misconception_rank_above_3_labels = misconception_labels[misconception_rank_above_3_mask]

    if len(misconception_rank_above_3_labels) > 0:
        top_classes = Counter(misconception_rank_above_3_labels).most_common(3)
        augmented_top_classes = []
        for cls, count in top_classes:
            class_in_group_mask = (misconception_labels == cls) & misconception_rank_above_3_mask
            class_probs = misconception_probs[class_in_group_mask, cls]
            avg_prob = np.mean(class_probs)
            augmented_top_classes.append((cls, count, round(float(avg_prob), 4)))
        # metrics["misconception_rank_above_3_details"] = augmented_top_classes
    # else:
    #     metrics["misconception_rank_above_3_details"] = []

    # 10. Log metrics to MLflow for both category and misconception
    # Category metrics
    mlflow.log_metric("category_rank_1", metrics["category_rank_1"])
    mlflow.log_metric("category_rank_2_to_3", metrics["category_rank_2_to_3"])
    mlflow.log_metric("category_rank_above_3", metrics["category_rank_above_3"])
    mlflow.log_metric("category_no_match_in_top_k", metrics["category_no_match_in_top_k"])

    # Misconception metrics
    mlflow.log_metric("misconception_rank_1", metrics["misconception_rank_1"])
    mlflow.log_metric("misconception_rank_2_to_3", metrics["misconception_rank_2_to_3"])
    mlflow.log_metric("misconception_rank_above_3", metrics["misconception_rank_above_3"])
    mlflow.log_metric("misconception_no_match_in_top_k", metrics["misconception_no_match_in_top_k"])

    return metrics



##########################################################<-END->###############################################################

############################################################<-TRAINER->###########################################################
training_args = TrainingArguments(
    output_dir = "MAP_EXP_18",
    eval_strategy = "steps",
    save_strategy = "no",
    logging_strategy = "steps",
    logging_steps = 100,
    eval_steps = 500,
    learning_rate = 1e-4,
    per_device_train_batch_size = 16,
    per_device_eval_batch_size = 32,
    lr_scheduler_type = "cosine",
    warmup_ratio = 0.05,
    report_to = "mlflow",
    group_by_length = True,
    max_grad_norm = 1.0,
    weight_decay = 0.01,
    num_train_epochs = 2,
    label_names = ['category_label', 'misconception_label']
)


trainer = Trainer(
    model,
    args = training_args,
    train_dataset = train_ds,
    eval_dataset = val_ds,
    tokenizer = tokenizer,
    compute_metrics = compute_multi_map,
    data_collator = DataCollatorWithPadding(tokenizer)
)
##########################################################<-END->###############################################################

if __name__ == "__main__":

    trainer.train()
    trainer.save_model("MAP_EXP_18")

    source_file = "MAP_EXP_18.py"
    destination_directory = "MAP_EXP_18"

    shutil.copy(source_file, destination_directory)
    print(f"File '{source_file}' copied to '{destination_directory}'")

    print("Training completed and model saved!")
README.md ADDED
@@ -0,0 +1,202 @@
---
base_model: MathGenie/MathCoder2-DeepSeekMath-7B
library_name: peft
---

# Model Card for Model ID

<!-- Provide a quick summary of what the model is/does. -->



## Model Details

### Model Description

<!-- Provide a longer summary of what this model is. -->



- **Developed by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]

### Model Sources [optional]

<!-- Provide the basic links for the model. -->

- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]

## Uses

<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->

### Direct Use

<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->

[More Information Needed]

### Downstream Use [optional]

<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->

[More Information Needed]

### Out-of-Scope Use

<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->

[More Information Needed]

## Bias, Risks, and Limitations

<!-- This section is meant to convey both technical and sociotechnical limitations. -->

[More Information Needed]

### Recommendations

<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->

Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.

## How to Get Started with the Model

Use the code below to get started with the model.

[More Information Needed]
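
A minimal, hypothetical inference sketch is shown below. It re-creates the two-head wrapper defined in this repository's `MAP_EXP_18.py` (mean-pooled hidden states feeding a category head and a misconception head) and then attaches this LoRA adapter. The adapter path, the two head sizes, and the prompt text are placeholders: the head sizes must match the label encoders used during training, and the adapter path should point at a local clone or the Hub id of this repo. The model is loaded in full precision here; the training run used 4-bit quantization with fp16 compute.

```python
import torch
import torch.nn as nn
from transformers import AutoTokenizer, AutoModel
from transformers.modeling_outputs import SequenceClassifierOutput
from peft import PeftModel

# Sketch of the wrapper used in MAP_EXP_18.py (two linear heads on mean-pooled states).
class MultiHeadClassificationModel(nn.Module):
    def __init__(self, model_name, n_category_classes, n_misconception_classes, **model_kwargs):
        super().__init__()
        self.base_model = AutoModel.from_pretrained(model_name, **model_kwargs)
        self.config = self.base_model.config
        hidden_size = self.base_model.config.hidden_size
        self.category_head = nn.Linear(hidden_size, n_category_classes)
        self.misconception_head = nn.Linear(hidden_size, n_misconception_classes)

    def forward(self, input_ids, attention_mask=None, **kwargs):
        outputs = self.base_model(input_ids=input_ids, attention_mask=attention_mask)
        pooled = outputs.last_hidden_state.mean(dim=1)
        return SequenceClassifierOutput(
            logits=(self.category_head(pooled), self.misconception_head(pooled))
        )

base_name = "MathGenie/MathCoder2-DeepSeekMath-7B"
adapter_path = "MAP_EXP_18"   # placeholder: local folder or Hub id of this adapter
N_CATEGORY = 6                # placeholder: must match the training-time label encoder
N_MISCONCEPTION = 36          # placeholder: must match the training-time label encoder

tokenizer = AutoTokenizer.from_pretrained(base_name)
tokenizer.pad_token = tokenizer.eos_token

model = MultiHeadClassificationModel(
    base_name, N_CATEGORY, N_MISCONCEPTION, trust_remote_code=True
)
model = PeftModel.from_pretrained(model, adapter_path)
model.eval()

# Prompt format mirrors format_input() in MAP_EXP_18.py.
text = (
    "Question: ...\n"
    "Answer: ...\n"
    "This answer is correct.\n"
    "Student Explanation: ..."
)
inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=256)
with torch.no_grad():
    category_logits, misconception_logits = model(**inputs).logits
```

The predicted classes can then be read off with `category_logits.argmax(-1)` and `misconception_logits.argmax(-1)` and mapped back through the training-time `LabelEncoder`s.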

## Training Details

### Training Data

<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->

[More Information Needed]

### Training Procedure

<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->

#### Preprocessing [optional]

[More Information Needed]


#### Training Hyperparameters

- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->

#### Speeds, Sizes, Times [optional]

<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->

[More Information Needed]

## Evaluation

<!-- This section describes the evaluation protocols and provides the results. -->

### Testing Data, Factors & Metrics

#### Testing Data

<!-- This should link to a Dataset Card if possible. -->

[More Information Needed]

#### Factors

<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->

[More Information Needed]

#### Metrics

<!-- These are the evaluation metrics being used, ideally with a description of why. -->

[More Information Needed]

### Results

[More Information Needed]

#### Summary



## Model Examination [optional]

<!-- Relevant interpretability work for the model goes here -->

[More Information Needed]

## Environmental Impact

<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->

Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).

- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]

## Technical Specifications [optional]

### Model Architecture and Objective

[More Information Needed]

### Compute Infrastructure

[More Information Needed]

#### Hardware

[More Information Needed]

#### Software

[More Information Needed]

## Citation [optional]

<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->

**BibTeX:**

[More Information Needed]

**APA:**

[More Information Needed]

## Glossary [optional]

<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->

[More Information Needed]

## More Information [optional]

[More Information Needed]

## Model Card Authors [optional]

[More Information Needed]

## Model Card Contact

[More Information Needed]
### Framework versions

- PEFT 0.15.2
adapter_config.json ADDED
@@ -0,0 +1,46 @@
{
  "alpha_pattern": {},
  "auto_mapping": null,
  "base_model_name_or_path": null,
  "bias": "none",
  "corda_config": null,
  "eva_config": null,
  "exclude_modules": null,
  "fan_in_fan_out": false,
  "inference_mode": true,
  "init_lora_weights": true,
  "layer_replication": null,
  "layers_pattern": null,
  "layers_to_transform": null,
  "loftq_config": {},
  "lora_alpha": 64,
  "lora_bias": false,
  "lora_dropout": 0.05,
  "megatron_config": null,
  "megatron_core": "megatron.core",
  "modules_to_save": [
    "category_head",
    "misconception_head",
    "classifier",
    "score"
  ],
  "peft_type": "LORA",
  "r": 64,
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
    "k_proj",
    "up_proj",
    "down_proj",
    "o_proj",
    "v_proj",
    "gate_proj",
    "misconception_head",
    "category_head",
    "q_proj"
  ],
  "task_type": "SEQ_CLS",
  "trainable_token_indices": null,
  "use_dora": false,
  "use_rslora": false
}
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1a7de03071a1dc310d5368b3a03e8dc65fa14b1f589b13fac0a3719be0c7d4a0
size 600401952
special_tokens_map.json ADDED
@@ -0,0 +1,17 @@
{
  "bos_token": {
    "content": "<|begin▁of▁sentence|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "<|end▁of▁sentence|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": "<|end▁of▁sentence|>"
}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,34 @@
{
  "add_bos_token": true,
  "add_eos_token": false,
  "add_prefix_space": null,
  "added_tokens_decoder": {
    "100000": {
      "content": "<|begin▁of▁sentence|>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "100001": {
      "content": "<|end▁of▁sentence|>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<|begin▁of▁sentence|>",
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|end▁of▁sentence|>",
  "extra_special_tokens": {},
  "legacy": true,
  "model_max_length": 4096,
  "pad_token": "<|end▁of▁sentence|>",
  "sp_model_kwargs": {},
  "tokenizer_class": "LlamaTokenizerFast",
  "unk_token": null,
  "use_default_system_prompt": false
}
training_args.bin ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dc1cf23e15b1436dd75f525925ac969887a5792db5c1c12964b30df7290d0dd3
size 5368