Neural-Hacker committed on
Commit
98e4ce9
·
verified ·
1 Parent(s): 21c7673

Upload 7 files

Browse files
config.json ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "activation": "gelu",
3
+ "architectures": [
4
+ "DistilBertForSequenceClassification"
5
+ ],
6
+ "attention_dropout": 0.1,
7
+ "dim": 768,
8
+ "dropout": 0.1,
9
+ "hidden_dim": 3072,
10
+ "id2label": {
11
+ "0": "LABEL_0",
12
+ "1": "LABEL_1",
13
+ "2": "LABEL_2",
14
+ "3": "LABEL_3"
15
+ },
16
+ "initializer_range": 0.02,
17
+ "label2id": {
18
+ "LABEL_0": 0,
19
+ "LABEL_1": 1,
20
+ "LABEL_2": 2,
21
+ "LABEL_3": 3
22
+ },
23
+ "max_position_embeddings": 512,
24
+ "model_type": "distilbert",
25
+ "n_heads": 12,
26
+ "n_layers": 6,
27
+ "pad_token_id": 0,
28
+ "problem_type": "single_label_classification",
29
+ "qa_dropout": 0.1,
30
+ "seq_classif_dropout": 0.2,
31
+ "sinusoidal_pos_embds": false,
32
+ "tie_weights_": true,
33
+ "torch_dtype": "float32",
34
+ "transformers_version": "4.52.4",
35
+ "vocab_size": 30522
36
+ }
distilbert-jee-math-mcq-2025.ipynb ADDED
@@ -0,0 +1 @@
 
 
1
+ {"metadata":{"kernelspec":{"language":"python","display_name":"Python 3","name":"python3"},"language_info":{"name":"python","version":"3.11.13","mimetype":"text/x-python","codemirror_mode":{"name":"ipython","version":3},"pygments_lexer":"ipython3","nbconvert_exporter":"python","file_extension":".py"},"kaggle":{"accelerator":"gpu","dataSources":[{"sourceId":12755052,"sourceType":"datasetVersion","datasetId":8063286}],"dockerImageVersionId":31090,"isInternetEnabled":true,"language":"python","sourceType":"notebook","isGpuEnabled":true}},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"code","source":"# Install latest libraries\n!pip install datasets transformers torch -q\n\n# Import libraries\nfrom datasets import load_dataset, concatenate_datasets\nfrom transformers import DistilBertTokenizer\nimport random\nimport transformers\n\n# Verify transformers version\nprint(f\"Transformers version: {transformers.__version__}\")\n\n# Load both January and April datasets\njan_data = load_dataset(\"PhysicsWallahAI/JEE-Main-2025-Math\", \"jan\", split=\"test\")\napr_data = load_dataset(\"PhysicsWallahAI/JEE-Main-2025-Math\", \"apr\", split=\"test\")\ndataset = concatenate_datasets([jan_data, apr_data])\n\n# Filter MCQs and preprocess with augmentation\ndef preprocess_data(example):\n if example[\"question_type\"] == 1: # MCQs only\n options = example[\"options\"].copy()\n correct_idx = example[\"correct_options\"][0]\n indices = list(range(len(options)))\n random.shuffle(indices)\n shuffled_options = [options[i] for i in indices]\n new_correct_idx = indices.index(correct_idx)\n options_text = \" Options: \" + \", \".join(shuffled_options)\n return {\"input_text\": example[\"question\"] + options_text, \"label\": new_correct_idx}\n return None\n\nmcq_dataset = dataset.filter(lambda x: x[\"question_type\"] == 1).map(preprocess_data).remove_columns([\"question\", \"answer\", \"options\", \"correct_options\", \"additional_data\", \"metadata\", 
\"question_type\"])\nmcq_dataset = mcq_dataset.train_test_split(test_size=0.2, seed=42)\n\n# Tokenize\ntokenizer = DistilBertTokenizer.from_pretrained(\"distilbert-base-uncased\")\ndef tokenize_function(example):\n return tokenizer(example[\"input_text\"], padding=\"max_length\", truncation=True, max_length=256) # Reduced max_length\ntokenized_dataset = mcq_dataset.map(tokenize_function, batched=True)\ntokenized_dataset = tokenized_dataset.rename_column(\"label\", \"labels\")\ntokenized_dataset = tokenized_dataset.remove_columns([\"input_text\"])\ntokenized_dataset.set_format(\"torch\")","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-08-18T13:14:43.612645Z","iopub.execute_input":"2025-08-18T13:14:43.612961Z","iopub.status.idle":"2025-08-18T13:16:18.913870Z","shell.execute_reply.started":"2025-08-18T13:14:43.612932Z","shell.execute_reply":"2025-08-18T13:16:18.913144Z"}},"outputs":[{"name":"stdout","text":"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m363.4/363.4 MB\u001b[0m \u001b[31m4.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m:00:01\u001b[0m00:01\u001b[0m\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m13.8/13.8 MB\u001b[0m \u001b[31m86.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m:00:01\u001b[0m00:01\u001b[0m\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m24.6/24.6 MB\u001b[0m \u001b[31m71.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m:00:01\u001b[0m00:01\u001b[0m\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m883.7/883.7 kB\u001b[0m \u001b[31m37.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m664.8/664.8 MB\u001b[0m \u001b[31m2.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m:00:01\u001b[0m00:01\u001b[0m\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m211.5/211.5 MB\u001b[0m \u001b[31m2.2 
MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m:00:01\u001b[0m00:01\u001b[0m\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m56.3/56.3 MB\u001b[0m \u001b[31m8.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m0:00:01\u001b[0m00:01\u001b[0m\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m127.9/127.9 MB\u001b[0m \u001b[31m12.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0m\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m207.5/207.5 MB\u001b[0m \u001b[31m7.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m:00:01\u001b[0m00:01\u001b[0m\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m21.1/21.1 MB\u001b[0m \u001b[31m76.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m:00:01\u001b[0m00:01\u001b[0m\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m193.6/193.6 kB\u001b[0m \u001b[31m11.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n\u001b[?25h\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. 
This behaviour is the source of the following dependency conflicts.\nbigframes 2.8.0 requires google-cloud-bigquery-storage<3.0.0,>=2.30.0, which is not installed.\ncesium 0.12.4 requires numpy<3.0,>=2.0, but you have numpy 1.26.4 which is incompatible.\ngcsfs 2025.3.2 requires fsspec==2025.3.2, but you have fsspec 2025.3.0 which is incompatible.\nbigframes 2.8.0 requires google-cloud-bigquery[bqstorage,pandas]>=3.31.0, but you have google-cloud-bigquery 3.25.0 which is incompatible.\nbigframes 2.8.0 requires rich<14,>=12.4.4, but you have rich 14.0.0 which is incompatible.\u001b[0m\u001b[31m\n\u001b[0mTransformers version: 4.52.4\n","output_type":"stream"},{"output_type":"display_data","data":{"text/plain":"README.md: 0.00B [00:00, ?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"75d45170fc9c4a28995d291a7d5b4379"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"main2025-jan.jsonl: 0.00B [00:00, ?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"576b0108bc4245b89bb58d74cdfc6829"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"Generating test split: 0 examples [00:00, ? examples/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"053904a79b034f3fac0ad4cf53c537c9"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"main2025-apr.jsonl: 0.00B [00:00, ?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"ddfce33d18834f66aa453e6ed18b4767"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"Generating test split: 0 examples [00:00, ? examples/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"9f33af638d894523a379fd30ab82e77a"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"Filter: 0%| | 0/475 [00:00<?, ? 
examples/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"2f1ae8f98fc442ee894be53ad3a45941"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"Map: 0%| | 0/373 [00:00<?, ? examples/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"b3407223433b4479a302688689f9b2eb"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"tokenizer_config.json: 0%| | 0.00/48.0 [00:00<?, ?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"f4f8681e568e493bbed99ef74fb9910f"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"vocab.txt: 0%| | 0.00/232k [00:00<?, ?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"5a8c8fe989ae4b9f837f45227c44a6cf"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"tokenizer.json: 0%| | 0.00/466k [00:00<?, ?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"2a70ec74361341378df30e1fccff1bf6"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"config.json: 0%| | 0.00/483 [00:00<?, ?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"cd5ac112d15543fd983b098aa6c6494a"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"Map: 0%| | 0/298 [00:00<?, ? examples/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"5d4b2bb962174646a676c440103c819f"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"Map: 0%| | 0/75 [00:00<?, ? 
examples/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"96c6faf97bd549a78c57e6fc993506a7"}},"metadata":{}}],"execution_count":1},{"cell_type":"code","source":"from transformers import DistilBertForSequenceClassification, Trainer, TrainingArguments\nfrom sklearn.metrics import accuracy_score\nimport numpy as np\n\n# Define custom metrics for accuracy\ndef compute_metrics(eval_pred):\n logits, labels = eval_pred\n predictions = np.argmax(logits, axis=-1)\n return {\"accuracy\": accuracy_score(labels, predictions)}\n\n# Initialize model\nmodel = DistilBertForSequenceClassification.from_pretrained(\"distilbert-base-uncased\", num_labels=4)\n\n# Training arguments for older transformers version\ntraining_args = TrainingArguments(\n output_dir=\"/kaggle/working/results\",\n per_device_train_batch_size=4, # Smaller batch size\n per_device_eval_batch_size=4,\n num_train_epochs=3, # More epochs\n learning_rate=5e-5, # Lower learning rate\n weight_decay=0.1, # Increased regularization\n logging_dir=\"/kaggle/working/logs\",\n logging_steps=10,\n save_steps=500,\n save_total_limit=2,\n no_cuda=False, # Use GPU if available\n report_to=\"none\" # Disable wandb\n)\n\n# Initialize trainer\ntrainer = Trainer(\n model=model,\n args=training_args,\n train_dataset=tokenized_dataset[\"train\"],\n eval_dataset=tokenized_dataset[\"test\"],\n compute_metrics=compute_metrics\n)\n\n# Train\ntrainer.train()\n\n# Save model and tokenizer\nmodel.save_pretrained(\"/kaggle/working/mcq_model\")\ntokenizer.save_pretrained(\"/kaggle/working/mcq_tokenizer\")\n\n# Evaluate\nmetrics = 
trainer.evaluate()\nprint(metrics)","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-08-18T13:22:23.052495Z","iopub.execute_input":"2025-08-18T13:22:23.052961Z","iopub.status.idle":"2025-08-18T13:22:47.347850Z","shell.execute_reply.started":"2025-08-18T13:22:23.052934Z","shell.execute_reply":"2025-08-18T13:22:47.346849Z"}},"outputs":[{"name":"stderr","text":"Some weights of DistilBertForSequenceClassification were not initialized from the model checkpoint at distilbert-base-uncased and are newly initialized: ['classifier.bias', 'classifier.weight', 'pre_classifier.bias', 'pre_classifier.weight']\nYou should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n","output_type":"stream"},{"output_type":"display_data","data":{"text/plain":"<IPython.core.display.HTML object>","text/html":"\n <div>\n \n <progress value='225' max='225' style='width:300px; height:20px; vertical-align: middle;'></progress>\n [225/225 00:22, Epoch 3/3]\n </div>\n <table border=\"1\" class=\"dataframe\">\n <thead>\n <tr style=\"text-align: left;\">\n <th>Step</th>\n <th>Training Loss</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <td>10</td>\n <td>1.404200</td>\n </tr>\n <tr>\n <td>20</td>\n <td>1.393900</td>\n </tr>\n <tr>\n <td>30</td>\n <td>1.404500</td>\n </tr>\n <tr>\n <td>40</td>\n <td>1.387900</td>\n </tr>\n <tr>\n <td>50</td>\n <td>1.383500</td>\n </tr>\n <tr>\n <td>60</td>\n <td>1.360500</td>\n </tr>\n <tr>\n <td>70</td>\n <td>1.393200</td>\n </tr>\n <tr>\n <td>80</td>\n <td>1.381800</td>\n </tr>\n <tr>\n <td>90</td>\n <td>1.378000</td>\n </tr>\n <tr>\n <td>100</td>\n <td>1.406700</td>\n </tr>\n <tr>\n <td>110</td>\n <td>1.366800</td>\n </tr>\n <tr>\n <td>120</td>\n <td>1.341300</td>\n </tr>\n <tr>\n <td>130</td>\n <td>1.357400</td>\n </tr>\n <tr>\n <td>140</td>\n <td>1.348500</td>\n </tr>\n <tr>\n <td>150</td>\n <td>1.371700</td>\n </tr>\n <tr>\n <td>160</td>\n <td>1.372000</td>\n </tr>\n <tr>\n <td>170</td>\n 
<td>1.325000</td>\n </tr>\n <tr>\n <td>180</td>\n <td>1.258800</td>\n </tr>\n <tr>\n <td>190</td>\n <td>1.358800</td>\n </tr>\n <tr>\n <td>200</td>\n <td>1.358900</td>\n </tr>\n <tr>\n <td>210</td>\n <td>1.311300</td>\n </tr>\n <tr>\n <td>220</td>\n <td>1.310600</td>\n </tr>\n </tbody>\n</table><p>"},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"<IPython.core.display.HTML object>","text/html":"\n <div>\n \n <progress value='19' max='19' style='width:300px; height:20px; vertical-align: middle;'></progress>\n [19/19 00:00]\n </div>\n "},"metadata":{}},{"name":"stdout","text":"{'eval_loss': 1.4303665161132812, 'eval_accuracy': 0.22666666666666666, 'eval_runtime': 0.3752, 'eval_samples_per_second': 199.917, 'eval_steps_per_second': 50.646, 'epoch': 3.0}\n","output_type":"stream"}],"execution_count":6},{"cell_type":"code","source":"from transformers import DistilBertForSequenceClassification, Trainer, TrainingArguments\nfrom sklearn.metrics import accuracy_score\nimport numpy as np\n\n# Define custom metrics for accuracy\ndef compute_metrics(eval_pred):\n logits, labels = eval_pred\n predictions = np.argmax(logits, axis=-1)\n return {\"accuracy\": accuracy_score(labels, predictions)}\n\n# Initialize model\nmodel = DistilBertForSequenceClassification.from_pretrained(\"distilbert-base-uncased\", num_labels=4)\n\n# Training arguments for older transformers version\ntraining_args = TrainingArguments(\n output_dir=\"/kaggle/working/results\",\n per_device_train_batch_size=4, # Smaller batch size\n per_device_eval_batch_size=4,\n num_train_epochs=10, # More epochs\n learning_rate=1e-5, # Lower learning rate\n weight_decay=0.1, # Increased regularization\n logging_dir=\"/kaggle/working/logs\",\n logging_steps=10,\n save_steps=500,\n save_total_limit=2,\n no_cuda=False, # Use GPU if available\n report_to=\"none\" # Disable wandb\n)\n\n# Initialize trainer\ntrainer = Trainer(\n model=model,\n args=training_args,\n 
train_dataset=tokenized_dataset[\"train\"],\n eval_dataset=tokenized_dataset[\"test\"],\n compute_metrics=compute_metrics\n)\n\n# Train\ntrainer.train()\n\n# Save model and tokenizer\nmodel.save_pretrained(\"/kaggle/working/mcq_model\")\ntokenizer.save_pretrained(\"/kaggle/working/mcq_tokenizer\")\n\n# Evaluate\nmetrics = trainer.evaluate()\nprint(metrics)","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-08-18T13:23:02.412937Z","iopub.execute_input":"2025-08-18T13:23:02.413267Z","iopub.status.idle":"2025-08-18T13:24:13.635603Z","shell.execute_reply.started":"2025-08-18T13:23:02.413242Z","shell.execute_reply":"2025-08-18T13:24:13.634922Z"}},"outputs":[{"name":"stderr","text":"Some weights of DistilBertForSequenceClassification were not initialized from the model checkpoint at distilbert-base-uncased and are newly initialized: ['classifier.bias', 'classifier.weight', 'pre_classifier.bias', 'pre_classifier.weight']\nYou should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n","output_type":"stream"},{"output_type":"display_data","data":{"text/plain":"<IPython.core.display.HTML object>","text/html":"\n <div>\n \n <progress value='750' max='750' style='width:300px; height:20px; vertical-align: middle;'></progress>\n [750/750 01:09, Epoch 10/10]\n </div>\n <table border=\"1\" class=\"dataframe\">\n <thead>\n <tr style=\"text-align: left;\">\n <th>Step</th>\n <th>Training Loss</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <td>10</td>\n <td>1.375300</td>\n </tr>\n <tr>\n <td>20</td>\n <td>1.388500</td>\n </tr>\n <tr>\n <td>30</td>\n <td>1.402200</td>\n </tr>\n <tr>\n <td>40</td>\n <td>1.382000</td>\n </tr>\n <tr>\n <td>50</td>\n <td>1.373200</td>\n </tr>\n <tr>\n <td>60</td>\n <td>1.363700</td>\n </tr>\n <tr>\n <td>70</td>\n <td>1.393100</td>\n </tr>\n <tr>\n <td>80</td>\n <td>1.347700</td>\n </tr>\n <tr>\n <td>90</td>\n <td>1.379400</td>\n </tr>\n <tr>\n <td>100</td>\n <td>1.395500</td>\n </tr>\n <tr>\n 
<td>110</td>\n <td>1.333600</td>\n </tr>\n <tr>\n <td>120</td>\n <td>1.355700</td>\n </tr>\n <tr>\n <td>130</td>\n <td>1.349100</td>\n </tr>\n <tr>\n <td>140</td>\n <td>1.358000</td>\n </tr>\n <tr>\n <td>150</td>\n <td>1.354100</td>\n </tr>\n <tr>\n <td>160</td>\n <td>1.364100</td>\n </tr>\n <tr>\n <td>170</td>\n <td>1.302500</td>\n </tr>\n <tr>\n <td>180</td>\n <td>1.253900</td>\n </tr>\n <tr>\n <td>190</td>\n <td>1.357800</td>\n </tr>\n <tr>\n <td>200</td>\n <td>1.385200</td>\n </tr>\n <tr>\n <td>210</td>\n <td>1.342800</td>\n </tr>\n <tr>\n <td>220</td>\n <td>1.343200</td>\n </tr>\n <tr>\n <td>230</td>\n <td>1.332800</td>\n </tr>\n <tr>\n <td>240</td>\n <td>1.322500</td>\n </tr>\n <tr>\n <td>250</td>\n <td>1.264800</td>\n </tr>\n <tr>\n <td>260</td>\n <td>1.301000</td>\n </tr>\n <tr>\n <td>270</td>\n <td>1.220200</td>\n </tr>\n <tr>\n <td>280</td>\n <td>1.286400</td>\n </tr>\n <tr>\n <td>290</td>\n <td>1.280700</td>\n </tr>\n <tr>\n <td>300</td>\n <td>1.277800</td>\n </tr>\n <tr>\n <td>310</td>\n <td>1.227000</td>\n </tr>\n <tr>\n <td>320</td>\n <td>1.202400</td>\n </tr>\n <tr>\n <td>330</td>\n <td>1.153200</td>\n </tr>\n <tr>\n <td>340</td>\n <td>1.176700</td>\n </tr>\n <tr>\n <td>350</td>\n <td>1.202600</td>\n </tr>\n <tr>\n <td>360</td>\n <td>1.359300</td>\n </tr>\n <tr>\n <td>370</td>\n <td>1.284900</td>\n </tr>\n <tr>\n <td>380</td>\n <td>1.165900</td>\n </tr>\n <tr>\n <td>390</td>\n <td>1.155800</td>\n </tr>\n <tr>\n <td>400</td>\n <td>1.231300</td>\n </tr>\n <tr>\n <td>410</td>\n <td>1.242400</td>\n </tr>\n <tr>\n <td>420</td>\n <td>1.228000</td>\n </tr>\n <tr>\n <td>430</td>\n <td>1.051900</td>\n </tr>\n <tr>\n <td>440</td>\n <td>1.152700</td>\n </tr>\n <tr>\n <td>450</td>\n <td>1.146100</td>\n </tr>\n <tr>\n <td>460</td>\n <td>1.158100</td>\n </tr>\n <tr>\n <td>470</td>\n <td>1.027100</td>\n </tr>\n <tr>\n <td>480</td>\n <td>1.057100</td>\n </tr>\n <tr>\n <td>490</td>\n <td>1.114100</td>\n </tr>\n <tr>\n <td>500</td>\n <td>1.088000</td>\n </tr>\n <tr>\n 
<td>510</td>\n <td>1.227400</td>\n </tr>\n <tr>\n <td>520</td>\n <td>1.112600</td>\n </tr>\n <tr>\n <td>530</td>\n <td>1.058800</td>\n </tr>\n <tr>\n <td>540</td>\n <td>0.976700</td>\n </tr>\n <tr>\n <td>550</td>\n <td>1.076600</td>\n </tr>\n <tr>\n <td>560</td>\n <td>0.942000</td>\n </tr>\n <tr>\n <td>570</td>\n <td>1.069000</td>\n </tr>\n <tr>\n <td>580</td>\n <td>1.074200</td>\n </tr>\n <tr>\n <td>590</td>\n <td>1.122000</td>\n </tr>\n <tr>\n <td>600</td>\n <td>0.994400</td>\n </tr>\n <tr>\n <td>610</td>\n <td>1.044500</td>\n </tr>\n <tr>\n <td>620</td>\n <td>0.988600</td>\n </tr>\n <tr>\n <td>630</td>\n <td>1.053100</td>\n </tr>\n <tr>\n <td>640</td>\n <td>0.990800</td>\n </tr>\n <tr>\n <td>650</td>\n <td>1.107400</td>\n </tr>\n <tr>\n <td>660</td>\n <td>0.996600</td>\n </tr>\n <tr>\n <td>670</td>\n <td>1.015200</td>\n </tr>\n <tr>\n <td>680</td>\n <td>0.838600</td>\n </tr>\n <tr>\n <td>690</td>\n <td>0.942500</td>\n </tr>\n <tr>\n <td>700</td>\n <td>1.026300</td>\n </tr>\n <tr>\n <td>710</td>\n <td>0.984400</td>\n </tr>\n <tr>\n <td>720</td>\n <td>1.034100</td>\n </tr>\n <tr>\n <td>730</td>\n <td>0.968400</td>\n </tr>\n <tr>\n <td>740</td>\n <td>0.935800</td>\n </tr>\n <tr>\n <td>750</td>\n <td>0.950800</td>\n </tr>\n </tbody>\n</table><p>"},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"<IPython.core.display.HTML object>","text/html":"\n <div>\n \n <progress value='19' max='19' style='width:300px; height:20px; vertical-align: middle;'></progress>\n [19/19 00:00]\n </div>\n "},"metadata":{}},{"name":"stdout","text":"{'eval_loss': 1.4216604232788086, 'eval_accuracy': 0.4, 'eval_runtime': 0.3594, 'eval_samples_per_second': 208.653, 'eval_steps_per_second': 52.859, 'epoch': 10.0}\n","output_type":"stream"}],"execution_count":7},{"cell_type":"code","source":"","metadata":{"trusted":true},"outputs":[],"execution_count":null}]}
maths2025mains.ipynb ADDED
@@ -0,0 +1 @@
 
 
1
+ {"metadata":{"kernelspec":{"language":"python","display_name":"Python 3","name":"python3"},"language_info":{"name":"python","version":"3.11.13","mimetype":"text/x-python","codemirror_mode":{"name":"ipython","version":3},"pygments_lexer":"ipython3","nbconvert_exporter":"python","file_extension":".py"},"kaggle":{"accelerator":"gpu","dataSources":[{"sourceId":12786410,"sourceType":"datasetVersion","datasetId":8083912}],"dockerImageVersionId":31090,"isInternetEnabled":true,"language":"python","sourceType":"notebook","isGpuEnabled":true}},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"code","source":"# Install required libraries\n!pip install datasets torch sympy antlr4-python3-runtime -q\n!pip install transformers==4.40.0 -q || pip install transformers==4.6.1 -q\n\n# Set environment variable to suppress tokenizers warning\nimport os\nos.environ[\"TOKENIZERS_PARALLELISM\"] = \"false\"\n\n# Import libraries\nfrom datasets import load_dataset, concatenate_datasets\nfrom transformers import AutoTokenizer\nimport random\nimport transformers\nimport torch\nimport sympy as sp\nimport numpy as np\n\n# Verify transformers version\nprint(f\"Transformers version: {transformers.__version__}\")\n\n# Set device to single GPU or CPU\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nprint(f\"Using device: {device}\")\n\n# Load both January and April datasets\ntry:\n jan_data = load_dataset(\"PhysicsWallahAI/JEE-Main-2025-Math\", \"jan\", split=\"test\")\n apr_data = load_dataset(\"PhysicsWallahAI/JEE-Main-2025-Math\", \"apr\", split=\"test\")\n dataset = concatenate_datasets([jan_data, apr_data])\n print(f\"Dataset columns after loading: {dataset.column_names}\")\nexcept Exception as e:\n print(f\"Error loading dataset: {e}\")\n raise\n\n# Simplify LaTeX expressions with sympy, handle complex cases\ndef simplify_latex(text):\n try:\n # Skip sets, lists, or non-math LaTeX\n if any(c in text for c in ['{', '}', ',', r'\\text', r'\\begin', 
r'\\end']):\n return text\n # Replace common LaTeX with sympy-compatible format\n text = (text.replace(r'\\frac', '/')\n .replace(r'\\sqrt', 'sqrt')\n .replace(r'\\^', '**')\n .replace(r'\\left', '')\n .replace(r'\\right', '')\n .replace(r'\\cdot', '*'))\n expr = sp.sympify(text, evaluate=False)\n simplified = sp.simplify(expr)\n return str(simplified)\n except:\n return text\n\n# Bin NAT answers into 16 classes (4–19, reserving 0–3 for MCQs)\ndef bin_nat_answer(answer):\n try:\n value = float(answer)\n # Bins: [-10, 0) → 4, [0, 1) → 5, [1, 2) → 6, ..., [14, 15) → 19\n if value < -10:\n return 4\n bin_idx = min(int(np.floor(value)) + 5, 19) # Caps at 19\n return bin_idx\n except:\n return 4 # Default bin for non-numeric answers\n\n# Preprocess MCQs and NATs\ndef preprocess_data(example):\n question = simplify_latex(example[\"question\"])\n if example[\"question_type\"] == 1: # MCQs\n options = [simplify_latex(opt) for opt in example[\"options\"]]\n correct_idx = example[\"correct_options\"][0]\n indices = list(range(len(options)))\n random.shuffle(indices)\n shuffled_options = [options[i] for i in indices]\n new_correct_idx = indices.index(correct_idx)\n options_text = \" Options: \" + \", \".join(shuffled_options)\n input_text = f\"MCQ: {question}{options_text}\"\n return {\n \"input_text\": input_text,\n \"label\": new_correct_idx,\n \"question_type\": \"mcq\"\n }\n elif example[\"question_type\"] == 2: # NATs\n answer = str(example[\"answer\"])\n bin_idx = bin_nat_answer(answer)\n input_text = f\"NAT: {question} Answer:\"\n return {\n \"input_text\": input_text,\n \"label\": bin_idx,\n \"question_type\": \"nat\"\n }\n return None\n\ntry:\n combined_dataset = dataset.filter(lambda x: x[\"question_type\"] in [1, 2]).map(preprocess_data)\n print(f\"Dataset columns after preprocessing: {combined_dataset.column_names}\")\n # Log a few samples for debugging\n print(\"Sample inputs:\")\n for i in range(min(3, len(combined_dataset))):\n print(f\"Sample {i+1}: 
{combined_dataset[i]['input_text'][:100]}... Label: {combined_dataset[i]['label']}\")\n combined_dataset = combined_dataset.remove_columns([\"question\", \"answer\", \"options\", \"correct_options\", \"additional_data\", \"metadata\", \"question_type\"])\n print(f\"Dataset columns after removing columns: {combined_dataset.column_names}\")\n combined_dataset = combined_dataset.train_test_split(test_size=0.2, seed=42)\nexcept Exception as e:\n print(f\"Error during preprocessing: {e}\")\n raise\n\n# Tokenize with DistilBERT\ntry:\n tokenizer = AutoTokenizer.from_pretrained(\"distilbert-base-uncased\")\n def tokenize_function(example):\n return tokenizer(example[\"input_text\"], padding=\"max_length\", truncation=True, max_length=128) # Reduced for speed\n tokenized_dataset = combined_dataset.map(tokenize_function, batched=True)\n print(f\"Dataset columns after tokenization: {tokenized_dataset['train'].column_names}\")\n tokenized_dataset = tokenized_dataset.rename_column(\"label\", \"labels\")\n tokenized_dataset = tokenized_dataset.remove_columns([\"input_text\"])\n print(f\"Dataset columns after final removal: {tokenized_dataset['train'].column_names}\")\n tokenized_dataset.set_format(\"torch\")\nexcept Exception as e:\n print(f\"Error during tokenization: {e}\")\n raise","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-08-18T13:06:08.978839Z","iopub.execute_input":"2025-08-18T13:06:08.979540Z","iopub.status.idle":"2025-08-18T13:06:19.431112Z","shell.execute_reply.started":"2025-08-18T13:06:08.979516Z","shell.execute_reply":"2025-08-18T13:06:19.430388Z"}},"outputs":[{"name":"stdout","text":"Transformers version: 4.52.4\nUsing device: cuda:0\nDataset columns after loading: ['question', 'answer', 'options', 'correct_options', 'question_type', 'additional_data', 'metadata']\nDataset columns after preprocessing: ['question', 'answer', 'options', 'correct_options', 'question_type', 'additional_data', 'metadata', 'input_text', 'label']\nSample 
inputs:\nSample 1: MCQ: If the first term of an A.P. is 3 and the sum of its first four terms is equal to one-fifth of ... Label: 3\nSample 2: MCQ: One die has two faces marked 1, two faces marked 2, one face marked 3 and one face marked 4. An... Label: 3\nSample 3: MCQ: Let the position vectors of the vertices \\(A\\), \\(B\\) and \\(C\\) of a tetrahedron \\(ABCD\\) be \\(... Label: 1\nDataset columns after removing columns: ['input_text', 'label']\n","output_type":"stream"},{"output_type":"display_data","data":{"text/plain":"Map: 0%| | 0/304 [00:00<?, ? examples/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"44cbd4f891064fafb932f70650bb954f"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"Map: 0%| | 0/76 [00:00<?, ? examples/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"95a2c2811de84230a181323347658c5c"}},"metadata":{}},{"name":"stdout","text":"Dataset columns after tokenization: ['input_text', 'label', 'input_ids', 'attention_mask']\nDataset columns after final removal: ['labels', 'input_ids', 'attention_mask']\n","output_type":"stream"}],"execution_count":13},{"cell_type":"code","source":"from transformers import AutoModelForSequenceClassification, Trainer, TrainingArguments\nfrom sklearn.metrics import accuracy_score\nimport torch\nimport numpy as np\n\n# Define custom metrics for accuracy (separate MCQ and NAT)\ndef compute_metrics(eval_pred):\n logits, labels = eval_pred\n predictions = np.argmax(logits, axis=-1)\n mcq_mask = labels < 4\n nat_mask = labels >= 4\n mcq_acc = accuracy_score(labels[mcq_mask], predictions[mcq_mask]) if mcq_mask.sum() > 0 else 0\n nat_acc = accuracy_score(labels[nat_mask], predictions[nat_mask]) if nat_mask.sum() > 0 else 0\n total_acc = accuracy_score(labels, predictions)\n return {\n \"accuracy\": total_acc,\n \"mcq_accuracy\": mcq_acc,\n \"nat_accuracy\": nat_acc\n }\n\n# Initialize DistilBERT model\ntry:\n 
model = AutoModelForSequenceClassification.from_pretrained(\"distilbert-base-uncased\", num_labels=20) # 0–3 for MCQs, 4–19 for NATs\n model.to(device)\nexcept Exception as e:\n print(f\"Error loading model: {e}\")\n raise\n\n# Training arguments for older transformers versions\ntraining_args = TrainingArguments(\n output_dir=\"/kaggle/working/results\",\n per_device_train_batch_size=4,\n per_device_eval_batch_size=4,\n num_train_epochs=10, # Reduced for speed\n learning_rate=5e-5, # Increased slightly for faster convergence\n weight_decay=0.1,\n logging_dir=\"/kaggle/working/logs\",\n logging_steps=10,\n save_steps=100,\n save_total_limit=2,\n eval_steps=100,\n gradient_accumulation_steps=2,\n no_cuda=False,\n report_to=\"none\"\n)\n\n# Initialize trainer\ntry:\n trainer = Trainer(\n model=model,\n args=training_args,\n train_dataset=tokenized_dataset[\"train\"],\n eval_dataset=tokenized_dataset[\"test\"],\n compute_metrics=compute_metrics\n )\nexcept Exception as e:\n print(f\"Error initializing trainer: {e}\")\n raise\n\n# Train with manual early stopping\nbest_acc = 0\npatience = 3\nno_improve = 0\nfor epoch in range(int(training_args.num_train_epochs)):\n print(f\"\\nEpoch {epoch+1}/{training_args.num_train_epochs}\")\n trainer.train()\n metrics = trainer.evaluate()\n print(\"Evaluation Metrics:\", metrics)\n if metrics[\"eval_accuracy\"] > best_acc:\n best_acc = metrics[\"eval_accuracy\"]\n no_improve = 0\n model.save_pretrained(\"/kaggle/working/combined_model_best\")\n tokenizer.save_pretrained(\"/kaggle/working/combined_tokenizer_best\")\n else:\n no_improve += 1\n if no_improve >= patience:\n print(\"Early stopping triggered.\")\n break\n\n# Load best model\nmodel = AutoModelForSequenceClassification.from_pretrained(\"/kaggle/working/combined_model_best\").to(device)\n\n# Predict on a few validation samples\nmodel.eval()\nsample_inputs = tokenized_dataset[\"test\"].select(range(5))\ninputs = {key: sample_inputs[key].clone().detach().to(device) for key in 
[\"input_ids\", \"attention_mask\"]}\nlabels = sample_inputs[\"labels\"].clone().detach().to(device)\nwith torch.no_grad():\n outputs = model(**inputs)\n predictions = torch.argmax(outputs.logits, dim=-1)\n accuracy = accuracy_score(labels.cpu().numpy(), predictions.cpu().numpy())\nprint(f\"Accuracy on 5 validation samples: {accuracy:.3f}\")\nfor i, (pred, label) in enumerate(zip(predictions.cpu().numpy(), labels.cpu().numpy())):\n print(f\"Sample {i+1}: Predicted label {pred}, True label {label}, {'Correct' if pred == label else 'Incorrect'}\")\n\n# Save final model and tokenizer\nmodel.save_pretrained(\"/kaggle/working/combined_model\")\ntokenizer.save_pretrained(\"/kaggle/working/combined_tokenizer\")","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-08-18T13:06:37.680655Z","iopub.execute_input":"2025-08-18T13:06:37.681577Z","iopub.status.idle":"2025-08-18T13:09:49.928000Z","shell.execute_reply.started":"2025-08-18T13:06:37.681539Z","shell.execute_reply":"2025-08-18T13:09:49.927396Z"}},"outputs":[{"name":"stderr","text":"Some weights of DistilBertForSequenceClassification were not initialized from the model checkpoint at distilbert-base-uncased and are newly initialized: ['classifier.bias', 'classifier.weight', 'pre_classifier.bias', 'pre_classifier.weight']\nYou should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n","output_type":"stream"},{"name":"stdout","text":"\nEpoch 1/10\n","output_type":"stream"},{"output_type":"display_data","data":{"text/plain":"<IPython.core.display.HTML object>","text/html":"\n <div>\n \n <progress value='380' max='380' style='width:300px; height:20px; vertical-align: middle;'></progress>\n [380/380 00:36, Epoch 10/10]\n </div>\n <table border=\"1\" class=\"dataframe\">\n <thead>\n <tr style=\"text-align: left;\">\n <th>Step</th>\n <th>Training Loss</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <td>10</td>\n <td>2.499800</td>\n </tr>\n <tr>\n <td>20</td>\n 
<td>1.993700</td>\n </tr>\n <tr>\n <td>30</td>\n <td>1.780600</td>\n </tr>\n <tr>\n <td>40</td>\n <td>1.659100</td>\n </tr>\n <tr>\n <td>50</td>\n <td>1.711700</td>\n </tr>\n <tr>\n <td>60</td>\n <td>1.541800</td>\n </tr>\n <tr>\n <td>70</td>\n <td>1.677700</td>\n </tr>\n <tr>\n <td>80</td>\n <td>1.534300</td>\n </tr>\n <tr>\n <td>90</td>\n <td>1.462000</td>\n </tr>\n <tr>\n <td>100</td>\n <td>1.519900</td>\n </tr>\n <tr>\n <td>110</td>\n <td>1.498400</td>\n </tr>\n <tr>\n <td>120</td>\n <td>1.587300</td>\n </tr>\n <tr>\n <td>130</td>\n <td>1.513200</td>\n </tr>\n <tr>\n <td>140</td>\n <td>1.476100</td>\n </tr>\n <tr>\n <td>150</td>\n <td>1.543800</td>\n </tr>\n <tr>\n <td>160</td>\n <td>1.493800</td>\n </tr>\n <tr>\n <td>170</td>\n <td>1.409000</td>\n </tr>\n <tr>\n <td>180</td>\n <td>1.508200</td>\n </tr>\n <tr>\n <td>190</td>\n <td>1.399200</td>\n </tr>\n <tr>\n <td>200</td>\n <td>1.360000</td>\n </tr>\n <tr>\n <td>210</td>\n <td>1.364800</td>\n </tr>\n <tr>\n <td>220</td>\n <td>1.211800</td>\n </tr>\n <tr>\n <td>230</td>\n <td>1.335600</td>\n </tr>\n <tr>\n <td>240</td>\n <td>1.160400</td>\n </tr>\n <tr>\n <td>250</td>\n <td>1.131000</td>\n </tr>\n <tr>\n <td>260</td>\n <td>1.094000</td>\n </tr>\n <tr>\n <td>270</td>\n <td>0.975400</td>\n </tr>\n <tr>\n <td>280</td>\n <td>0.829000</td>\n </tr>\n <tr>\n <td>290</td>\n <td>0.857300</td>\n </tr>\n <tr>\n <td>300</td>\n <td>0.783600</td>\n </tr>\n <tr>\n <td>310</td>\n <td>0.681500</td>\n </tr>\n <tr>\n <td>320</td>\n <td>0.623900</td>\n </tr>\n <tr>\n <td>330</td>\n <td>0.600800</td>\n </tr>\n <tr>\n <td>340</td>\n <td>0.680100</td>\n </tr>\n <tr>\n <td>350</td>\n <td>0.505100</td>\n </tr>\n <tr>\n <td>360</td>\n <td>0.477400</td>\n </tr>\n <tr>\n <td>370</td>\n <td>0.550700</td>\n </tr>\n <tr>\n <td>380</td>\n <td>0.460000</td>\n </tr>\n </tbody>\n</table><p>"},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"<IPython.core.display.HTML object>","text/html":"\n <div>\n \n <progress value='95' 
max='19' style='width:300px; height:20px; vertical-align: middle;'></progress>\n [19/19 02:33]\n </div>\n "},"metadata":{}},{"name":"stdout","text":"Evaluation Metrics: {'eval_loss': 1.9462043046951294, 'eval_accuracy': 0.25, 'eval_mcq_accuracy': 0.25, 'eval_nat_accuracy': 0, 'eval_runtime': 0.1942, 'eval_samples_per_second': 391.434, 'eval_steps_per_second': 97.859, 'epoch': 10.0}\n\nEpoch 2/10\n","output_type":"stream"},{"output_type":"display_data","data":{"text/plain":"<IPython.core.display.HTML object>","text/html":"\n <div>\n \n <progress value='380' max='380' style='width:300px; height:20px; vertical-align: middle;'></progress>\n [380/380 00:36, Epoch 10/10]\n </div>\n <table border=\"1\" class=\"dataframe\">\n <thead>\n <tr style=\"text-align: left;\">\n <th>Step</th>\n <th>Training Loss</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <td>10</td>\n <td>0.440500</td>\n </tr>\n <tr>\n <td>20</td>\n <td>0.601800</td>\n </tr>\n <tr>\n <td>30</td>\n <td>0.695700</td>\n </tr>\n <tr>\n <td>40</td>\n <td>0.592100</td>\n </tr>\n <tr>\n <td>50</td>\n <td>0.492900</td>\n </tr>\n <tr>\n <td>60</td>\n <td>0.469000</td>\n </tr>\n <tr>\n <td>70</td>\n <td>0.637500</td>\n </tr>\n <tr>\n <td>80</td>\n <td>0.256700</td>\n </tr>\n <tr>\n <td>90</td>\n <td>0.191800</td>\n </tr>\n <tr>\n <td>100</td>\n <td>0.293500</td>\n </tr>\n <tr>\n <td>110</td>\n <td>0.366700</td>\n </tr>\n <tr>\n <td>120</td>\n <td>0.108400</td>\n </tr>\n <tr>\n <td>130</td>\n <td>0.113700</td>\n </tr>\n <tr>\n <td>140</td>\n <td>0.101600</td>\n </tr>\n <tr>\n <td>150</td>\n <td>0.159000</td>\n </tr>\n <tr>\n <td>160</td>\n <td>0.202800</td>\n </tr>\n <tr>\n <td>170</td>\n <td>0.015100</td>\n </tr>\n <tr>\n <td>180</td>\n <td>0.158800</td>\n </tr>\n <tr>\n <td>190</td>\n <td>0.086000</td>\n </tr>\n <tr>\n <td>200</td>\n <td>0.119600</td>\n </tr>\n <tr>\n <td>210</td>\n <td>0.017400</td>\n </tr>\n <tr>\n <td>220</td>\n <td>0.011300</td>\n </tr>\n <tr>\n <td>230</td>\n <td>0.062200</td>\n </tr>\n <tr>\n 
<td>240</td>\n <td>0.009300</td>\n </tr>\n <tr>\n <td>250</td>\n <td>0.061100</td>\n </tr>\n <tr>\n <td>260</td>\n <td>0.011600</td>\n </tr>\n <tr>\n <td>270</td>\n <td>0.005100</td>\n </tr>\n <tr>\n <td>280</td>\n <td>0.005100</td>\n </tr>\n <tr>\n <td>290</td>\n <td>0.021600</td>\n </tr>\n <tr>\n <td>300</td>\n <td>0.073700</td>\n </tr>\n <tr>\n <td>310</td>\n <td>0.058200</td>\n </tr>\n <tr>\n <td>320</td>\n <td>0.004600</td>\n </tr>\n <tr>\n <td>330</td>\n <td>0.004000</td>\n </tr>\n <tr>\n <td>340</td>\n <td>0.054100</td>\n </tr>\n <tr>\n <td>350</td>\n <td>0.004100</td>\n </tr>\n <tr>\n <td>360</td>\n <td>0.005300</td>\n </tr>\n <tr>\n <td>370</td>\n <td>0.028700</td>\n </tr>\n <tr>\n <td>380</td>\n <td>0.012800</td>\n </tr>\n </tbody>\n</table><p>"},"metadata":{}},{"name":"stdout","text":"Evaluation Metrics: {'eval_loss': 4.321730613708496, 'eval_accuracy': 0.27631578947368424, 'eval_mcq_accuracy': 0.27631578947368424, 'eval_nat_accuracy': 0, 'eval_runtime': 0.2063, 'eval_samples_per_second': 368.43, 'eval_steps_per_second': 92.107, 'epoch': 10.0}\n\nEpoch 3/10\n","output_type":"stream"},{"output_type":"display_data","data":{"text/plain":"<IPython.core.display.HTML object>","text/html":"\n <div>\n \n <progress value='380' max='380' style='width:300px; height:20px; vertical-align: middle;'></progress>\n [380/380 00:37, Epoch 10/10]\n </div>\n <table border=\"1\" class=\"dataframe\">\n <thead>\n <tr style=\"text-align: left;\">\n <th>Step</th>\n <th>Training Loss</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <td>10</td>\n <td>0.003400</td>\n </tr>\n <tr>\n <td>20</td>\n <td>0.003800</td>\n </tr>\n <tr>\n <td>30</td>\n <td>0.068000</td>\n </tr>\n <tr>\n <td>40</td>\n <td>0.004500</td>\n </tr>\n <tr>\n <td>50</td>\n <td>0.003600</td>\n </tr>\n <tr>\n <td>60</td>\n <td>0.068900</td>\n </tr>\n <tr>\n <td>70</td>\n <td>0.011600</td>\n </tr>\n <tr>\n <td>80</td>\n <td>0.039100</td>\n </tr>\n <tr>\n <td>90</td>\n <td>0.051600</td>\n </tr>\n <tr>\n <td>100</td>\n 
<td>0.010200</td>\n </tr>\n <tr>\n <td>110</td>\n <td>0.035700</td>\n </tr>\n <tr>\n <td>120</td>\n <td>0.039900</td>\n </tr>\n <tr>\n <td>130</td>\n <td>0.021200</td>\n </tr>\n <tr>\n <td>140</td>\n <td>0.060600</td>\n </tr>\n <tr>\n <td>150</td>\n <td>0.002000</td>\n </tr>\n <tr>\n <td>160</td>\n <td>0.028300</td>\n </tr>\n <tr>\n <td>170</td>\n <td>0.001100</td>\n </tr>\n <tr>\n <td>180</td>\n <td>0.002600</td>\n </tr>\n <tr>\n <td>190</td>\n <td>0.030000</td>\n </tr>\n <tr>\n <td>200</td>\n <td>0.001800</td>\n </tr>\n <tr>\n <td>210</td>\n <td>0.001100</td>\n </tr>\n <tr>\n <td>220</td>\n <td>0.000800</td>\n </tr>\n <tr>\n <td>230</td>\n <td>0.001800</td>\n </tr>\n <tr>\n <td>240</td>\n <td>0.000800</td>\n </tr>\n <tr>\n <td>250</td>\n <td>0.018500</td>\n </tr>\n <tr>\n <td>260</td>\n <td>0.001000</td>\n </tr>\n <tr>\n <td>270</td>\n <td>0.021100</td>\n </tr>\n <tr>\n <td>280</td>\n <td>0.000900</td>\n </tr>\n <tr>\n <td>290</td>\n <td>0.000900</td>\n </tr>\n <tr>\n <td>300</td>\n <td>0.001100</td>\n </tr>\n <tr>\n <td>310</td>\n <td>0.003500</td>\n </tr>\n <tr>\n <td>320</td>\n <td>0.000800</td>\n </tr>\n <tr>\n <td>330</td>\n <td>0.000700</td>\n </tr>\n <tr>\n <td>340</td>\n <td>0.000800</td>\n </tr>\n <tr>\n <td>350</td>\n <td>0.000700</td>\n </tr>\n <tr>\n <td>360</td>\n <td>0.000600</td>\n </tr>\n <tr>\n <td>370</td>\n <td>0.000900</td>\n </tr>\n <tr>\n <td>380</td>\n <td>0.000700</td>\n </tr>\n </tbody>\n</table><p>"},"metadata":{}},{"name":"stdout","text":"Evaluation Metrics: {'eval_loss': 6.086737155914307, 'eval_accuracy': 0.21052631578947367, 'eval_mcq_accuracy': 0.21052631578947367, 'eval_nat_accuracy': 0, 'eval_runtime': 0.2104, 'eval_samples_per_second': 361.24, 'eval_steps_per_second': 90.31, 'epoch': 10.0}\n\nEpoch 4/10\n","output_type":"stream"},{"output_type":"display_data","data":{"text/plain":"<IPython.core.display.HTML object>","text/html":"\n <div>\n \n <progress value='380' max='380' style='width:300px; height:20px; vertical-align: 
middle;'></progress>\n [380/380 00:37, Epoch 10/10]\n </div>\n <table border=\"1\" class=\"dataframe\">\n <thead>\n <tr style=\"text-align: left;\">\n <th>Step</th>\n <th>Training Loss</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <td>10</td>\n <td>0.000800</td>\n </tr>\n <tr>\n <td>20</td>\n <td>0.000700</td>\n </tr>\n <tr>\n <td>30</td>\n <td>0.000600</td>\n </tr>\n <tr>\n <td>40</td>\n <td>0.021600</td>\n </tr>\n <tr>\n <td>50</td>\n <td>0.002100</td>\n </tr>\n <tr>\n <td>60</td>\n <td>0.000600</td>\n </tr>\n <tr>\n <td>70</td>\n <td>0.001100</td>\n </tr>\n <tr>\n <td>80</td>\n <td>0.000800</td>\n </tr>\n <tr>\n <td>90</td>\n <td>0.000400</td>\n </tr>\n <tr>\n <td>100</td>\n <td>0.000600</td>\n </tr>\n <tr>\n <td>110</td>\n <td>0.000700</td>\n </tr>\n <tr>\n <td>120</td>\n <td>0.000600</td>\n </tr>\n <tr>\n <td>130</td>\n <td>0.026800</td>\n </tr>\n <tr>\n <td>140</td>\n <td>0.000400</td>\n </tr>\n <tr>\n <td>150</td>\n <td>0.000700</td>\n </tr>\n <tr>\n <td>160</td>\n <td>0.000400</td>\n </tr>\n <tr>\n <td>170</td>\n <td>0.000400</td>\n </tr>\n <tr>\n <td>180</td>\n <td>0.005300</td>\n </tr>\n <tr>\n <td>190</td>\n <td>0.000300</td>\n </tr>\n <tr>\n <td>200</td>\n <td>0.000400</td>\n </tr>\n <tr>\n <td>210</td>\n <td>0.000400</td>\n </tr>\n <tr>\n <td>220</td>\n <td>0.080500</td>\n </tr>\n <tr>\n <td>230</td>\n <td>0.000400</td>\n </tr>\n <tr>\n <td>240</td>\n <td>0.000300</td>\n </tr>\n <tr>\n <td>250</td>\n <td>0.000300</td>\n </tr>\n <tr>\n <td>260</td>\n <td>0.000500</td>\n </tr>\n <tr>\n <td>270</td>\n <td>0.000300</td>\n </tr>\n <tr>\n <td>280</td>\n <td>0.000300</td>\n </tr>\n <tr>\n <td>290</td>\n <td>0.000300</td>\n </tr>\n <tr>\n <td>300</td>\n <td>0.000300</td>\n </tr>\n <tr>\n <td>310</td>\n <td>0.000300</td>\n </tr>\n <tr>\n <td>320</td>\n <td>0.000300</td>\n </tr>\n <tr>\n <td>330</td>\n <td>0.000300</td>\n </tr>\n <tr>\n <td>340</td>\n <td>0.000300</td>\n </tr>\n <tr>\n <td>350</td>\n <td>0.000300</td>\n </tr>\n <tr>\n <td>360</td>\n 
<td>0.000300</td>\n </tr>\n <tr>\n <td>370</td>\n <td>0.000300</td>\n </tr>\n <tr>\n <td>380</td>\n <td>0.000300</td>\n </tr>\n </tbody>\n</table><p>"},"metadata":{}},{"name":"stdout","text":"Evaluation Metrics: {'eval_loss': 6.422303676605225, 'eval_accuracy': 0.2236842105263158, 'eval_mcq_accuracy': 0.2236842105263158, 'eval_nat_accuracy': 0, 'eval_runtime': 0.2058, 'eval_samples_per_second': 369.215, 'eval_steps_per_second': 92.304, 'epoch': 10.0}\n\nEpoch 5/10\n","output_type":"stream"},{"output_type":"display_data","data":{"text/plain":"<IPython.core.display.HTML object>","text/html":"\n <div>\n \n <progress value='380' max='380' style='width:300px; height:20px; vertical-align: middle;'></progress>\n [380/380 00:37, Epoch 10/10]\n </div>\n <table border=\"1\" class=\"dataframe\">\n <thead>\n <tr style=\"text-align: left;\">\n <th>Step</th>\n <th>Training Loss</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <td>10</td>\n <td>0.000300</td>\n </tr>\n <tr>\n <td>20</td>\n <td>0.000300</td>\n </tr>\n <tr>\n <td>30</td>\n <td>0.000200</td>\n </tr>\n <tr>\n <td>40</td>\n <td>0.000300</td>\n </tr>\n <tr>\n <td>50</td>\n <td>0.000300</td>\n </tr>\n <tr>\n <td>60</td>\n <td>0.000200</td>\n </tr>\n <tr>\n <td>70</td>\n <td>0.000300</td>\n </tr>\n <tr>\n <td>80</td>\n <td>0.000200</td>\n </tr>\n <tr>\n <td>90</td>\n <td>0.000200</td>\n </tr>\n <tr>\n <td>100</td>\n <td>0.000200</td>\n </tr>\n <tr>\n <td>110</td>\n <td>0.000200</td>\n </tr>\n <tr>\n <td>120</td>\n <td>0.000300</td>\n </tr>\n <tr>\n <td>130</td>\n <td>0.000200</td>\n </tr>\n <tr>\n <td>140</td>\n <td>0.000200</td>\n </tr>\n <tr>\n <td>150</td>\n <td>0.000200</td>\n </tr>\n <tr>\n <td>160</td>\n <td>0.000200</td>\n </tr>\n <tr>\n <td>170</td>\n <td>0.000200</td>\n </tr>\n <tr>\n <td>180</td>\n <td>0.000200</td>\n </tr>\n <tr>\n <td>190</td>\n <td>0.000200</td>\n </tr>\n <tr>\n <td>200</td>\n <td>0.000200</td>\n </tr>\n <tr>\n <td>210</td>\n <td>0.000200</td>\n </tr>\n <tr>\n <td>220</td>\n <td>0.000200</td>\n 
</tr>\n <tr>\n <td>230</td>\n <td>0.000200</td>\n </tr>\n <tr>\n <td>240</td>\n <td>0.000200</td>\n </tr>\n <tr>\n <td>250</td>\n <td>0.000200</td>\n </tr>\n <tr>\n <td>260</td>\n <td>0.000200</td>\n </tr>\n <tr>\n <td>270</td>\n <td>0.000200</td>\n </tr>\n <tr>\n <td>280</td>\n <td>0.000200</td>\n </tr>\n <tr>\n <td>290</td>\n <td>0.000200</td>\n </tr>\n <tr>\n <td>300</td>\n <td>0.000200</td>\n </tr>\n <tr>\n <td>310</td>\n <td>0.000200</td>\n </tr>\n <tr>\n <td>320</td>\n <td>0.000200</td>\n </tr>\n <tr>\n <td>330</td>\n <td>0.000200</td>\n </tr>\n <tr>\n <td>340</td>\n <td>0.000200</td>\n </tr>\n <tr>\n <td>350</td>\n <td>0.000200</td>\n </tr>\n <tr>\n <td>360</td>\n <td>0.000200</td>\n </tr>\n <tr>\n <td>370</td>\n <td>0.000200</td>\n </tr>\n <tr>\n <td>380</td>\n <td>0.000100</td>\n </tr>\n </tbody>\n</table><p>"},"metadata":{}},{"name":"stdout","text":"Evaluation Metrics: {'eval_loss': 6.810024261474609, 'eval_accuracy': 0.2236842105263158, 'eval_mcq_accuracy': 0.2236842105263158, 'eval_nat_accuracy': 0, 'eval_runtime': 0.2097, 'eval_samples_per_second': 362.436, 'eval_steps_per_second': 90.609, 'epoch': 10.0}\nEarly stopping triggered.\nAccuracy on 5 validation samples: 0.400\nSample 1: Predicted label 1, True label 3, Incorrect\nSample 2: Predicted label 1, True label 2, Incorrect\nSample 3: Predicted label 0, True label 1, Incorrect\nSample 4: Predicted label 1, True label 1, Correct\nSample 5: Predicted label 3, True label 3, Correct\n","output_type":"stream"},{"execution_count":14,"output_type":"execute_result","data":{"text/plain":"('/kaggle/working/combined_tokenizer/tokenizer_config.json',\n '/kaggle/working/combined_tokenizer/special_tokens_map.json',\n '/kaggle/working/combined_tokenizer/vocab.txt',\n '/kaggle/working/combined_tokenizer/added_tokens.json',\n '/kaggle/working/combined_tokenizer/tokenizer.json')"},"metadata":{}}],"execution_count":14},{"cell_type":"code","source":"","metadata":{"trusted":true},"outputs":[],"execution_count":null}]}
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:684c50b79bd957505d3daa8729f5cbe4c7e4ff9ad5b6f122efb72a5eedeeae3d
3
+ size 267838720
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "cls_token": "[CLS]",
3
+ "mask_token": "[MASK]",
4
+ "pad_token": "[PAD]",
5
+ "sep_token": "[SEP]",
6
+ "unk_token": "[UNK]"
7
+ }
tokenizer_config.json ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "added_tokens_decoder": {
3
+ "0": {
4
+ "content": "[PAD]",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "100": {
12
+ "content": "[UNK]",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "101": {
20
+ "content": "[CLS]",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "102": {
28
+ "content": "[SEP]",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "103": {
36
+ "content": "[MASK]",
37
+ "lstrip": false,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ }
43
+ },
44
+ "clean_up_tokenization_spaces": true,
45
+ "cls_token": "[CLS]",
46
+ "do_basic_tokenize": true,
47
+ "do_lower_case": true,
48
+ "extra_special_tokens": {},
49
+ "mask_token": "[MASK]",
50
+ "model_max_length": 512,
51
+ "never_split": null,
52
+ "pad_token": "[PAD]",
53
+ "sep_token": "[SEP]",
54
+ "strip_accents": null,
55
+ "tokenize_chinese_chars": true,
56
+ "tokenizer_class": "DistilBertTokenizer",
57
+ "unk_token": "[UNK]"
58
+ }
vocab.txt ADDED
The diff for this file is too large to render. See raw diff