{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "a2e9b1825c9d48a8abb3535087c66b42", "version_major": 2, "version_minor": 0 }, "text/plain": [ "Map: 0%| | 0/408 [00:00labels,\\nFormating the input_ids to pytorch tensors, etc.\\nThe trainer API will automatically do this for us by analyzing the model\\nsignature!\\n'" ] }, "execution_count": 2, "metadata": {}, "output_type": "execute_result" } ], "source": [ "'''Note we did not apply padding in the preprocessing as we used Dynamic Padding\n", "by the DataCollatorWithPadding!'''\n", "\"\"\"Note that we do not do the final steps such as:\n", "Removing the unnecessary columns, Renaming the column label->labels,\n", "Formating the input_ids to pytorch tensors, etc.\n", "The trainer API will automatically do this for us by analyzing the model\n", "signature!\n", "\"\"\"" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Some weights of BertForSequenceClassification were not initialized from the model checkpoint at bert-base-uncased and are newly initialized: ['classifier.weight', 'classifier.bias']\n", "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n" ] } ], "source": [ "'''The last step is to define our model and\n", "prepare some training hyper parameters'''\n", "from transformers import AutoModelForSequenceClassification\n", "model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2)" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "from transformers import TrainingArguments\n", "import torch\n", "training_args = TrainingArguments('test-trainer')" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [], "source": [ "from transformers import TrainingArguments\n", "\n", "# 
You can also assign all the hyper parameters in the arguments:\n", "training_args = TrainingArguments(\n", " 'test-trainer',\n", " per_device_train_batch_size=16,\n", " per_device_eval_batch_size=64,\n", " num_train_epochs=5,\n", " learning_rate=2e-5,\n", " weight_decay=0.01,)\n" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "\"It's then very easy to create a trainer and launch a training\"" ] }, "execution_count": 7, "metadata": {}, "output_type": "execute_result" } ], "source": [ "'''It's then very easy to create a trainer and launch a training'''" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "90b1d1a6ce2f4288b6645258ddf83af6", "version_major": 2, "version_minor": 0 }, "text/plain": [ " 0%| | 0/1150 [00:001\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mtransformers\u001b[39;00m \u001b[39mimport\u001b[39;00m Trainer\n\u001b[0;32m 3\u001b[0m trainer \u001b[39m=\u001b[39m Trainer(\n\u001b[0;32m 4\u001b[0m model, \n\u001b[0;32m 5\u001b[0m training_args,\n\u001b[1;32m (...)\u001b[0m\n\u001b[0;32m 9\u001b[0m tokenizer\u001b[39m=\u001b[39mtokenizer,\n\u001b[0;32m 10\u001b[0m )\n\u001b[1;32m---> 12\u001b[0m trainer\u001b[39m.\u001b[39;49mtrain()\n", "File \u001b[1;32m~\\AppData\\Roaming\\Python\\Python311\\site-packages\\transformers\\trainer.py:1591\u001b[0m, in \u001b[0;36mTrainer.train\u001b[1;34m(self, resume_from_checkpoint, trial, ignore_keys_for_eval, **kwargs)\u001b[0m\n\u001b[0;32m 1589\u001b[0m hf_hub_utils\u001b[39m.\u001b[39menable_progress_bars()\n\u001b[0;32m 1590\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[1;32m-> 1591\u001b[0m \u001b[39mreturn\u001b[39;00m inner_training_loop(\n\u001b[0;32m 1592\u001b[0m args\u001b[39m=\u001b[39;49margs,\n\u001b[0;32m 1593\u001b[0m resume_from_checkpoint\u001b[39m=\u001b[39;49mresume_from_checkpoint,\n\u001b[0;32m 1594\u001b[0m 
trial\u001b[39m=\u001b[39;49mtrial,\n\u001b[0;32m 1595\u001b[0m ignore_keys_for_eval\u001b[39m=\u001b[39;49mignore_keys_for_eval,\n\u001b[0;32m 1596\u001b[0m )\n", "File \u001b[1;32m~\\AppData\\Roaming\\Python\\Python311\\site-packages\\transformers\\trainer.py:1892\u001b[0m, in \u001b[0;36mTrainer._inner_training_loop\u001b[1;34m(self, batch_size, args, resume_from_checkpoint, trial, ignore_keys_for_eval)\u001b[0m\n\u001b[0;32m 1889\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mcontrol \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mcallback_handler\u001b[39m.\u001b[39mon_step_begin(args, \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mstate, \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mcontrol)\n\u001b[0;32m 1891\u001b[0m \u001b[39mwith\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39maccelerator\u001b[39m.\u001b[39maccumulate(model):\n\u001b[1;32m-> 1892\u001b[0m tr_loss_step \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mtraining_step(model, inputs)\n\u001b[0;32m 1894\u001b[0m \u001b[39mif\u001b[39;00m (\n\u001b[0;32m 1895\u001b[0m args\u001b[39m.\u001b[39mlogging_nan_inf_filter\n\u001b[0;32m 1896\u001b[0m \u001b[39mand\u001b[39;00m \u001b[39mnot\u001b[39;00m is_torch_tpu_available()\n\u001b[0;32m 1897\u001b[0m \u001b[39mand\u001b[39;00m (torch\u001b[39m.\u001b[39misnan(tr_loss_step) \u001b[39mor\u001b[39;00m torch\u001b[39m.\u001b[39misinf(tr_loss_step))\n\u001b[0;32m 1898\u001b[0m ):\n\u001b[0;32m 1899\u001b[0m \u001b[39m# if loss is nan or inf simply add the average of previous logged losses\u001b[39;00m\n\u001b[0;32m 1900\u001b[0m tr_loss \u001b[39m+\u001b[39m\u001b[39m=\u001b[39m tr_loss \u001b[39m/\u001b[39m (\u001b[39m1\u001b[39m \u001b[39m+\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mstate\u001b[39m.\u001b[39mglobal_step \u001b[39m-\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_globalstep_last_logged)\n", "File 
\u001b[1;32m~\\AppData\\Roaming\\Python\\Python311\\site-packages\\transformers\\trainer.py:2787\u001b[0m, in \u001b[0;36mTrainer.training_step\u001b[1;34m(self, model, inputs)\u001b[0m\n\u001b[0;32m 2785\u001b[0m scaled_loss\u001b[39m.\u001b[39mbackward()\n\u001b[0;32m 2786\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[1;32m-> 2787\u001b[0m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49maccelerator\u001b[39m.\u001b[39;49mbackward(loss)\n\u001b[0;32m 2789\u001b[0m \u001b[39mreturn\u001b[39;00m loss\u001b[39m.\u001b[39mdetach() \u001b[39m/\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39margs\u001b[39m.\u001b[39mgradient_accumulation_steps\n", "File \u001b[1;32m~\\AppData\\Roaming\\Python\\Python311\\site-packages\\accelerate\\accelerator.py:1989\u001b[0m, in \u001b[0;36mAccelerator.backward\u001b[1;34m(self, loss, **kwargs)\u001b[0m\n\u001b[0;32m 1987\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mscaler\u001b[39m.\u001b[39mscale(loss)\u001b[39m.\u001b[39mbackward(\u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs)\n\u001b[0;32m 1988\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[1;32m-> 1989\u001b[0m loss\u001b[39m.\u001b[39;49mbackward(\u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n", "File \u001b[1;32m~\\AppData\\Roaming\\Python\\Python311\\site-packages\\torch\\_tensor.py:492\u001b[0m, in \u001b[0;36mTensor.backward\u001b[1;34m(self, gradient, retain_graph, create_graph, inputs)\u001b[0m\n\u001b[0;32m 482\u001b[0m \u001b[39mif\u001b[39;00m has_torch_function_unary(\u001b[39mself\u001b[39m):\n\u001b[0;32m 483\u001b[0m \u001b[39mreturn\u001b[39;00m handle_torch_function(\n\u001b[0;32m 484\u001b[0m Tensor\u001b[39m.\u001b[39mbackward,\n\u001b[0;32m 485\u001b[0m (\u001b[39mself\u001b[39m,),\n\u001b[1;32m (...)\u001b[0m\n\u001b[0;32m 490\u001b[0m inputs\u001b[39m=\u001b[39minputs,\n\u001b[0;32m 491\u001b[0m )\n\u001b[1;32m--> 492\u001b[0m torch\u001b[39m.\u001b[39;49mautograd\u001b[39m.\u001b[39;49mbackward(\n\u001b[0;32m 493\u001b[0m 
\u001b[39mself\u001b[39;49m, gradient, retain_graph, create_graph, inputs\u001b[39m=\u001b[39;49minputs\n\u001b[0;32m 494\u001b[0m )\n", "File \u001b[1;32m~\\AppData\\Roaming\\Python\\Python311\\site-packages\\torch\\autograd\\__init__.py:251\u001b[0m, in \u001b[0;36mbackward\u001b[1;34m(tensors, grad_tensors, retain_graph, create_graph, grad_variables, inputs)\u001b[0m\n\u001b[0;32m 246\u001b[0m retain_graph \u001b[39m=\u001b[39m create_graph\n\u001b[0;32m 248\u001b[0m \u001b[39m# The reason we repeat the same comment below is that\u001b[39;00m\n\u001b[0;32m 249\u001b[0m \u001b[39m# some Python versions print out the first line of a multi-line function\u001b[39;00m\n\u001b[0;32m 250\u001b[0m \u001b[39m# calls in the traceback and some print out the last line\u001b[39;00m\n\u001b[1;32m--> 251\u001b[0m Variable\u001b[39m.\u001b[39;49m_execution_engine\u001b[39m.\u001b[39;49mrun_backward( \u001b[39m# Calls into the C++ engine to run the backward pass\u001b[39;49;00m\n\u001b[0;32m 252\u001b[0m tensors,\n\u001b[0;32m 253\u001b[0m grad_tensors_,\n\u001b[0;32m 254\u001b[0m retain_graph,\n\u001b[0;32m 255\u001b[0m create_graph,\n\u001b[0;32m 256\u001b[0m inputs,\n\u001b[0;32m 257\u001b[0m allow_unreachable\u001b[39m=\u001b[39;49m\u001b[39mTrue\u001b[39;49;00m,\n\u001b[0;32m 258\u001b[0m accumulate_grad\u001b[39m=\u001b[39;49m\u001b[39mTrue\u001b[39;49;00m,\n\u001b[0;32m 259\u001b[0m )\n", "\u001b[1;31mKeyboardInterrupt\u001b[0m: " ] } ], "source": [ "from transformers import Trainer\n", "\n", "trainer = Trainer(\n", " model, \n", " training_args,\n", " train_dataset=tokenized_datasets['train'],\n", " eval_dataset=tokenized_datasets['validation'],\n", " data_collator=data_collator,\n", " tokenizer=tokenizer,\n", ")\n", "\n", "trainer.train()" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "\"The result will however be anticlimactic as you will only get a training loss\\nwhich doesnt really tell you how well your 
model is performing\\n\\nThis is because we did not define a metric to evaluate our model on!\\n\\nTo get the metrics we will first gathers the predictions on the whole evaluation set('validation set')\\nusing the trainer.predict method\\n\\nIt will return a namedtuple with the following attributes:\\npredictions, label_ids, metrics, num_samples\\n\\nwe are trying to get the metrics attribute which is empty here!\\n\"" ] }, "execution_count": 9, "metadata": {}, "output_type": "execute_result" } ], "source": [ "'''The result will however be anticlimactic as you will only get a training loss\n", "which doesnt really tell you how well your model is performing\n", "\n", "This is because we did not define a metric to evaluate our model on!\n", "\n", "To get the metrics we will first gathers the predictions on the whole evaluation set('validation set')\n", "using the trainer.predict method\n", "\n", "It will return a namedtuple with the following attributes:\n", "predictions, label_ids, metrics, num_samples\n", "\n", "we are trying to get the metrics attribute which is empty here!\n", "'''" ] }, { "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "43c2affe978b4dfd904e22f2766afe46", "version_major": 2, "version_minor": 0 }, "text/plain": [ " 0%| | 0/7 [00:001\u001b[0m predictions \u001b[39m=\u001b[39m trainer\u001b[39m.\u001b[39mpredict(tokenized_datasets[\u001b[39m'\u001b[39m\u001b[39mvalidation\u001b[39m\u001b[39m'\u001b[39m])\n\u001b[1;32m----> 2\u001b[0m \u001b[39mprint\u001b[39m(predictions\u001b[39m.\u001b[39mpredictions\u001b[39m.\u001b[39mshape, predictions\u001b[39m.\u001b[39;49mpredictions\u001b[39m.\u001b[39;49mlabel_ids\u001b[39m.\u001b[39mshape)\n", "\u001b[1;31mAttributeError\u001b[0m: 'numpy.ndarray' object has no attribute 'label_ids'" ] } ], "source": [ "predictions = trainer.predict(tokenized_datasets['validation'])\n", "print(predictions.predictions.shape, 
predictions.label_ids.shape)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "'''The predictions are the logits of the model for all the sentences in the dataset\n", "a numpy array of shape 408 x 2'''" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "'''To match them with our labels we have to take the maximum\n", "logits for each prediction, to know which of the two classes was predicted\n", "We do this using the argmax function of numpy\n", "Then we can use the metrics from the datasets library\n", "it can be loaded as easily as the dataset with the load_metric function\n", "and it returns the evaluation metric for the dataset'''" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "from datasets import load_metric\n", "\n", "metric = load_metric('glue','mrpc')\n", "preds = np.argmax(predictions.predictions, axis=-1)\n", "metric.compute(predictions=preds, references=predictions.label_ids)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "'''We can see our model did learn something!'''" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "'''To monitor the evaluation metrics during training\n", "we need to define a compute_metrics function\n", "as we have just did\n", "it takes a namedtuple with predictions and the labels and\n", "returns a dictionary with the metrics we want to keep track of\n", "'''" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "metric = load_metric('glue','mrpc')\n", "\n", "def compute_metric(eval_preds):\n", " logits, labels = eval_preds\n", " predictions = np.argmax(logits, axis=-1)\n", " return metric.compute(predictions=predictions, references=labels)\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, 
"outputs": [], "source": [ "'''By passing the epoch evaluation strategy to the trainer,\n", "we tell the trainer to evaluate at the end of every epoch'''" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "training_args = TrainingArguments(\n", " 'test-trainer', evaluation_strategy='epoch'\n", ")\n", "model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2)\n", "\n", "trainer = Trainer(\n", " model,\n", " training_args,\n", " train_dataset=tokenized_datasets['train'],\n", " eval_dataset=tokenized_datasets['validation'],\n", " data_collator=data_collator,\n", " tokenizer=tokenizer,\n", " compute_metrics=compute_metric\n", ")" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "trainer.train()" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.11.6" } }, "nbformat": 4, "nbformat_minor": 2 }