Upload 6 files
- .gitattributes +3 -0
- BETo-k-MMG.ipynb +390 -0
- BETo-k-MMG.keras +3 -0
- RoBERTa-k-MMG.ipynb +391 -0
- RoBERTa-k-MMG.keras +3 -0
- RoBERTa-k-MMGb.ipynb +393 -0
- RoBERTa-k-MMGb.keras +3 -0
.gitattributes
CHANGED
@@ -34,3 +34,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
 BERTmULT-k-MMG.keras filter=lfs diff=lfs merge=lfs -text
+BETo-k-MMG.keras filter=lfs diff=lfs merge=lfs -text
+RoBERTa-k-MMG.keras filter=lfs diff=lfs merge=lfs -text
+RoBERTa-k-MMGb.keras filter=lfs diff=lfs merge=lfs -text
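The three added rules follow the existing pattern: each new .keras checkpoint is routed through Git LFS, so the repository stores a small pointer file (version, oid, size; see the .keras sections below) instead of the ~1.3-1.5 GB weights. A minimal sketch of fetching one of these LFS-backed files programmatically with the huggingface_hub client; the repo id is a hypothetical placeholder, since the real repository name is not shown in this diff:

from huggingface_hub import hf_hub_download

# hf_hub_download resolves the LFS pointer and returns a local path to the
# downloaded .keras file (cached under ~/.cache/huggingface by default).
weights_path = hf_hub_download(
    repo_id="user/k-MMG-models",  # assumption: replace with the real repo id
    filename="BETo-k-MMG.keras",
)
print(weights_path)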
BETo-k-MMG.ipynb
ADDED
@@ -0,0 +1,390 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "976841dc",
   "metadata": {},
   "source": [
    "## Preparing a dataset\n",
    "\n",
    "We download the dataset and prepare it for training. As a working example we use toxic-teenage-relationships, a set of sentences that describe whether a behavior is toxic or healthy. Each example has a text field and a label field, which is 1 if the behavior is toxic and 0 if it is not. It holds 267 training examples and 66 test examples."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "b9a1f255",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'label': 1,\n",
       " 'text': 'Mi amiga no puede subir videos a tik tok porque su pareja no le deja'}"
      ]
     },
     "execution_count": 1,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from datasets import load_dataset\n",
    "data_files = {\"train\": \"train.csv\", \"test\": \"test.csv\"}\n",
    "dataset = load_dataset(\"toxic-teenage-relationships\", data_files=data_files, sep=\";\")\n",
    "dataset['train'][100]"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "6d0c740a",
   "metadata": {},
   "source": [
    "Once the dataset is loaded, we create a tokenizer to process the text and set a padding and truncation strategy. To preprocess the whole dataset in a single pass, we use the dataset.map method."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "01673605",
   "metadata": {},
   "outputs": [],
   "source": [
    "from transformers import AutoTokenizer\n",
    "\n",
    "tokenizer = AutoTokenizer.from_pretrained(\"dccuchile/bert-base-spanish-wwm-cased\")\n",
    "\n",
    "\n",
    "def tokenize_function(examples):\n",
    "    return tokenizer(examples[\"text\"], padding=\"max_length\", truncation=True)\n",
    "\n",
    "\n",
    "tokenized_datasets = dataset.map(tokenize_function, batched=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "08aacc14",
   "metadata": {},
   "source": [
    "Now we convert the dataset to TensorFlow format. For that we use DefaultDataCollator, which assembles the tensors into batches the model can train on. We must pass the argument return_tensors=\"tf\".\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "4a854ead",
   "metadata": {},
   "outputs": [],
   "source": [
    "from transformers import DefaultDataCollator\n",
    "data_collator = DefaultDataCollator(return_tensors=\"tf\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "06346bc5",
   "metadata": {},
   "source": [
    "We keep the train and test splits.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "698a98ca",
   "metadata": {},
   "outputs": [],
   "source": [
    "train_dataset = tokenized_datasets[\"train\"]\n",
    "eval_dataset = tokenized_datasets[\"test\"]"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2c6d5142",
   "metadata": {},
   "source": [
    "Now we convert the tokenized datasets into TensorFlow datasets with the .to_tf_dataset method. The inputs go in columns and the label in label_cols. The batch size is the number of examples fed to the network on each training step.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "55fd25b8",
   "metadata": {},
   "outputs": [],
   "source": [
    "tf_train_dataset = train_dataset.to_tf_dataset(\n",
    "    columns=[\"attention_mask\", \"input_ids\", \"token_type_ids\"],\n",
    "    label_cols=\"labels\",\n",
    "    shuffle=True,\n",
    "    collate_fn=data_collator,\n",
    "    batch_size=8,\n",
    ")\n",
    "tf_validation_dataset = eval_dataset.to_tf_dataset(\n",
    "    columns=[\"attention_mask\", \"input_ids\", \"token_type_ids\"],\n",
    "    label_cols=\"labels\",\n",
    "    shuffle=False,\n",
    "    collate_fn=data_collator,\n",
    "    batch_size=8,\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "38a6c521",
   "metadata": {},
   "source": [
    "## Fine-tuning with fit\n",
    "\n",
    "First we load the TensorFlow model with the expected number of labels. In this case we have 2 categories.\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "843f218d",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "All model checkpoint layers were used when initializing TFBertForSequenceClassification.\n",
      "\n",
      "Some layers of TFBertForSequenceClassification were not initialized from the model checkpoint at dccuchile/bert-base-spanish-wwm-cased and are newly initialized: ['classifier', 'bert/pooler/dense/kernel:0', 'bert/pooler/dense/bias:0']\n",
      "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
     ]
    }
   ],
   "source": [
    "import tensorflow as tf\n",
    "from transformers import TFAutoModelForSequenceClassification\n",
    "\n",
    "# There are two categories, so we set 2 labels (0 healthy, 1 toxic)\n",
    "model = TFAutoModelForSequenceClassification.from_pretrained(\"dccuchile/bert-base-spanish-wwm-cased\", num_labels=2)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a31780ca",
   "metadata": {},
   "source": [
    "Now we call compile and fit just as we would with any Keras model.\n",
    "compile configures the training phase before optimization begins: here we choose the optimizer (Adam in our case), the loss function, and the metrics that will evaluate performance, as set up in the previous cells.\n",
    "fit trains the model on the data it receives; by passing a validation set we monitor the model's performance, so it is evaluated while it trains."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "3e01c5fb",
   "metadata": {},
   "outputs": [],
   "source": [
    "model.compile(\n",
    "    optimizer=tf.keras.optimizers.Adam(learning_rate=5e-5),\n",
    "    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n",
    "    metrics=tf.metrics.SparseCategoricalAccuracy(),\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "4606c92e",
   "metadata": {},
   "outputs": [],
   "source": [
    "from keras.callbacks import EarlyStopping\n",
    "# In this model I observed overfitting, so I use early stopping to halt training\n",
    "# as soon as a rise in the validation error is observed.\n",
    "# It waits 2 epochs before interrupting training and keeps the best weights\n",
    "early_stop = EarlyStopping(monitor=\"val_loss\", patience=2, mode=\"auto\", restore_best_weights=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "59322f38",
   "metadata": {},
   "source": [
    "The number of epochs is how many times each training example is passed through the network."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "cf7268e7",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/10\n",
      "34/34 [==============================] - 549s 15s/step - loss: 0.6582 - sparse_categorical_accuracy: 0.6194 - val_loss: 0.5470 - val_sparse_categorical_accuracy: 0.7424\n",
      "Epoch 2/10\n",
      "34/34 [==============================] - 518s 15s/step - loss: 0.3336 - sparse_categorical_accuracy: 0.8731 - val_loss: 0.4901 - val_sparse_categorical_accuracy: 0.7727\n",
      "Epoch 3/10\n",
      "34/34 [==============================] - 515s 15s/step - loss: 0.0858 - sparse_categorical_accuracy: 0.9739 - val_loss: 0.7612 - val_sparse_categorical_accuracy: 0.8030\n",
      "Epoch 4/10\n",
      "34/34 [==============================] - 524s 15s/step - loss: 0.0616 - sparse_categorical_accuracy: 0.9851 - val_loss: 0.3491 - val_sparse_categorical_accuracy: 0.8636\n",
      "Epoch 5/10\n",
      "34/34 [==============================] - 515s 15s/step - loss: 0.0837 - sparse_categorical_accuracy: 0.9739 - val_loss: 0.8465 - val_sparse_categorical_accuracy: 0.7727\n",
      "Epoch 6/10\n",
      "34/34 [==============================] - 515s 15s/step - loss: 0.0436 - sparse_categorical_accuracy: 0.9888 - val_loss: 0.7385 - val_sparse_categorical_accuracy: 0.8182\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "<keras.src.callbacks.History at 0x7f90282d9a00>"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "model.fit(tf_train_dataset, validation_data=tf_validation_dataset, epochs=10, callbacks=[early_stop])"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4840d701",
   "metadata": {},
   "source": [
    "sparse_categorical_accuracy is the value computed on my training set, while the metric with the val prefix is computed on the test set. If the test metric stays flat or falls while the training metric rises, the model is overfitting."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "fbeef13e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model: \"tf_bert_for_sequence_classification\"\n",
      "_________________________________________________________________\n",
      " Layer (type)                Output Shape              Param #   \n",
      "=================================================================\n",
      " bert (TFBertMainLayer)      multiple                  109850880 \n",
      "                                                                 \n",
      " dropout_37 (Dropout)        multiple                  0         \n",
      "                                                                 \n",
      " classifier (Dense)          multiple                  1538      \n",
      "                                                                 \n",
      "=================================================================\n",
      "Total params: 109852418 (419.05 MB)\n",
      "Trainable params: 109852418 (419.05 MB)\n",
      "Non-trainable params: 0 (0.00 Byte)\n",
      "_________________________________________________________________\n"
     ]
    }
   ],
   "source": [
    "model.summary()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c4fa0fce",
   "metadata": {},
   "source": [
    "Although they already appear during fit, we print the loss and accuracy figures obtained by the model.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "4113ab57",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "('loss', 0.34913551807403564)\n",
      "('sparse_categorical_accuracy', 0.8636363744735718)\n"
     ]
    }
   ],
   "source": [
    "scores = model.evaluate(tf_validation_dataset, verbose=0)\n",
    "print((model.metrics_names[0], scores[0]))\n",
    "print((model.metrics_names[1], scores[1]))\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9e61a040",
   "metadata": {},
   "source": [
    "# Saving the model"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4af06209",
   "metadata": {},
   "source": [
    "To save it, we use the save method"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "b93638cb",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/mmartinez/anaconda3/envs/TFM/lib/python3.8/site-packages/transformers/generation/tf_utils.py:465: UserWarning: `seed_generator` is deprecated and will be removed in a future version.\n",
      "  warnings.warn(\"`seed_generator` is deprecated and will be removed in a future version.\", UserWarning)\n"
     ]
    }
   ],
   "source": [
    "model.save(\"BETo-k-MMG.keras\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0e0dff1a",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
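The notebook stops after saving. As a usage note, here is a minimal sketch (not part of the uploaded notebook) of scoring a new sentence with the fine-tuned BETo classifier while model and tokenizer are still in memory; the example sentence is made up:

import tensorflow as tf

# Tokenize a single sentence the same way the training data was prepared.
inputs = tokenizer(
    "Mi pareja revisa mi movil todos los dias",  # hypothetical input
    padding="max_length",
    truncation=True,
    return_tensors="tf",
)

# The model returns logits; argmax gives the predicted class (0 healthy, 1 toxic).
logits = model(inputs).logits
predicted_class = int(tf.argmax(logits, axis=-1)[0])
print("toxic" if predicted_class == 1 else "healthy")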
BETo-k-MMG.keras
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cd4258d17f8fbb66a86c116498b928ce20fa172e7702351fcffe038ed3f92971
size 1318850457
RoBERTa-k-MMG.ipynb
ADDED
@@ -0,0 +1,391 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "976841dc",
   "metadata": {},
   "source": [
    "## Preparing a dataset\n",
    "\n",
    "We download the dataset and prepare it for training. As a working example we use toxic-teenage-relationships, a set of sentences that describe whether a behavior is toxic or healthy. Each example has a text field and a label field, which is 1 if the behavior is toxic and 0 if it is not. It holds 267 training examples and 66 test examples."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "b9a1f255",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'label': 1,\n",
       " 'text': 'Mi amiga no puede subir videos a tik tok porque su pareja no le deja'}"
      ]
     },
     "execution_count": 1,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from datasets import load_dataset\n",
    "data_files = {\"train\": \"train.csv\", \"test\": \"test.csv\"}\n",
    "dataset = load_dataset(\"toxic-teenage-relationships\", data_files=data_files, sep=\";\")\n",
    "dataset['train'][100]"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "6d0c740a",
   "metadata": {},
   "source": [
    "Once the dataset is loaded, we create a tokenizer to process the text and set a padding and truncation strategy. To preprocess the whole dataset in a single pass, we use the dataset.map method."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "01673605",
   "metadata": {},
   "outputs": [],
   "source": [
    "# In this example we use RoBERTa's own tokenizer class\n",
    "# from transformers import AutoTokenizer\n",
    "from transformers import RobertaTokenizer\n",
    "\n",
    "tokenizer = RobertaTokenizer.from_pretrained(\"PlanTL-GOB-ES/roberta-base-bne\")\n",
    "\n",
    "\n",
    "def tokenize_function(examples):\n",
    "    return tokenizer(examples[\"text\"], padding=\"max_length\", truncation=True)\n",
    "\n",
    "\n",
    "tokenized_datasets = dataset.map(tokenize_function, batched=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "08aacc14",
   "metadata": {},
   "source": [
    "Now we convert the dataset to TensorFlow format. For that we use DefaultDataCollator, which assembles the tensors into batches the model can train on. We must pass the argument return_tensors=\"tf\".\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "4a854ead",
   "metadata": {},
   "outputs": [],
   "source": [
    "from transformers import DefaultDataCollator\n",
    "data_collator = DefaultDataCollator(return_tensors=\"tf\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "06346bc5",
   "metadata": {},
   "source": [
    "We keep the train and test splits.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "698a98ca",
   "metadata": {},
   "outputs": [],
   "source": [
    "train_dataset = tokenized_datasets[\"train\"]\n",
    "eval_dataset = tokenized_datasets[\"test\"]"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2c6d5142",
   "metadata": {},
   "source": [
    "Now we convert the tokenized datasets into TensorFlow datasets with the .to_tf_dataset method. The inputs go in columns and the label in label_cols. The batch size is the number of examples fed to the network on each training step.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "55fd25b8",
   "metadata": {},
   "outputs": [],
   "source": [
    "tf_train_dataset = train_dataset.to_tf_dataset(\n",
    "    columns=[\"attention_mask\", \"input_ids\"],\n",
    "    label_cols=\"labels\",\n",
    "    shuffle=True,\n",
    "    collate_fn=data_collator,\n",
    "    batch_size=8,\n",
    ")\n",
    "tf_validation_dataset = eval_dataset.to_tf_dataset(\n",
    "    columns=[\"attention_mask\", \"input_ids\"],\n",
    "    label_cols=\"labels\",\n",
    "    shuffle=False,\n",
    "    collate_fn=data_collator,\n",
    "    batch_size=8,\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "38a6c521",
   "metadata": {},
   "source": [
    "## Fine-tuning with fit\n",
    "\n",
    "First we load the TensorFlow model with the expected number of labels. In this case we have 2 categories.\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "843f218d",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Some weights of the PyTorch model were not used when initializing the TF 2.0 model TFRobertaForSequenceClassification: ['roberta.embeddings.position_ids']\n",
      "- This IS expected if you are initializing TFRobertaForSequenceClassification from a PyTorch model trained on another task or with another architecture (e.g. initializing a TFBertForSequenceClassification model from a BertForPreTraining model).\n",
      "- This IS NOT expected if you are initializing TFRobertaForSequenceClassification from a PyTorch model that you expect to be exactly identical (e.g. initializing a TFBertForSequenceClassification model from a BertForSequenceClassification model).\n",
      "Some weights or buffers of the TF 2.0 model TFRobertaForSequenceClassification were not initialized from the PyTorch model and are newly initialized: ['classifier.dense.weight', 'classifier.dense.bias', 'classifier.out_proj.weight', 'classifier.out_proj.bias']\n",
      "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
     ]
    }
   ],
   "source": [
    "import tensorflow as tf\n",
    "# from transformers import TFAutoModelForSequenceClassification\n",
    "# RoBERTa also has its own class for the classification head\n",
    "from transformers import TFRobertaForSequenceClassification\n",
    "# There are two categories, so we set 2 labels (0 healthy, 1 toxic)\n",
    "model = TFRobertaForSequenceClassification.from_pretrained(\"PlanTL-GOB-ES/roberta-base-bne\", num_labels=2, from_pt=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a31780ca",
   "metadata": {},
   "source": [
    "Now we call compile and fit just as we would with any Keras model.\n",
    "compile configures the training phase before optimization begins: here we choose the optimizer (Adam in our case), the loss function, and the metrics that will evaluate performance, as set up in the previous cells.\n",
    "fit trains the model on the data it receives; by passing a validation set we monitor the model's performance, so it is evaluated while it trains."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "3e01c5fb",
   "metadata": {},
   "outputs": [],
   "source": [
    "model.compile(\n",
    "    optimizer=tf.keras.optimizers.Adam(learning_rate=5e-5),\n",
    "    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n",
    "    metrics=tf.metrics.SparseCategoricalAccuracy(),\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "4606c92e",
   "metadata": {},
   "outputs": [],
   "source": [
    "from keras.callbacks import EarlyStopping\n",
    "# In this model I observed overfitting, so I use early stopping to halt training\n",
    "# as soon as a rise in the validation error is observed.\n",
    "early_stop = EarlyStopping(monitor=\"val_loss\", patience=2, mode=\"auto\", restore_best_weights=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "59322f38",
   "metadata": {},
   "source": [
    "The number of epochs is how many times each training example is passed through the network."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "cf7268e7",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/10\n",
      "34/34 [==============================] - 515s 14s/step - loss: 0.5807 - sparse_categorical_accuracy: 0.7127 - val_loss: 0.6163 - val_sparse_categorical_accuracy: 0.7424\n",
      "Epoch 2/10\n",
      "34/34 [==============================] - 483s 14s/step - loss: 0.3028 - sparse_categorical_accuracy: 0.9030 - val_loss: 0.5226 - val_sparse_categorical_accuracy: 0.7727\n",
      "Epoch 3/10\n",
      "34/34 [==============================] - 486s 14s/step - loss: 0.0781 - sparse_categorical_accuracy: 0.9813 - val_loss: 0.3980 - val_sparse_categorical_accuracy: 0.9091\n",
      "Epoch 4/10\n",
      "34/34 [==============================] - 481s 14s/step - loss: 0.0278 - sparse_categorical_accuracy: 0.9888 - val_loss: 0.8322 - val_sparse_categorical_accuracy: 0.7576\n",
      "Epoch 5/10\n",
      "34/34 [==============================] - 482s 14s/step - loss: 0.0252 - sparse_categorical_accuracy: 0.9963 - val_loss: 0.6986 - val_sparse_categorical_accuracy: 0.7879\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "<keras.src.callbacks.History at 0x7f61d5212f70>"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "model.fit(tf_train_dataset, validation_data=tf_validation_dataset, epochs=10, callbacks=[early_stop])"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4840d701",
   "metadata": {},
   "source": [
    "sparse_categorical_accuracy is the value computed on my training set, while the metric with the val prefix is computed on the test set. If the test metric stays flat or falls while the training metric rises, the model is overfitting."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "fbeef13e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model: \"tf_roberta_for_sequence_classification\"\n",
      "_________________________________________________________________\n",
      " Layer (type)                Output Shape              Param #   \n",
      "=================================================================\n",
      " roberta (TFRobertaMainLaye  multiple                  124052736 \n",
      " r)                                                              \n",
      "                                                                 \n",
      " classifier (TFRobertaClass  multiple                  592130    \n",
      " ificationHead)                                                  \n",
      "                                                                 \n",
      "=================================================================\n",
      "Total params: 124644866 (475.48 MB)\n",
      "Trainable params: 124644866 (475.48 MB)\n",
      "Non-trainable params: 0 (0.00 Byte)\n",
      "_________________________________________________________________\n"
     ]
    }
   ],
   "source": [
    "model.summary()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c4fa0fce",
   "metadata": {},
   "source": [
    "Although they already appear during fit, we print the loss and accuracy figures obtained by the model.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "4113ab57",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "('loss', 0.39804112911224365)\n",
      "('sparse_categorical_accuracy', 0.9090909361839294)\n"
     ]
    }
   ],
   "source": [
    "scores = model.evaluate(tf_validation_dataset, verbose=0)\n",
    "print((model.metrics_names[0], scores[0]))\n",
    "print((model.metrics_names[1], scores[1]))\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9e61a040",
   "metadata": {},
   "source": [
    "# Saving the model"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4af06209",
   "metadata": {},
   "source": [
    "To save it, we use the save method"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "b93638cb",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/mmartinez/anaconda3/envs/TFM/lib/python3.8/site-packages/transformers/generation/tf_utils.py:465: UserWarning: `seed_generator` is deprecated and will be removed in a future version.\n",
      "  warnings.warn(\"`seed_generator` is deprecated and will be removed in a future version.\", UserWarning)\n"
     ]
    }
   ],
   "source": [
    "model.save(\"RoBERTa-k-MMG.keras\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0e0dff1a",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
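The notebooks report only accuracy; with 66 test examples and two classes, per-class precision and recall are also informative. A hedged sketch (scikit-learn is an assumption here, not a dependency of the notebook) that collects predictions from the batched validation pipeline:

import numpy as np
from sklearn.metrics import classification_report

# Iterate the tf.data pipeline; to_tf_dataset yields (features_dict, labels) pairs.
y_true, y_pred = [], []
for features, labels in tf_validation_dataset:
    logits = model(features).logits
    y_pred.extend(np.argmax(logits, axis=-1))
    y_true.extend(labels.numpy())

# target_names follows the labeling convention above: 0 healthy, 1 toxic.
print(classification_report(y_true, y_pred, target_names=["healthy", "toxic"]))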
RoBERTa-k-MMG.keras
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c973b407375dbf45bc383551727e5047b5a6ace9e34d45a65778f85a960ac915
size 1496360957
RoBERTa-k-MMGb.ipynb
ADDED
@@ -0,0 +1,393 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "976841dc",
   "metadata": {},
   "source": [
    "## Preparing a dataset\n",
    "\n",
    "We download the dataset and prepare it for training. As a working example we use toxic-teenage-relationships, a set of sentences that describe whether a behavior is toxic or healthy. Each example has a text field and a label field, which is 1 if the behavior is toxic and 0 if it is not. It holds 267 training examples and 66 test examples."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "b9a1f255",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'label': 1,\n",
       " 'text': 'Mi amiga no puede subir videos a tik tok porque su pareja no le deja'}"
      ]
     },
     "execution_count": 1,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from datasets import load_dataset\n",
    "data_files = {\"train\": \"train.csv\", \"test\": \"test.csv\"}\n",
    "dataset = load_dataset(\"toxic-teenage-relationships\", data_files=data_files, sep=\";\")\n",
    "dataset['train'][100]"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "6d0c740a",
   "metadata": {},
   "source": [
    "Once the dataset is loaded, we create a tokenizer to process the text and set a padding and truncation strategy. To preprocess the whole dataset in a single pass, we use the dataset.map method."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "01673605",
   "metadata": {},
   "outputs": [],
   "source": [
    "# In this example we use the generic AutoTokenizer\n",
    "from transformers import AutoTokenizer\n",
    "\n",
    "tokenizer = AutoTokenizer.from_pretrained(\"PlanTL-GOB-ES/roberta-base-bne\")\n",
    "\n",
    "\n",
    "def tokenize_function(examples):\n",
    "    return tokenizer(examples[\"text\"], padding=\"max_length\", truncation=True)\n",
    "\n",
    "\n",
    "tokenized_datasets = dataset.map(tokenize_function, batched=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "08aacc14",
   "metadata": {},
   "source": [
    "Now we convert the dataset to TensorFlow format. For that we use DefaultDataCollator, which assembles the tensors into batches the model can train on. We must pass the argument return_tensors=\"tf\".\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "4a854ead",
   "metadata": {},
   "outputs": [],
   "source": [
    "from transformers import DefaultDataCollator\n",
    "data_collator = DefaultDataCollator(return_tensors=\"tf\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "06346bc5",
   "metadata": {},
   "source": [
    "We keep the train and test splits.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "698a98ca",
   "metadata": {},
   "outputs": [],
   "source": [
    "train_dataset = tokenized_datasets[\"train\"]\n",
    "eval_dataset = tokenized_datasets[\"test\"]"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2c6d5142",
   "metadata": {},
   "source": [
    "Now we convert the tokenized datasets into TensorFlow datasets with the .to_tf_dataset method. The inputs go in columns and the label in label_cols. The batch size is the number of examples fed to the network on each training step.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "55fd25b8",
   "metadata": {},
   "outputs": [],
   "source": [
    "tf_train_dataset = train_dataset.to_tf_dataset(\n",
    "    columns=[\"attention_mask\", \"input_ids\"],\n",
    "    label_cols=\"labels\",\n",
    "    shuffle=True,\n",
    "    collate_fn=data_collator,\n",
    "    batch_size=8,\n",
    ")\n",
    "tf_validation_dataset = eval_dataset.to_tf_dataset(\n",
    "    columns=[\"attention_mask\", \"input_ids\"],\n",
    "    label_cols=\"labels\",\n",
    "    shuffle=False,\n",
    "    collate_fn=data_collator,\n",
    "    batch_size=8,\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "38a6c521",
   "metadata": {},
   "source": [
    "## Fine-tuning with fit\n",
    "\n",
    "First we load the TensorFlow model with the expected number of labels. In this case we have 2 categories.\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "843f218d",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Some weights of the PyTorch model were not used when initializing the TF 2.0 model TFRobertaForSequenceClassification: ['roberta.embeddings.position_ids']\n",
      "- This IS expected if you are initializing TFRobertaForSequenceClassification from a PyTorch model trained on another task or with another architecture (e.g. initializing a TFBertForSequenceClassification model from a BertForPreTraining model).\n",
      "- This IS NOT expected if you are initializing TFRobertaForSequenceClassification from a PyTorch model that you expect to be exactly identical (e.g. initializing a TFBertForSequenceClassification model from a BertForSequenceClassification model).\n",
      "Some weights or buffers of the TF 2.0 model TFRobertaForSequenceClassification were not initialized from the PyTorch model and are newly initialized: ['classifier.dense.weight', 'classifier.dense.bias', 'classifier.out_proj.weight', 'classifier.out_proj.bias']\n",
      "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
     ]
    }
   ],
   "source": [
    "import tensorflow as tf\n",
    "from transformers import TFAutoModelForSequenceClassification\n",
    "\n",
    "# There are two categories, so we set 2 labels (0 healthy, 1 toxic)\n",
    "model = TFAutoModelForSequenceClassification.from_pretrained(\"PlanTL-GOB-ES/roberta-base-bne\", num_labels=2, from_pt=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a31780ca",
   "metadata": {},
   "source": [
    "Now we call compile and fit just as we would with any Keras model.\n",
    "compile configures the training phase before optimization begins: here we choose the optimizer (Adam in our case), the loss function, and the metrics that will evaluate performance, as set up in the previous cells.\n",
    "fit trains the model on the data it receives; by passing a validation set we monitor the model's performance, so it is evaluated while it trains."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "3e01c5fb",
   "metadata": {},
   "outputs": [],
   "source": [
    "model.compile(\n",
    "    optimizer=tf.keras.optimizers.Adam(learning_rate=5e-5),\n",
    "    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n",
    "    metrics=tf.metrics.SparseCategoricalAccuracy(),\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "4606c92e",
   "metadata": {},
   "outputs": [],
   "source": [
    "from keras.callbacks import EarlyStopping\n",
    "# In this model I observed overfitting, so I use early stopping to halt training\n",
    "# as soon as a rise in the validation error is observed, with a patience of 2 epochs,\n",
    "# and keep the model with the best metric\n",
    "early_stop = EarlyStopping(monitor=\"val_loss\", patience=2, mode=\"auto\", restore_best_weights=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "59322f38",
   "metadata": {},
   "source": [
    "The number of epochs is how many times each training example is passed through the network; we set it to 10."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "cf7268e7",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/10\n",
      "34/34 [==============================] - 524s 15s/step - loss: 0.6439 - sparse_categorical_accuracy: 0.6157 - val_loss: 0.4473 - val_sparse_categorical_accuracy: 0.7879\n",
      "Epoch 2/10\n",
      "34/34 [==============================] - 496s 15s/step - loss: 0.3275 - sparse_categorical_accuracy: 0.8769 - val_loss: 0.3903 - val_sparse_categorical_accuracy: 0.8485\n",
      "Epoch 3/10\n",
      "34/34 [==============================] - 495s 15s/step - loss: 0.1034 - sparse_categorical_accuracy: 0.9776 - val_loss: 0.4216 - val_sparse_categorical_accuracy: 0.8485\n",
      "Epoch 4/10\n",
      "34/34 [==============================] - 497s 15s/step - loss: 0.1399 - sparse_categorical_accuracy: 0.9590 - val_loss: 0.3116 - val_sparse_categorical_accuracy: 0.8788\n",
      "Epoch 5/10\n",
      "34/34 [==============================] - 493s 15s/step - loss: 0.1004 - sparse_categorical_accuracy: 0.9701 - val_loss: 0.6037 - val_sparse_categorical_accuracy: 0.8030\n",
      "Epoch 6/10\n",
      "34/34 [==============================] - 488s 14s/step - loss: 0.0980 - sparse_categorical_accuracy: 0.9664 - val_loss: 0.6070 - val_sparse_categorical_accuracy: 0.8333\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "<keras.src.callbacks.History at 0x7fd3ff8a6430>"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "model.fit(tf_train_dataset, validation_data=tf_validation_dataset, epochs=10, callbacks=[early_stop])"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4840d701",
   "metadata": {},
   "source": [
    "sparse_categorical_accuracy is the value computed on my training set, while the metric with the val prefix is computed on the test set. If the test metric stays flat or falls while the training metric rises, the model is overfitting."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "fbeef13e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model: \"tf_roberta_for_sequence_classification\"\n",
      "_________________________________________________________________\n",
      " Layer (type)                Output Shape              Param #   \n",
      "=================================================================\n",
      " roberta (TFRobertaMainLaye  multiple                  124052736 \n",
      " r)                                                              \n",
      "                                                                 \n",
      " classifier (TFRobertaClass  multiple                  592130    \n",
      " ificationHead)                                                  \n",
      "                                                                 \n",
      "=================================================================\n",
      "Total params: 124644866 (475.48 MB)\n",
      "Trainable params: 124644866 (475.48 MB)\n",
      "Non-trainable params: 0 (0.00 Byte)\n",
      "_________________________________________________________________\n"
     ]
    }
   ],
   "source": [
    "model.summary()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c4fa0fce",
   "metadata": {},
   "source": [
    "Although they already appear during fit, we print the loss and accuracy figures obtained by the model.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "4113ab57",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "('loss', 0.31155306100845337)\n",
      "('sparse_categorical_accuracy', 0.8787878751754761)\n"
     ]
    }
   ],
   "source": [
    "scores = model.evaluate(tf_validation_dataset, verbose=0)\n",
    "print((model.metrics_names[0], scores[0]))\n",
    "print((model.metrics_names[1], scores[1]))\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9e61a040",
   "metadata": {},
   "source": [
    "# Saving the model"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4af06209",
   "metadata": {},
   "source": [
    "To save it, we use the save method"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "b93638cb",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/mmartinez/anaconda3/envs/TFM/lib/python3.8/site-packages/transformers/generation/tf_utils.py:465: UserWarning: `seed_generator` is deprecated and will be removed in a future version.\n",
      "  warnings.warn(\"`seed_generator` is deprecated and will be removed in a future version.\", UserWarning)\n"
     ]
    }
   ],
   "source": [
    "model.save(\"RoBERTa-k-MMGb.keras\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5d3fc9d6",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
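All three notebooks persist weights with model.save(...) to a .keras file. An alternative worth noting, sketched below rather than taken from the notebooks, is the Transformers-native save_pretrained/from_pretrained pair, which also stores the model config and reloads without Keras custom-object handling; the directory name is an assumption:

from transformers import TFAutoModelForSequenceClassification

# Write model weights + config (and the tokenizer files) to a directory.
model.save_pretrained("RoBERTa-k-MMGb-hf")      # assumed directory name
tokenizer.save_pretrained("RoBERTa-k-MMGb-hf")

# Reload later without custom-object bookkeeping.
reloaded = TFAutoModelForSequenceClassification.from_pretrained("RoBERTa-k-MMGb-hf")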
RoBERTa-k-MMGb.keras
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:29ad2306f44c0680f9644413c1d487474946578dbbf611ff8ca73af5605fb83d
size 1496360957
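For reference, the best validation accuracy reached in each uploaded run: BETo 0.8636 (epoch 4), RoBERTa with RobertaTokenizer 0.9091 (epoch 3), and RoBERTa with AutoTokenizer 0.8788 (epoch 4); in each case early stopping restored the best weights before evaluation.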