Upload solo_descripcion_ripios.py

solo_descripcion_ripios.py (ADDED, +799 -0)
# -*- coding: utf-8 -*-
"""Solo_descripcion_ripios

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1RYsNm31Nta3rhqrgDbBsBCFcT3l-RZpC
"""

"""# **Description and measurement of drill cuttings using AI**

This work is an adaptation of the code of [A_K_Nain, 2021](https://keras.io/examples/vision/image_captioning/) and of [Sitar, M. & Leary, R., 2023](https://gchron.copernicus.org/articles/5/109/2023/)<br>
**Authors:** Jhoel Ortiz, Christian Mejía & Paola Vargas<br>
**Created:** 2024/01/06<br>
**Last modified:** 2024/02/15<br>
**Description:** This work implements CNN and Transformer models for the description and measurement of drill-cuttings images.

This Google Colab notebook is organized as follows:

**Textual and spoken description of drill-cuttings images**
- Loading and installing libraries
- Processing the image files and captions
- Vectorizing the text data
- Data pipeline for training
- Building the model
- Training the model
- Checking the predictions
- Evaluation with BLEU
- Prediction on external images

**Measurement of drill-cuttings images**
- Loading and installing libraries
- Inspecting the image
- Downloading and initializing the model
- Test evaluation
- Automated processing
- Plotting the automated results
- Semi-automatic processing
- Plotting the semi-automatic results

# **Textual and spoken description of drill-cuttings images**
This section contains all the steps for developing an AI model that automatically describes drill-cuttings images in written and spoken form, using a CNN and a Transformer.

##**Loading and installing libraries**
This subsection loads and installs the libraries required for the textual and spoken description of drill-cuttings images.
"""

# Load libraries
import os

os.environ["KERAS_BACKEND"] = "tensorflow"

import re
import numpy as np
import matplotlib.pyplot as plt

import tensorflow as tf
import keras
from keras import layers
from keras.applications import MobileNetV2
from keras.layers import TextVectorization

keras.utils.set_random_seed(111)

!pip install gTTS
from gtts import gTTS
"""##**Procesamiento de las imágenes y descripciones de ripios de perforación**
|
| 71 |
+
La siguiente subsección realiza lo siguiente:
|
| 72 |
+
* Carga los archivos de imagen y de texto de ripios de perforación
|
| 73 |
+
* Define las características y parámetros base de los archivos ingresados
|
| 74 |
+
* Divide al conjunto de datos en subconjuntos de entrenamiento y validación
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
"""
|
| 82 |
+
|
| 83 |
+
# Acceso a Google Drive
|
| 84 |
+
from google.colab import drive
|
| 85 |
+
drive.mount('/content/gdrive')
|
| 86 |
+
|
| 87 |
+
# Ruta de la carpeta de imágenes
|
| 88 |
+
IMAGES_PATH = "/content/gdrive/MyDrive/Data 2"
|
| 89 |
+
|
| 90 |
+
# Dimensiones de imagen
|
| 91 |
+
IMAGE_SIZE = (359,359)
|
| 92 |
+
|
| 93 |
+
# Tamaño del vocabulario
|
| 94 |
+
VOCAB_SIZE = 700
|
| 95 |
+
|
| 96 |
+
# Longitud fija para cualquier secuencia
|
| 97 |
+
SEQ_LENGTH = 400
|
| 98 |
+
|
| 99 |
+
# Dimensiones para los embeddings de imágenes y de tokens
|
| 100 |
+
EMBED_DIM = 512
|
| 101 |
+
|
| 102 |
+
# Unidades por capa en la red feed-forward
|
| 103 |
+
FF_DIM = 512
|
| 104 |
+
|
| 105 |
+
# Otros parámetros de entrenamiento
|
| 106 |
+
BATCH_SIZE = 64
|
| 107 |
+
EPOCHS = 1
|
| 108 |
+
AUTOTUNE = tf.data.AUTOTUNE
|
| 109 |
+
|
| 110 |
+
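# Optional sanity check (a sketch, not part of the original notebook): confirm
# the mounted folder is visible and count the .jpg files it contains.
if os.path.isdir(IMAGES_PATH):
    print(len([f for f in os.listdir(IMAGES_PATH) if f.endswith(".jpg")]), "images found")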
def load_captions_data(filename):
    """Loads the captions (text) and maps them to their corresponding images.

    Args:
        filename: Path to the text file containing the captions.

    Returns:
        caption_mapping: Dictionary mapping image names to their corresponding captions.
        text_data: List containing all available captions.
    """

    with open(filename) as caption_file:
        caption_data = caption_file.readlines()
        caption_mapping = {}
        text_data = []
        images_to_skip = set()

        for line in caption_data:
            line = line.rstrip("\n")
            # The image name is separated from its caption by a tab
            img_name, caption = line.split("\t")
            # Debug output: show each parsed pair
            print(img_name)
            print(caption)

            # Each image name carries a suffix, e.g. `img_name.jpg#0`
            img_name = img_name.split("#")[0]
            img_name = os.path.join(IMAGES_PATH, img_name.strip())

            # Tokenize the caption (a length filter on `tokens` could drop
            # captions that are too long or too short; none is applied here)
            tokens = caption.strip().split()

            if img_name.endswith("jpg") and img_name not in images_to_skip:
                # Add a <start> and an <end> token to every caption
                caption = "<start> " + caption.strip() + " <end>"
                text_data.append(caption)

                if img_name in caption_mapping:
                    caption_mapping[img_name].append(caption)
                else:
                    caption_mapping[img_name] = [caption]

        for img_name in images_to_skip:
            if img_name in caption_mapping:
                del caption_mapping[img_name]

        return caption_mapping, text_data
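# Expected caption-file format, inferred from the parsing above (tab-separated,
# Flickr8k-style "#" suffix on the image name). The file names and captions
# below are illustrative, not taken from the actual ROCAS.token.txt:
#
#   ripio_001.jpg#0<TAB>Ripios de arenisca de grano fino color gris claro
#   ripio_001.jpg#1<TAB>Fragmentos subangulosos con matriz arcillosa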
def train_val_split(caption_data, train_size=0.8, shuffle=True):
    """Splits the dataset into training and validation subsets.

    Args:
        caption_data (dict): Dictionary containing the mapped captions.
        train_size (float): Fraction of the dataset to use as the training subset.
        shuffle (bool): Whether to shuffle the dataset before splitting it.

    Returns:
        Training and validation datasets as two separate dicts.
    """

    # 1. List all the images
    all_images = list(caption_data.keys())

    # 2. Shuffle so the split is random and unbiased
    if shuffle:
        np.random.shuffle(all_images)

    # 3. Split into training and validation sets
    train_size = int(len(caption_data) * train_size)

    training_data = {
        img_name: caption_data[img_name] for img_name in all_images[:train_size]
    }
    validation_data = {
        img_name: caption_data[img_name] for img_name in all_images[train_size:]
    }

    # 4. Return the splits
    return training_data, validation_data

# Load the caption .txt file
captions_mapping, text_data = load_captions_data("/content/gdrive/MyDrive/ROCAS.token.txt")

# Split into training and validation sets
train_data, valid_data = train_val_split(captions_mapping)
print("Number of training samples: ", len(train_data))
print("Number of validation samples: ", len(valid_data))
"""##**Vectorización de los datos de texto**
|
| 200 |
+
Esta sección transforma las descripciones del archivo de texto en vectores,
|
| 201 |
+
estandariza las cadenas de caracteres y aumenta el número de imágenes con características establecidas.
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
"""
|
| 208 |
+
|
| 209 |
+
def custom_standardization(input_string):
    # Lowercase the text and strip punctuation, keeping < and > for the
    # <start> / <end> tokens
    lowercase = tf.strings.lower(input_string)
    return tf.strings.regex_replace(lowercase, "[%s]" % re.escape(strip_chars), "")


strip_chars = "!\"$&'*+-/:<=>?@[\\]^_`{|}~"
strip_chars = strip_chars.replace("<", "")
strip_chars = strip_chars.replace(">", "")

# Vectorize the text data
vectorization = TextVectorization(
    max_tokens=VOCAB_SIZE,
    output_mode="int",
    output_sequence_length=SEQ_LENGTH,
    standardize=custom_standardization,
)
vectorization.adapt(text_data)

# On-the-fly image augmentation
image_augmentation = keras.Sequential(
    [
        layers.RandomFlip("horizontal"),
        layers.RandomRotation(0.2),
        layers.RandomContrast(0.3),
    ]
)
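# Quick sanity check (a sketch, not part of the original notebook): the adapted
# vectorizer maps any caption to a fixed-length sequence of token ids.
sample_vec = vectorization(["<start> ripios de arenisca gris <end>"])
print(sample_vec.shape)  # -> (1, SEQ_LENGTH), i.e. (1, 400)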
"""##**Canalización de datos para el entrenamiento**
|
| 237 |
+
|
| 238 |
+
Se genera pares de imágenes con sus respectivas descripciones usando `tf.data.Dataset`.
|
| 239 |
+
|
| 240 |
+
El proceso consiste de dos etapas:
|
| 241 |
+
|
| 242 |
+
- Leer la imagen del disco
|
| 243 |
+
- Tokenizar las descripciones de cada una de ellas
|
| 244 |
+
"""
|
| 245 |
+
|
| 246 |
+
def decode_and_resize(img_path):
|
| 247 |
+
img = tf.io.read_file(img_path)
|
| 248 |
+
img = tf.image.decode_jpeg(img, channels=3)
|
| 249 |
+
img = tf.image.resize(img, IMAGE_SIZE)
|
| 250 |
+
img = tf.image.convert_image_dtype(img, tf.float32)
|
| 251 |
+
return img
|
| 252 |
+
|
| 253 |
+
|
| 254 |
+
def process_input(img_path, captions):
|
| 255 |
+
return decode_and_resize(img_path), vectorization(captions)
|
| 256 |
+
|
| 257 |
+
|
| 258 |
+
def make_dataset(images, captions):
    # Shuffle, tokenize, batch and prefetch the image/caption pairs
    dataset = tf.data.Dataset.from_tensor_slices((images, captions))
    dataset = dataset.shuffle(BATCH_SIZE * 8)
    dataset = dataset.map(process_input, num_parallel_calls=AUTOTUNE)
    dataset = dataset.batch(BATCH_SIZE).prefetch(AUTOTUNE)

    return dataset


# Build the training and validation datasets
train_dataset = make_dataset(list(train_data.keys()), list(train_data.values()))

valid_dataset = make_dataset(list(valid_data.keys()), list(valid_data.values()))
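# Optional sanity check (a sketch, not part of the original notebook): each
# dataset element is a batch of images together with a batch of tokenized
# captions, expected to be roughly (None, 359, 359, 3) and
# (None, captions_per_image, SEQ_LENGTH).
print(train_dataset.element_spec)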
"""## **Construcción del modelo**
|
| 273 |
+
|
| 274 |
+
La descripción de imágenes consta de tres modelos:
|
| 275 |
+
|
| 276 |
+
- Una CNN: extrae las características de las imágenes.
|
| 277 |
+
- Un TransformerEncoder: por medio de un modelo pre-entrenado para trabajar con imágenes de rocas, se encarga de identificar y extraer las características (features) de las fotos de la base de datos.
|
| 278 |
+
- Un TransformerDecoder: toma como entradas las features del codificador y las descripciones (secuencias) e identifica el proceso para generar descripciones de imágenes.
|
| 279 |
+
"""
|
| 280 |
+
|
| 281 |
+
def get_cnn_model():
    base_model = MobileNetV2(  # a ResNetV2 backbone could be used instead
        input_shape=(*IMAGE_SIZE, 3),
        include_top=False,
        weights="imagenet",
    )
    # Option: load a custom pretrained model instead of MobileNetV2
    # base_model = tf.keras.models.load_model('/content/gdrive/MyDrive/best_model.h5')
    # base_model.summary()
    # Freeze the feature extractor
    base_model.trainable = False
    base_model_out = base_model.output
    base_model_out = layers.Reshape((-1, base_model_out.shape[-1]))(base_model_out)
    cnn_model = keras.models.Model(base_model.input, base_model_out)
    return cnn_model
class TransformerEncoderBlock(layers.Layer):
    def __init__(self, embed_dim, dense_dim, num_heads, **kwargs):
        super().__init__(**kwargs)
        self.embed_dim = embed_dim
        self.dense_dim = dense_dim
        self.num_heads = num_heads
        self.attention_1 = layers.MultiHeadAttention(
            num_heads=num_heads, key_dim=embed_dim, dropout=0.0
        )
        self.layernorm_1 = layers.LayerNormalization()
        self.layernorm_2 = layers.LayerNormalization()
        self.dense_1 = layers.Dense(embed_dim, activation="relu")

    def call(self, inputs, training, mask=None):
        # Normalize and project the CNN features, then apply self-attention
        # with a residual connection
        inputs = self.layernorm_1(inputs)
        inputs = self.dense_1(inputs)

        attention_output_1 = self.attention_1(
            query=inputs,
            value=inputs,
            key=inputs,
            attention_mask=None,
            training=training,
        )
        out_1 = self.layernorm_2(inputs + attention_output_1)
        return out_1
class PositionalEmbedding(layers.Layer):
    def __init__(self, sequence_length, vocab_size, embed_dim, **kwargs):
        super().__init__(**kwargs)
        self.token_embeddings = layers.Embedding(
            input_dim=vocab_size, output_dim=embed_dim
        )
        self.position_embeddings = layers.Embedding(
            input_dim=sequence_length, output_dim=embed_dim
        )
        self.sequence_length = sequence_length
        self.vocab_size = vocab_size
        self.embed_dim = embed_dim
        self.embed_scale = tf.math.sqrt(tf.cast(embed_dim, tf.float32))

    def call(self, inputs):
        # Sum scaled token embeddings with learned position embeddings
        length = tf.shape(inputs)[-1]
        positions = tf.range(start=0, limit=length, delta=1)
        embedded_tokens = self.token_embeddings(inputs)
        embedded_tokens = embedded_tokens * self.embed_scale
        embedded_positions = self.position_embeddings(positions)
        return embedded_tokens + embedded_positions

    def compute_mask(self, inputs, mask=None):
        # Mask out the padding token (id 0)
        return tf.math.not_equal(inputs, 0)
class TransformerDecoderBlock(layers.Layer):
    def __init__(self, embed_dim, ff_dim, num_heads, **kwargs):
        super().__init__(**kwargs)
        self.embed_dim = embed_dim
        self.ff_dim = ff_dim
        self.num_heads = num_heads
        self.attention_1 = layers.MultiHeadAttention(
            num_heads=num_heads, key_dim=embed_dim, dropout=0.1
        )
        self.attention_2 = layers.MultiHeadAttention(
            num_heads=num_heads, key_dim=embed_dim, dropout=0.1
        )
        self.ffn_layer_1 = layers.Dense(ff_dim, activation="relu")
        self.ffn_layer_2 = layers.Dense(embed_dim)

        self.layernorm_1 = layers.LayerNormalization()
        self.layernorm_2 = layers.LayerNormalization()
        self.layernorm_3 = layers.LayerNormalization()

        self.embedding = PositionalEmbedding(
            embed_dim=EMBED_DIM,
            sequence_length=SEQ_LENGTH,
            vocab_size=VOCAB_SIZE,
        )
        self.out = layers.Dense(VOCAB_SIZE, activation="softmax")

        self.dropout_1 = layers.Dropout(0.3)
        self.dropout_2 = layers.Dropout(0.5)
        self.supports_masking = True

    def call(self, inputs, encoder_outputs, training, mask=None):
        inputs = self.embedding(inputs)
        causal_mask = self.get_causal_attention_mask(inputs)

        if mask is not None:
            # Combine the padding mask with the causal mask
            padding_mask = tf.cast(mask[:, :, tf.newaxis], dtype=tf.int32)
            combined_mask = tf.cast(mask[:, tf.newaxis, :], dtype=tf.int32)
            combined_mask = tf.minimum(combined_mask, causal_mask)
        else:
            # Fallback so both masks are always defined (the original code
            # assumed a mask is always passed in)
            padding_mask = None
            combined_mask = causal_mask

        # Masked self-attention over the caption tokens
        attention_output_1 = self.attention_1(
            query=inputs,
            value=inputs,
            key=inputs,
            attention_mask=combined_mask,
            training=training,
        )
        out_1 = self.layernorm_1(inputs + attention_output_1)

        # Cross-attention over the encoder outputs
        attention_output_2 = self.attention_2(
            query=out_1,
            value=encoder_outputs,
            key=encoder_outputs,
            attention_mask=padding_mask,
            training=training,
        )
        out_2 = self.layernorm_2(out_1 + attention_output_2)

        ffn_out = self.ffn_layer_1(out_2)
        ffn_out = self.dropout_1(ffn_out, training=training)
        ffn_out = self.ffn_layer_2(ffn_out)

        ffn_out = self.layernorm_3(ffn_out + out_2, training=training)
        ffn_out = self.dropout_2(ffn_out, training=training)
        preds = self.out(ffn_out)
        return preds

    def get_causal_attention_mask(self, inputs):
        # Build a lower-triangular mask and tile it over the batch dimension
        input_shape = tf.shape(inputs)
        batch_size, sequence_length = input_shape[0], input_shape[1]
        i = tf.range(sequence_length)[:, tf.newaxis]
        j = tf.range(sequence_length)
        mask = tf.cast(i >= j, dtype="int32")
        mask = tf.reshape(mask, (1, input_shape[1], input_shape[1]))
        mult = tf.concat(
            [
                tf.expand_dims(batch_size, -1),
                tf.constant([1, 1], dtype=tf.int32),
            ],
            axis=0,
        )
        return tf.tile(mask, mult)
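# To visualize what get_causal_attention_mask builds, here is the same
# lower-triangular pattern for a length-4 sequence (a standalone sketch, not
# part of the original notebook):
causal_demo = tf.cast(tf.range(4)[:, tf.newaxis] >= tf.range(4), dtype="int32")
print(causal_demo.numpy())
# [[1 0 0 0]
#  [1 1 0 0]
#  [1 1 1 0]
#  [1 1 1 1]]  -> each token may attend only to itself and earlier tokens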
class ImageCaptioningModel(keras.Model):
    def __init__(
        self,
        cnn_model,
        encoder,
        decoder,
        num_captions_per_image=1,
        image_aug=None,
    ):
        super().__init__()
        self.cnn_model = cnn_model
        self.encoder = encoder
        self.decoder = decoder
        self.loss_tracker = keras.metrics.Mean(name="loss")
        self.acc_tracker = keras.metrics.Mean(name="accuracy")
        self.num_captions_per_image = num_captions_per_image
        self.image_aug = image_aug

    def calculate_loss(self, y_true, y_pred, mask):
        loss = self.loss(y_true, y_pred)
        mask = tf.cast(mask, dtype=loss.dtype)
        loss *= mask
        return tf.reduce_sum(loss) / tf.reduce_sum(mask)

    def calculate_accuracy(self, y_true, y_pred, mask):
        accuracy = tf.equal(y_true, tf.argmax(y_pred, axis=2))
        accuracy = tf.math.logical_and(mask, accuracy)
        accuracy = tf.cast(accuracy, dtype=tf.float32)
        mask = tf.cast(mask, dtype=tf.float32)
        return tf.reduce_sum(accuracy) / tf.reduce_sum(mask)

    def _compute_caption_loss_and_acc(self, img_embed, batch_seq, training=True):
        encoder_out = self.encoder(img_embed, training=training)
        batch_seq_inp = batch_seq[:, :-1]
        batch_seq_true = batch_seq[:, 1:]
        mask = tf.math.not_equal(batch_seq_true, 0)
        batch_seq_pred = self.decoder(
            batch_seq_inp, encoder_out, training=training, mask=mask
        )
        loss = self.calculate_loss(batch_seq_true, batch_seq_pred, mask)
        acc = self.calculate_accuracy(batch_seq_true, batch_seq_pred, mask)
        return loss, acc

    def train_step(self, batch_data):
        batch_img, batch_seq = batch_data
        batch_loss = 0
        batch_acc = 0

        if self.image_aug:
            batch_img = self.image_aug(batch_img)

        # 1. Get the image embeddings
        img_embed = self.cnn_model(batch_img)

        # 2. Pass each caption through the decoder together with the
        # encoder outputs, and compute the loss and accuracy for
        # every caption
        for i in range(self.num_captions_per_image):
            with tf.GradientTape() as tape:
                loss, acc = self._compute_caption_loss_and_acc(
                    img_embed, batch_seq[:, i, :], training=True
                )

                # 3. Accumulate the loss and accuracy
                batch_loss += loss
                batch_acc += acc

            # 4. Get the list of trainable weights
            train_vars = (
                self.encoder.trainable_variables + self.decoder.trainable_variables
            )

            # 5. Get the gradients
            grads = tape.gradient(loss, train_vars)

            # 6. Update the trainable weights
            self.optimizer.apply_gradients(zip(grads, train_vars))

        # 7. Update the trackers
        batch_acc /= float(self.num_captions_per_image)
        self.loss_tracker.update_state(batch_loss)
        self.acc_tracker.update_state(batch_acc)

        # 8. Return the loss and accuracy values
        return {
            "loss": self.loss_tracker.result(),
            "acc": self.acc_tracker.result(),
        }

    def test_step(self, batch_data):
        batch_img, batch_seq = batch_data
        batch_loss = 0
        batch_acc = 0

        # 1. Get the image embeddings
        img_embed = self.cnn_model(batch_img)

        # 2. Pass each caption through the decoder together with the
        # encoder outputs, and compute the loss and accuracy for
        # every caption
        for i in range(self.num_captions_per_image):
            loss, acc = self._compute_caption_loss_and_acc(
                img_embed, batch_seq[:, i, :], training=False
            )

            # 3. Accumulate the loss and accuracy
            batch_loss += loss
            batch_acc += acc

        batch_acc /= float(self.num_captions_per_image)

        # 4. Update the trackers
        self.loss_tracker.update_state(batch_loss)
        self.acc_tracker.update_state(batch_acc)

        # 5. Return the loss and accuracy values
        return {
            "loss": self.loss_tracker.result(),
            "acc": self.acc_tracker.result(),
        }

    @property
    def metrics(self):
        # The metrics must be listed here so that `reset_states()`
        # can be called automatically.
        return [self.loss_tracker, self.acc_tracker]


cnn_model = get_cnn_model()
encoder = TransformerEncoderBlock(embed_dim=EMBED_DIM, dense_dim=FF_DIM, num_heads=1)
decoder = TransformerDecoderBlock(embed_dim=EMBED_DIM, ff_dim=FF_DIM, num_heads=2)
caption_model = ImageCaptioningModel(
    cnn_model=cnn_model,
    encoder=encoder,
    decoder=decoder,
    image_aug=image_augmentation,
)
"""## **Entrenamiento del modelo**"""
|
| 573 |
+
|
| 574 |
+
# Define la función de pérdida
|
| 575 |
+
cross_entropy = keras.losses.SparseCategoricalCrossentropy(
|
| 576 |
+
from_logits=False,
|
| 577 |
+
reduction='none',
|
| 578 |
+
)
|
| 579 |
+
|
| 580 |
+
# Criterios de parada anticipada
|
| 581 |
+
early_stopping = keras.callbacks.EarlyStopping(patience=3, restore_best_weights=True)
|
| 582 |
+
|
| 583 |
+
|
| 584 |
+
# Programador de tasa de aprendizaje para el optimizador
|
| 585 |
+
class LRSchedule(keras.optimizers.schedules.LearningRateSchedule):
    def __init__(self, post_warmup_learning_rate, warmup_steps):
        super().__init__()
        self.post_warmup_learning_rate = post_warmup_learning_rate
        self.warmup_steps = warmup_steps

    def __call__(self, step):
        # Ramp the learning rate up linearly during warm-up, then hold it constant
        global_step = tf.cast(step, tf.float32)
        warmup_steps = tf.cast(self.warmup_steps, tf.float32)
        warmup_progress = global_step / warmup_steps
        warmup_learning_rate = self.post_warmup_learning_rate * warmup_progress
        return tf.cond(
            global_step < warmup_steps,
            lambda: warmup_learning_rate,
            lambda: self.post_warmup_learning_rate,
        )


# Create the learning-rate schedule
num_train_steps = len(train_dataset) * EPOCHS
num_warmup_steps = num_train_steps // 15
lr_schedule = LRSchedule(post_warmup_learning_rate=1e-4, warmup_steps=num_warmup_steps)

# Compile the model
caption_model.compile(optimizer=keras.optimizers.Adam(lr_schedule), loss=cross_entropy)

# Train the model
caption_model.fit(
    train_dataset,
    epochs=EPOCHS,
    validation_data=valid_dataset,
    callbacks=[early_stopping],
)
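# A quick look at the warm-up schedule defined above (a sketch, not part of the
# original notebook; assumes num_warmup_steps > 0): the learning rate ramps up
# linearly during warm-up and stays at 1e-4 afterwards.
for s in [1, num_warmup_steps, num_train_steps]:
    print(int(s), float(lr_schedule(tf.constant(s, dtype=tf.float32))))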
"""### **Opción para guardar el modelo entrenado**"""
|
| 621 |
+
|
| 622 |
+
#con está opción vemos los pesos del modelo en una lista
|
| 623 |
+
pesos = caption_model.get_weights()
|
| 624 |
+
|
| 625 |
+
#guardamos esos pesos en formato npy - en este caso lo guardamos entrenado con una época, ya que si quitamos el fit o el entrenamiento nos da error, por lo que siempre tenemos que
|
| 626 |
+
#entrenarle al modelo con una época para después configurarle con otro con 10 épocas
|
| 627 |
+
np.save('pesos1.npy', np.array(pesos, dtype=object), allow_pickle=True)
|
| 628 |
+
|
| 629 |
+
#aquí configuramos los pesos que estaban entrenados con una época con diez - nosotros corrimos anteriormente con 10 y nos descargamos
|
| 630 |
+
pesos_nuevos = np.load('/content/gdrive/MyDrive/saved models/pesos10.npy', allow_pickle=True)
|
| 631 |
+
|
| 632 |
+
caption_model.set_weights(pesos_nuevos)
|
| 633 |
+
|
| 634 |
+
"""##**Verificación de las predicciones**"""
|
| 635 |
+
|
| 636 |
+
vocab = vectorization.get_vocabulary()
|
| 637 |
+
index_lookup = dict(zip(range(len(vocab)), vocab))
|
| 638 |
+
max_decoded_sentence_length = SEQ_LENGTH - 1
|
| 639 |
+
valid_images = list(valid_data.keys())
|
| 640 |
+
|
| 641 |
+
|
| 642 |
+
def generate_caption():
    # Select a random image from the validation set
    sample_img = np.random.choice(valid_images)
    print(sample_img)

    # Read the image from disk
    sample_img = decode_and_resize(sample_img)
    img = sample_img.numpy().clip(0, 255).astype(np.uint8)
    plt.imshow(img)
    plt.show()

    # Pass the image to the CNN
    img = tf.expand_dims(sample_img, 0)
    img = caption_model.cnn_model(img)

    # Pass the image features to the Transformer encoder
    encoded_img = caption_model.encoder(img, training=False)

    # Generate the caption with the Transformer decoder, one token at a time
    decoded_caption = "<start> "
    for i in range(max_decoded_sentence_length):
        tokenized_caption = vectorization([decoded_caption])[:, :-1]
        mask = tf.math.not_equal(tokenized_caption, 0)
        predictions = caption_model.decoder(
            tokenized_caption, encoded_img, training=False, mask=mask
        )
        sampled_token_index = np.argmax(predictions[0, i, :])
        sampled_token = index_lookup[sampled_token_index]
        if sampled_token == "<end>":
            break
        decoded_caption += " " + sampled_token

    decoded_caption = decoded_caption.replace("<start> ", "")
    decoded_caption = decoded_caption.replace(" <end>", "").strip()
    print("Predicted Caption: ", decoded_caption)

# Check the prediction for one image of the dataset (the function prints the
# image and the caption; it does not return a value)
generate_caption()
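# The outline at the top lists "Evaluation with BLEU", which this script omits.
# A minimal sketch of how such an evaluation could look, assuming NLTK is
# available (!pip install nltk) and that a generated caption is compared against
# the reference captions of the same validation image:
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction

def bleu_for_caption(references, candidate):
    # references: list of reference caption strings; candidate: generated caption
    refs = [r.replace("<start>", "").replace("<end>", "").split() for r in references]
    return sentence_bleu(refs, candidate.split(),
                         smoothing_function=SmoothingFunction().method1)

# Illustrative call (hypothetical strings, not real model output):
# print(bleu_for_caption(["ripios de arenisca de grano fino"], "ripios de arenisca"))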
"""##**Predicción con imágenes externas**"""
|
| 682 |
+
|
| 683 |
+
from google.colab import files
|
| 684 |
+
import cv2
|
| 685 |
+
|
| 686 |
+
uploaded = files.upload()
|
| 687 |
+
|
| 688 |
+
for fn in uploaded.keys():
|
| 689 |
+
filepath = '/content/' + fn
|
| 690 |
+
sample_img = decode_and_resize(filepath)
|
| 691 |
+
img = sample_img.numpy().clip(0, 255).astype(np.uint8)
|
| 692 |
+
img = cv2.resize(img, (500, 500))
|
| 693 |
+
print(img.shape)
|
| 694 |
+
plt.imshow(img)
|
| 695 |
+
plt.show()
|
| 696 |
+
|
| 697 |
+
#h5_model = tf.keras.models.load_model('/content/gdrive/MyDrive/best_model.h5')
|
| 698 |
+
#img = img.reshape(-1, 359, 359, 3)
|
| 699 |
+
#result = h5_model.predict(img)
|
| 700 |
+
#print(result)
|
| 701 |
+
|
| 702 |
+
#if result[0][5] < 0.5:
|
| 703 |
+
# print("La imagen pertenece a ripios de perforación")
|
| 704 |
+
#else:
|
| 705 |
+
# print ("La imagen no pertenece a ripios de perforación")
|
| 706 |
+
|
| 707 |
+
# Ingresa la imagen a la CNN
|
| 708 |
+
img = tf.expand_dims(sample_img, 0)
|
| 709 |
+
img = caption_model.cnn_model(img)
|
| 710 |
+
|
| 711 |
+
# Las características de la imagen pasan al codificador Transformer
|
| 712 |
+
encoded_img = caption_model.encoder(img, training=False)
|
| 713 |
+
|
| 714 |
+
# Se genera la descripción usando el decodificador Transformer
|
| 715 |
+
decoded_caption = "<start> "
|
| 716 |
+
for i in range(max_decoded_sentence_length):
|
| 717 |
+
tokenized_caption = vectorization([decoded_caption])[:, :-1]
|
| 718 |
+
mask = tf.math.not_equal(tokenized_caption, 0)
|
| 719 |
+
predictions = caption_model.decoder(
|
| 720 |
+
tokenized_caption, encoded_img, training=False, mask=mask
|
| 721 |
+
)
|
| 722 |
+
|
| 723 |
+
sampled_token_index = np.argmax(predictions[0, i, :])
|
| 724 |
+
sampled_token = index_lookup[sampled_token_index]
|
| 725 |
+
if sampled_token == "<end>":
|
| 726 |
+
break
|
| 727 |
+
decoded_caption += " " + sampled_token
|
| 728 |
+
|
| 729 |
+
decoded_caption = decoded_caption.replace("<start> ", "")
|
| 730 |
+
decoded_caption = decoded_caption.replace(" <end>", "").strip()
|
| 731 |
+
print("Predicted Caption: ", decoded_caption)
|
| 732 |
+
|
| 733 |
+
text_to_say = decoded_caption
|
| 734 |
+
lenguage = "es-es"
|
| 735 |
+
# Se crea un archivo mp3 con la predicción resultante
|
| 736 |
+
gtts_object = gTTS(text = text_to_say,
|
| 737 |
+
lang = lenguage,
|
| 738 |
+
slow = False )
|
| 739 |
+
gtts_object.save("/content/gtts.mp3")
|
| 740 |
+
|
| 741 |
+
from IPython.display import Audio
|
| 742 |
+
Audio ("/content/gtts.mp3")
|
| 743 |
+
|
| 744 |
+
"""# **INTERFAZ CON GRADIO**"""
|
| 745 |
+
|
| 746 |
+
!pip install gradio
|
| 747 |
+
|
| 748 |
+
import gradio as gr
|
| 749 |
+
|
| 750 |
+
def generate_caption(sample_img):
    # `sample_img` arrives from Gradio as a NumPy array (H, W, 3).
    # Note: the original cell discarded it and captioned a random validation
    # image instead; here the uploaded image itself is resized and captioned.
    print(sample_img.shape)
    sample_img = tf.image.resize(
        tf.convert_to_tensor(sample_img, dtype=tf.float32), IMAGE_SIZE
    )

    # Pass the image to the CNN
    img = tf.expand_dims(sample_img, 0)
    img = caption_model.cnn_model(img)

    # Pass the image features to the Transformer encoder
    encoded_img = caption_model.encoder(img, training=False)

    # Generate the caption with the Transformer decoder
    decoded_caption = "<start> "
    for i in range(max_decoded_sentence_length):
        tokenized_caption = vectorization([decoded_caption])[:, :-1]
        mask = tf.math.not_equal(tokenized_caption, 0)
        predictions = caption_model.decoder(
            tokenized_caption, encoded_img, training=False, mask=mask
        )
        sampled_token_index = np.argmax(predictions[0, i, :])
        sampled_token = index_lookup[sampled_token_index]
        if sampled_token == "<end>":
            break
        decoded_caption += " " + sampled_token

    decoded_caption = decoded_caption.replace("<start> ", "")
    decoded_caption = decoded_caption.replace(" <end>", "").strip()

    # Create an mp3 file with the resulting prediction
    text_to_say = decoded_caption
    language = "es-es"
    gtts_object = gTTS(text=text_to_say,
                       lang=language,
                       slow=False)
    gtts_object.save("/content/gtts.mp3")
    audio = "/content/gtts.mp3"

    return decoded_caption, audio
demo = gr.Interface(
    fn=generate_caption,
    inputs=gr.Image(label="Image"),
    outputs=[gr.Text(label="Text description"), gr.Audio(label="Audio")],
    theme='darkhuggingface',
    title='DESCRIPTION OF DRILL-CUTTINGS IMAGES',
    description='This interface automatically describes images of drill cuttings. Upload the image to be processed in the box on the left; the textual and spoken description of the image appears in the boxes on the right. Images without measurement marks or symbols are recommended, since these could affect the model prediction.',
    article='Note: if images unrelated to drill-cuttings samples are submitted, the authors of this application are not responsible for the results; the description model is trained to always produce an output.')
demo.launch()