Pingul committed on
Commit
0a9d067
·
verified ·
1 Parent(s): 03da3a2

Upload 14 files

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ utils/emotions.csv filter=lfs diff=lfs merge=lfs -text
utils/balanced_dataset.csv ADDED
The diff for this file is too large to render. See raw diff
 
utils/chatgpt_config.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from openai import OpenAI
2
+ from dotenv import load_dotenv
3
+ import os
4
+
5
# Relative path to the .env file one directory above this module (project root).
dotenv_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), ".env")

# Load environment variables from the specified .env file.
load_dotenv(dotenv_path)

# Read the API key from the environment.
# NOTE(review): if OPENAI_API_KEY is absent API_KEY is None and the client
# will fail on the first request — confirm the .env file is deployed.
API_KEY = os.getenv("OPENAI_API_KEY")

# Module-level client shared by every call in this module.
client = OpenAI(api_key=API_KEY)
15
+
16
def send_ChatGPT(emotion_face=None, emotion_text=None):
    """Ask the chat model for recommendations based on detected emotions.

    Args:
        emotion_face: Emotion label detected from a face image, or None.
        emotion_text: Emotion label detected from chat text, or None.

    Returns:
        str: The model's recommendation message.

    Raises:
        ValueError: If neither emotion is provided.
    """
    # Build the prompt from whichever emotion sources were provided.
    # (Prompt wording fixed: the originals contained misspellings such as
    # "expiriencing" and run-on grammar that were sent to the API verbatim.)
    if emotion_face and emotion_text:
        prompt = (
            "Act like you are a psychologist expert in emotions. I want a "
            "recommendation of music, fun activities, actions, or advice if "
            f"the image I am sending shows a {emotion_face} emotion and the "
            f"text from a chat that the user provided shows a {emotion_text} emotion."
        )
    elif emotion_face:
        prompt = (
            "I want a recommendation of music, fun activities, actions, or "
            f"advice because I am experiencing a {emotion_face} emotion."
        )
    elif emotion_text:
        prompt = (
            "I want a recommendation of music, fun activities, actions, or "
            f"advice because I am experiencing a {emotion_text} emotion."
        )
    else:
        raise ValueError("At least one of 'emotion_face' or 'emotion_text' must be provided.")

    # Call the chat model with the module-level client.
    response = client.chat.completions.create(
        model="gpt-4o",
        messages=[
            {"role": "user", "content": prompt}
        ],
        temperature=0.81,
        max_tokens=2000,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0,
    )

    return response.choices[0].message.content
utils/dataset.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from sklearn.utils import resample
2
+ import pandas as pd
3
+ import re
4
+ import os
5
+
6
# Locate the raw emotions dataset next to this script.
file_path=os.path.join(os.path.dirname(__file__),'emotions.csv')

data=pd.read_csv(file_path)

# Show the class distribution of the loaded data.
class_counts = data["label"].value_counts()
print("Distribución después de la concatenación:")
print(class_counts)

# Size of the smallest class: every class is downsampled to this count.
min_class_count = class_counts.min()

# Balance the dataset by downsampling each class (without replacement)
# to the size of the minority class.
balanced_data = pd.concat([
    resample(data[data["label"] == label],
             replace=False,  # no duplicated rows
             n_samples=min_class_count,  # match the minority-class size
             random_state=42)
    for label in class_counts.index
])

# Shuffle rows so classes are interleaved; fixed seed for reproducibility.
balanced_data = balanced_data.sample(frac=1, random_state=42)
27
+
28
def clean_text(text):
    """Normalize a raw text sample for training.

    Strips URLs, drops every character that is not a letter, digit or
    whitespace, and collapses whitespace runs into single spaces.
    """
    # Step 1: drop complete links containing http, https, or www.
    without_links = re.sub(r"(http[s]?://[^\s]+|www\.[^\s]+)", "", text)
    # Step 2: keep only letters, digits and whitespace.
    alnum_only = re.sub(r"[^A-Za-z0-9\s]", "", without_links)
    # Step 3: collapse extra whitespace and trim both ends.
    return re.sub(r"\s+", " ", alnum_only).strip()
36
+
37
# Apply the same cleaning to every text sample of the balanced dataset.
balanced_data["text"] = balanced_data["text"].apply(clean_text)

# Show the final, balanced class distribution.
print("\nDistribución balanceada de clases:")
print(balanced_data["label"].value_counts())

# Save the balanced dataset next to this script — matching how
# 'emotions.csv' is resolved above — instead of the current working
# directory, so the output location does not depend on where the
# script is launched from.
output_path = os.path.join(os.path.dirname(__file__), "balanced_dataset.csv")
balanced_data.to_csv(output_path, index=False)
utils/emotions.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8cd7d4d122e49efd33d0cccdfb3ef46acac24795fe04d4ab42193ea932de3088
3
+ size 42109557
utils/inigo.jpg ADDED
utils/model1.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cebe5071b7014a6e135db4c4ec360e6a5694753f88c309014d34e50969301a84
3
+ size 59535664
utils/model13.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:687265749cc1cdd1c82bcad61299353d41e688cc43fcd00290aee8921599dd36
3
+ size 75734512
utils/modelFace.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from ultralytics import YOLO
2
+ import tensorflow as tf
3
+ from tensorflow.keras.models import load_model
4
+ from tensorflow.keras.preprocessing.image import img_to_array
5
+
6
+ import cv2
7
+ import numpy as np
8
+ import sys
9
+ import os
10
+ import base64
11
+
12
+
13
# Preprocess an image given as a pixel array for the emotion classifier.
def preprocess_image_from_array(image, target_size=(256, 256)):
    """Resize *image*, add a batch axis and scale pixel values into [0, 1].

    Returns an array of shape (1, H, W, C) ready for ``model.predict``.
    """
    resized = cv2.resize(image, target_size)
    batch = np.expand_dims(img_to_array(resized), axis=0)
    return batch / 255.0
20
+
21
# Detect the most confident face in an image and crop it with padding.
def detect_and_crop_face(image, yolo_model, padding=0.1):
    """Run the YOLO face detector and return ``(cropped_face, confidence)``.

    The box of the highest-confidence detection is expanded by *padding*
    (a fraction of the box size) on each side, clamped to the image
    bounds. Returns ``(None, 0)`` when no face is detected.
    """
    detections = yolo_model(image)
    boxes = detections[0].boxes.xyxy.cpu().numpy()        # box coordinates
    confidences = detections[0].boxes.conf.cpu().numpy().tolist()  # scores

    if len(boxes) == 0:
        return None, 0  # no face detected

    # Keep only the highest-confidence detection.
    best = confidences.index(max(confidences))
    x1, y1, x2, y2 = (int(c) for c in boxes[best])

    # Padding size is proportional to the box dimensions.
    img_h, img_w, _ = image.shape
    pad_x = int((x2 - x1) * padding)
    pad_y = int((y2 - y1) * padding)

    # Clamp the padded box so it never leaves the image.
    x1, x2 = max(0, x1 - pad_x), min(img_w, x2 + pad_x)
    y1, y2 = max(0, y1 - pad_y), min(img_h, y2 + pad_y)

    return image[y1:y2, x1:x2], max(confidences)
49
+
50
+
51
# Main entry point: classify the emotion of a face in a base64-encoded image.
def predict_face(img_base64):
    """Decode *img_base64*, detect the best face and classify its emotion.

    Returns the predicted class name as a string, or None when the image
    cannot be decoded or no face is found.
    """
    # Face detector and emotion classifier, loaded relative to this file.
    yolo_model = YOLO(os.path.join(os.path.dirname(__file__), "yolov8m-face.pt"))
    emotion_model = load_model(os.path.join(os.path.dirname(__file__), './model13.h5'))
    # class_names = ["sadness", "happiness", "love", "anger", "worry", "neutral"]
    class_names = ["anger", "fear", "happy", "neutral", "sad", "surprise"]

    # Decode the base64 payload into an OpenCV BGR image.
    raw_bytes = base64.b64decode(img_base64)
    image = cv2.imdecode(np.frombuffer(raw_bytes, np.uint8), cv2.IMREAD_COLOR)
    if image is None:
        print("Error: no se pudo cargar la imagen.")
        return

    # Detect and crop the face; bail out early when nothing is found.
    face, confidence = detect_and_crop_face(image, yolo_model)
    if face is None:
        print("No se detectó ningún rostro en la imagen.")
        return

    print(f"Rostro detectado con confianza: {confidence*100:.2f}%")
    prediction = emotion_model.predict(preprocess_image_from_array(face))
    print(prediction)
    predicted_class = np.argmax(prediction)
    confidence = np.max(prediction)

    # (Plotting of the cropped face kept disabled.)
    # plt.imshow(cv2.cvtColor(face, cv2.COLOR_BGR2RGB))
    # plt.title(f"Emoción: {class_names[predicted_class]} ({confidence*100:.2f}%)")
    # plt.axis("off")
    # plt.show()
    return str(class_names[predicted_class])
90
+
91
+ # if __name__ == "__main__":
92
+ # predict_face()
utils/modelText.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from tensorflow.keras.models import load_model
2
+ from tensorflow.keras.preprocessing.text import Tokenizer
3
+ from tensorflow.keras.preprocessing.sequence import pad_sequences
4
+
5
+ import numpy as np
6
+ import pickle # Si deseas guardar/cargar el tokenizer
7
+ import re
8
+ import os
9
+
10
# Load the trained text-emotion model.
model_path = os.path.join(os.path.dirname(__file__),'model_2_texto.h5') # change the version number here if needed
model = load_model(model_path)

# Emotion labels — must match the label encoding used during training.
EMOTION_LABELS = {
    0: 'sadness',
    1: 'joy',
    2: 'love',
    3: 'anger',
    4: 'fear',
    5: 'surprise',
}

# Load the fitted tokenizer (assumed to have been pickled after training).
tokenizer_path = os.path.join(os.path.dirname(__file__),'tokenizer.pickle') # change the name if you saved it elsewhere
with open(tokenizer_path, 'rb') as handle:
    tokenizer = pickle.load(handle)

# Padding configuration.
MAX_LEN = 100 # must match the sequence length used during training
31
+
32
# Clean a raw text string before tokenization.
def clean_text(text):
    """Lowercase *text* and strip URLs, mentions/hashtags and special
    characters; any non-string input yields the empty string.
    """
    if not isinstance(text, str):
        return ""
    no_urls = re.sub(r'http\S+|www\S+|https\S+', '', text)
    no_tags = re.sub(r'@\w+|#\w+', '', no_urls)
    plain = re.sub(r'[^a-zA-Z0-9\s!?.,]', '', no_tags)
    return plain.lower().strip()
41
+
42
# Predict the dominant emotion of a text sample.
def predict_text(text):
    """Return the name of the most probable emotion for *text*.

    The text is cleaned with this module's ``clean_text``, tokenized,
    padded to MAX_LEN and scored by the loaded model.
    """
    # texts_to_sequences expects a *list* of texts; the original passed the
    # raw string, which made the tokenizer treat every character as a
    # separate document. Cleaning mirrors the preprocessing pipeline —
    # presumably the same cleanup used at training time (verify).
    sequence = tokenizer.texts_to_sequences([clean_text(text)])
    padded = pad_sequences(sequence, maxlen=MAX_LEN)
    prediction = model.predict(padded)[0]
    main_emotion = EMOTION_LABELS[np.argmax(prediction)]
    return str(main_emotion)
50
+
51
# Example prediction — guarded so importing this module (as the app does)
# no longer triggers a throwaway model call as a side effect.
if __name__ == "__main__":
    sample_text = 'I hate my stupid dog'
    emotion = predict_text(sample_text)
    print(f"La emoción principal es: {emotion}")
utils/model_2_texto.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c01eff2a2ebead627bc533f9dbb34b25ef0a2d444838f5b7658efba08d82729e
3
+ size 2023640
utils/tokenizer.pickle ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5610b8bf574050c3a8c9f7b7c68a7fd61ca582e9b2b58999025e5d72847166fd
3
+ size 1306852
utils/tokenizer1.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:585e82135ef83a4748422a5dc571aadafeb527bcf76f3d46667c839aef85ad92
3
+ size 950727
utils/training_metrics_2.png ADDED
utils/yolov8m-face.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:219f490a97d5722972e1660725fa7cfab54e46c944b833debc28c2302aa1be93
3
+ size 52028566