Abhinav-kk committed on
Commit
29818ad
·
1 Parent(s): ddc4f4a

Fixed issue with other models not working

Browse files
Files changed (1) hide show
  1. app.py +53 -6
app.py CHANGED
@@ -54,7 +54,13 @@ def preprocess_image(img_array, model_name, target_size=(48, 48)):
54
 
55
  # Repeat channels if model is not base_cnn
56
  if model_name != "Scratch CNN Model":
57
- image = np.repeat(image[..., np.newaxis], 3, axis=-1)
 
 
 
 
 
 
58
 
59
  # Add batch dimension
60
  image = np.expand_dims(image, axis=0)
@@ -68,6 +74,30 @@ def predict_emotion(face_array, model, model_name, emotion_list):
68
  return predicted_emotion
69
 
70
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
71
  def getResult(selected_option, image):
72
  # Load image in memory
73
  img = Image.open(BytesIO(image.read()))
@@ -77,7 +107,7 @@ def getResult(selected_option, image):
77
  model_path = MODEL_PATHS[selected_option]
78
  model = load_model(model_path)
79
 
80
- # Process the image
81
  result, face_array = extract_face(img_array)
82
  if result == "No face detected.":
83
  return result, [], None
@@ -86,11 +116,28 @@ def getResult(selected_option, image):
86
  predicted_emotion = predict_emotion(
87
  face_array, model, selected_option, emotion_list)
88
 
89
- # Convert images to Base64 for embedding in HTML
90
- bounding_box_image = convert_image_to_base64(img)
91
- cropped_face_image = convert_image_to_base64(Image.fromarray(face_array))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
92
 
93
- return "The predicted emotion is:", [bounding_box_image, cropped_face_image], predicted_emotion
 
94
 
95
 
96
  app = Flask(__name__)
 
54
 
55
  # Repeat channels if model is not base_cnn
56
  if model_name != "Scratch CNN Model":
57
+ # If the image is grayscale (shape: [height, width, 1])
58
+ if image.ndim == 3 and image.shape[-1] == 1:
59
+ # Repeat the grayscale channels to create RGB
60
+ image = np.repeat(image, 3, axis=-1)
61
+ # If it's already RGB (shape: [height, width, 3])
62
+ elif image.ndim == 3 and image.shape[-1] == 3:
63
+ pass # No change needed
64
 
65
  # Add batch dimension
66
  image = np.expand_dims(image, axis=0)
 
74
  return predicted_emotion
75
 
76
 
77
def draw_bounding_box(img_array):
    """Detect the first face in *img_array* and draw a red box around it.

    NOTE: mutates *img_array* in place via cv2.rectangle; the caller
    receives the same array object back.

    Returns:
        ("Face detected.", img_array) when a face is found, or
        ("No face detected.", None) when the cascade finds nothing.
    """
    # Frontal-face Haar cascade bundled with the OpenCV install.
    detector = cv2.CascadeClassifier(
        os.path.join(cv2.data.haarcascades,
                     "haarcascade_frontalface_default.xml"))

    # Cascade detection operates on a single-channel image.
    gray_frame = cv2.cvtColor(img_array, cv2.COLOR_BGR2GRAY)

    detections = detector.detectMultiScale(
        gray_frame, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))

    if len(detections) == 0:
        return "No face detected.", None

    # Only the first detection is boxed, mirroring the single-face
    # assumption used elsewhere in the pipeline.
    x, y, w, h = detections[0]
    cv2.rectangle(img_array, (x, y), (x + w, y + h), (255, 0, 0), 2)

    return "Face detected.", img_array
99
+
100
+
101
  def getResult(selected_option, image):
102
  # Load image in memory
103
  img = Image.open(BytesIO(image.read()))
 
107
  model_path = MODEL_PATHS[selected_option]
108
  model = load_model(model_path)
109
 
110
+ # Process the image to extract the face
111
  result, face_array = extract_face(img_array)
112
  if result == "No face detected.":
113
  return result, [], None
 
116
  predicted_emotion = predict_emotion(
117
  face_array, model, selected_option, emotion_list)
118
 
119
+ # Call the draw_bounding_box function to get the image with a bounding box
120
+ result, bounding_box_img_array = draw_bounding_box(img_array)
121
+
122
+ if result == "No face detected.":
123
+ return result, [], None
124
+
125
+ # Convert the image with bounding box to Base64
126
+ bounding_box_image = convert_image_to_base64(
127
+ Image.fromarray(bounding_box_img_array))
128
+
129
+ # Convert the cropped face image to Base64
130
+ cropped_face_image = convert_image_to_base64(
131
+ Image.fromarray(cv2.cvtColor(face_array, cv2.COLOR_BGR2GRAY)))
132
+
133
+ # Convert the image to grayscale for display
134
+ grayscale_image = cv2.cvtColor(img_array, cv2.COLOR_BGR2GRAY)
135
+ grayscale_pil_image = Image.fromarray(grayscale_image)
136
+ grayscale_base64 = convert_image_to_base64(
137
+ grayscale_pil_image) # Convert grayscale to Base64
138
 
139
+ # Return the predicted emotion, list of images (bounding box, cropped face), and grayscale image
140
+ return "The predicted emotion is:", [bounding_box_image, cropped_face_image, grayscale_base64], predicted_emotion
141
 
142
 
143
  app = Flask(__name__)