Commit ·
b9e3d2c
1
Parent(s): 71268c5
Modified code to not save in directory
Browse files- app.py +45 -87
- templates/index.html +8 -9
app.py
CHANGED
|
@@ -4,6 +4,9 @@ import numpy as np
|
|
| 4 |
from keras.models import load_model
|
| 5 |
import cv2
|
| 6 |
import os
|
|
|
|
|
|
|
|
|
|
| 7 |
|
| 8 |
# Define paths for the models
|
| 9 |
MODEL_PATHS = {
|
|
@@ -14,100 +17,80 @@ MODEL_PATHS = {
|
|
| 14 |
}
|
| 15 |
|
| 16 |
|
| 17 |
-
def
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 18 |
# Load the Haar cascade for face detection
|
| 19 |
cascade_path = os.path.join(
|
| 20 |
cv2.data.haarcascades, "haarcascade_frontalface_default.xml")
|
| 21 |
face_cascade = cv2.CascadeClassifier(cascade_path)
|
| 22 |
|
| 23 |
-
# Read the image in grayscale mode
|
| 24 |
-
img = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
|
| 25 |
-
|
| 26 |
-
if img is None:
|
| 27 |
-
print("Could not read the image.")
|
| 28 |
-
return "No face detected."
|
| 29 |
-
|
| 30 |
-
# Save the grayscale image
|
| 31 |
-
gray_output_path = "gray_" + output_path
|
| 32 |
-
cv2.imwrite(gray_output_path, img)
|
| 33 |
-
print(f"Grayscale image saved as {gray_output_path}")
|
| 34 |
-
|
| 35 |
# Detect faces in the grayscale image
|
| 36 |
faces = face_cascade.detectMultiScale(
|
| 37 |
-
|
| 38 |
|
| 39 |
-
# Check if a face is detected
|
| 40 |
if len(faces) == 0:
|
| 41 |
-
|
| 42 |
-
return "No face detected."
|
| 43 |
|
| 44 |
# Extract the first detected face (assuming there's only one face)
|
| 45 |
x, y, w, h = faces[0]
|
| 46 |
-
face =
|
| 47 |
|
| 48 |
# Resize the face to 48x48 pixels
|
| 49 |
face_resized = cv2.resize(face, (48, 48))
|
| 50 |
-
|
| 51 |
-
# Save the cropped and resized grayscale face
|
| 52 |
-
cv2.imwrite(output_path, face_resized)
|
| 53 |
-
print(f"Face extracted, resized to 48x48, and saved as {output_path}")
|
| 54 |
|
| 55 |
|
| 56 |
-
def preprocess_image(
|
| 57 |
-
#
|
| 58 |
-
image =
|
| 59 |
-
|
| 60 |
-
image = img_to_array(image) #
|
| 61 |
-
image = image / 255.0 # Normalize pixel values
|
| 62 |
|
| 63 |
# Repeat channels if model is not base_cnn
|
| 64 |
if model_name != "Scratch CNN Model":
|
| 65 |
-
image = np.repeat(image, 3, axis=-1)
|
| 66 |
|
| 67 |
# Add batch dimension
|
| 68 |
image = np.expand_dims(image, axis=0)
|
| 69 |
return image
|
| 70 |
|
| 71 |
|
| 72 |
-
def predict_emotion(
|
| 73 |
-
|
| 74 |
-
prediction = model.predict(
|
| 75 |
predicted_emotion = emotion_list[np.argmax(prediction)]
|
| 76 |
return predicted_emotion
|
| 77 |
|
| 78 |
|
| 79 |
-
def
|
| 80 |
-
# Load
|
| 81 |
-
|
| 82 |
-
|
| 83 |
-
face_cascade = cv2.CascadeClassifier(cascade_path)
|
| 84 |
-
|
| 85 |
-
# Read the image
|
| 86 |
-
img = cv2.imread(image_path)
|
| 87 |
-
if img is None:
|
| 88 |
-
print("Could not read the image.")
|
| 89 |
-
return "No face detected."
|
| 90 |
|
| 91 |
-
#
|
| 92 |
-
|
|
|
|
| 93 |
|
| 94 |
-
#
|
| 95 |
-
|
| 96 |
-
|
|
|
|
| 97 |
|
| 98 |
-
#
|
| 99 |
-
|
| 100 |
-
|
| 101 |
-
return "No face detected."
|
| 102 |
|
| 103 |
-
#
|
| 104 |
-
|
| 105 |
-
|
| 106 |
|
| 107 |
-
|
| 108 |
-
cv2.imwrite(output_path, img)
|
| 109 |
-
print(f"Image with bounding box saved as {output_path}")
|
| 110 |
-
return f"Image with bounding box saved as {output_path}"
|
| 111 |
|
| 112 |
|
| 113 |
app = Flask(__name__)
|
|
@@ -115,43 +98,18 @@ emotion_list = ['angry', 'disgust', 'fear',
|
|
| 115 |
'happy', 'neutral', 'sad', 'surprise']
|
| 116 |
|
| 117 |
|
| 118 |
-
def getResult(selected_option, image):
|
| 119 |
-
image.save("./static/image.jpg")
|
| 120 |
-
model_path = MODEL_PATHS[selected_option]
|
| 121 |
-
model = load_model(model_path)
|
| 122 |
-
|
| 123 |
-
bounding_face_result = draw_bounding_box("./static/image.jpg")
|
| 124 |
-
if bounding_face_result == "No face detected.":
|
| 125 |
-
return bounding_face_result, [], None
|
| 126 |
-
|
| 127 |
-
extract_face_result = extract_face(
|
| 128 |
-
"./static/image.jpg", output_path="./static/cropped_face.jpg")
|
| 129 |
-
if extract_face_result == "No face detected.":
|
| 130 |
-
return extract_face_result, [], None
|
| 131 |
-
|
| 132 |
-
image_path = './static/cropped_face.jpg'
|
| 133 |
-
predicted_emotion = predict_emotion(
|
| 134 |
-
image_path, model, selected_option, emotion_list)
|
| 135 |
-
|
| 136 |
-
images = [url_for('static', filename='face_with_bounding_box.jpg'),
|
| 137 |
-
url_for('static', filename='cropped_face.jpg')]
|
| 138 |
-
|
| 139 |
-
return "The predicted emotion is:", images, predicted_emotion
|
| 140 |
-
|
| 141 |
-
|
| 142 |
@app.route('/', methods=['GET', 'POST'])
|
| 143 |
def index():
|
| 144 |
result = None
|
| 145 |
images = []
|
| 146 |
predicted_emotion = None
|
| 147 |
-
|
| 148 |
-
|
| 149 |
-
"MobileNet Model", "VGG16 Model"]
|
| 150 |
|
| 151 |
if request.method == 'POST':
|
| 152 |
selected_option = request.form.get('option')
|
| 153 |
image = request.files.get('image')
|
| 154 |
-
if selected_option != "Select a model":
|
| 155 |
result, images, predicted_emotion = getResult(
|
| 156 |
selected_option, image)
|
| 157 |
return render_template('index.html', result=result, dropdown_options=dropdown_options, images=images, predicted_emotion=predicted_emotion, model_name=selected_option)
|
|
|
|
| 4 |
from keras.models import load_model
|
| 5 |
import cv2
|
| 6 |
import os
|
| 7 |
+
from io import BytesIO
|
| 8 |
+
from PIL import Image
|
| 9 |
+
import base64
|
| 10 |
|
| 11 |
# Define paths for the models
|
| 12 |
MODEL_PATHS = {
|
|
|
|
| 17 |
}
|
| 18 |
|
| 19 |
|
| 20 |
+
def convert_image_to_base64(image):
    """Encode a PIL image as a Base64 string of its PNG bytes.

    The string can be embedded directly in an HTML ``data:`` URI, which
    lets the app serve result images without writing them to disk.
    """
    png_buffer = BytesIO()
    image.save(png_buffer, format="PNG")
    encoded = base64.b64encode(png_buffer.getvalue())
    return encoded.decode()
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def extract_face(img_array):
    """Detect the first face in an image array and return it resized to 48x48.

    Parameters
    ----------
    img_array : numpy.ndarray
        Image as a NumPy array — either a 3-channel color image (RGB order,
        as produced by ``PIL.Image.convert('RGB')`` in getResult) or an
        already-grayscale 2-D array.

    Returns
    -------
    tuple
        ``("Face detected.", face_48x48)`` on success, where the crop keeps
        the input's channels; ``("No face detected.", None)`` otherwise.
    """
    # Load the Haar cascade bundled with OpenCV for frontal-face detection
    cascade_path = os.path.join(
        cv2.data.haarcascades, "haarcascade_frontalface_default.xml")
    face_cascade = cv2.CascadeClassifier(cascade_path)

    # Haar cascades operate on single-channel images. The caller supplies an
    # RGB array (OpenCV's implicit conversion assumes BGR), so convert
    # explicitly for detection only; the returned crop keeps the original
    # channels so downstream preprocessing is unchanged.
    if img_array.ndim == 3:
        gray = cv2.cvtColor(img_array, cv2.COLOR_RGB2GRAY)
    else:
        gray = img_array

    # Detect faces in the grayscale image
    faces = face_cascade.detectMultiScale(
        gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))

    if len(faces) == 0:
        return "No face detected.", None

    # Extract the first detected face (assuming there's only one face)
    x, y, w, h = faces[0]
    face = img_array[y:y+h, x:x+w]

    # Resize the face to 48x48 pixels
    face_resized = cv2.resize(face, (48, 48))
    return "Face detected.", face_resized
|
|
|
|
|
|
|
|
|
|
| 47 |
|
| 48 |
|
| 49 |
+
def preprocess_image(img_array, model_name, target_size=(48, 48)):
    """Prepare a face crop for model inference.

    Parameters
    ----------
    img_array : numpy.ndarray
        Face crop; 3-channel RGB (from PIL via ``convert('RGB')``) or 2-D
        grayscale.
    model_name : str
        Selected model name; anything other than "Scratch CNN Model" gets
        the single channel repeated to 3 for pretrained backbones.
    target_size : tuple, optional
        Spatial size expected by the models (default 48x48).

    Returns
    -------
    numpy.ndarray
        Batch of one image, shape ``(1, H, W, 1)`` or ``(1, H, W, 3)``,
        values normalized to [0, 1].
    """
    # Collapse color input to one channel. The array originates from a PIL
    # image converted with .convert('RGB'), so the channel order is RGB,
    # not OpenCV's default BGR — use COLOR_RGB2GRAY accordingly.
    if img_array.ndim == 3:
        image = cv2.cvtColor(img_array, cv2.COLOR_RGB2GRAY)
    else:
        image = img_array
    image = cv2.resize(image, target_size)
    image = img_to_array(image) / 255.0  # Normalize pixel values

    # img_to_array already promotes 2-D input to (H, W, 1); ensure the
    # channel axis exists without ever adding a second one.
    if image.ndim == 2:
        image = image[..., np.newaxis]

    # Repeat channels if model is not base_cnn. BUG FIX: the previous
    # np.repeat(image[..., np.newaxis], 3, axis=-1) added an extra axis to
    # the (H, W, 1) array, producing a rank-5 batch that breaks the
    # pretrained-model input shape.
    if model_name != "Scratch CNN Model":
        image = np.repeat(image, 3, axis=-1)

    # Add batch dimension
    image = np.expand_dims(image, axis=0)
    return image
|
| 62 |
|
| 63 |
|
| 64 |
+
def predict_emotion(face_array, model, model_name, emotion_list):
    """Classify the emotion shown in a face crop.

    Preprocesses ``face_array`` for the selected model, runs inference,
    and returns the label from ``emotion_list`` with the highest score.
    """
    batch = preprocess_image(face_array, model_name)
    scores = model.predict(batch)
    return emotion_list[np.argmax(scores)]
|
| 69 |
|
| 70 |
|
| 71 |
+
def getResult(selected_option, image):
    """Run the full in-memory prediction pipeline for one uploaded image.

    Parameters
    ----------
    selected_option : str
        Key into MODEL_PATHS naming the model to use.
    image : file-like
        The uploaded image (e.g. a werkzeug FileStorage).

    Returns
    -------
    tuple
        ``(message, images, predicted_emotion)`` where ``images`` is a list
        of Base64-encoded PNG strings for inline embedding in the template,
        or ``(message, [], None)`` when no face is found.
    """
    # Decode the upload entirely in memory — nothing is written to disk.
    pil_img = Image.open(BytesIO(image.read()))
    img_array = np.array(pil_img.convert('RGB'))

    # Load the model based on the selected option
    model = load_model(MODEL_PATHS[selected_option])

    # Locate and crop the face
    result, face_array = extract_face(img_array)
    if result == "No face detected.":
        return result, [], None

    # Classify the cropped face
    predicted_emotion = predict_emotion(
        face_array, model, selected_option, emotion_list)

    # Encode both images as Base64 for embedding in HTML.
    # NOTE(review): the first image is the original upload unmodified — no
    # bounding box is actually drawn on it; confirm whether that is intended.
    full_image_b64 = convert_image_to_base64(pil_img)
    face_b64 = convert_image_to_base64(Image.fromarray(face_array))

    return "The predicted emotion is:", [full_image_b64, face_b64], predicted_emotion
|
|
|
|
|
|
|
|
|
|
| 94 |
|
| 95 |
|
| 96 |
app = Flask(__name__)
|
|
|
|
| 98 |
'happy', 'neutral', 'sad', 'surprise']
|
| 99 |
|
| 100 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 101 |
@app.route('/', methods=['GET', 'POST'])
def index():
    """Render the main page; on POST, run emotion prediction on the upload."""
    result = None
    images = []
    predicted_emotion = None
    # BUG FIX: selected_option was only assigned inside the POST branch, so
    # a plain GET hit a NameError in the render_template call below.
    selected_option = None
    dropdown_options = ["Select a model", "Scratch CNN Model",
                        "ImageNet Model", "MobileNet Model", "VGG16 Model"]

    if request.method == 'POST':
        selected_option = request.form.get('option')
        image = request.files.get('image')
        # Only run inference when a real model is chosen and a file arrived.
        if selected_option != "Select a model" and image:
            result, images, predicted_emotion = getResult(
                selected_option, image)
    return render_template('index.html', result=result, dropdown_options=dropdown_options, images=images, predicted_emotion=predicted_emotion, model_name=selected_option)
|
templates/index.html
CHANGED
|
@@ -37,18 +37,17 @@
|
|
| 37 |
</form>
|
| 38 |
|
| 39 |
<div class="image-section">
|
| 40 |
-
{%
|
| 41 |
<div class="image-container">
|
| 42 |
-
<img src="{{
|
| 43 |
-
<p>
|
| 44 |
-
{{
|
| 45 |
-
img.split('/').pop().split('.')[0].replace('_', ' ').capitalize()
|
| 46 |
-
}}
|
| 47 |
-
</p>
|
| 48 |
</div>
|
| 49 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 50 |
</div>
|
| 51 |
-
|
| 52 |
<!-- Loading overlay -->
|
| 53 |
<div class="loading-overlay" id="loadingOverlay" style="display: none;">
|
| 54 |
<div class="spinner"></div>
|
|
|
|
| 37 |
</form>
|
| 38 |
|
| 39 |
<div class="image-section">
|
| 40 |
+
{% if images %}
|
| 41 |
<div class="image-container">
|
| 42 |
+
<img src="data:image/png;base64,{{ images[0] }}" alt="Bounding Box Image" class="form-image">
|
| 43 |
+
<p>Face with Bounding Box</p>
|
|
|
|
|
|
|
|
|
|
|
|
|
| 44 |
</div>
|
| 45 |
+
<div class="image-container">
|
| 46 |
+
<img src="data:image/png;base64,{{ images[1] }}" alt="Cropped Face Image" class="form-image">
|
| 47 |
+
<p>Cropped Face</p>
|
| 48 |
+
</div>
|
| 49 |
+
{% endif %}
|
| 50 |
</div>
|
|
|
|
| 51 |
<!-- Loading overlay -->
|
| 52 |
<div class="loading-overlay" id="loadingOverlay" style="display: none;">
|
| 53 |
<div class="spinner"></div>
|