Commit · 4b60775
Parent(s): e98339d
added docstrings, used more consistent variable names
app.py CHANGED
@@ -10,6 +10,14 @@ from PIL import Image
 
 #gradio interface
 def process_image(img):
+    """
+    Parameters:
+        img: np.ndarray
+            an image (e.g., returned by cv2.imread)
+    Returns:
+        img: np.ndarray
+            an image annotated with the bounding box and label
+    """
     cv2_face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades +
                                              'haarcascade_frontalface_default.xml')
     img = resize_img(img)
@@ -21,30 +29,69 @@ def process_image(img):
     return(img)
 
 def annotate_objects_in_image(img, obj_cascade, labeldict, model):
-
-
-
+    """
+    Parameters:
+        img: np.ndarray
+            an image (e.g., returned by cv2.imread)
+        obj_cascade: cv2.CascadeClassifier
+            OpenCV cascade classifier that can detect certain objects
+        labeldict: dict
+            a dictionary for decoding the model predictions (e.g., {0: happy, 1: sad})
+        model: keras.engine.functional.Functional
+            a Keras model instance (e.g., that is returned by keras.models.load_model)
+    Returns:
+        img: np.ndarray
+            an image annotated with the bounding box and label
+        emotion: str
+            predicted emotion of the face image
+    """
+    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+    objects = obj_cascade.detectMultiScale(img_gray, 1.1, 4)
+    emotion = ""
     try:
         for (x, y, w, h) in objects:
             face = img[y-50:y+h+50, x-50:x+w+50]
-
+            emotion, prob = predict_emotion_from_image(face, labeldict, model)
             font = cv2.FONT_HERSHEY_SIMPLEX
-            cv2.putText(img,
+            cv2.putText(img, emotion, (x, y), font, 1, (0, 0, 255), 2, cv2.LINE_AA)
             cv2.rectangle(img, (x-25, y-25), (x+w+25, y+h+25), (0, 255, 0), 2)
     except Exception as ex:
-
-    return img,
+        emotion = ""
+    return img, emotion
 
 def predict_emotion_from_image (face_raw, labeldict, model):
-
-
-
-
+    """
+    Parameters:
+        face_raw: np.ndarray
+            a square-like image of a human face (e.g., returned by cv2.imread)
+        labeldict: dict
+            a dictionary for decoding the model predictions (e.g., {0: happy, 1: sad})
+        model: keras.engine.functional.Functional
+            a Keras model instance (e.g., that is returned by keras.models.load_model)
+    Returns:
+        emotion: str
+            predicted emotion of the face image
+        prob: float
+            percent probability of the predicted emotion
+    """
+    face_res_arr = np.array(cv2.resize(face_raw, (48, 48)))
+    face_res_arr_gray = face_res_arr/255
+    face_res_arr_gray_4dims = np.expand_dims(face_res_arr_gray, axis=0)
+    prediction_vec = model.predict(face_res_arr_gray_4dims)
     prediction = np.argmax(prediction_vec)
     emotion = labeldict[prediction]
-
+    prob = prediction_vec.max()*100
+    return (emotion, prob)
 
 def resize_img(img):
+    """
+    Parameters:
+        img: np.ndarray
+            a potentially oversized image (e.g., returned by cv2.imread)
+    Returns:
+        img: np.ndarray
+            a resized and potentially L-R flipped image
+    """
     if img.shape[0] > 960:
         img_AR = img.shape[0]/img.shape[1]
         #print(f'img shape: {img.shape}; aspect ratio: {img_AR}')
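
For context only (not part of this commit): the "#gradio interface" comment suggests process_image is the function exposed to the Space's UI. A minimal sketch of how that wiring might look is shown below; the import path, the gr.Image components, and the launch call are assumptions, since the actual Interface setup is not included in this diff.

    import gradio as gr

    from app import process_image  # assumes the helpers in this diff live in app.py

    # Hypothetical wiring; gr.Image defaults to numpy arrays, matching the
    # np.ndarray input and annotated-image output described in the docstrings.
    demo = gr.Interface(
        fn=process_image,
        inputs=gr.Image(),
        outputs=gr.Image(),
    )

    if __name__ == "__main__":
        demo.launch()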