OnurKerimoglu committed on
Commit
004ba9c
·
1 Parent(s): b71b5ce

updated app: can now detect and label multiple faces in a given image

Browse files
Angry_48_48_3.png DELETED
Binary file (4.31 kB)
 
Disgust_48_48_3.png DELETED
Binary file (3.74 kB)
 
Fear_48_48_3.png DELETED
Binary file (3.64 kB)
 
Happy_48_48_3.png DELETED
Binary file (3.59 kB)
 
Neutral_48_48_3.png DELETED
Binary file (4.15 kB)
 
OnurA_CerenH.jpg ADDED
OnurH_CerenH.jpg ADDED
Sad_48_48_3.png DELETED
Binary file (3.59 kB)
 
Surprise_48_48_3.png DELETED
Binary file (3.55 kB)
 
app.py CHANGED
@@ -1,3 +1,4 @@
 
1
  import os
2
  import gradio as gr
3
  import numpy as np
@@ -8,23 +9,40 @@ from tensorflow.image import resize
8
  from PIL import Image
9
 
10
  #gradio interface
11
- def classify_image(image):
12
- input_arr = img_to_array(image)/255 #convert PIL object to numpy array and normalize
13
- input_arr_resh = resize(input_arr, (48, 48)).numpy()
14
- if model.channelno == 1:
15
- # Model expects inputs of shape (48,48,1)
16
- input_arr_resh_gray = input_arr_resh.mean(axis=2).reshape(48,48,1)
17
- predictions = model.predictor.predict(np.array([input_arr_resh_gray]))
18
- elif model.channelno == 3:
19
- # Model expects inputs of shape (48,48,3)
20
- input_arr_resh_4dims = np.expand_dims(input_arr_resh, axis=0)
21
- predictions = model.predictor.predict(input_arr_resh_4dims)
22
- pr_emotion = model.labeldict[predictions.argmax()]
23
- prob = predictions.max()*100
24
- returnstr = f'Prediction: {pr_emotion}, probability: {prob:4.1f}%'
25
- predictions_f = ['%s:%5.2f'%(model.labeldict[i],p*100) for i,p in enumerate(predictions[0])]
26
- print(predictions_f)
27
- return returnstr
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28
 
29
  class ModelClass:
30
  def __init__(self,name='EDA_CNN.h5'):
@@ -45,17 +63,12 @@ modeltouse = "MobileNet12blocks_wdgenaug_onrawdata_valacc063.h5"
45
 
46
  model = ModelClass(modeltouse)
47
 
48
- image = gr.inputs.Image(shape=(48,48))
49
- label = gr.outputs.Label()
50
- examples = ['Happy_48_48_%d.png'%model.channelno,
51
- 'Neutral_48_48_%d.png'%model.channelno,
52
- 'Fear_48_48_%d.png'%model.channelno,
53
- 'Angry_48_48_%d.png'%model.channelno,
54
- 'Sad_48_48_%d.png'%model.channelno,
55
- #'Disgust_48_48_%d.png'%model.channelno,
56
- 'Surprise_48_48_%d.png'%model.channelno]
57
-
58
- # image = Image.open('./Happy_48_48_%d.png'%model.channelno)
59
- # classify_image(image)
60
- intf = gr.Interface(fn=classify_image, inputs=image, outputs=label, examples=examples)
61
  intf.launch(inline=False)
 
1
+ import cv2
2
  import os
3
  import gradio as gr
4
  import numpy as np
 
9
  from PIL import Image
10
 
11
  #gradio interface
12
+ def process_image(img):
13
+ cv2_face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades +
14
+ 'haarcascade_frontalface_default.xml')
15
+ img, _ = annotate_objects_in_image(img,
16
+ cv2_face_cascade,
17
+ model.labeldict,
18
+ model.predictor)
19
+
20
+ return(img)
21
+
22
+ def annotate_objects_in_image(img, obj_cascade, labeldict, model):
23
+ #img = cv2.flip(img, 1)
24
+ gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
25
+ objects = obj_cascade.detectMultiScale(gray, 1.1, 4)
26
+ model_result = ""
27
+ try:
28
+ for (x, y, w, h) in objects:
29
+ face = img[y-50:y+h+50, x-50:x+w+50]
30
+ model_result = predict_emotion_from_image(face, labeldict, model)
31
+ font = cv2.FONT_HERSHEY_SIMPLEX
32
+ cv2.putText(img, model_result, (x, y), font, 1, (0, 0, 255), 2, cv2.LINE_AA)
33
+ cv2.rectangle(img, (x-25, y-25), (x+w+25, y+h+25), (0, 255, 0), 2)
34
+ except Exception as ex:
35
+ model_result = ""
36
+ return img, model_result
37
+
38
+ def predict_emotion_from_image (face_raw, labeldict, model):
39
+ face = cv2.resize(face_raw, (48, 48))
40
+ face_array = np.array(face)/255
41
+ face_array_4dims = np.expand_dims(face_array, axis=0)
42
+ prediction_vec = model.predict(face_array_4dims)
43
+ prediction = np.argmax(prediction_vec)
44
+ emotion = labeldict[prediction]
45
+ return (emotion)
46
 
47
  class ModelClass:
48
  def __init__(self,name='EDA_CNN.h5'):
 
63
 
64
  model = ModelClass(modeltouse)
65
 
66
+ image_in = gr.inputs.Image() #shape=(48,48)
67
+ image_out = gr.inputs.Image()
68
+ examples = ['OnurH_CerenH.jpg', 'OnurA_CerenH.jpg']
69
+ #fname = 'Onur_happy.jpg'
70
+ #image = cv2.imread(fname)
71
+ #process_image(image, model)
72
+
73
+ intf = gr.Interface(fn=process_image, inputs=image_in, outputs=image_out, examples=examples)
 
 
 
 
 
74
  intf.launch(inline=False)