OnurKerimoglu committed on
Commit
e98339d
·
1 Parent(s): b0e67da

image resizing for big images

Browse files
Files changed (1) hide show
  1. app.py +8 -1
app.py CHANGED
@@ -12,6 +12,7 @@ from PIL import Image
12
  def process_image(img):
13
  cv2_face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades +
14
  'haarcascade_frontalface_default.xml')
 
15
  img, _ = annotate_objects_in_image(img,
16
  cv2_face_cascade,
17
  model.labeldict,
@@ -20,7 +21,6 @@ def process_image(img):
20
  return(img)
21
 
22
  def annotate_objects_in_image(img, obj_cascade, labeldict, model):
23
- #img = cv2.flip(img, 1)
24
  gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
25
  objects = obj_cascade.detectMultiScale(gray, 1.1, 4)
26
  model_result = ""
@@ -44,6 +44,13 @@ def predict_emotion_from_image (face_raw, labeldict, model):
44
  emotion = labeldict[prediction]
45
  return (emotion)
46
 
 
 
 
 
 
 
 
47
  class ModelClass:
48
  def __init__(self,name='EDA_CNN.h5'):
49
  self.name = name
 
12
  def process_image(img):
13
  cv2_face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades +
14
  'haarcascade_frontalface_default.xml')
15
+ img = resize_img(img)
16
  img, _ = annotate_objects_in_image(img,
17
  cv2_face_cascade,
18
  model.labeldict,
 
21
  return(img)
22
 
23
  def annotate_objects_in_image(img, obj_cascade, labeldict, model):
 
24
  gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
25
  objects = obj_cascade.detectMultiScale(gray, 1.1, 4)
26
  model_result = ""
 
44
  emotion = labeldict[prediction]
45
  return (emotion)
46
 
47
+ def resize_img(img):
48
+ if img.shape[0]> 960:
49
+ img_AR = img.shape[0]/img.shape[1]
50
+ #print(f'img shape: {img.shape}; aspect ratio: {img_AR}')
51
+ img = cv2.resize(img, (640, int(np.round(640*img_AR))))
52
+ return img
53
+
54
  class ModelClass:
55
  def __init__(self,name='EDA_CNN.h5'):
56
  self.name = name