Spaces:
Sleeping
Sleeping
Upload 15 files
Browse files- .gitattributes +1 -0
- app.py +79 -0
- banner_emotions.jpg +0 -0
- emotion_detector/fingerprint.pb +3 -0
- emotion_detector/keras_metadata.pb +3 -0
- emotion_detector/saved_model.pb +3 -0
- emotion_detector/variables/variables.data-00000-of-00001 +3 -0
- emotion_detector/variables/variables.index +0 -0
- haarcascade_frontalface_default.xml +0 -0
- requiements.txt +0 -0
- test_images/angry1.jpg +0 -0
- test_images/angry2.jpg +0 -0
- test_images/happy1.jpg +0 -0
- test_images/happy2.jpg +0 -0
- test_images/sad1.jpg +0 -0
- test_images/sad2.jpg +0 -0
.gitattributes
CHANGED
|
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
emotion_detector/variables/variables.data-00000-of-00001 filter=lfs diff=lfs merge=lfs -text
|
app.py
ADDED
|
@@ -0,0 +1,79 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# import libraries
|
| 2 |
+
import streamlit as st
|
| 3 |
+
from streamlit_image_select import image_select
|
| 4 |
+
import tensorflow as tf
|
| 5 |
+
import numpy as np
|
| 6 |
+
import cv2
|
| 7 |
+
from PIL import Image
|
| 8 |
+
|
| 9 |
+
# --- Page header -----------------------------------------------------
st.title('Emotion Detector App')

# Decorative banner shown under the title.
st.image("banner_emotions.jpg")

# Gallery of bundled sample images; `img` holds the path of the
# currently selected one.
img = image_select(
    label="Select an image to run model",
    images=[
        "test_images/angry1.jpg",
        "test_images/angry2.jpg",
        "test_images/happy1.jpg",
        "test_images/happy2.jpg",
        "test_images/sad1.jpg",
        "test_images/sad2.jpg",
    ],
)

# Optional user-supplied image file.
uploaded_img = st.file_uploader("Upload an image file",
                                type=["png", "jpg", "jpeg"])
| 27 |
+
|
| 28 |
+
|
| 29 |
+
# Load the saved model once and reuse it across Streamlit reruns.
@st.cache_resource
def cache_model(model_add):
    """Load and return the Keras model stored at ``model_add``."""
    return tf.keras.models.load_model(model_add)
|
| 34 |
+
|
| 35 |
+
# Directory containing the exported emotion model.
model = cache_model("emotion_detector")

# Button that triggers inference on the selected/uploaded image.
predict = st.button("Predict")

# Haar-cascade face detector plus the labels the model predicts,
# in the model's output-index order.
face_detector = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
class_names = ["Angry", "Happy", "Sad"]
|
| 43 |
+
|
| 44 |
+
def model_pred(model, image):
    """Detect faces in *image*, classify each face's emotion, and draw
    the result (bounding box + label) onto the image.

    Parameters
    ----------
    model : Keras model returning per-class probabilities.
    image : RGB ndarray, as produced by ``np.array(Image.open(...))``.

    Returns
    -------
    The same ndarray, annotated in place with one rectangle and one
    label per detected face; returned unchanged when no face is found.
    """
    # Bug fix: the app feeds PIL-decoded images, which are RGB — the
    # original used COLOR_BGR2GRAY, swapping the R/B channel weights.
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    results = face_detector.detectMultiScale(gray,
                                             scaleFactor=1.05,
                                             minNeighbors=10,
                                             minSize=(100, 100))
    # detectMultiScale returns an empty sequence when nothing matches,
    # so no explicit emptiness check is needed.
    for x, y, w, h in results:
        # Crop the face and resize to the model's expected input size.
        # NOTE(review): crop is passed unnormalized (0-255); assumes the
        # model was trained on raw pixel values — confirm.
        img_crp = image[y:y + h, x:x + w]
        img_crp = cv2.resize(img_crp, (350, 350))
        y_pred_prob = model.predict(tf.expand_dims(img_crp, axis=0))
        y_pred = np.argmax(y_pred_prob, axis=-1)
        label = class_names[int(y_pred)]
        # Bug fix: the probability was displayed as e.g. "0.87%"; scale
        # to a true percentage before formatting.
        confidence = round(float(np.max(y_pred_prob)) * 100, 2)
        cv2.rectangle(image, (x, y), (x + w, y + h),
                      color=(0, 255, 0),
                      thickness=10)
        cv2.putText(image, f"{label},{confidence}%",
                    (x, y + h), cv2.FONT_HERSHEY_COMPLEX, 2,
                    (0, 255, 255), 2)
    return image
|
| 65 |
+
|
| 66 |
+
if predict:
    # Bug fix: a gallery image is always selected, so Predict should
    # run even without an upload; previously this branch showed an
    # error message instead of using the gallery pick. An upload, when
    # present, takes precedence.
    if uploaded_img:
        # .convert("RGB") drops any alpha channel (PNG uploads can be
        # RGBA), guaranteeing a 3-channel array for the face detector.
        img_array = np.array(Image.open(uploaded_img).convert("RGB"))
    else:
        img_array = np.array(Image.open(img).convert("RGB"))
    st.image(model_pred(model, img_array))

else:
    # No button press yet: show a live prediction for the current
    # gallery selection (preserves the original default behavior).
    image_array = np.array(Image.open(img).convert("RGB"))
    st.image(model_pred(model, image_array))
|
banner_emotions.jpg
ADDED
|
emotion_detector/fingerprint.pb
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1240ba33fd6858d6055483ccd1a0f72df74f12f2cf32c2ef9f9477d7bd3d77e6
|
| 3 |
+
size 58
|
emotion_detector/keras_metadata.pb
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:4bb49963076dcfcf1ead2590b150cd053270147eafd78b1ce2ddc8fd40a87c20
|
| 3 |
+
size 897946
|
emotion_detector/saved_model.pb
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:6b7b5b2aa1b77ad3e1b66d04c83c75de5c9b438a6284d8e5c5298632c7fa0654
|
| 3 |
+
size 4927861
|
emotion_detector/variables/variables.data-00000-of-00001
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:8ca3fc4921e7cee9e2c79447a7440e0a50e378d24e13b12b5fd3f66b643593cf
|
| 3 |
+
size 60187429
|
emotion_detector/variables/variables.index
ADDED
|
Binary file (24 kB). View file
|
|
|
haarcascade_frontalface_default.xml
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
requiements.txt
ADDED
|
File without changes
|
test_images/angry1.jpg
ADDED
|
test_images/angry2.jpg
ADDED
|
test_images/happy1.jpg
ADDED
|
test_images/happy2.jpg
ADDED
|
test_images/sad1.jpg
ADDED
|
test_images/sad2.jpg
ADDED
|