Commit 915ecec
Parent(s): ce6d29d

Added app files

Changed files:
- .gitignore +1 -0
- DockerFile +22 -0
- ML_Models/emotion_detection_imagenet_model.h5 +3 -0
- ML_Models/emotion_detection_mobilenet_model.h5 +3 -0
- ML_Models/emotion_detection_model_base_cnn.h5 +3 -0
- ML_Models/emotion_detection_vgg16_model.h5 +3 -0
- README.md +20 -1
- app.py +163 -0
- function.py +31 -0
- requirements.txt +5 -0
- static/angry.jpg +0 -0
- static/bg_texture.jpg +0 -0
- static/cropped_face.jpg +0 -0
- static/disgust.jpg +0 -0
- static/face_with_bounding_box.jpg +0 -0
- static/face_with_box.jpg +0 -0
- static/fear.jpg +0 -0
- static/happy.jpg +0 -0
- static/image.jpg +0 -0
- static/innov8_logo.png +0 -0
- static/neutral.jpg +0 -0
- static/sad.jpg +0 -0
- static/style.css +380 -0
- static/surprise.jpg +0 -0
- templates/index.html +167 -0
.gitignore
ADDED
@@ -0,0 +1 @@
+# ML_models/*
DockerFile
ADDED
@@ -0,0 +1,22 @@
+# Use a compatible Python base image
+FROM python:3.9-slim
+
+# Set environment variables
+ENV PYTHONDONTWRITEBYTECODE=1
+ENV PYTHONUNBUFFERED=1
+
+# Set the working directory
+WORKDIR /app
+
+# Copy requirements and install dependencies
+COPY requirements.txt /app/
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Copy application files
+COPY . /app
+
+# Expose the required port
+EXPOSE 7860
+
+# Run the app with Flask on port 7860
+CMD ["flask", "run", "--host=0.0.0.0", "--port=7860"]
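Note on the CMD: with no FLASK_APP set, `flask run` falls back to looking for app.py in the working directory, which is why the command works as written; 7860 is the port Hugging Face Spaces expects. A minimal sketch for smoke-testing a locally running container, assuming it was started with the host port mapped (e.g. `docker run -p 7860:7860 <image>`):

    # Hypothetical smoke test against a locally running container; stdlib only.
    import urllib.request

    with urllib.request.urlopen("http://localhost:7860/") as resp:
        print(resp.status)              # expect 200
        print(resp.read(120).decode())  # start of the rendered index page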
ML_Models/emotion_detection_imagenet_model.h5
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:33e2efd3ab5fb06e1a54a87df4c6c23200d9b922773736e699dfbb3f9f7909ea
+size 62142968
ML_Models/emotion_detection_mobilenet_model.h5
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:241fca415049825dd51e14832d23309790f8735deaf383afed412c999a5d878c
+size 34281976
ML_Models/emotion_detection_model_base_cnn.h5
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4856f5d08c2411ef0ee2544e268a87dc9f092776b0db989a2449635255ab392e
+size 13802464
ML_Models/emotion_detection_vgg16_model.h5
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5d3ab5f8bfed8c0563463eb0a95412e190300913bf2529a6438756b91b715dea
+size 121987768
README.md
CHANGED
@@ -8,4 +8,23 @@ pinned: false
 license: gpl-2.0
 ---
 
-
+# Emotion Recognition from Facial Expression
+### Samsung Innovation Campus 2024 Capstone Project
+Group 12 Innov8
+
+## Motivation
+Emotion recognition can be a valuable tool for enhancing both human and machine understanding of emotional states, aiding in fields such as mental health diagnostics, where understanding non-verbal cues is critical. The ability to recognize emotions accurately could also improve customer interactions in automated services by making responses more empathetic.
+
+## Objective
+To develop a high-accuracy CNN model that recognizes and categorizes a range of emotions from facial expressions in real time, deployed in a web application that lets users upload images and receive emotion classifications, bridging the gap between advanced AI models and accessible, practical tools.
+
+## Team Members
+- Akshat Mistry
+- Abhinav Kottayil
+- Raj Dinesh Jagasia
+- Ishita Jagasia
+- Alyssa Ang De Guzman
+- Muhammed Ameen
+
+## App Screenshot
+
app.py
ADDED
@@ -0,0 +1,163 @@
+from flask import Flask, render_template, request, url_for
+from tensorflow.keras.preprocessing.image import img_to_array, load_img
+import numpy as np
+from keras.models import load_model
+import cv2
+import os
+
+# Define paths for the models
+MODEL_PATHS = {
+    "Scratch CNN Model": './ML_Models/emotion_detection_model_base_cnn.h5',
+    "ImageNet Model": './ML_Models/emotion_detection_imagenet_model.h5',
+    "MobileNet Model": './ML_Models/emotion_detection_mobilenet_model.h5',
+    "VGG16 Model": './ML_Models/emotion_detection_vgg16_model.h5'
+}
+
+
+def extract_face(image_path, output_path="./static/cropped_face.jpg"):
+    # Load the Haar cascade for face detection
+    cascade_path = os.path.join(
+        cv2.data.haarcascades, "haarcascade_frontalface_default.xml")
+    face_cascade = cv2.CascadeClassifier(cascade_path)
+
+    # Read the image in grayscale mode
+    img = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
+
+    if img is None:
+        print("Could not read the image.")
+        return "No face detected."
+
+    # Save the grayscale image (prefix the file name, not the whole path)
+    gray_output_path = os.path.join(os.path.dirname(output_path), "gray_" + os.path.basename(output_path))
+    cv2.imwrite(gray_output_path, img)
+    print(f"Grayscale image saved as {gray_output_path}")
+
+    # Detect faces in the grayscale image
+    faces = face_cascade.detectMultiScale(
+        img, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
+
+    # Check if a face is detected
+    if len(faces) == 0:
+        print("No face detected.")
+        return "No face detected."
+
+    # Extract the first detected face (assuming there's only one face)
+    x, y, w, h = faces[0]
+    face = img[y:y+h, x:x+w]
+
+    # Resize the face to 48x48 pixels
+    face_resized = cv2.resize(face, (48, 48))
+
+    # Save the cropped and resized grayscale face
+    cv2.imwrite(output_path, face_resized)
+    print(f"Face extracted, resized to 48x48, and saved as {output_path}")
+
+
+def preprocess_image(image_path, model_name, target_size=(48, 48)):
+    # Load the image in grayscale
+    image = load_img(image_path, color_mode='grayscale',
+                     target_size=target_size)
+    image = img_to_array(image)  # Convert to array
+    image = image / 255.0  # Normalize pixel values
+
+    # Repeat channels if model is not base_cnn
+    if model_name != "Scratch CNN Model":
+        image = np.repeat(image, 3, axis=-1)
+
+    # Add batch dimension
+    image = np.expand_dims(image, axis=0)
+    return image
+
+
+def predict_emotion(image_path, model, model_name, emotion_list):
+    image = preprocess_image(image_path, model_name)
+    prediction = model.predict(image)
+    predicted_emotion = emotion_list[np.argmax(prediction)]
+    return predicted_emotion
+
+
+def draw_bounding_box(image_path, output_path="./static/face_with_bounding_box.jpg"):
+    # Load the Haar cascade for face detection
+    cascade_path = os.path.join(
+        cv2.data.haarcascades, "haarcascade_frontalface_default.xml")
+    face_cascade = cv2.CascadeClassifier(cascade_path)
+
+    # Read the image
+    img = cv2.imread(image_path)
+    if img is None:
+        print("Could not read the image.")
+        return "No face detected."
+
+    # Convert the image to grayscale for face detection
+    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+
+    # Detect faces in the grayscale image
+    faces = face_cascade.detectMultiScale(
+        gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
+
+    # Check if a face is detected
+    if len(faces) == 0:
+        print("No face detected.")
+        return "No face detected."
+
+    # Draw a bounding box around the first detected face
+    x, y, w, h = faces[0]
+    cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
+
+    # Save the image with the bounding box
+    cv2.imwrite(output_path, img)
+    print(f"Image with bounding box saved as {output_path}")
+    return f"Image with bounding box saved as {output_path}"
+
+
+app = Flask(__name__)
+emotion_list = ['angry', 'disgust', 'fear',
+                'happy', 'neutral', 'sad', 'surprise']
+
+
+def getResult(selected_option, image):
+    image.save("./static/image.jpg")
+    model_path = MODEL_PATHS[selected_option]
+    model = load_model(model_path)
+
+    bounding_face_result = draw_bounding_box("./static/image.jpg")
+    if bounding_face_result == "No face detected.":
+        return bounding_face_result, [], None
+
+    extract_face_result = extract_face(
+        "./static/image.jpg", output_path="./static/cropped_face.jpg")
+    if extract_face_result == "No face detected.":
+        return extract_face_result, [], None
+
+    image_path = './static/cropped_face.jpg'
+    predicted_emotion = predict_emotion(
+        image_path, model, selected_option, emotion_list)
+
+    images = [url_for('static', filename='face_with_bounding_box.jpg'),
+              url_for('static', filename='cropped_face.jpg')]
+
+    return "The predicted emotion is:", images, predicted_emotion
+
+
+@app.route('/', methods=['GET', 'POST'])
+def index():
+    result = None
+    images = []
+    predicted_emotion = None
+
+    dropdown_options = ["Select a model", "Scratch CNN Model", "ImageNet Model",
+                        "MobileNet Model", "VGG16 Model"]
+
+    if request.method == 'POST':
+        selected_option = request.form.get('option')
+        image = request.files.get('image')
+        if selected_option != "Select a model":
+            result, images, predicted_emotion = getResult(
+                selected_option, image)
+        return render_template('index.html', result=result, dropdown_options=dropdown_options, images=images, predicted_emotion=predicted_emotion, model_name=selected_option)
+
+    return render_template('index.html', result=result, dropdown_options=dropdown_options, images=[], predicted_emotion=None, model_name="Select")
+
+
+if __name__ == '__main__':
+    app.run(host='0.0.0.0', port=7860, debug=True)
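The channel-repeat step in preprocess_image is what lets a 48x48 grayscale crop feed the ImageNet-pretrained backbones, which expect three-channel input. A minimal shape check, with a random array standing in for a real image:

    import numpy as np

    # Stand-in for the (48, 48, 1) array that load_img + img_to_array produce
    image = np.random.rand(48, 48, 1)
    image = np.repeat(image, 3, axis=-1)   # (48, 48, 3): channel replicated for pretrained models
    image = np.expand_dims(image, axis=0)  # (1, 48, 48, 3): batch of one
    print(image.shape)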
function.py
ADDED
@@ -0,0 +1,31 @@
+from tensorflow.keras.preprocessing.image import img_to_array, load_img
+import numpy as np
+
+
+def preprocess_image(image_path, target_size=(48, 48)):
+    # Load the image
+    image = load_img(image_path, color_mode='grayscale',
+                     target_size=target_size)
+    # Convert the image to array
+    image = img_to_array(image)
+    # Normalize the pixel values
+    image = image / 255.0
+    # Reshape the image to add batch dimension
+    image = np.expand_dims(image, axis=0)
+    return image
+
+
+def predict_emotion(image_path, model, emotion_list):
+    # Preprocess the image
+    image = preprocess_image(image_path)
+    # Make prediction
+    prediction = model.predict(image)
+    # Decode the prediction
+    predicted_emotion = emotion_list[np.argmax(prediction)]
+    return predicted_emotion
+
+
+image_path = 'cropped_face.jpg'
+
+# Predict the emotion (sketch; assumes a model loaded via keras.models.load_model)
+# print(predict_emotion(image_path, model, emotion_list))
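Note that function.py's preprocess_image keeps the single grayscale channel, so it matches the scratch CNN's (48, 48, 1) input; the transfer-learning models need the three-channel variant in app.py. A hypothetical standalone use, assuming the model file and a cropped face exist at these paths and the script runs from the repo root:

    from keras.models import load_model
    from function import predict_emotion

    emotions = ['angry', 'disgust', 'fear', 'happy', 'neutral', 'sad', 'surprise']
    model = load_model('./ML_Models/emotion_detection_model_base_cnn.h5')
    print(predict_emotion('./static/cropped_face.jpg', model, emotions))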
requirements.txt
ADDED
@@ -0,0 +1,5 @@
+flask
+tensorflow
+keras
+numpy
+opencv-python
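The requirements are unpinned, so pip resolves the newest compatible versions at image build time. A quick sanity check that the installed set imports cleanly (opencv-python is imported as cv2):

    # Verify the five dependencies import and report their resolved versions
    import flask, tensorflow, keras, numpy, cv2

    print(flask.__version__, tensorflow.__version__,
          keras.__version__, numpy.__version__, cv2.__version__)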
static/angry.jpg
ADDED
static/bg_texture.jpg
ADDED
static/cropped_face.jpg
ADDED
static/disgust.jpg
ADDED
static/face_with_bounding_box.jpg
ADDED
static/face_with_box.jpg
ADDED
static/fear.jpg
ADDED
static/happy.jpg
ADDED
static/image.jpg
ADDED
static/innov8_logo.png
ADDED
static/neutral.jpg
ADDED
static/sad.jpg
ADDED
static/style.css
ADDED
@@ -0,0 +1,380 @@
+/* Floating container style */
+.floating-container {
+    position: fixed;
+    top: 0;
+    left: 0;
+    width: 100%;
+    height: 100%;
+    pointer-events: none;
+    /* Allows clicks to pass through */
+    overflow: hidden;
+    z-index: -1;
+    /* Default: behind content */
+}
+
+/* Loading overlay style with a lower z-index */
+.loading-overlay {
+    position: fixed;
+    top: 0;
+    left: 0;
+    width: 100%;
+    height: 100%;
+    background: rgba(255, 255, 255, 0.8);
+    /* Semi-transparent background */
+    display: flex;
+    align-items: center;
+    justify-content: center;
+    z-index: 5;
+    /* Ensure it’s behind the floating elements during loading */
+}
+
+/* Hoverable floating elements style */
+.floating-element {
+    position: absolute;
+    bottom: -50px;
+    width: 100px;
+    height: 20px;
+    background-color: #FCBF49;
+    border-radius: 20%;
+    animation: floatAnimation 8s linear infinite;
+    opacity: 0.8;
+}
+
+
+/* Define animation keyframes */
+@keyframes floatAnimation {
+    0% {
+        transform: translateY(0) scale(1);
+        /* Starting point */
+        opacity: 1;
+    }
+
+    100% {
+        transform: translateY(-100vh) scale(0.5);
+        /* Move up and shrink */
+        opacity: 0;
+        /* Fade out */
+    }
+}
+
+
+
+/* Spinner styles */
+.spinner {
+    width: 50px;
+    height: 50px;
+    border: 5px solid #FCBF49;
+    border-top: 5px solid #4a86c5;
+    border-radius: 50%;
+    animation: spin 1s linear infinite;
+}
+
+/* Spinner animation */
+@keyframes spin {
+    0% {
+        transform: rotate(0deg);
+    }
+
+    100% {
+        transform: rotate(360deg);
+    }
+}
+
+
+
+
+body {
+    font-family: Arial, sans-serif;
+    background-color: #F0F0EE;
+    background-image: repeating-radial-gradient(circle at 0 0, #ccc, #ccc 2px, transparent 2px, transparent 20px);
+    background-size: 20px 20px;
+    margin: 0;
+    padding: 0;
+    display: flex;
+    justify-content: center;
+    align-items: center;
+    width: 100vw;
+    height: 100vh;
+    color: #283C3C;
+}
+
+
+.container {
+    max-width: 100%;
+    /* Change to fill the entire width */
+    padding: 0;
+    /* Remove padding */
+    background-color: transparent;
+    /* Remove background color to blend with body */
+    box-shadow: none;
+    /* Remove the shadow */
+    border-radius: 0;
+    /* No rounding to match the rest of the page */
+    text-align: center;
+    margin: 0;
+    z-index: 1;
+}
+
+header {
+    margin-bottom: 20px;
+}
+
+header .logo {
+    width: 150px;
+    /* Adjust width according to logo size */
+    /* margin-top: 200px;
+    margin-bottom: 10px; */
+    /* Spacing below the logo */
+    position: absolute;
+    top: 10px;
+    left: 30px;
+}
+
+header h2 {
+    color: #B83A14;
+    /* Red theme color for headings */
+    margin-bottom: 20px;
+    font-size: 1.8rem;
+}
+
+label {
+    display: block;
+    margin: 10px 0 5px;
+    color: #283C3C;
+    /* Black-green theme for labels */
+    font-weight: bold;
+    font-size: larger;
+}
+
+.form-group {
+    margin-bottom: 15px;
+}
+
+select {
+    width: 520px;
+    padding: 15px;
+    border-radius: 10px;
+    /* Rounded corners */
+    border: 1px solid #ccc;
+    font-size: 1rem;
+    background-color: #fff;
+    -webkit-appearance: none;
+    /* Remove default arrow for a custom arrow look */
+    -moz-appearance: none;
+    /* For Firefox */
+    appearance: none;
+    position: relative;
+    /* To position the arrow */
+    padding-right: 30px;
+    /* Extra padding to make space for the arrow */
+}
+
+select::after {
+    content: '';
+    position: absolute;
+    right: 10px;
+    top: 50%;
+    transform: translateY(-50%);
+    border-left: 6px solid transparent;
+    border-right: 6px solid transparent;
+    border-top: 6px solid #B83A14;
+    pointer-events: none;
+}
+
+input[type="file"] {
+    width: 500px;
+    padding: 10px;
+    border-radius: 10px;
+    /* Rounded corners */
+    border: 1px solid #ccc;
+    font-size: 1rem;
+    background-color: #fff;
+    cursor: pointer;
+}
+
+.form-group {
+    display: flex;
+    flex-direction: column;
+    align-items: center;
+}
+
+
+input[type="file"]::file-selector-button {
+    background-color: #FCBF49;
+    /* Set color similar to button */
+    color: #fff;
+    border: none;
+    border-radius: 8px;
+    /* Rounded corners */
+    padding: 8px 12px;
+    cursor: pointer;
+    transition: background-color 0.3s;
+}
+
+input[type="file"]::file-selector-button:hover {
+    background-color: #B83A14;
+    /* Darken on hover */
+}
+
+button {
+    background-color: #FCBF49;
+    /* Yellow theme color for buttons */
+    color: #fff;
+    border: none;
+    cursor: pointer;
+    font-weight: bold;
+    transition: background-color 0.3s;
+    border-radius: 30px 62px;
+    /* Asymmetric radii give the button a rounded, pill-like shape */
+    padding: 25px 62px;
+    /* Increase padding for a modern look */
+}
+
+button:hover {
+    background-color: #B83A14;
+    /* Darken to red on hover */
+}
+
+.result {
+    margin-top: 5px;
+    margin-bottom: 5px;
+    background-color: #fff3cd;
+    /* Light yellow background for the result */
+    border: 1px solid #ffeeba;
+    border-radius: 30px;
+    text-align: center;
+    /* Center align the result */
+}
+
+.result h3 {
+    color: #B83A14;
+    font-size: 1.8rem;
+    margin-bottom: 5px;
+}
+
+.emotion-display {
+    display: flex;
+    flex-direction: column;
+    align-items: center;
+    margin-top: 10px;
+}
+
+.emotion-text {
+    font-size: 2rem;
+    font-weight: bold;
+    color: #4a86c5;
+    margin-bottom: 10px;
+    font-family: 'Comic Sans MS', cursive;
+}
+
+.emotion-model {
+    font-size: 1.5rem;
+    font-weight: bold;
+    color: #293747;
+    margin-bottom: 10px;
+    font-family: 'Comic Sans MS', cursive;
+}
+
+.emotion-image {
+    width: 150px;
+    /* Size for larger display */
+    height: 150px;
+    object-fit: contain;
+    /* Contain the image without cropping */
+    padding: 10px;
+    /* Optional: Add padding around the image */
+    border: 2px solid #4a86c5;
+    border-radius: 10px;
+    /* Optional: Soft border rounding */
+    background-color: #ffffff;
+    transition: transform 0.3s ease;
+
+}
+
+.emotion-image:hover {
+    transform: scale(1.05);
+}
+
+
+.result p {
+    color: #283C3C;
+    /* Black-green for result text */
+    font-weight: bold;
+    font-size: 1.2rem;
+}
+
+.image-section {
+    margin-top: 20px;
+    display: flex;
+    flex-wrap: wrap;
+    gap: 15px;
+    justify-content: center;
+}
+
+.image-container {}
+
+.form-image {
+    height: 150px;
+    width: auto;
+    object-fit: cover;
+    border-radius: 5px;
+    border: 2px solid #90151c;
+    /* Red border for images */
+    transition: transform 0.3s ease;
+}
+
+.form-image:hover {
+    transform: scale(1.8);
+    /* Slightly scale up the image on hover */
+}
+
+/* technologies-used banner, pinned bottom right */
+.technologies-used {
+    position: fixed;
+    bottom: 30px;
+    right: 30px;
+    background-color: #f5c055;
+    color: #303030;
+    padding: 15px 15px;
+    font-size: 1.2rem;
+    z-index: 10;
+    border-radius: 25px;
+    /* shadow */
+    box-shadow: 0 0 20px rgba(0, 0, 0, 0.2);
+    /* transition */
+    transition: transform 0.3s ease;
+    height: 300px;
+    width: 250px;
+}
+
+
+.team-members {
+    position: fixed;
+    bottom: 30px;
+    left: 30px;
+    background-color: #9fcafc;
+    color: #303030;
+    padding: 15px 15px;
+    font-size: 1.2rem;
+    z-index: 10;
+    border-radius: 25px;
+    /* shadow */
+    box-shadow: 0 0 20px rgba(0, 0, 0, 0.2);
+    /* transition */
+    transition: transform 0.3s ease;
+    height: 300px;
+    width: 250px;
+}
+
+/* technologies-used points */
+.technologies-used ul,
+.team-members ul {
+    list-style-type: none;
+    padding: 0;
+    margin: 0;
+}
+
+.technologies-used ul li,
+.team-members ul li {
+    margin-bottom: 5px;
+}
static/surprise.jpg
ADDED
templates/index.html
ADDED
@@ -0,0 +1,167 @@
+<!DOCTYPE html>
+<html lang="en">
+
+<head>
+    <meta charset="UTF-8">
+    <meta name="viewport" content="width=device-width, initial-scale=1.0">
+    <title>Emotion Detection App</title>
+    <link rel="stylesheet" href="{{ url_for('static', filename='style.css') }}">
+</head>
+
+<body>
+    <div class="container">
+        <header>
+            <img src="{{ url_for('static', filename='innov8_logo.png') }}" alt="Logo" class="logo"
+                style="width: 350px; height: auto;">
+            <h2>Emotion Detection From Facial Expressions Model</h2>
+        </header>
+        <form method="POST" enctype="multipart/form-data">
+            <div class="form-group">
+                <label for="option">Choose the ML Model:</label>
+                <select name="option" id="option" required>
+                    {% for option in dropdown_options %}
+                    <option value="{{ option }}">{{ option }}</option>
+                    {% endfor %}
+                </select>
+            </div>
+
+
+            <div class="floating-container"></div>
+
+            <div class="form-group">
+                <label for="image">Upload Face Image:</label>
+                <input type="file" name="image" id="image" accept="image/*" required>
+            </div>
+
+            <button type="submit">Submit</button>
+        </form>
+
+        <div class="image-section">
+            {% for img in images %}
+            <div class="image-container">
+                <img src="{{ img }}" alt="Image {{ loop.index }}" class="form-image">
+                <p>
+                    {{
+                    img.split('/').pop().split('.')[0].replace('_', ' ').capitalize()
+                    }}
+                </p>
+            </div>
+            {% endfor %}
+        </div>
+
+        <!-- Loading overlay -->
+        <div class="loading-overlay" id="loadingOverlay" style="display: none;">
+            <div class="spinner"></div>
+        </div>
+
+        <!-- Result display area -->
+        <div id="result" style="display: none;">
+            <p>Your result will appear here after loading.</p>
+        </div>
+
+
+        {% if result %}
+        <div class="result">
+            <h3>{{ result }}</h3>
+
+            {% if predicted_emotion %}
+            <div class="emotion-display">
+                <span class="emotion-text">{{ predicted_emotion.capitalize() }}</span>
+                <img src="{{ url_for('static', filename=predicted_emotion + '.jpg') }}"
+                    alt="{{ predicted_emotion }} image" class="emotion-image">
+                <span class="emotion-model">Prediction made with {{ model_name.capitalize() }}!</span>
+            </div>
+            {% endif %}
+        </div>
+        {% endif %}
+
+        <div class="technologies-used">
+            <h3>Technologies Used:</h3>
+            <ul>
+                <li>Python</li>
+                <li>Flask</li>
+                <li>HTML</li>
+                <li>CSS</li>
+                <li>JavaScript</li>
+                <li>TensorFlow</li>
+                <li>OpenCV</li>
+                <li>Numpy</li>
+
+            </ul>
+        </div>
+        <div class="team-members">
+            <h3>Team Members:</h3>
+            <ul>
+                <li>Akshat Mistry</li>
+                <li>Abhinav Kottayil</li>
+                <li>Raj Jagasia</li>
+                <li>Ishita Jagasia</li>
+                <li>Alyssa Guzman</li>
+                <li>Muhammed Ameen</li>
+                <li></li>
+                <li></li>
+            </ul>
+            <h3>Group 12: Innov8</h3>
+        </div>
+    </div>
+    <script>
+        function createFloatingElement(duration = 8) {
+            const floatingContainer = document.querySelector('.floating-container');
+            const element = document.createElement('div');
+            element.classList.add('floating-element');
+
+            // Randomize position, size, and animation duration
+            const randomX = Math.floor(Math.random() * 100); // Random horizontal position
+            const randomSize = Math.random() * 20 + 10; // Random size between 10px and 30px
+            const randomDuration = Math.random() * 3 + duration; // Set faster duration for loading effect
+
+            // Apply randomized styles
+            element.style.left = `${randomX}%`;
+            element.style.width = `${randomSize}px`;
+            element.style.height = `${randomSize}px`;
+            element.style.animationDuration = `${randomDuration}s`;
+
+            // Append to container and remove after animation
+            floatingContainer.appendChild(element);
+            setTimeout(() => {
+                floatingContainer.removeChild(element);
+            }, randomDuration * 1000);
+        }
+
+        // Regularly generate floating elements every second
+        let floatingInterval = setInterval(() => createFloatingElement(), 1000);
+
+        function showLoading() {
+            const loadingOverlay = document.getElementById('loadingOverlay');
+            const result = document.getElementById('result');
+            const floatingContainer = document.querySelector('.floating-container');
+
+            // Show loading overlay and adjust z-index for floating elements
+            loadingOverlay.style.display = 'flex';
+            floatingContainer.style.zIndex = '10'; // Bring floating elements in front
+
+            // Create 100 floating elements for a filled effect
+            for (let i = 0; i < 100; i++) {
+                createFloatingElement(4); // Shorter duration for faster movement
+            }
+
+            // After 2 seconds, hide loading overlay and revert z-index
+            setTimeout(() => {
+                loadingOverlay.style.display = 'none';
+                floatingContainer.style.zIndex = '-1'; // Reset to original z-index
+                result.style.display = 'block';
+            }, 2000);
+        }
+
+        // Event listener for form submission
+        document.querySelector("form").addEventListener("submit", function (event) {
+            event.preventDefault(); // Hold the native submit so the loading effect can play
+            showLoading(); // Show loading effect
+            setTimeout(() => this.submit(), 2000); // Submit form after loading is done
+        });
+    </script>
+
+
+</body>
+
+</html>