Upload 5 files
Browse files:
- app.py (+53 lines)
- index.html (+120 lines)
- script.js (+98 lines)
- styles.css (+45 lines)
- unique_face_expression_model_.h5 (+3 lines, Git LFS pointer)
app.py
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from flask import Flask, render_template, request, jsonify
|
| 2 |
+
import tensorflow as tf
|
| 3 |
+
from PIL import Image
|
| 4 |
+
import numpy as np
|
| 5 |
+
import base64
|
| 6 |
+
import io
|
| 7 |
+
|
| 8 |
+
app = Flask(__name__)

# Load the pre-trained Keras facial-expression model (.h5) at import time,
# so a missing/corrupt model file fails fast at startup rather than on the
# first request.
model = tf.keras.models.load_model('unique_face_expression_model_.h5')
# Class names in the index order the model's output layer was trained with
# (looks like the standard FER-2013 label order — TODO confirm against training code).
class_labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Neutral', 'Sad', 'Surprise']
|
| 13 |
+
|
| 14 |
+
# Image preprocessing used by /predict before invoking the model.
def preprocess_image(image):
    """Turn a PIL image into the (1, 48, 48, 1) float array the model expects.

    The image is resized to 48x48, converted to single-channel grayscale,
    scaled into [0, 1], and given channel and batch dimensions.
    """
    gray = image.resize((48, 48)).convert('L')
    pixels = np.asarray(gray) / 255.0  # uint8 -> float64 in [0, 1]
    return pixels.reshape((1, 48, 48, 1))  # add batch + channel dims
|
| 22 |
+
|
| 23 |
+
@app.route('/')
def index():
    """Serve the single-page UI (webcam preview + image-upload form)."""
    return render_template('index.html')
|
| 26 |
+
|
| 27 |
+
@app.route('/predict', methods=['POST'])
def predict():
    """Classify the facial expression in a posted image.

    Accepts either:
      * a JSON body ``{"image": "<data-URL or bare base64 PNG>"}`` (real-time
        webcam frames), or
      * a multipart form upload with an ``image`` file field.

    Returns JSON ``{"prediction": "<label>"}`` on success, or
    ``{"error": "..."}`` with HTTP 400 on missing/undecodable input.
    """
    body = request.get_json(silent=True) or {}
    try:
        if 'image' in body:
            # Real-time frame: either a full data URL
            # ("data:image/png;base64,<payload>") or a bare base64 string.
            # The previous split(",")[1] raised IndexError when the string
            # had no comma; keep only the payload either way.
            data = body['image']
            payload = data.split(',', 1)[1] if ',' in data else data
            image = Image.open(io.BytesIO(base64.b64decode(payload)))
        elif 'image' in request.files:
            # Browser file upload (multipart/form-data).
            image = Image.open(request.files['image'])
        else:
            return jsonify({'error': 'No image provided'}), 400
    except Exception:
        # Malformed base64 or a file PIL cannot decode is a client error,
        # not an unhandled 500.
        return jsonify({'error': 'Invalid image data'}), 400

    # Preprocess to the (1, 48, 48, 1) tensor the model expects.
    processed_image = preprocess_image(image)

    # prediction has shape (1, len(class_labels)); take the top-scoring class.
    prediction = model.predict(processed_image)
    predicted_label = class_labels[int(np.argmax(prediction))]

    return jsonify({'prediction': predicted_label})
|
| 51 |
+
|
| 52 |
+
if __name__ == '__main__':
    # Development server only. NOTE(review): debug=True enables the Werkzeug
    # interactive debugger (arbitrary code execution) — must not be exposed
    # in production; deploy behind a real WSGI server instead.
    app.run(debug=True)
|
index.html
ADDED
|
@@ -0,0 +1,120 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Facial Expression Recognition</title>
    <style>
        body {
            font-family: Arial, sans-serif;
            background-color: #f4f4f4;
            text-align: center;
            margin: 0;
            padding: 0;
        }
        .container {
            width: 70%;
            margin: auto;
            padding: 20px;
            background: white;
            border-radius: 8px;
            box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
            margin-top: 50px;
        }
        button, input[type="file"] {
            margin: 10px;
            padding: 10px 15px;
            border: none;
            border-radius: 5px;
            background: #007BFF;
            color: white;
            cursor: pointer;
        }
        button:hover {
            background: #0056b3;
        }
        input[type="file"] {
            cursor: pointer;
        }
        .output {
            margin-top: 20px;
            font-size: 18px;
            color: #333;
        }
    </style>
</head>
<body>
    <h1>Facial Expression Recognition</h1>
    <div class="container">
        <!-- Real-Time Detection Section -->
        <h2>Real-Time Detection</h2>
        <video id="video" width="480" height="360" autoplay></video>
        <button onclick="startRealTimePrediction()">Start Real-Time Detection</button>
        <div id="realTimeOutput" class="output"></div>

        <!-- Image Upload Section -->
        <h2>Image Upload</h2>
        <input type="file" id="imageInput" accept="image/*">
        <button onclick="uploadImage()">Upload and Predict</button>
        <div id="uploadOutput" class="output"></div>
    </div>

    <script>
        // Real-Time Prediction (frames sent to /predict as base64 PNGs).
        // Kept module-level so repeated button clicks replace — not stack —
        // the polling interval.
        let predictionTimer = null;

        function startRealTimePrediction() {
            const video = document.getElementById('video');
            navigator.mediaDevices.getUserMedia({ video: true })
                .then(stream => {
                    video.srcObject = stream;
                })
                .catch(err => {
                    console.error("Error accessing camera: ", err);
                });

            if (predictionTimer !== null) {
                clearInterval(predictionTimer);
            }
            predictionTimer = setInterval(() => {
                // Skip frames until the camera has delivered pixels: with
                // videoWidth === 0 the canvas is 0x0 and toDataURL returns
                // the empty "data:," URL, which the server cannot decode.
                if (video.videoWidth === 0 || video.videoHeight === 0) {
                    return;
                }
                const canvas = document.createElement('canvas');
                canvas.width = video.videoWidth;
                canvas.height = video.videoHeight;
                canvas.getContext('2d').drawImage(video, 0, 0);
                const imageData = canvas.toDataURL('image/png');

                fetch('/predict', {
                    method: 'POST',
                    headers: {
                        'Content-Type': 'application/json'
                    },
                    body: JSON.stringify({ image: imageData })
                })
                .then(response => response.json())
                .then(data => {
                    // Show server-side errors instead of "Prediction: undefined".
                    document.getElementById('realTimeOutput').textContent =
                        data.prediction ? `Prediction: ${data.prediction}` : `Error: ${data.error || 'unknown'}`;
                })
                .catch(error => console.error('Error:', error));
            }, 1000); // Perform predictions every second
        }

        // Upload Image Prediction (multipart form POST to /predict).
        function uploadImage() {
            const input = document.getElementById('imageInput');
            const file = input.files[0];
            if (!file) {
                alert('Please upload an image.');
                return;
            }

            const formData = new FormData();
            formData.append('image', file);

            fetch('/predict', {
                method: 'POST',
                body: formData
            })
            .then(response => response.json())
            .then(data => {
                // Show server-side errors instead of "Prediction: undefined".
                document.getElementById('uploadOutput').textContent =
                    data.prediction ? `Prediction: ${data.prediction}` : `Error: ${data.error || 'unknown'}`;
            })
            .catch(error => console.error('Error:', error));
        }
    </script>
</body>
</html>
|
script.js
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Attach the user's webcam stream to the <video> element, if it exists.
function startWebcam() {
    const videoEl = document.getElementById('video');
    if (!videoEl) {
        console.error("Video element not found.");
        return;
    }
    navigator.mediaDevices.getUserMedia({ video: true })
        .then(stream => {
            videoEl.srcObject = stream;
        })
        .catch(error => {
            console.error("Error accessing the webcam:", error);
        });
}
|
| 16 |
+
|
| 17 |
+
// Start the webcam as soon as the DOM is ready, so the <video> element
// exists when startWebcam looks it up.
document.addEventListener('DOMContentLoaded', (event) => {
    startWebcam();
});
|
| 21 |
+
|
| 22 |
+
// Real-time detection: capture a frame from #video every second, POST it to
// /predict as a base64 PNG, and draw the returned bounding box + label on the
// #overlay canvas. Fixes over the original: bail out if required DOM elements
// are missing (getContext on null threw), skip frames before the camera has
// delivered pixels (a 0x0 canvas produces the empty "data:," URL), and clear
// any previous interval so repeated calls don't stack requests.
// NOTE(review): the /predict endpoint shown elsewhere returns only
// {"prediction": ...} with no "bbox" field — confirm the server actually
// sends bounding boxes, otherwise the "No face detected" branch always runs.
let detectionTimer = null;

function startDetection() {
    const video = document.getElementById('video');
    const canvas = document.getElementById('overlay');
    const realTimeOutputDiv = document.getElementById('realTimeOutput');

    if (!video || !canvas || !realTimeOutputDiv) {
        console.error('Required elements (#video, #overlay, #realTimeOutput) not found.');
        return;
    }
    const ctx = canvas.getContext('2d');

    if (detectionTimer !== null) {
        clearInterval(detectionTimer);
    }
    detectionTimer = setInterval(() => {
        // Camera not ready yet — capturing now would send an empty frame.
        if (video.videoWidth === 0 || video.videoHeight === 0) {
            return;
        }
        ctx.drawImage(video, 0, 0, canvas.width, canvas.height);

        // Convert the captured frame to base64
        const imageData = canvas.toDataURL('image/png');

        fetch('/predict', {
            method: 'POST',
            headers: {
                'Content-Type': 'application/json',
            },
            body: JSON.stringify({ image: imageData }),
        })
        .then(response => response.json())
        .then(data => {
            ctx.clearRect(0, 0, canvas.width, canvas.height);

            if (data.prediction && data.bbox) {
                const { x, y, w, h } = data.bbox;

                // Draw bounding box
                ctx.strokeStyle = 'red';
                ctx.lineWidth = 2;
                ctx.strokeRect(x, y, w, h);

                // Display prediction label
                ctx.font = '16px Arial';
                ctx.fillStyle = 'red';
                ctx.fillText(data.prediction, x, y - 10);

                realTimeOutputDiv.innerHTML = `Real-Time Prediction: ${data.prediction}`;
            } else {
                realTimeOutputDiv.innerHTML = 'No face detected';
            }
        })
        .catch(error => {
            console.error('Error:', error);
        });
    }, 1000); // Capture and send frame every second
}
|
| 69 |
+
|
| 70 |
+
// Upload the selected file to /predict and display the result.
// Fixes over the original: server error responses ({"error": ...}) are shown
// to the user instead of being mislabeled "No face detected", and a missing
// output div no longer throws.
function uploadImage() {
    const fileInput = document.getElementById('imageUpload');
    const uploadOutputDiv = document.getElementById('uploadOutput');

    if (!fileInput || fileInput.files.length === 0) {
        alert('Please select an image before uploading.');
        return;
    }
    if (!uploadOutputDiv) {
        console.error('Output element #uploadOutput not found.');
        return;
    }

    const file = fileInput.files[0];
    const formData = new FormData();
    formData.append('image', file);

    fetch('/predict', {
        method: 'POST',
        body: formData,
    })
    .then(response => response.json())
    .then(data => {
        if (data.prediction) {
            uploadOutputDiv.innerHTML = `Uploaded Image Prediction: ${data.prediction}`;
        } else if (data.error) {
            uploadOutputDiv.innerHTML = `Error: ${data.error}`;
        } else {
            uploadOutputDiv.innerHTML = 'No face detected';
        }
    })
    .catch(error => {
        console.error('Error:', error);
    });
}
|
styles.css
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/* Page-level defaults: centered text on a light grey background. */
body {
    font-family: Arial, sans-serif;
    background-color: #f6f2f2;
    text-align: center;
    margin: 0;
    padding: 20px;
}

/* Vertical flex column centering the video and upload sections. */
.container {
    display: flex;
    justify-content: center;
    align-items: center;
    flex-direction: column;
    gap: 20px;
}

/* Stacks the <video> element and its controls. */
.video-container {
    display: flex;
    flex-direction: column;
    align-items: center;
}

/* Primary action buttons (blue, rounded). */
button {
    background-color: #007bff;
    color: white;
    border: none;
    padding: 10px 20px;
    cursor: pointer;
    border-radius: 5px;
    margin-top: 10px;
}

/* Darker blue on hover for affordance. */
button:hover {
    background-color: #0056b3;
}

/* Spacing around the file-picker row. */
.file-upload {
    margin-top: 20px;
}

/* Prediction/result text below each section. */
.output {
    margin-top: 20px;
    font-size: 1.2em;
    color: #333;
}
|
unique_face_expression_model_.h5
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:2b03715700915b1b68929050b2f7116a7848390e0dd6cb5207c771c591d112d4
|
| 3 |
+
size 4275304
|