<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <title>Expression Recognition</title>
  <style>
    /* Center the demo in a full-viewport dark page. */
    body {
      margin: 0;
      padding: 0;
      display: flex;
      flex-direction: column;
      align-items: center;
      justify-content: center;
      height: 100vh;
      background-color: #1a1a1a;
      font-family: 'Segoe UI', sans-serif;
      color: white;
    }

    /* Fixed 640x480 stage; position: relative so the overlay canvas and
       the loader can be absolutely positioned on top of the video. */
    .video-container {
      position: relative;
      width: 640px;
      height: 480px;
      background: #000;
      border-radius: 12px;
      overflow: hidden;
      box-shadow: 0 8px 32px rgba(0,0,0,0.5);
    }

    video {
      width: 100%;
      height: 100%;
      object-fit: cover;
      /* Mirror the preview like a selfie camera; the JS drawing code
         compensates when positioning detection boxes and labels. */
      transform: scaleX(-1);
    }

    /* Detection overlay; face-api.js draws onto it at capture time. */
    canvas {
      position: absolute;
      top: 0;
      left: 0;
    }

    /* Opaque overlay shown while the models download; hidden by the JS
       once the camera stream starts playing. */
    #loader {
      position: absolute;
      top: 0;
      left: 0;
      width: 100%;
      height: 100%;
      background: rgba(0,0,0,0.9);
      display: flex;
      flex-direction: column;
      align-items: center;
      justify-content: center;
      z-index: 10;
    }

    .spinner {
      width: 40px;
      height: 40px;
      border: 4px solid #f3f3f3;
      border-top: 4px solid #9b59b6;
      border-radius: 50%;
      animation: spin 1s linear infinite;
      margin-bottom: 15px;
    }

    @keyframes spin { 0% { transform: rotate(0deg); } 100% { transform: rotate(360deg); } }

    .controls { margin-top: 20px; display: flex; gap: 15px; }
    button { padding: 12px 30px; font-size: 16px; border: none; border-radius: 50px; cursor: pointer; font-weight: 600; transition: transform 0.1s; }
    button:active { transform: scale(0.95); }

    #btnCapture { background: linear-gradient(135deg, #28a745, #218838); color: white; }
    #btnCapture:disabled { background: #555; cursor: not-allowed; }
    /* Hidden until a frame has been captured (toggled from JS). */
    #btnRetake { background: linear-gradient(135deg, #dc3545, #c82333); color: white; display: none; }
    #status { margin-top: 15px; color: #ccc; font-size: 14px; }
  </style>
  <!-- Loaded synchronously on purpose: the classic inline script at the
       end of <body> uses the global `faceapi` as soon as it executes, and
       a deferred external script would run after it. -->
  <script src="https://cdn.jsdelivr.net/npm/face-api.js@0.22.2/dist/face-api.min.js"></script>
</head>
|
<body>

  <!-- Live preview with the detection canvas and the loading overlay
       stacked on top of the mirrored <video>. -->
  <div class="video-container">
    <video id="video" autoplay muted playsinline></video>
    <canvas id="canvas"></canvas>
    <div id="loader">
      <div class="spinner"></div>
      <div id="loadingText">Loading Expression Models...</div>
    </div>
  </div>

  <!-- Capture is enabled from JS once the camera stream is playing;
       Retake is revealed after a frame has been captured. -->
  <div class="controls">
    <button id="btnCapture" disabled>Wait...</button>
    <button id="btnRetake">Retake</button>
  </div>

  <!-- Human-readable progress / result messages, updated from JS. -->
  <div id="status">Initializing system...</div>
|
|
|
| <script>
|
// Cached references to the UI elements the handlers below manipulate.
const video = document.getElementById('video');
const canvas = document.getElementById('canvas');
const btnCapture = document.getElementById('btnCapture');
const btnRetake = document.getElementById('btnRetake');
const statusText = document.getElementById('status');
const loader = document.getElementById('loader');

// CDN directory holding the pretrained face-api.js model weights.
// NOTE(review): this is a third-party mirror of the 0.22.2 weights —
// should match the library version loaded in <head>; confirm it stays up.
const MODEL_URL = 'https://cdn.jsdelivr.net/gh/cgarciagl/face-api.js@0.22.2/weights/';
|
|
|
/**
 * Downloads the face-detection (SSD MobileNet v1) and expression models,
 * then starts the webcam. Called once at script load.
 *
 * On failure the loader overlay is updated as well as alerting: previously
 * the spinner kept spinning forever after the alert was dismissed, with no
 * indication in the page that loading had failed.
 */
async function init() {
    try {
        // Both networks must be resident before detection can run.
        await faceapi.nets.ssdMobilenetv1.loadFromUri(MODEL_URL);
        await faceapi.nets.faceExpressionNet.loadFromUri(MODEL_URL);
        startCamera();
    } catch (error) {
        console.error(error); // keep the full stack for debugging
        document.getElementById('loadingText').innerText = 'Failed to load models.';
        statusText.innerText = 'Model download failed — check your connection and reload.';
        alert("Error loading models: " + error);
    }
}
|
|
|
/**
 * Requests a 640x480 webcam stream and wires it to the <video> element
 * (which autoplays, eventually firing the 'play' handler below).
 *
 * On failure (permission denied, no camera, ...) the user is told via the
 * loader/status UI; previously the error only reached the console and the
 * loading spinner was left running indefinitely.
 */
function startCamera() {
    navigator.mediaDevices.getUserMedia({ video: { width: 640, height: 480 } })
        .then(stream => { video.srcObject = stream; })
        .catch(err => {
            console.error(err);
            document.getElementById('loadingText').innerText = 'Camera unavailable.';
            statusText.innerText = 'Camera access failed: ' + err.name +
                '. Grant camera permission and reload.';
        });
}
|
|
|
// One-time UI setup once the stream actually starts playing: size the
// overlay canvas to the video and unlock the capture button.
//
// Registered with { once: true } because 'play' fires again every time
// video.play() is called — i.e. on every "Retake" — and the old handler
// then redundantly reset the button/loader state and clobbered the
// "Ready." status the retake handler had just written.
video.addEventListener('play', () => {
    const displaySize = { width: video.videoWidth, height: video.videoHeight };
    faceapi.matchDimensions(canvas, displaySize);
    loader.style.display = 'none';
    btnCapture.disabled = false;
    btnCapture.innerText = "Capture Expression";
    statusText.innerText = "Ready. Show me an emotion!";
}, { once: true });
|
|
|
/**
 * Capture: freezes the current video frame, runs face + expression
 * detection on it, and draws mirrored boxes and top-emotion labels onto
 * the overlay canvas.
 */
btnCapture.addEventListener('click', async () => {
    if (video.paused) return; // already frozen — ignore double clicks

    // Freeze the frame and flip the UI into review mode.
    video.pause();
    btnCapture.style.display = 'none';
    btnRetake.style.display = 'inline-block';
    statusText.innerText = "Analyzing Expressions...";

    // Re-sync the overlay with the video's intrinsic size (cheap, and
    // guards against the stream's dimensions having changed).
    const displaySize = { width: video.videoWidth, height: video.videoHeight };
    faceapi.matchDimensions(canvas, displaySize);

    // Detect every face in the paused frame and classify its expression.
    const detections = await faceapi
        .detectAllFaces(video, new faceapi.SsdMobilenetv1Options({ minConfidence: 0.5 }))
        .withFaceExpressions();

    const resizedDetections = faceapi.resizeResults(detections, displaySize);
    const ctx = canvas.getContext('2d');
    ctx.clearRect(0, 0, canvas.width, canvas.height);

    // The <video> preview is mirrored via CSS (scaleX(-1)), so mirror the
    // canvas the same way while drawing the boxes to make them line up.
    ctx.save();
    ctx.scale(-1, 1);
    ctx.translate(-canvas.width, 0);
    faceapi.draw.drawDetections(canvas, resizedDetections);
    ctx.restore();

    // Text must NOT be drawn under the mirror transform (it would render
    // backwards), so compute each label's mirrored x coordinate by hand.
    resizedDetections.forEach(result => {
        const expressions = result.expressions;

        // Highest-probability expression for this face.
        const sorted = Object.entries(expressions).sort((a, b) => b[1] - a[1]);
        const topEmotion = sorted[0];

        const box = result.detection.box;
        const mirroredX = canvas.width - box.x - box.width;
        const mirroredPos = { x: mirroredX, y: box.bottomLeft.y };

        new faceapi.draw.DrawTextField(
            [`${topEmotion[0]} (${Math.round(topEmotion[1] * 100)}%)`],
            mirroredPos
        ).draw(canvas);

        // BUGFIX: the old code also called
        //   faceapi.draw.drawFaceExpressions(canvas, resizedDetections, 0.1, mirroredPos)
        // inside this per-face loop. That rendered EVERY face's expression
        // list once per face, each copy anchored at the current face's
        // position, and painted a second text field at exactly the same
        // anchor as the label above — overlapping it even for one face.
        // The redundant call is removed; the mirrored label is the output.
    });

    if (detections.length === 0) statusText.innerText = "No face detected.";
    else statusText.innerText = `Analysis Done. Found ${detections.length} face(s).`;
});
|
|
|
// Retake: discard the analyzed frame and return to the live preview.
btnRetake.addEventListener('click', () => {
    const overlay = canvas.getContext('2d');
    overlay.clearRect(0, 0, canvas.width, canvas.height); // wipe boxes/labels
    video.play(); // resume the live stream

    // Swap the buttons back into capture mode.
    btnRetake.style.display = 'none';
    btnCapture.style.display = 'inline-block';
    statusText.innerText = "Ready.";
});
|
|
|
// Kick off model loading (and then the camera) as soon as the script runs.
init();
|
| </script>
|
| </body>
|
| </html>
|
|
|