manmohanai commited on
Commit
f4d08d3
·
verified ·
1 Parent(s): 6c6f592

Initial Deployment

Browse files
Files changed (2) hide show
  1. README.md +7 -5
  2. index.html +360 -19
README.md CHANGED
@@ -1,10 +1,12 @@
1
  ---
2
- title: Faceimotion
3
- emoji: 🌍
4
- colorFrom: pink
5
- colorTo: yellow
6
  sdk: static
7
  pinned: false
 
 
8
  ---
9
 
10
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: faceimotion
3
+ emoji: 🐳
4
+ colorFrom: gray
5
+ colorTo: blue
6
  sdk: static
7
  pinned: false
8
+ tags:
9
+ - deepsite
10
  ---
11
 
12
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
index.html CHANGED
@@ -1,19 +1,360 @@
1
- <!doctype html>
2
- <html>
3
- <head>
4
- <meta charset="utf-8" />
5
- <meta name="viewport" content="width=device-width" />
6
- <title>My static Space</title>
7
- <link rel="stylesheet" href="style.css" />
8
- </head>
9
- <body>
10
- <div class="card">
11
- <h1>Welcome to your static Space!</h1>
12
- <p>You can modify this app directly by editing <i>index.html</i> in the Files and versions tab.</p>
13
- <p>
14
- Also don't forget to check the
15
- <a href="https://huggingface.co/docs/hub/spaces" target="_blank">Spaces documentation</a>.
16
- </p>
17
- </div>
18
- </body>
19
- </html>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!DOCTYPE html>
2
+ <html lang="en">
3
+ <head>
4
+ <meta charset="UTF-8">
5
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
6
+ <title>Emotion Detection Camera</title>
7
+ <script src="https://cdn.tailwindcss.com"></script>
8
+ <script src="https://cdn.jsdelivr.net/npm/face-api.js@0.22.2/dist/face-api.min.js"></script>
9
+ <style>
10
+ .camera-container {
11
+ position: relative;
12
+ width: 100%;
13
+ max-width: 640px;
14
+ margin: 0 auto;
15
+ border-radius: 12px;
16
+ overflow: hidden;
17
+ box-shadow: 0 10px 25px rgba(0, 0, 0, 0.1);
18
+ }
19
+
20
+ .emotion-badge {
21
+ position: absolute;
22
+ padding: 4px 8px;
23
+ border-radius: 20px;
24
+ font-size: 12px;
25
+ font-weight: bold;
26
+ color: white;
27
+ background-color: rgba(0, 0, 0, 0.7);
28
+ transform: translate(-50%, -50%);
29
+ white-space: nowrap;
30
+ }
31
+
32
+ .loading-spinner {
33
+ border: 3px solid rgba(255, 255, 255, 0.3);
34
+ border-radius: 50%;
35
+ border-top: 3px solid #4f46e5;
36
+ width: 30px;
37
+ height: 30px;
38
+ animation: spin 1s linear infinite;
39
+ margin: 0 auto;
40
+ }
41
+
42
+ @keyframes spin {
43
+ 0% { transform: rotate(0deg); }
44
+ 100% { transform: rotate(360deg); }
45
+ }
46
+
47
+ .face-box {
48
+ position: absolute;
49
+ border: 2px solid #4f46e5;
50
+ border-radius: 4px;
51
+ background-color: rgba(79, 70, 229, 0.1);
52
+ }
53
+ </style>
54
+ </head>
55
+ <body class="bg-gray-100 min-h-screen">
56
+ <div class="container mx-auto px-4 py-8">
57
+ <div class="text-center mb-8">
58
+ <h1 class="text-3xl md:text-4xl font-bold text-indigo-700 mb-2">Emotion Detection Camera</h1>
59
+ <p class="text-gray-600 max-w-lg mx-auto">Take a photo and our AI will detect your facial expressions and emotions in real-time.</p>
60
+ </div>
61
+
62
+ <div class="bg-white rounded-xl shadow-lg p-6 max-w-4xl mx-auto">
63
+ <div id="status" class="text-center py-4 hidden">
64
+ <div class="loading-spinner"></div>
65
+ <p class="mt-2 text-gray-600">Loading models...</p>
66
+ </div>
67
+
68
+ <div class="camera-container bg-gray-200 aspect-video relative" id="cameraView">
69
+ <video id="video" autoplay muted playsinline class="w-full h-full object-cover"></video>
70
+ <canvas id="canvas" class="absolute top-0 left-0 w-full h-full hidden"></canvas>
71
+ </div>
72
+
73
+ <div class="flex flex-wrap justify-center gap-4 mt-6">
74
+ <button id="captureBtn" class="bg-indigo-600 hover:bg-indigo-700 text-white px-6 py-3 rounded-lg font-medium flex items-center gap-2 transition-all">
75
+ <svg xmlns="http://www.w3.org/2000/svg" class="h-5 w-5" viewBox="0 0 20 20" fill="currentColor">
76
+ <path fill-rule="evenodd" d="M4 5a2 2 0 00-2 2v8a2 2 0 002 2h12a2 2 0 002-2V7a2 2 0 00-2-2h-1.586a1 1 0 01-.707-.293l-1.121-1.121A2 2 0 0011.172 3H8.828a2 2 0 00-1.414.586L6.293 4.707A1 1 0 015.586 5H4zm6 9a3 3 0 100-6 3 3 0 000 6z" clip-rule="evenodd" />
77
+ </svg>
78
+ Capture Photo
79
+ </button>
80
+
81
+ <button id="switchCameraBtn" class="bg-gray-200 hover:bg-gray-300 text-gray-800 px-6 py-3 rounded-lg font-medium flex items-center gap-2 transition-all">
82
+ <svg xmlns="http://www.w3.org/2000/svg" class="h-5 w-5" viewBox="0 0 20 20" fill="currentColor">
83
+ <path d="M2 6a2 2 0 012-2h6a2 2 0 012 2v8a2 2 0 01-2 2H4a2 2 0 01-2-2V6zM14 6a2 2 0 012-2h2a2 2 0 012 2v8a2 2 0 01-2 2h-2a2 2 0 01-2-2V6z" />
84
+ </svg>
85
+ Switch Camera
86
+ </button>
87
+
88
+ <button id="resetBtn" class="bg-gray-200 hover:bg-gray-300 text-gray-800 px-6 py-3 rounded-lg font-medium flex items-center gap-2 transition-all hidden">
89
+ <svg xmlns="http://www.w3.org/2000/svg" class="h-5 w-5" viewBox="0 0 20 20" fill="currentColor">
90
+ <path fill-rule="evenodd" d="M4 2a1 1 0 011 1v2.101a7.002 7.002 0 0111.601 2.566 1 1 0 11-1.885.666A5.002 5.002 0 005.999 7H9a1 1 0 010 2H4a1 1 0 01-1-1V3a1 1 0 011-1zm.008 9.057a1 1 0 011.276.61A5.002 5.002 0 0014.001 13H11a1 1 0 110-2h5a1 1 0 011 1v5a1 1 0 11-2 0v-2.101a7.002 7.002 0 01-11.601-2.566 1 1 0 01.61-1.276z" clip-rule="evenodd" />
91
+ </svg>
92
+ Reset
93
+ </button>
94
+ </div>
95
+
96
+ <div id="results" class="mt-8 hidden">
97
+ <h3 class="text-xl font-semibold text-gray-800 mb-4">Emotion Analysis</h3>
98
+ <div class="grid grid-cols-1 md:grid-cols-2 gap-6">
99
+ <div>
100
+ <h4 class="font-medium text-gray-700 mb-2">Primary Emotion</h4>
101
+ <div id="primaryEmotion" class="bg-indigo-50 text-indigo-800 px-4 py-3 rounded-lg font-medium"></div>
102
+ </div>
103
+ <div>
104
+ <h4 class="font-medium text-gray-700 mb-2">Confidence Level</h4>
105
+ <div id="confidenceLevel" class="bg-indigo-50 text-indigo-800 px-4 py-3 rounded-lg font-medium"></div>
106
+ </div>
107
+ </div>
108
+
109
+ <div class="mt-6">
110
+ <h4 class="font-medium text-gray-700 mb-2">Detailed Breakdown</h4>
111
+ <div id="emotionDetails" class="space-y-2"></div>
112
+ </div>
113
+ </div>
114
+ </div>
115
+
116
+ <div class="mt-8 text-center text-gray-500 text-sm">
117
+ <p>This app uses face-api.js for emotion detection. All processing happens in your browser.</p>
118
+ </div>
119
+ </div>
120
+
121
+ <script>
122
+ // DOM Elements
123
+ const video = document.getElementById('video');
124
+ const canvas = document.getElementById('canvas');
125
+ const captureBtn = document.getElementById('captureBtn');
126
+ const switchCameraBtn = document.getElementById('switchCameraBtn');
127
+ const resetBtn = document.getElementById('resetBtn');
128
+ const statusElement = document.getElementById('status');
129
+ const resultsElement = document.getElementById('results');
130
+ const primaryEmotionElement = document.getElementById('primaryEmotion');
131
+ const confidenceLevelElement = document.getElementById('confidenceLevel');
132
+ const emotionDetailsElement = document.getElementById('emotionDetails');
133
+
134
+ let stream = null;
135
+ let currentFacingMode = 'user'; // 'user' for front camera, 'environment' for back
136
+ let isCaptured = false;
137
+
138
// Load the face-api.js models, then start the camera.
// Fix: the original also loaded faceRecognitionNet, but nothing in this app
// ever computes face descriptors — the recognition weights are the largest
// download, so dropping them speeds up startup without changing behavior.
async function init() {
    statusElement.classList.remove('hidden');

    const MODEL_URL = 'https://justadudewhohacks.github.io/face-api.js/models';
    try {
        await Promise.all([
            faceapi.nets.tinyFaceDetector.loadFromUri(MODEL_URL),
            faceapi.nets.faceLandmark68Net.loadFromUri(MODEL_URL),
            faceapi.nets.faceExpressionNet.loadFromUri(MODEL_URL)
        ]);

        statusElement.classList.add('hidden');
        startCamera();
    } catch (error) {
        // Replace the spinner with an inline error card.
        statusElement.innerHTML = `
            <div class="bg-red-50 text-red-600 p-4 rounded-lg">
                <p>Failed to load models. Please refresh the page or check your internet connection.</p>
            </div>
        `;
        console.error(error);
    }
}
161
+
162
// (Re)open the camera using the currently selected facing mode.
// Any previously open stream is stopped first so the device is released.
async function startCamera() {
    if (stream) {
        for (const track of stream.getTracks()) {
            track.stop();
        }
    }

    const constraints = {
        audio: false,
        video: {
            facingMode: currentFacingMode,
            width: { ideal: 1280 },
            height: { ideal: 720 }
        }
    };

    try {
        stream = await navigator.mediaDevices.getUserMedia(constraints);
        video.srcObject = stream;
        video.onloadedmetadata = () => {
            video.play();
            // NOTE(review): this handler fires again after a camera switch,
            // which can start a second detection loop alongside the one
            // already scheduled — confirm and guard if it becomes an issue.
            if (!isCaptured) {
                detectEmotions();
            }
        };
    } catch (error) {
        console.error("Camera error:", error);
        alert("Could not access the camera. Please make sure you've granted camera permissions.");
    }
}
190
+
191
// Live-detection loop: roughly every 300 ms, detect a single face in the
// video feed, draw its box and landmarks on an overlay canvas, and float
// short-lived emotion badges near the face. Stops rescheduling once a photo
// has been captured (resetApp restarts it).
//
// Fixes over the original:
//  - The original created a brand-new canvas via faceapi.createCanvasFromMedia()
//    on EVERY pass and never attached it to the DOM, so all box/landmark
//    drawing was invisible and one canvas was leaked every 300 ms. We now
//    create a single overlay canvas lazily, attach it to the camera container,
//    and reuse it on every pass.
//  - The new canvas also shadowed the outer `canvas` element; the overlay now
//    has its own name.
//  - An unused `primaryEmotion` local was removed.
async function detectEmotions() {
    if (isCaptured) return;

    const detection = await faceapi.detectSingleFace(video, new faceapi.TinyFaceDetectorOptions())
        .withFaceLandmarks()
        .withFaceExpressions();

    // Lazily create the reusable overlay canvas on first use.
    let overlay = document.getElementById('liveOverlay');
    if (!overlay) {
        overlay = faceapi.createCanvasFromMedia(video);
        overlay.id = 'liveOverlay';
        overlay.style.position = 'absolute';
        overlay.style.top = '0';
        overlay.style.left = '0';
        overlay.style.pointerEvents = 'none'; // don't block clicks on the video
        document.getElementById('cameraView').appendChild(overlay);
    }

    // Match the overlay to the video's on-screen size and wipe the last frame.
    const displaySize = { width: video.clientWidth, height: video.clientHeight };
    faceapi.matchDimensions(overlay, displaySize);
    overlay.getContext('2d').clearRect(0, 0, overlay.width, overlay.height);

    if (detection) {
        const resizedDetections = faceapi.resizeResults(detection, displaySize);

        // Face box labeled with the detector's confidence.
        const box = resizedDetections.detection.box;
        new faceapi.draw.DrawBox(box, {
            label: `Confidence: ${Math.round(detection.detection.score * 100)}%`
        }).draw(overlay);

        // 68-point landmark overlay.
        faceapi.draw.drawFaceLandmarks(overlay, resizedDetections);

        // Float a badge for every expression above 10% confidence, jittered
        // around the tip of the nose so badges don't stack exactly.
        Object.entries(detection.expressions).forEach(([emotion, value]) => {
            if (value > 0.1) {
                const nose = resizedDetections.landmarks.getNose()[3];
                const badge = document.createElement('div');
                badge.className = 'emotion-badge';
                badge.style.left = `${nose.x + (Math.random() * 40 - 20)}px`;
                badge.style.top = `${nose.y - 50 - (Math.random() * 30)}px`;
                badge.textContent = `${emotion}: ${Math.round(value * 100)}%`;
                document.getElementById('cameraView').appendChild(badge);

                // Badges are transient; remove after one second.
                setTimeout(() => badge.remove(), 1000);
            }
        });
    }

    // Schedule the next pass.
    setTimeout(() => detectEmotions(), 300);
}
258
+
259
// Capture a still frame from the video and run a one-shot emotion analysis,
// rendering the primary emotion, a confidence label, and a per-emotion bar
// breakdown into the results panel.
captureBtn.addEventListener('click', async () => {
    if (isCaptured) return;

    isCaptured = true;
    captureBtn.disabled = true;

    // Fix: draw the frame at the video's native resolution. The original used
    // clientWidth/clientHeight (the CSS display size), which throws away
    // pixels before analysis and hurts detection accuracy. CSS still scales
    // the canvas to fill the container, so the layout is unchanged.
    canvas.width = video.videoWidth || video.clientWidth;
    canvas.height = video.videoHeight || video.clientHeight;
    const context = canvas.getContext('2d');
    context.drawImage(video, 0, 0, canvas.width, canvas.height);

    // Swap the live video for the frozen frame.
    video.classList.add('hidden');
    canvas.classList.remove('hidden');

    // Analyze the captured image.
    const detection = await faceapi.detectSingleFace(canvas, new faceapi.TinyFaceDetectorOptions())
        .withFaceLandmarks()
        .withFaceExpressions();

    if (detection) {
        const expressions = detection.expressions;
        // Entry with the highest probability: [name, probability].
        const primaryEmotion = Object.entries(expressions).reduce((a, b) => a[1] > b[1] ? a : b);

        primaryEmotionElement.textContent = `${capitalizeFirstLetter(primaryEmotion[0])} (${Math.round(primaryEmotion[1] * 100)}%)`;
        confidenceLevelElement.textContent = getConfidenceLevel(primaryEmotion[1]);

        // Per-emotion breakdown, highest first, with a proportional bar.
        emotionDetailsElement.innerHTML = '';
        Object.entries(expressions)
            .sort((a, b) => b[1] - a[1])
            .forEach(([emotion, value]) => {
                const percentage = Math.round(value * 100);
                const emotionItem = document.createElement('div');
                emotionItem.className = 'bg-gray-50 p-3 rounded-lg';
                emotionItem.innerHTML = `
                    <div class="flex justify-between items-center">
                        <span class="font-medium">${capitalizeFirstLetter(emotion)}</span>
                        <span class="text-gray-600">${percentage}%</span>
                    </div>
                    <div class="w-full bg-gray-200 rounded-full h-2 mt-2">
                        <div class="bg-indigo-500 h-2 rounded-full" style="width: ${percentage}%"></div>
                    </div>
                `;
                emotionDetailsElement.appendChild(emotionItem);
            });

        resultsElement.classList.remove('hidden');
        resetBtn.classList.remove('hidden');
    } else {
        alert("No face detected in the captured image. Please try again.");
        resetApp();
    }

    captureBtn.disabled = false;
});
318
+
319
// Toggle between the front ('user') and rear ('environment') camera,
// then reopen the stream with the new facing mode.
switchCameraBtn.addEventListener('click', () => {
    const next = currentFacingMode === 'user' ? 'environment' : 'user';
    currentFacingMode = next;
    startCamera();
});
324
+
325
// Return the UI to live-preview mode and resume the detection loop.
function resetApp() {
    isCaptured = false;

    // Wipe the frozen frame.
    const ctx = canvas.getContext('2d');
    ctx.clearRect(0, 0, canvas.width, canvas.height);

    // Show the live video again and hide capture-related UI.
    canvas.classList.add('hidden');
    video.classList.remove('hidden');
    resultsElement.classList.add('hidden');
    resetBtn.classList.add('hidden');

    // Kick the live detection loop back off.
    detectEmotions();
}

resetBtn.addEventListener('click', resetApp);
342
+
343
// Helper functions

// Upper-case the first character of a string, leaving the rest untouched.
// An empty string is returned unchanged.
function capitalizeFirstLetter(string) {
    return string === '' ? '' : string[0].toUpperCase() + string.slice(1);
}
347
+
348
// Map a 0..1 confidence score to a human-readable label.
// The first threshold strictly exceeded wins; anything <= 0.2 is "Very Low".
function getConfidenceLevel(confidence) {
    const levels = [
        [0.8, "Very High"],
        [0.6, "High"],
        [0.4, "Moderate"],
        [0.2, "Low"]
    ];
    for (const [bound, label] of levels) {
        if (confidence > bound) return label;
    }
    return "Very Low";
}
355
+
356
// Start everything once the document is ready.
document.addEventListener('DOMContentLoaded', init);
358
+ </script>
359
+ <p style="border-radius: 8px; text-align: center; font-size: 12px; color: #fff; margin-top: 16px;position: fixed; left: 8px; bottom: 8px; z-index: 10; background: rgba(0, 0, 0, 0.8); padding: 4px 8px;">Made with <img src="https://enzostvs-deepsite.hf.space/logo.svg" alt="DeepSite Logo" style="width: 16px; height: 16px; vertical-align: middle;display:inline-block;margin-right:3px;filter:brightness(0) invert(1);"><a href="https://enzostvs-deepsite.hf.space" style="color: #fff;text-decoration: underline;" target="_blank" >DeepSite</a> - 🧬 <a href="https://enzostvs-deepsite.hf.space?remix=manmohanai/faceimotion" style="color: #fff;text-decoration: underline;" target="_blank" >Remix</a></p></body>
360
+ </html>