kamcio1989's picture
Create app.py
9ed72dc verified
I'll create a comprehensive face recognition platform using transformers.js that can detect faces, extract facial
features, and identify people from a database of known faces.
=== index.html ===
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Face Recognition Platform</title>
<script src="https://cdn.tailwindcss.com"></script>
<link rel="stylesheet" href="styles.css">
</head>
<body class="bg-gradient-to-br from-gray-900 to-gray-800 text-white min-h-screen">
<!-- Header -->
<header class="bg-gray-800/50 backdrop-blur-sm border-b border-gray-700">
<div class="container mx-auto px-4 py-4">
<div class="flex items-center justify-between">
<div class="flex items-center space-x-3">
<svg class="w-8 h-8 text-blue-500" fill="none" stroke="currentColor" viewBox="0 0 24 24">
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2"
d="M17 20h5v-2a3 3 0 00-5.356-1.857M17 20H7m10 0v-2c0-.656-.126-1.283-.356-1.857M7 20H2v-2a3 3 0 015.356-1.857M7 20v-2c0-.656.126-1.283.356-1.857m0 0a5.002 5.002 0 019.288 0M15 7a3 3 0 11-6 0 3 3 0 016 0zm6 3a2 2 0 11-4 0 2 2 0 014 0zM7 10a2 2 0 11-4 0 2 2 0 014 0z">
</path>
</svg>
<h1 class="text-2xl font-bold">Face Recognition Platform</h1>
</div>
<a href="https://huggingface.co/spaces/akhaliq/anycoder" target="_blank"
class="text-blue-400 hover:text-blue-300 transition-colors">
Built with anycoder
</a>
</div>
</div>
</header>
<!-- Main Content -->
<main class="container mx-auto px-4 py-8">
<!-- Model Loading Status -->
<div id="loadingStatus" class="bg-gray-800/50 backdrop-blur-sm rounded-lg p-6 mb-6 border border-gray-700">
<div class="flex items-center space-x-3">
<div class="animate-spin rounded-full h-6 w-6 border-b-2 border-blue-500"></div>
<span id="loadingText">Loading AI models...</span>
<div class="flex-1 bg-gray-700 rounded-full h-2">
<div id="progressBar" class="bg-blue-500 h-2 rounded-full transition-all duration-300" style="width: 0%">
</div>
</div>
<span id="progressText" class="text-sm text-gray-400">0%</span>
</div>
</div>
<!-- Tab Navigation -->
<div class="bg-gray-800/50 backdrop-blur-sm rounded-lg p-1 mb-6 border border-gray-700">
<div class="flex space-x-1">
<button onclick="switchTab('recognize')" id="recognizeTab" class="tab-btn flex-1 py-2 px-4 rounded-md bg-blue-600 text-white transition-all">
Recognize Faces
</button>
<button onclick="switchTab('manage')" id="manageTab" class="tab-btn flex-1 py-2 px-4 rounded-md text-gray-300 hover:bg-gray-700 transition-all">
Manage Database
</button>
</div>
</div>
<!-- Recognize Tab -->
<div id="recognizePanel" class="tab-panel">
<div class="grid md:grid-cols-2 gap-6">
<!-- Upload Section -->
<div class="bg-gray-800/50 backdrop-blur-sm rounded-lg p-6 border border-gray-700">
<h2 class="text-xl font-semibold mb-4 flex items-center">
<svg class="w-5 h-5 mr-2 text-blue-500" fill="none" stroke="currentColor" viewBox="0 0 24 24">
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2"
d="M7 16a4 4 0 01-.88-7.903A5 5 0 1115.9 6L16 6a5 5 0 011 9.9M15 13l-3-3m0 0l-3 3m3-3v12"></path>
</svg>
Upload Image
</h2>
<div
class="border-2 border-dashed border-gray-600 rounded-lg p-8 text-center hover:border-blue-500 transition-colors cursor-pointer"
onclick="document.getElementById('imageInput').click()">
<svg class="w-12 h-12 mx-auto text-gray-400 mb-4" fill="none" stroke="currentColor" viewBox="0 0 24 24">
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2"
d="M4 16l4.586-4.586a2 2 0 012.828 0L16 16m-2-2l1.586-1.586a2 2 0 012.828 0L20 14m-6-6h.01M6 20h12a2 2 0 002-2V6a2 2 0 00-2-2H6a2 2 0 00-2 2v12a2 2 0 002 2z">
</path>
</svg>
<p class="text-gray-400">Click to upload or drag and drop</p>
<p class="text-sm text-gray-500 mt-2">PNG, JPG, GIF up to 10MB</p>
<input type="file" id="imageInput" accept="image/*" class="hidden" onchange="handleImageUpload(event)">
</div>
<!-- Preview -->
<div id="imagePreview" class="mt-4 hidden">
<img id="previewImg" class="w-full rounded-lg">
<button onclick="recognizeFaces()" class="mt-4 w-full bg-blue-600 hover:bg-blue-700 text-white py-2 px-4 rounded-lg transition-colors">
Recognize Faces
</button>
</div>
</div>
<!-- Results Section -->
<div class="bg-gray-800/50 backdrop-blur-sm rounded-lg p-6 border border-gray-700">
<h2 class="text-xl font-semibold mb-4 flex items-center">
<svg class="w-5 h-5 mr-2 text-green-500" fill="none" stroke="currentColor" viewBox="0 0 24 24">
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2"
d="M9 12l2 2 4-4m6 2a9 9 0 11-18 0 9 9 0 0118 0z"></path>
</svg>
Recognition Results
</h2>
<div id="resultsContainer" class="space-y-3">
<div class="text-center text-gray-400 py-8">
<svg class="w-16 h-16 mx-auto mb-4 text-gray-600" fill="none" stroke="currentColor" viewBox="0 0 24 24">
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2"
d="M15 12a3 3 0 11-6 0 3 3 0 016 0z"></path>
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2"
d="M2.458 12C3.732 7.943 7.523 5 12 5c4.478 0 8.268 2.943 9.542 7-1.274 4.057-5.064 7-9.542 7-4.477 0-8.268-2.943-9.542-7z">
</path>
</svg>
<p>Upload an image to recognize faces</p>
</div>
</div>
</div>
</div>
</div>
<!-- Manage Tab -->
<div id="managePanel" class="tab-panel hidden">
<div class="grid md:grid-cols-2 gap-6">
<!-- Add New Person -->
<div class="bg-gray-800/50 backdrop-blur-sm rounded-lg p-6 border border-gray-700">
<h2 class="text-xl font-semibold mb-4 flex items-center">
<svg class="w-5 h-5 mr-2 text-green-500" fill="none" stroke="currentColor" viewBox="0 0 24 24">
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2"
d="M18 9v3m0 0v3m0-3h3m-3 0h-3m-2-5a4 4 0 11-8 0 4 4 0 018 0zM3 20a6 6 0 0112 0v1H3v-1z"></path>
</svg>
Add New Person
</h2>
<div class="space-y-4">
<div>
<label class="block text-sm font-medium text-gray-300 mb-2">Name</label>
<input type="text" id="personName" class="w-full bg-gray-700 border border-gray-600 rounded-lg px-4 py-2 text-white focus:outline-none focus:border-blue-500" placeholder="Enter person's name">
</div>
<div>
<label class="block text-sm font-medium text-gray-300 mb-2">Photo</label>
<div
class="border-2 border-dashed border-gray-600 rounded-lg p-4 text-center hover:border-blue-500 transition-colors cursor-pointer"
onclick="document.getElementById('personPhoto').click()">
<svg class="w-8 h-8 mx-auto text-gray-400 mb-2" fill="none" stroke="currentColor" viewBox="0 0 24 24">
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M12 6v6m0 0v6m0-6h6m-6 0H6">
</path>
</svg>
<p class="text-sm text-gray-400">Click to select photo</p>
<input type="file" id="personPhoto" accept="image/*" class="hidden" onchange="handlePersonPhoto(event)">
</div>
<img id="personPhotoPreview" class="w-full rounded-lg mt-2 hidden">
</div>
<button onclick="addPerson()" class="w-full bg-green-600 hover:bg-green-700 text-white py-2 px-4 rounded-lg transition-colors">
Add to Database
</button>
</div>
</div>
<!-- Database List -->
<div class="bg-gray-800/50 backdrop-blur-sm rounded-lg p-6 border border-gray-700">
<h2 class="text-xl font-semibold mb-4 flex items-center">
<svg class="w-5 h-5 mr-2 text-blue-500" fill="none" stroke="currentColor" viewBox="0 0 24 24">
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2"
d="M19 11H5m14 0a2 2 0 012 2v6a2 2 0 01-2 2H5a2 2 0 01-2-2v-6a2 2 0 012-2m14 0V9a2 2 0 00-2-2M5 11V9a2 2 0 012-2m0 0V5a2 2 0 012-2h6a2 2 0 012 2v2M7 7h10">
</path>
</svg>
Face Database
</h2>
<div id="databaseList" class="space-y-2 max-h-96 overflow-y-auto">
<div class="text-center text-gray-400 py-8">
<svg class="w-16 h-16 mx-auto mb-4 text-gray-600" fill="none" stroke="currentColor" viewBox="0 0 24 24">
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2"
d="M20 13V6a2 2 0 00-2-2H6a2 2 0 00-2 2v7m16 0v5a2 2 0 01-2 2H6a2 2 0 01-2-2v-5m16 0h-2.586a1 1 0 00-.707.293l-2.414 2.414a1 1 0 01-.707.293h-3.172a1 1 0 01-.707-.293l-2.414-2.414A1 1 0 006.586 13H4">
</path>
</svg>
<p>No faces in database</p>
<p class="text-sm text-gray-500 mt-2">Add people to start recognizing faces</p>
</div>
</div>
</div>
</div>
</div>
</main>
<!-- NOTE(review): @huggingface/transformers v3 is published as an ES module;
     loading it with a classic script tag (and worker.js via importScripts)
     likely does not expose a usable global — confirm, or switch to
     <script type="module"> with `import { pipeline } from ...`. -->
<script src="https://cdn.jsdelivr.net/npm/@huggingface/transformers@3.0.1/dist/transformers.min.js"></script>
<script src="app.js"></script>
</body>
</html>
=== app.js ===
// Global variables
let faceDetector = null; // face-detection pipeline; set after models load
let faceFeatureExtractor = null; // embedding pipeline; set after models load
let faceDatabase = []; // enrolled people: { id, name, features, photo }
let currentImage = null; // data URL of the image awaiting recognition
// Initialize application
// On page load: start loading the AI models, then restore any previously
// enrolled faces from localStorage.
document.addEventListener('DOMContentLoaded', async () => {
await loadModels();
loadDatabase();
});
// Load AI models using Web Worker
// Spawns worker.js, listens for progress/ready/error messages, and stores the
// loaded pipelines in the module-level globals.
// NOTE(review): pipeline instances contain functions and are NOT
// structured-cloneable, so the worker's 'modelsLoaded' postMessage is expected
// to throw DataCloneError in browsers. The usual pattern is to keep the models
// inside the worker and proxy inference requests over postMessage — confirm
// and restructure before shipping.
async function loadModels() {
const worker = new Worker('worker.js');
worker.onmessage = (event) => {
const { type, data } = event.data;
switch(type) {
case 'progress':
updateLoadingProgress(data.progress, data.message);
break;
case 'modelsLoaded':
faceDetector = data.faceDetector;
faceFeatureExtractor = data.faceFeatureExtractor;
// Hide the loading banner once both pipelines are available.
document.getElementById('loadingStatus').classList.add('hidden');
break;
case 'error':
showError(data.message);
break;
}
};
worker.postMessage({ type: 'loadModels' });
}
// Reflect model-loading progress (percent + status text) in the banner UI.
function updateLoadingProgress(progress, message) {
    const bar = document.getElementById('progressBar');
    const percentLabel = document.getElementById('progressText');
    const statusLabel = document.getElementById('loadingText');
    bar.style.width = `${progress}%`;
    percentLabel.textContent = `${progress}%`;
    statusLabel.textContent = message;
}
// Toggle between the 'recognize' and 'manage' tabs: restyle the buttons and
// show exactly one panel.
function switchTab(tab) {
    // Reset every tab button to the inactive look and hide all panels.
    document.querySelectorAll('.tab-btn').forEach((btn) => {
        btn.classList.remove('bg-blue-600', 'text-white');
        btn.classList.add('text-gray-300', 'hover:bg-gray-700');
    });
    document.querySelectorAll('.tab-panel').forEach((panel) => {
        panel.classList.add('hidden');
    });
    // Activate the requested tab and reveal its panel.
    const isRecognize = tab === 'recognize';
    const activeButton = document.getElementById(isRecognize ? 'recognizeTab' : 'manageTab');
    const activePanel = document.getElementById(isRecognize ? 'recognizePanel' : 'managePanel');
    activeButton.classList.add('bg-blue-600', 'text-white');
    activeButton.classList.remove('text-gray-300', 'hover:bg-gray-700');
    activePanel.classList.remove('hidden');
}
// Preview a file chosen (or dropped) for recognition and remember its data
// URL in `currentImage` for the later recognizeFaces() call.
function handleImageUpload(event) {
    const file = event.target.files[0];
    if (!file || !file.type.startsWith('image/')) {
        return;
    }
    const reader = new FileReader();
    reader.onload = (loadEvent) => {
        currentImage = loadEvent.target.result;
        document.getElementById('previewImg').src = currentImage;
        document.getElementById('imagePreview').classList.remove('hidden');
    };
    reader.readAsDataURL(file);
}
// Preview the enrollment photo chosen in the "Add New Person" form.
function handlePersonPhoto(event) {
    const file = event.target.files[0];
    if (!file || !file.type.startsWith('image/')) {
        return;
    }
    const reader = new FileReader();
    reader.onload = (loadEvent) => {
        const preview = document.getElementById('personPhotoPreview');
        preview.src = loadEvent.target.result;
        preview.classList.remove('hidden');
    };
    reader.readAsDataURL(file);
}
// Recognize faces in the uploaded image: detect faces, extract an embedding
// for each, match against the enrolled database, and render the results.
// Shows a spinner while processing; surfaces failures via showError().
async function recognizeFaces() {
    if (!currentImage || !faceDetector || !faceFeatureExtractor) {
        showError('Please upload an image and wait for models to load');
        return;
    }
    const resultsContainer = document.getElementById('resultsContainer');
    // FIX: the original used plain single-quoted strings spanning multiple
    // lines, which is a JavaScript syntax error; template literals allow
    // embedded newlines.
    resultsContainer.innerHTML = `<div class="text-center py-4">
        <div class="animate-spin rounded-full h-8 w-8 border-b-2 border-blue-500 mx-auto"></div>
        <p class="mt-2 text-gray-400">Processing image...</p>
    </div>`;
    try {
        // Decode the stored data URL into an <img> we can draw from.
        const img = new Image();
        img.src = currentImage;
        await new Promise((resolve, reject) => {
            img.onload = resolve;
            img.onerror = () => reject(new Error('Failed to decode uploaded image'));
        });
        // Detect faces
        const detections = await faceDetector(img);
        if (detections.length === 0) {
            resultsContainer.innerHTML = `<div class="text-center py-8 text-gray-400">
                <p>No faces detected in the image</p>
            </div>`;
            return;
        }
        // Extract features per face and match each against the database.
        const results = [];
        for (const detection of detections) {
            const face = await extractFaceFeatures(img, detection);
            const matches = await matchFace(face);
            results.push({
                ...detection,
                match: matches.length > 0 ? matches[0] : null,
                confidence: matches.length > 0 ? matches[0].similarity : 0
            });
        }
        displayResults(results);
    } catch (error) {
        console.error('Error recognizing faces:', error);
        showError('Failed to recognize faces. Please try again.');
    }
}
// Crop the detected face region out of `image` and run it through the
// feature-extraction pipeline, returning the resulting embedding.
// NOTE(review): transformers.js detection pipelines report the bounding box
// as an object {xmin, ymin, xmax, ymax}; the original code assumed a flat
// [x1, y1, x2, y2] array. Both shapes are accepted here — confirm against the
// actual detector output.
async function extractFaceFeatures(image, detection) {
    const box = detection.box;
    const x1 = Array.isArray(box) ? box[0] : box.xmin;
    const y1 = Array.isArray(box) ? box[1] : box.ymin;
    const x2 = Array.isArray(box) ? box[2] : box.xmax;
    const y2 = Array.isArray(box) ? box[3] : box.ymax;
    // Create canvas to crop the face; clamp to at least 1px so a degenerate
    // box cannot produce a zero-size canvas (drawImage would throw).
    const canvas = document.createElement('canvas');
    const ctx = canvas.getContext('2d');
    canvas.width = Math.max(1, Math.round(x2 - x1));
    canvas.height = Math.max(1, Math.round(y2 - y1));
    // Draw the cropped face at the canvas origin.
    ctx.drawImage(image, x1, y1, canvas.width, canvas.height, 0, 0, canvas.width, canvas.height);
    // Extract features using the feature extractor.
    const features = await faceFeatureExtractor(canvas.toDataURL());
    return features;
}
// Compare a face embedding against every enrolled person and return the
// candidates whose cosine similarity clears the matching threshold,
// best match first.
async function matchFace(faceFeatures) {
    if (!faceDatabase || faceDatabase.length === 0) {
        return [];
    }
    const candidates = [];
    for (const person of faceDatabase) {
        const score = cosineSimilarity(faceFeatures, person.features);
        // 0.6 is the minimum similarity to count as the same person.
        if (score > 0.6) {
            candidates.push({ name: person.name, similarity: score });
        }
    }
    // Highest similarity first.
    candidates.sort((left, right) => right.similarity - left.similarity);
    return candidates;
}
// Calculate cosine similarity between two feature vectors.
// Accepts plain arrays, typed arrays, or Tensor-like objects that expose
// their values via a `.data` property (as transformers.js tensors do —
// TODO confirm against the feature extractor's actual return type).
// Returns 0 for missing, mismatched-length, or zero-magnitude inputs.
// (The original source had this function and displayResults mangled onto
// shared lines; both are restored to conventional formatting here.)
function cosineSimilarity(vecA, vecB) {
    // Unwrap Tensor-like containers to their underlying flat array.
    const a = (vecA && vecA.data !== undefined) ? vecA.data : vecA;
    const b = (vecB && vecB.data !== undefined) ? vecB.data : vecB;
    if (!a || !b || a.length !== b.length) return 0;
    let dotProduct = 0;
    let normA = 0;
    let normB = 0;
    for (let i = 0; i < a.length; i++) {
        dotProduct += a[i] * b[i];
        normA += a[i] * a[i];
        normB += b[i] * b[i];
    }
    normA = Math.sqrt(normA);
    normB = Math.sqrt(normB);
    // A zero vector has no direction; define its similarity as 0.
    if (normA === 0 || normB === 0) return 0;
    return dotProduct / (normA * normB);
}

// Display recognition results as one card per detected face.
function displayResults(results) {
    const resultsContainer = document.getElementById('resultsContainer');
    if (results.length === 0) {
        resultsContainer.innerHTML = '<div class="text-center py-8 text-gray-400"><p>No faces detected</p></div>';
        return;
    }
    // Escape user-entered names before interpolating into innerHTML (XSS).
    const escapeHtml = (s) => String(s).replace(/[&<>"']/g, (c) => ({
        '&': '&amp;', '<': '&lt;', '>': '&gt;', '"': '&quot;', "'": '&#39;'
    }[c]));
    let html = '';
    results.forEach((result, index) => {
        const confidence = Math.round(result.confidence * 100);
        const statusColor = result.match ? 'text-green-400' : 'text-yellow-400';
        const statusText = result.match ? `Recognized as ${escapeHtml(result.match.name)}` : 'Unknown person';
        html += `
            <div class="bg-gray-700/50 rounded-lg p-4 border border-gray-600">
                <div class="flex items-center justify-between">
                    <div class="flex items-center space-x-3">
                        <div class="bg-gray-600 rounded-full p-2">
                            <svg class="w-6 h-6 text-gray-300" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                                <path stroke-linecap="round" stroke-linejoin="round" stroke-width="2"
                                    d="M16 7a4 4 0 11-8 0 4 4 0 018 0zM12 14a7 7 0 00-7 7h14a7 7 0 00-7-7z"></path>
                            </svg>
                        </div>
                        <div>
                            <p class="font-medium ${statusColor}">${statusText}</p>
                            <p class="text-sm text-gray-400">Confidence: ${confidence}%</p>
                        </div>
                    </div>
                    <div class="text-sm text-gray-500">
                        Face ${index + 1}
                    </div>
                </div>
            </div>
        `;
    });
    resultsContainer.innerHTML = html;
}
// Add new person to database
// Validates the form, verifies the photo contains a detectable face, extracts
// an embedding, and persists { id, name, features, photo } to localStorage.
// NOTE(review): `features` is whatever the pipeline returns (likely a
// Tensor-like object); JSON.stringify in saveDatabase() may not round-trip it
// into something cosineSimilarity can compare after reload — confirm and
// normalize to a plain number array before storing.
async function addPerson() {
const name = document.getElementById('personName').value.trim();
const photoPreview = document.getElementById('personPhotoPreview');
if (!name) {
showError('Please enter a name');
return;
}
// A hidden preview means no photo was ever selected.
if (photoPreview.classList.contains('hidden')) {
showError('Please select a photo');
return;
}
if (!faceFeatureExtractor) {
showError('Feature extractor not loaded');
return;
}
try {
// Extract features from the photo
const img = new Image();
img.src = photoPreview.src;
await new Promise(resolve => img.onload = resolve);
// Detect face first
const detections = await faceDetector(img);
if (detections.length === 0) {
showError('No face detected in the photo');
return;
}
// Extract features (only the first detected face is enrolled)
const features = await extractFaceFeatures(img, detections[0]);
// Add to database
const person = {
id: Date.now(),
name: name,
features: features,
photo: photoPreview.src
};
faceDatabase.push(person);
saveDatabase();
updateDatabaseList();
// Clear form
document.getElementById('personName').value = '';
photoPreview.classList.add('hidden');
document.getElementById('personPhoto').value = '';
showSuccess(`${name} added to database successfully!`);
} catch (error) {
console.error('Error adding person:', error);
showError('Failed to add person. Please try again.');
}
}
// Load the face database from localStorage.
// Parses defensively: corrupt or hand-edited stored data must not crash
// application startup (the original let JSON.parse throw).
function loadDatabase() {
    const saved = localStorage.getItem('faceDatabase');
    if (saved) {
        try {
            faceDatabase = JSON.parse(saved);
        } catch (error) {
            console.error('Ignoring corrupt face database in localStorage:', error);
            faceDatabase = [];
        }
        updateDatabaseList();
    }
}
// Persist the face database to localStorage.
// Entries embed full data-URL photos, so the ~5MB localStorage quota can be
// exceeded quickly; setItem then throws QuotaExceededError. Surface that to
// the user instead of letting it propagate out of callers.
function saveDatabase() {
    try {
        localStorage.setItem('faceDatabase', JSON.stringify(faceDatabase));
    } catch (error) {
        console.error('Failed to save face database:', error);
        showError('Could not save database (storage quota exceeded?)');
    }
}
// Render the enrolled-people list, or an empty-state placeholder.
// FIX: user-entered names were interpolated into innerHTML unescaped — a
// stored XSS vector; they are HTML-escaped here.
function updateDatabaseList() {
    const listContainer = document.getElementById('databaseList');
    if (faceDatabase.length === 0) {
        listContainer.innerHTML = `
            <div class="text-center text-gray-400 py-8">
                <svg class="w-16 h-16 mx-auto mb-4 text-gray-600" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                    <path stroke-linecap="round" stroke-linejoin="round" stroke-width="2"
                        d="M20 13V6a2 2 0 00-2-2H6a2 2 0 00-2 2v7m16 0v5a2 2 0 01-2 2H6a2 2 0 01-2-2v-5m16 0h-2.586a1 1 0 00-.707.293l-2.414 2.414a1 1 0 01-.707.293h-3.172a1 1 0 01-.707-.293l-2.414-2.414A1 1 0 006.586 13H4">
                    </path>
                </svg>
                <p>No faces in database</p>
                <p class="text-sm text-gray-500 mt-2">Add people to start recognizing faces</p>
            </div>
        `;
        return;
    }
    // Escape untrusted text before interpolating into innerHTML.
    const escapeHtml = (s) => String(s).replace(/[&<>"']/g, (c) => ({
        '&': '&amp;', '<': '&lt;', '>': '&gt;', '"': '&quot;', "'": '&#39;'
    }[c]));
    let html = '';
    faceDatabase.forEach(person => {
        const safeName = escapeHtml(person.name);
        html += `
            <div class="bg-gray-700/50 rounded-lg p-3 border border-gray-600 flex items-center justify-between">
                <div class="flex items-center space-x-3">
                    <img src="${person.photo}" alt="${safeName}" class="w-12 h-12 rounded-full object-cover">
                    <div>
                        <p class="font-medium">${safeName}</p>
                        <p class="text-sm text-gray-400">ID: ${person.id}</p>
                    </div>
                </div>
                <button onclick="removePerson(${person.id})" class="text-red-400 hover:text-red-300 transition-colors">
                    <svg class="w-5 h-5" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                        <path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M19 7l-.867 12.142A2 2 0 0116.138 21H7.862a2 2 0 01-1.995-1.858L5 7m5 4v6m4-6v6m1-10V4a1 1 0 00-1-1h-4a1 1 0 00-1 1v3M4 7h16"></path>
                    </svg>
                </button>
            </div>
        `;
    });
    listContainer.innerHTML = html;
}
// Remove a person from the database after user confirmation, then persist
// and refresh the list.
function removePerson(id) {
    const confirmed = confirm('Are you sure you want to remove this person from the database?');
    if (!confirmed) {
        return;
    }
    faceDatabase = faceDatabase.filter((entry) => entry.id !== id);
    saveDatabase();
    updateDatabaseList();
    showSuccess('Person removed from database');
}
// Show a transient error toast (auto-dismisses after 3 seconds).
// FIX: the message was interpolated into innerHTML; it is now assigned via
// textContent so untrusted text cannot inject markup.
function showError(message) {
    const toast = document.createElement('div');
    toast.className = 'fixed top-4 right-4 bg-red-600 text-white px-6 py-3 rounded-lg shadow-lg z-50 animate-pulse';
    toast.innerHTML = `
        <div class="flex items-center space-x-2">
            <svg class="w-5 h-5" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                <path stroke-linecap="round" stroke-linejoin="round" stroke-width="2"
                    d="M12 8v4m0 4h.01M21 12a9 9 0 11-18 0 9 9 0 0118 0z"></path>
            </svg>
            <span></span>
        </div>
    `;
    toast.querySelector('span').textContent = message;
    document.body.appendChild(toast);
    setTimeout(() => toast.remove(), 3000);
}
// Show a transient success toast (auto-dismisses after 3 seconds).
// FIX: success messages embed the user-entered name; assigning via
// textContent instead of innerHTML interpolation closes an XSS vector.
function showSuccess(message) {
    const toast = document.createElement('div');
    toast.className = 'fixed top-4 right-4 bg-green-600 text-white px-6 py-3 rounded-lg shadow-lg z-50 animate-pulse';
    toast.innerHTML = `
        <div class="flex items-center space-x-2">
            <svg class="w-5 h-5" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                <path stroke-linecap="round" stroke-linejoin="round" stroke-width="2"
                    d="M9 12l2 2 4-4m6 2a9 9 0 11-18 0 9 9 0 0118 0z"></path>
            </svg>
            <span></span>
        </div>
    `;
    toast.querySelector('span').textContent = message;
    document.body.appendChild(toast);
    setTimeout(() => toast.remove(), 3000);
}
// Wire up drag-and-drop on the first dashed upload zone (the recognition
// panel's). Drops are routed through the same handler as file-picker input.
document.addEventListener('DOMContentLoaded', () => {
    const dropZone = document.querySelector('.border-dashed');
    const stopEvent = (e) => {
        e.preventDefault();
        e.stopPropagation();
    };
    // Suppress the browser's default open-file behaviour for all drag events.
    for (const eventName of ['dragenter', 'dragover', 'dragleave', 'drop']) {
        dropZone.addEventListener(eventName, stopEvent, false);
    }
    // Highlight the zone while a drag hovers over it.
    const highlight = () => dropZone.classList.add('border-blue-500');
    const unhighlight = () => dropZone.classList.remove('border-blue-500');
    dropZone.addEventListener('dragenter', highlight);
    dropZone.addEventListener('dragover', highlight);
    dropZone.addEventListener('dragleave', unhighlight);
    dropZone.addEventListener('drop', unhighlight);
    // Feed dropped files through the regular upload path.
    dropZone.addEventListener('drop', (e) => {
        const files = e.dataTransfer.files;
        if (files.length > 0) {
            document.getElementById('imageInput').files = files;
            handleImageUpload({ target: { files: files } });
        }
    });
});
=== worker.js ===
// Web Worker for loading models in the background
// NOTE(review): @huggingface/transformers v3 ships as an ES module;
// importScripts() of this bundle — and the bare `pipeline` global used
// below — likely does not work. Confirm, or create the worker with
// { type: 'module' } and use `import { pipeline } from ...` instead.
self.importScripts('https://cdn.jsdelivr.net/npm/@huggingface/transformers@3.0.1/dist/transformers.min.js');
// Load both pipelines inside the worker, reporting progress to the main
// thread: detection model maps to 20-50% of the bar, recognition to 50-100%.
// FIX: transformers.js progress_callback reports `progress` as a 0-100
// percentage under status 'progress'; the original checked status
// 'downloading' and multiplied by 100 again, producing bogus values.
// NOTE(review): pipeline instances contain functions and are not
// structured-cloneable — the 'modelsLoaded' postMessage below is expected to
// throw DataCloneError. The models should stay in this worker with inference
// proxied over messages; restructure before shipping.
// NOTE(review): verify that 'face-detection' is a supported transformers.js
// task and that both model ids actually exist on the Hub.
async function loadModels() {
    try {
        // Forward download progress, rescaled into [base, base + span] percent.
        const forwardProgress = (base, span, label) => (data) => {
            if (data.status === 'progress' || data.status === 'downloading') {
                const percent = Math.round(data.progress ?? 0);
                self.postMessage({
                    type: 'progress',
                    data: {
                        progress: base + Math.floor(percent * span),
                        message: `${label}: ${percent}%`
                    }
                });
            }
        };
        // Load face detection model
        self.postMessage({ type: 'progress', data: { progress: 20, message: 'Loading face detection model...' } });
        const faceDetector = await pipeline('face-detection', 'Xenova/retinaface-resnet50', {
            progress_callback: forwardProgress(20, 0.3, 'Downloading face detection model')
        });
        // Load face feature extraction model
        self.postMessage({ type: 'progress', data: { progress: 50, message: 'Loading face recognition model...' } });
        const faceFeatureExtractor = await pipeline('image-feature-extraction', 'Xenova/face-recognition-model', {
            progress_callback: forwardProgress(50, 0.5, 'Downloading face recognition model')
        });
        self.postMessage({ type: 'progress', data: { progress: 100, message: 'Models loaded successfully!' } });
        self.postMessage({
            type: 'modelsLoaded',
            data: {
                faceDetector: faceDetector,
                faceFeatureExtractor: faceFeatureExtractor
            }
        });
    } catch (error) {
        console.error('Error loading models:', error);
        self.postMessage({ type: 'error', data: { message: 'Failed to load models: ' + error.message } });
    }
}
// Dispatch commands sent from the main thread.
self.onmessage = (event) => {
    const { type } = event.data;
    if (type === 'loadModels') {
        loadModels();
    }
};