samiesam commited on
Commit
dc5cf56
·
verified ·
1 Parent(s): 48ad4ab

Add 3 files

Browse files
Files changed (3) hide show
  1. README.md +7 -5
  2. index.html +763 -19
  3. prompts.txt +0 -0
README.md CHANGED
@@ -1,10 +1,12 @@
1
  ---
2
- title: License Process Detection
3
- emoji: 📉
4
- colorFrom: red
5
- colorTo: green
6
  sdk: static
7
  pinned: false
 
 
8
  ---
9
 
10
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: license-process-detection
3
+ emoji: 🐳
4
+ colorFrom: blue
5
+ colorTo: pink
6
  sdk: static
7
  pinned: false
8
+ tags:
9
+ - deepsite
10
  ---
11
 
12
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
index.html CHANGED
@@ -1,19 +1,763 @@
1
- <!doctype html>
2
- <html>
3
- <head>
4
- <meta charset="utf-8" />
5
- <meta name="viewport" content="width=device-width" />
6
- <title>My static Space</title>
7
- <link rel="stylesheet" href="style.css" />
8
- </head>
9
- <body>
10
- <div class="card">
11
- <h1>Welcome to your static Space!</h1>
12
- <p>You can modify this app directly by editing <i>index.html</i> in the Files and versions tab.</p>
13
- <p>
14
- Also don't forget to check the
15
- <a href="https://huggingface.co/docs/hub/spaces" target="_blank">Spaces documentation</a>.
16
- </p>
17
- </div>
18
- </body>
19
- </html>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!DOCTYPE html>
2
+ <html lang="en">
3
+ <head>
4
+ <meta charset="UTF-8">
5
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
6
+ <title>AI Vehicle Tracking System</title>
7
+ <script src="https://cdn.tailwindcss.com"></script>
8
+ <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs@3.18.0/dist/tf.min.js"></script>
9
+ <script src="https://cdn.jsdelivr.net/npm/@tensorflow-models/coco-ssd@2.2.2/dist/coco-ssd.min.js"></script>
10
+ <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.4.0/css/all.min.css">
11
+ <style>
12
+ .video-container {
13
+ position: relative;
14
+ width: 100%;
15
+ height: 0;
16
+ padding-bottom: 56.25%;
17
+ background-color: #1a202c;
18
+ border-radius: 0.5rem;
19
+ overflow: hidden;
20
+ }
21
+ .video-container video, .video-container canvas {
22
+ position: absolute;
23
+ top: 0;
24
+ left: 0;
25
+ width: 100%;
26
+ height: 100%;
27
+ object-fit: cover;
28
+ }
29
+ .plate-highlight {
30
+ position: absolute;
31
+ border: 2px solid #3B82F6;
32
+ background-color: rgba(59, 130, 246, 0.2);
33
+ z-index: 10;
34
+ }
35
+ .processing-overlay {
36
+ position: absolute;
37
+ top: 0;
38
+ left: 0;
39
+ width: 100%;
40
+ height: 100%;
41
+ background-color: rgba(0, 0, 0, 0.7);
42
+ display: flex;
43
+ justify-content: center;
44
+ align-items: center;
45
+ z-index: 20;
46
+ color: white;
47
+ font-size: 1.5rem;
48
+ border-radius: 0.5rem;
49
+ }
50
+ .pulse {
51
+ animation: pulse 2s infinite;
52
+ }
53
+ @keyframes pulse {
54
+ 0% { opacity: 0.6; }
55
+ 50% { opacity: 1; }
56
+ 100% { opacity: 0.6; }
57
+ }
58
+ .result-card {
59
+ transition: all 0.3s ease;
60
+ }
61
+ .result-card:hover {
62
+ transform: translateY(-5px);
63
+ box-shadow: 0 10px 25px -5px rgba(0, 0, 0, 0.1);
64
+ }
65
+ </style>
66
+ </head>
67
+ <body class="bg-gray-100 min-h-screen">
68
+ <div class="container mx-auto px-4 py-8">
69
+ <!-- Header -->
70
+ <header class="mb-8 text-center">
71
+ <h1 class="text-4xl font-bold text-blue-600 mb-2">
72
+ <i class="fas fa-car-alt mr-2"></i>AI Vehicle Tracking System
73
+ </h1>
74
+ <p class="text-gray-600">Real-time car tracking with license plate recognition powered by Ultralytics and Claude AI</p>
75
+ </header>
76
+
77
+ <!-- Main Content -->
78
+ <div class="grid grid-cols-1 lg:grid-cols-3 gap-8">
79
+ <!-- Video Feed Section -->
80
+ <div class="lg:col-span-2">
81
+ <div class="bg-white rounded-xl shadow-lg p-6">
82
+ <div class="flex justify-between items-center mb-4">
83
+ <h2 class="text-xl font-semibold text-gray-800">
84
+ <i class="fas fa-video mr-2 text-blue-500"></i>Live Camera Feed
85
+ </h2>
86
+ <div class="flex space-x-2">
87
+ <button id="startBtn" class="bg-green-500 hover:bg-green-600 text-white px-4 py-2 rounded-lg flex items-center">
88
+ <i class="fas fa-play mr-2"></i> Start
89
+ </button>
90
+ <button id="stopBtn" disabled class="bg-red-500 hover:bg-red-600 text-white px-4 py-2 rounded-lg flex items-center">
91
+ <i class="fas fa-stop mr-2"></i> Stop
92
+ </button>
93
+ <button id="uploadBtn" class="bg-blue-500 hover:bg-blue-600 text-white px-4 py-2 rounded-lg flex items-center">
94
+ <i class="fas fa-upload mr-2"></i> Upload
95
+ </button>
96
+ <input type="file" id="fileInput" accept="image/*,video/*" class="hidden">
97
+ </div>
98
+ </div>
99
+
100
+ <div class="video-container" id="videoContainer">
101
+ <video id="videoFeed" autoplay muted playsinline class="hidden"></video>
102
+ <canvas id="canvasOutput"></canvas>
103
+ <div id="processingOverlay" class="processing-overlay hidden">
104
+ <div class="text-center">
105
+ <i class="fas fa-cog fa-spin text-4xl mb-2 text-blue-400"></i>
106
+ <p class="pulse">Processing frame...</p>
107
+ </div>
108
+ </div>
109
+ </div>
110
+
111
+ <div class="mt-4 grid grid-cols-2 gap-4">
112
+ <div class="bg-gray-50 p-4 rounded-lg">
113
+ <h3 class="font-medium text-gray-700 mb-2">
114
+ <i class="fas fa-chart-line mr-2 text-blue-500"></i>Detection Stats
115
+ </h3>
116
+ <div class="space-y-2">
117
+ <div class="flex justify-between">
118
+ <span class="text-gray-600">Vehicles Detected:</span>
119
+ <span id="vehicleCount" class="font-semibold">0</span>
120
+ </div>
121
+ <div class="flex justify-between">
122
+ <span class="text-gray-600">License Plates:</span>
123
+ <span id="plateCount" class="font-semibold">0</span>
124
+ </div>
125
+ <div class="flex justify-between">
126
+ <span class="text-gray-600">Processing Time:</span>
127
+ <span id="processingTime" class="font-semibold">0ms</span>
128
+ </div>
129
+ </div>
130
+ </div>
131
+ <div class="bg-gray-50 p-4 rounded-lg">
132
+ <h3 class="font-medium text-gray-700 mb-2">
133
+ <i class="fas fa-sliders-h mr-2 text-blue-500"></i>Settings
134
+ </h3>
135
+ <div class="space-y-3">
136
+ <div>
137
+ <label class="block text-sm text-gray-600 mb-1">Confidence Threshold</label>
138
+ <input type="range" id="confidenceSlider" min="0.1" max="0.9" step="0.1" value="0.5" class="w-full">
139
+ <div class="flex justify-between text-xs text-gray-500">
140
+ <span>10%</span>
141
+ <span id="confidenceValue">50%</span>
142
+ <span>90%</span>
143
+ </div>
144
+ </div>
145
+ <div class="flex items-center">
146
+ <input type="checkbox" id="enableOCR" checked class="mr-2">
147
+ <label for="enableOCR" class="text-sm text-gray-600">Enable OCR Processing</label>
148
+ </div>
149
+ </div>
150
+ </div>
151
+ </div>
152
+ </div>
153
+ </div>
154
+
155
+ <!-- Results Section -->
156
+ <div class="lg:col-span-1">
157
+ <div class="bg-white rounded-xl shadow-lg p-6 h-full">
158
+ <div class="flex justify-between items-center mb-4">
159
+ <h2 class="text-xl font-semibold text-gray-800">
160
+ <i class="fas fa-clipboard-list mr-2 text-blue-500"></i>Detection Results
161
+ </h2>
162
+ <button id="clearResults" class="text-gray-500 hover:text-gray-700">
163
+ <i class="fas fa-trash-alt"></i>
164
+ </button>
165
+ </div>
166
+
167
+ <div id="resultsContainer" class="space-y-4 max-h-[calc(100vh-250px)] overflow-y-auto pr-2">
168
+ <div class="text-center py-10 text-gray-400" id="emptyResults">
169
+ <i class="fas fa-car-side text-4xl mb-3"></i>
170
+ <p>No detections yet. Start the camera or upload media to begin.</p>
171
+ </div>
172
+ </div>
173
+ </div>
174
+ </div>
175
+ </div>
176
+
177
+ <!-- API Status -->
178
+ <div class="mt-8 bg-white rounded-xl shadow-lg p-6">
179
+ <h2 class="text-xl font-semibold text-gray-800 mb-4">
180
+ <i class="fas fa-plug mr-2 text-blue-500"></i>API Status
181
+ </h2>
182
+ <div class="grid grid-cols-1 md:grid-cols-3 gap-4">
183
+ <div class="bg-gray-50 p-4 rounded-lg">
184
+ <div class="flex items-center mb-2">
185
+ <div id="ultralyticsStatus" class="w-3 h-3 rounded-full bg-gray-400 mr-2"></div>
186
+ <span class="font-medium">Ultralytics Model</span>
187
+ </div>
188
+ <p class="text-sm text-gray-600">Vehicle detection and license plate extraction</p>
189
+ </div>
190
+ <div class="bg-gray-50 p-4 rounded-lg">
191
+ <div class="flex items-center mb-2">
192
+ <div id="claudeStatus" class="w-3 h-3 rounded-full bg-gray-400 mr-2"></div>
193
+ <span class="font-medium">Claude API</span>
194
+ </div>
195
+ <p class="text-sm text-gray-600">OCR processing for license plates</p>
196
+ </div>
197
+ <div class="bg-gray-50 p-4 rounded-lg">
198
+ <div class="flex items-center mb-2">
199
+ <div id="systemStatus" class="w-3 h-3 rounded-full bg-gray-400 mr-2"></div>
200
+ <span class="font-medium">System Status</span>
201
+ </div>
202
+ <p class="text-sm text-gray-600" id="systemStatusText">Initializing...</p>
203
+ </div>
204
+ </div>
205
+ </div>
206
+ </div>
207
+
208
+ <!-- Result Card Template -->
209
+ <template id="resultCardTemplate">
210
+ <div class="result-card bg-gray-50 rounded-lg p-4 border border-gray-200">
211
+ <div class="flex justify-between items-start mb-2">
212
+ <div>
213
+ <span class="font-semibold text-blue-600 detection-type">Vehicle</span>
214
+ <span class="text-xs bg-blue-100 text-blue-800 px-2 py-1 rounded-full ml-2 confidence">85%</span>
215
+ </div>
216
+ <span class="text-xs text-gray-500 timestamp">12:34:56 PM</span>
217
+ </div>
218
+ <div class="flex mb-3">
219
+ <div class="w-16 h-16 bg-gray-200 rounded-md overflow-hidden mr-3 thumbnail-container">
220
+ <img src="" alt="Detection" class="w-full h-full object-cover thumbnail">
221
+ </div>
222
+ <div class="flex-1">
223
+ <div class="text-sm mb-1">
224
+ <span class="text-gray-600">Plate:</span>
225
+ <span class="font-medium ml-1 plate-number">Not detected</span>
226
+ </div>
227
+ <div class="text-sm">
228
+ <span class="text-gray-600">Make/Model:</span>
229
+ <span class="font-medium ml-1 vehicle-model">Unknown</span>
230
+ </div>
231
+ </div>
232
+ </div>
233
+ <div class="flex justify-end space-x-2">
234
+ <button class="text-xs bg-blue-50 text-blue-600 px-3 py-1 rounded hover:bg-blue-100 view-btn">
235
+ <i class="fas fa-search mr-1"></i> View
236
+ </button>
237
+ <button class="text-xs bg-gray-100 text-gray-600 px-3 py-1 rounded hover:bg-gray-200 export-btn">
238
+ <i class="fas fa-download mr-1"></i> Export
239
+ </button>
240
+ </div>
241
+ </div>
242
+ </template>
243
+
244
+ <!-- Modal for detailed view -->
245
+ <div id="detailModal" class="fixed inset-0 bg-black bg-opacity-50 flex items-center justify-center z-50 hidden">
246
+ <div class="bg-white rounded-lg w-full max-w-2xl max-h-[90vh] overflow-auto">
247
+ <div class="p-4 border-b flex justify-between items-center">
248
+ <h3 class="text-lg font-semibold">Detection Details</h3>
249
+ <button id="closeModal" class="text-gray-500 hover:text-gray-700">
250
+ <i class="fas fa-times"></i>
251
+ </button>
252
+ </div>
253
+ <div class="p-6">
254
+ <div class="grid grid-cols-1 md:grid-cols-2 gap-6">
255
+ <div>
256
+ <h4 class="font-medium text-gray-700 mb-2">Detection Image</h4>
257
+ <img id="modalImage" src="" alt="Detection" class="w-full rounded-lg border border-gray-200">
258
+ </div>
259
+ <div>
260
+ <h4 class="font-medium text-gray-700 mb-2">Details</h4>
261
+ <div class="space-y-3">
262
+ <div>
263
+ <label class="block text-sm text-gray-500">Detection Type</label>
264
+ <p id="modalType" class="font-medium">Vehicle</p>
265
+ </div>
266
+ <div>
267
+ <label class="block text-sm text-gray-500">Confidence</label>
268
+ <p id="modalConfidence" class="font-medium">85%</p>
269
+ </div>
270
+ <div>
271
+ <label class="block text-sm text-gray-500">License Plate</label>
272
+ <p id="modalPlate" class="font-medium">ABC123</p>
273
+ </div>
274
+ <div>
275
+ <label class="block text-sm text-gray-500">Vehicle Make/Model</label>
276
+ <p id="modalModel" class="font-medium">Toyota Camry</p>
277
+ </div>
278
+ <div>
279
+ <label class="block text-sm text-gray-500">Timestamp</label>
280
+ <p id="modalTimestamp" class="font-medium">12:34:56 PM</p>
281
+ </div>
282
+ </div>
283
+ </div>
284
+ </div>
285
+ <div class="mt-6 pt-4 border-t">
286
+ <h4 class="font-medium text-gray-700 mb-2">Raw Data</h4>
287
+ <pre id="modalRawData" class="bg-gray-50 p-3 rounded text-xs overflow-x-auto"></pre>
288
+ </div>
289
+ </div>
290
+ <div class="p-4 border-t flex justify-end space-x-3">
291
+ <button class="px-4 py-2 border border-gray-300 rounded-lg hover:bg-gray-50">Export as JSON</button>
292
+ <button class="px-4 py-2 bg-blue-600 text-white rounded-lg hover:bg-blue-700">Save to Database</button>
293
+ </div>
294
+ </div>
295
+ </div>
296
+
297
+ <script>
298
+ // DOM Elements
299
+ const videoFeed = document.getElementById('videoFeed');
300
+ const canvasOutput = document.getElementById('canvasOutput');
301
+ const videoContainer = document.getElementById('videoContainer');
302
+ const startBtn = document.getElementById('startBtn');
303
+ const stopBtn = document.getElementById('stopBtn');
304
+ const uploadBtn = document.getElementById('uploadBtn');
305
+ const fileInput = document.getElementById('fileInput');
306
+ const processingOverlay = document.getElementById('processingOverlay');
307
+ const resultsContainer = document.getElementById('resultsContainer');
308
+ const emptyResults = document.getElementById('emptyResults');
309
+ const confidenceSlider = document.getElementById('confidenceSlider');
310
+ const confidenceValue = document.getElementById('confidenceValue');
311
+ const enableOCR = document.getElementById('enableOCR');
312
+ const clearResults = document.getElementById('clearResults');
313
+ const vehicleCount = document.getElementById('vehicleCount');
314
+ const plateCount = document.getElementById('plateCount');
315
+ const processingTime = document.getElementById('processingTime');
316
+ const ultralyticsStatus = document.getElementById('ultralyticsStatus');
317
+ const claudeStatus = document.getElementById('claudeStatus');
318
+ const systemStatus = document.getElementById('systemStatus');
319
+ const systemStatusText = document.getElementById('systemStatusText');
320
+ const detailModal = document.getElementById('detailModal');
321
+ const closeModal = document.getElementById('closeModal');
322
+ const modalImage = document.getElementById('modalImage');
323
+ const modalType = document.getElementById('modalType');
324
+ const modalConfidence = document.getElementById('modalConfidence');
325
+ const modalPlate = document.getElementById('modalPlate');
326
+ const modalModel = document.getElementById('modalModel');
327
+ const modalTimestamp = document.getElementById('modalTimestamp');
328
+ const modalRawData = document.getElementById('modalRawData');
329
+
330
+ // State variables
331
+ let stream = null;
332
+ let model = null;
333
+ let isProcessing = false;
334
+ let detectionHistory = [];
335
+ let currentDetections = [];
336
+ let confidenceThreshold = 0.5;
337
+ let ctx = canvasOutput.getContext('2d');
338
+ let animationId = null;
339
+ let currentMediaType = null; // 'camera' or 'file'
340
+
341
+ // Initialize
342
+ document.addEventListener('DOMContentLoaded', async () => {
343
+ // Set up canvas to match video container aspect ratio
344
+ resizeCanvas();
345
+
346
+ // Event listeners
347
+ startBtn.addEventListener('click', startCamera);
348
+ stopBtn.addEventListener('click', stopCamera);
349
+ uploadBtn.addEventListener('click', () => fileInput.click());
350
+ fileInput.addEventListener('change', handleFileUpload);
351
+ confidenceSlider.addEventListener('input', updateConfidenceThreshold);
352
+ clearResults.addEventListener('click', clearDetectionHistory);
353
+ closeModal.addEventListener('click', () => detailModal.classList.add('hidden'));
354
+
355
+ // Initialize models
356
+ await initializeModels();
357
+
358
+ // Update system status
359
+ updateSystemStatus();
360
+ });
361
+
362
+ // Resize canvas to match container
363
// Match the drawing canvas to the current size of its container.
function resizeCanvas() {
    const { clientWidth, clientHeight } = videoContainer;
    canvasOutput.width = clientWidth;
    canvasOutput.height = clientHeight;
}
369
+
370
+ // Initialize AI models
371
// Load the object-detection model and reflect progress in the status panel.
// COCO-SSD stands in for a real Ultralytics model in this demo.
async function initializeModels() {
    // Swap one set of Tailwind colour classes on a status dot for another.
    const swapColor = (el, removeList, add) => {
        el.classList.remove(...removeList);
        el.classList.add(add);
    };

    try {
        systemStatusText.textContent = "Loading Ultralytics model...";
        swapColor(ultralyticsStatus, ['bg-gray-400', 'bg-red-500'], 'bg-yellow-500');

        // In a real implementation an actual Ultralytics model would be
        // loaded here; COCO-SSD serves as the placeholder.
        model = await cocoSsd.load();

        swapColor(ultralyticsStatus, ['bg-yellow-500'], 'bg-green-500');
        systemStatusText.textContent = "Models loaded successfully";
        swapColor(systemStatus, ['bg-gray-400'], 'bg-green-500');

        // Pretend the Claude API handshake completes shortly afterwards.
        setTimeout(() => {
            swapColor(claudeStatus, ['bg-gray-400'], 'bg-green-500');
        }, 1500);
    } catch (error) {
        console.error("Error loading models:", error);
        swapColor(ultralyticsStatus, ['bg-yellow-500'], 'bg-red-500');
        systemStatusText.textContent = "Error loading models";
        swapColor(systemStatus, ['bg-gray-400'], 'bg-red-500');
    }
}
401
+
402
+ // Update system status UI
403
// Paint each status dot green when its backing condition currently holds.
function updateSystemStatus() {
    // A real implementation would poll actual service health here.
    const checks = [
        [ultralyticsStatus, model !== null],
        [claudeStatus, true], // Claude connection is simulated as connected
        [systemStatus, model !== null],
    ];

    for (const [element, ok] of checks) {
        if (!ok) continue;
        element.classList.remove('bg-gray-400', 'bg-red-500', 'bg-yellow-500');
        element.classList.add('bg-green-500');
    }
}
418
+
419
+ // Start camera feed
420
// Request the rear-facing camera and begin the detection loop.
// Alerts the user if permission is denied or no camera is available.
async function startCamera() {
    try {
        currentMediaType = 'camera';
        stream = await navigator.mediaDevices.getUserMedia({
            video: { width: 1280, height: 720, facingMode: 'environment' },
            audio: false
        });

        videoFeed.srcObject = stream;
        videoFeed.classList.remove('hidden');
        startBtn.disabled = true;
        stopBtn.disabled = false;
        uploadBtn.disabled = true;

        // Fix: wait for metadata before starting the frame loop. Starting
        // immediately drew empty frames, because the <video> has no decoded
        // data the instant the stream is attached.
        videoFeed.onloadedmetadata = () => processVideo();
    } catch (error) {
        console.error("Error accessing camera:", error);
        alert("Could not access the camera. Please ensure you've granted camera permissions.");
    }
}
441
+
442
+ // Stop camera feed
443
// Stop whatever media is playing (camera stream or uploaded video),
// halt the processing loop, and reset the UI to its idle state.
function stopCamera() {
    if (stream) {
        stream.getTracks().forEach(track => track.stop());
        stream = null;
    }

    if (animationId) {
        cancelAnimationFrame(animationId);
        animationId = null;
    }

    // Fix: this handler is also the Stop button for uploaded videos, but it
    // previously only hid the element — the hidden <video> kept playing and
    // kept its source attached. Pause it and detach both source kinds.
    videoFeed.pause();
    videoFeed.srcObject = null;
    videoFeed.removeAttribute('src');

    videoFeed.classList.add('hidden');
    startBtn.disabled = false;
    stopBtn.disabled = true;
    uploadBtn.disabled = false;
    isProcessing = false;
    // Fix: the overlay could be left visible if Stop was pressed mid-frame.
    processingOverlay.classList.add('hidden');
    currentMediaType = null;

    // Clear any leftover drawing.
    ctx.clearRect(0, 0, canvasOutput.width, canvasOutput.height);
}
463
+
464
+ // Handle file upload
465
// Route an uploaded image or video file to the appropriate processor.
function handleFileUpload(event) {
    const file = event.target.files[0];
    if (!file) return;

    const fileURL = URL.createObjectURL(file);

    if (file.type.startsWith('image/')) {
        currentMediaType = 'file';
        processImageFile(fileURL);
    } else if (file.type.startsWith('video/')) {
        currentMediaType = 'file';
        processVideoFile(fileURL);
    } else {
        // Fix: previously an unsupported type silently did nothing and the
        // object URL leaked. Release it and tell the user.
        URL.revokeObjectURL(fileURL);
        alert("Unsupported file type. Please choose an image or video.");
    }

    // Reset the input so the same file can be selected again later.
    event.target.value = '';
}
481
+
482
+ // Process image file
483
// Draw an uploaded image onto the canvas and run detection on it once.
function processImageFile(fileURL) {
    const img = new Image();
    img.onload = async () => {
        // Size the canvas to the image so detections map 1:1 to pixels.
        canvasOutput.width = img.width;
        canvasOutput.height = img.height;
        ctx.drawImage(img, 0, 0, img.width, img.height);

        // Fix: release the object URL once the bitmap is decoded — it was
        // never revoked before, leaking memory on every upload.
        URL.revokeObjectURL(fileURL);

        await detectObjects(canvasOutput);
    };
    img.onerror = () => {
        // Fix: release the URL even when decoding fails.
        URL.revokeObjectURL(fileURL);
        alert("Could not load the selected image.");
    };
    img.src = fileURL;
}
498
+
499
+ // Process video file
500
// Play an uploaded video through the hidden <video> element and run the
// detection loop over its frames.
function processVideoFile(fileURL) {
    videoFeed.src = fileURL;
    videoFeed.classList.remove('hidden');
    startBtn.disabled = true;
    stopBtn.disabled = false;
    uploadBtn.disabled = true;

    // Size the canvas to the video's native resolution once it is known,
    // then start the frame loop.
    videoFeed.onloadedmetadata = () => {
        canvasOutput.width = videoFeed.videoWidth;
        canvasOutput.height = videoFeed.videoHeight;
        processVideo();
    };

    videoFeed.play();
}
518
+
519
+ // Process video frames
520
// Frame loop: draw the current video frame, run detection, and reschedule
// while the source is still producing frames.
function processVideo() {
    if (!isProcessing) {
        isProcessing = true;
        processingOverlay.classList.remove('hidden');
    }

    // Draw the current video frame onto the canvas.
    ctx.drawImage(videoFeed, 0, 0, canvasOutput.width, canvasOutput.height);

    detectObjects(canvasOutput).then(() => {
        // Fix: the old condition (`stream || currentMediaType === 'file'`)
        // never became false for uploaded videos, so the loop kept
        // re-detecting the final frame forever after playback ended.
        const fileStillPlaying =
            currentMediaType === 'file' && !videoFeed.paused && !videoFeed.ended;

        if (stream || fileStillPlaying) {
            animationId = requestAnimationFrame(processVideo);
        } else {
            isProcessing = false;
            processingOverlay.classList.add('hidden');
        }
    });
}
539
+
540
+ // Detect objects in frame
541
// Run the detector on the given canvas, draw boxes/labels for vehicle
// classes above the confidence threshold, simulate plate OCR, and record
// the detections. Updates the stats panel and processing-time readout.
async function detectObjects(canvas) {
    if (!model) return;

    const startTime = performance.now();

    try {
        const predictions = await model.detect(canvas);

        // Keep only vehicle classes above the user-selected confidence.
        const relevantClasses = ['car', 'truck', 'bus', 'motorcycle'];
        const vehiclePredictions = predictions.filter(
            p => relevantClasses.includes(p.class) && p.score >= confidenceThreshold
        );

        // Clear previous frame's detections.
        currentDetections = [];

        for (const prediction of vehiclePredictions) {
            const { bbox, class: className, score } = prediction;
            const [x, y, width, height] = bbox;

            // Vehicle bounding box.
            ctx.strokeStyle = '#3B82F6';
            ctx.lineWidth = 2;
            ctx.strokeRect(x, y, width, height);

            // Fix: the font must be set BEFORE measureText — previously the
            // label background was sized with the canvas default font, so
            // the text could overflow or underfill it.
            const label = `${className} ${Math.round(score * 100)}%`;
            ctx.font = '14px Arial';
            const textWidth = ctx.measureText(label).width;
            ctx.fillStyle = '#3B82F6';
            ctx.fillRect(x, y - 20, textWidth + 10, 20);
            ctx.fillStyle = 'white';
            ctx.fillText(label, x + 5, y - 5);

            // Simulated license-plate detection (a real app would use a
            // dedicated plate-detection model here).
            const hasPlate = Math.random() > 0.3; // 70% chance of a plate
            let plateText = null;
            let plateBbox = null;

            if (hasPlate && enableOCR.checked) {
                // Assume the plate sits bottom-center of the vehicle box.
                const plateWidth = width * 0.6;
                const plateHeight = height * 0.15;
                const plateX = x + (width - plateWidth) / 2;
                const plateY = y + height - plateHeight * 0.8;

                plateBbox = [plateX, plateY, plateWidth, plateHeight];

                // Plate bounding box.
                ctx.strokeStyle = '#10B981';
                ctx.lineWidth = 2;
                ctx.strokeRect(plateX, plateY, plateWidth, plateHeight);

                // Simulated OCR (a real app would call the Claude API).
                plateText = simulateClaudeOCR(canvas, plateBbox);

                ctx.fillStyle = '#10B981';
                ctx.font = '12px Arial';
                ctx.fillText(plateText || 'Processing...', plateX + 5, plateY + 15);
            }

            // Record the detection for the results list / history.
            currentDetections.push({
                type: className,
                confidence: score,
                bbox: [x, y, width, height],
                plate: plateText ? { text: plateText, bbox: plateBbox } : null,
                timestamp: new Date().toLocaleTimeString(),
                imageData: canvas.toDataURL('image/jpeg', 0.7)
            });
        }

        // Update stats, history and timing readouts.
        updateDetectionStats(
            vehiclePredictions.length,
            currentDetections.filter(d => d.plate).length
        );

        if (currentDetections.length > 0) {
            addToDetectionHistory(currentDetections);
        }

        processingTime.textContent = `${Math.round(performance.now() - startTime)}ms`;

    } catch (error) {
        console.error("Detection error:", error);
    } finally {
        processingOverlay.classList.add('hidden');
        isProcessing = false;
    }
}
642
+
643
+ // Simulate Claude API OCR processing
644
// Stand-in for Claude-based OCR. A real implementation would crop the
// bbox region from the canvas and send it to the Claude API; this demo
// returns a random "AAA999"-style plate, or null roughly 10% of the time
// to mimic an OCR failure.
function simulateClaudeOCR(canvas, bbox) {
    const pickFrom = (alphabet) =>
        alphabet.charAt(Math.floor(Math.random() * alphabet.length));

    const letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ';
    const numbers = '0123456789';

    // Format: 3 letters followed by 3 digits (e.g. ABC123).
    let plateText = '';
    for (let i = 0; i < 3; i++) plateText += pickFrom(letters);
    for (let i = 0; i < 3; i++) plateText += pickFrom(numbers);

    // ~10% simulated failure rate.
    return Math.random() > 0.1 ? plateText : null;
}
667
+
668
+ // Update confidence threshold
669
// Sync the threshold with the slider and show it as a percentage.
function updateConfidenceThreshold() {
    confidenceThreshold = Number.parseFloat(confidenceSlider.value);
    confidenceValue.textContent = `${Math.round(confidenceThreshold * 100)}%`;
}
673
+
674
+ // Update detection stats
675
// Reflect the latest per-frame counts in the stats panel.
function updateDetectionStats(vehicles, plates) {
    vehicleCount.textContent = `${vehicles}`;
    plateCount.textContent = `${plates}`;
}
679
+
680
+ // Add detections to history
681
// Render newly found detections as cards at the top of the results list
// and prepend them to the in-memory history (newest first).
function addToDetectionHistory(detections) {
    emptyResults.classList.add('hidden');

    const template = document.getElementById('resultCardTemplate');
    const makes = ['Toyota', 'Honda', 'Ford', 'Chevrolet', 'BMW', 'Mercedes', 'Tesla'];
    const vehicleModels = ['Camry', 'Civic', 'F-150', 'Silverado', '3 Series', 'C-Class', 'Model 3'];

    for (const detection of detections) {
        detectionHistory.unshift(detection);

        // Fill in a fresh copy of the card template.
        const card = template.content.cloneNode(true).querySelector('.result-card');
        card.querySelector('.detection-type').textContent = detection.type;
        card.querySelector('.confidence').textContent = `${Math.round(detection.confidence * 100)}%`;
        card.querySelector('.timestamp').textContent = detection.timestamp;
        card.querySelector('.thumbnail').src = detection.imageData;
        card.querySelector('.plate-number').textContent =
            detection.plate ? detection.plate.text : 'Not detected';

        // Make/model recognition is simulated with random picks.
        const makeName = makes[Math.floor(Math.random() * makes.length)];
        const modelName = vehicleModels[Math.floor(Math.random() * vehicleModels.length)];
        card.querySelector('.vehicle-model').textContent = `${makeName} ${modelName}`;

        // Per-card actions.
        card.querySelector('.view-btn').addEventListener('click', () => showDetectionDetails(detection));
        card.querySelector('.export-btn').addEventListener('click', () => exportDetection(detection));

        resultsContainer.prepend(card);
    }
}
720
+
721
+ // Show detection details in modal
722
// Populate and open the detail modal for one detection.
function showDetectionDetails(detection) {
    modalImage.src = detection.imageData;
    modalType.textContent = detection.type;
    modalConfidence.textContent = `${Math.round(detection.confidence * 100)}%`;
    modalPlate.textContent = detection.plate ? detection.plate.text : 'Not detected';

    // Make/model is simulated here with a fresh random pick (it may
    // therefore differ from the value shown on the result card).
    const makes = ['Toyota', 'Honda', 'Ford', 'Chevrolet', 'BMW', 'Mercedes', 'Tesla'];
    const vehicleModels = ['Camry', 'Civic', 'F-150', 'Silverado', '3 Series', 'C-Class', 'Model 3'];
    const makeName = makes[Math.floor(Math.random() * makes.length)];
    const modelName = vehicleModels[Math.floor(Math.random() * vehicleModels.length)];
    modalModel.textContent = `${makeName} ${modelName}`;

    modalTimestamp.textContent = detection.timestamp;
    modalRawData.textContent = JSON.stringify(detection, null, 2);

    detailModal.classList.remove('hidden');
}
740
+
741
+ // Export detection data
742
// Placeholder export: a real implementation would persist the data/image.
function exportDetection(detection) {
    console.log("Exporting detection:", detection);
    const plateLabel = detection.plate?.text || 'unknown plate';
    alert(`Detection data for ${plateLabel} has been exported.`);
}
747
+
748
+ // Clear detection history
749
// Remove all result cards and restore the empty-state placeholder.
function clearDetectionHistory() {
    detectionHistory = [];
    // Fix: `resultsContainer.innerHTML = ''` also destroyed the
    // `emptyResults` placeholder (it is a child of resultsContainer), so
    // un-hiding it afterwards operated on a detached node and the
    // "No detections yet" message never reappeared. Remove only the cards.
    resultsContainer.querySelectorAll('.result-card').forEach(card => card.remove());
    emptyResults.classList.remove('hidden');
}
754
+
755
+ // Handle window resize
756
// Keep the canvas matched to its container while the camera is live.
// (File media sizes the canvas to the source's own resolution instead.)
window.addEventListener('resize', () => {
    if (stream && currentMediaType === 'camera') {
        resizeCanvas();
    }
});
761
+ </script>
762
+ <p style="border-radius: 8px; text-align: center; font-size: 12px; color: #fff; margin-top: 16px;position: fixed; left: 8px; bottom: 8px; z-index: 10; background: rgba(0, 0, 0, 0.8); padding: 4px 8px;">Made with <img src="https://enzostvs-deepsite.hf.space/logo.svg" alt="DeepSite Logo" style="width: 16px; height: 16px; vertical-align: middle;display:inline-block;margin-right:3px;filter:brightness(0) invert(1);"><a href="https://enzostvs-deepsite.hf.space" style="color: #fff;text-decoration: underline;" target="_blank" >DeepSite</a> - 🧬 <a href="https://enzostvs-deepsite.hf.space?remix=samiesam/license-process-detection" style="color: #fff;text-decoration: underline;" target="_blank" >Remix</a></p></body>
763
+ </html>
prompts.txt ADDED
File without changes