Mahiruoshi commited on
Commit
689227c
·
verified ·
1 Parent(s): 695534e
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ /imgrecAaron
PC_CONFIG.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os

# Base directory for the project (the directory containing this config file).
BASE_DIR = os.path.dirname(os.path.abspath(__file__))

# Flask server configuration.
HOST = '0.0.0.0'  # bind all interfaces so external clients can reach the server
IMAGE_REC_PORT = 4000  # port number for the Flask app

# Root directory for data files.
FILE_DIRECTORY = os.path.join(BASE_DIR, 'data')

# Directories used by the image-recognition pipeline.
UPLOAD_FOLDER = os.path.join(FILE_DIRECTORY, 'image-rec', 'images')
ANNOTATED_IMAGES = os.path.join(FILE_DIRECTORY, 'image-rec', 'annotated_images')
ANNOTATED_ARCHIVE = os.path.join(FILE_DIRECTORY, 'image-rec', 'annotated_archive')
STITCHED_IMAGES = os.path.join(FILE_DIRECTORY, 'image-rec', 'stitched_images')
SAMPLE_IMAGES = os.path.join(FILE_DIRECTORY, 'image-rec', 'sample_images')

if __name__ == '__main__':
    # Running this module directly creates every directory the pipeline needs.
    for _folder in (UPLOAD_FOLDER, ANNOTATED_IMAGES, ANNOTATED_ARCHIVE,
                    STITCHED_IMAGES, SAMPLE_IMAGES):
        os.makedirs(_folder, exist_ok=True)
app.py ADDED
@@ -0,0 +1,722 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from flask import Flask, request, jsonify, render_template_string, send_from_directory, send_file
2
+ from flask_cors import CORS
3
+ import os
4
+ import importlib.util
5
+ import time
6
+ import zipfile
7
+ import shutil
8
+ import json
9
+ from datetime import datetime
10
+ from predict_task1 import Predictor
11
+ from id_mapping import mapping
12
+ from show_stitched import *
13
+ import cv2
14
+ import supervision as sv
15
+ from ultralytics import YOLO
16
+
17
app = Flask(__name__)
CORS(app)

# Load PC_CONFIG.py by explicit path (relative to this script, not the CWD)
# so the server starts correctly from any working directory.
config_dir = os.path.abspath(os.path.dirname(__file__))
config_path = os.path.join(config_dir, 'PC_CONFIG.py')
spec = importlib.util.spec_from_file_location("PC_CONFIG", config_path)
PC_CONFIG = importlib.util.module_from_spec(spec)
spec.loader.exec_module(PC_CONFIG)

HOST = PC_CONFIG.HOST
PORT = PC_CONFIG.IMAGE_REC_PORT
UPLOAD_FOLDER = os.path.join(PC_CONFIG.FILE_DIRECTORY, "image-rec", "images")
DATASET_FOLDER = os.path.join(PC_CONFIG.BASE_DIR, "yolo_dataset")
ANNOTATED_FOLDER = os.path.join(DATASET_FOLDER, "annotated_images")
LABELS_FOLDER = os.path.join(DATASET_FOLDER, "labels")
IMAGES_FOLDER = os.path.join(DATASET_FOLDER, "images")
CLASS_MAPPING_FILE = os.path.join(DATASET_FOLDER, "classes.json")

app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER

# Single predictor instance shared by all requests.
predictor = Predictor()

# Make sure every working directory exists before the first request arrives.
for _folder in (UPLOAD_FOLDER, DATASET_FOLDER, ANNOTATED_FOLDER,
                LABELS_FOLDER, IMAGES_FOLDER):
    os.makedirs(_folder, exist_ok=True)

# Seed the class-mapping file from id_mapping on first run, inverting the
# name -> id mapping and skipping unknown (-1) / null entries.
if not os.path.exists(CLASS_MAPPING_FILE):
    reverse_mapping = {str(v): k for k, v in mapping.items() if v != -1 and k is not None}
    with open(CLASS_MAPPING_FILE, 'w', encoding='utf-8') as f:
        json.dump(reverse_mapping, f, indent=2, ensure_ascii=False)
54
+
55
def load_class_mapping():
    """Load the class-id -> class-name mapping from CLASS_MAPPING_FILE.

    Returns:
        dict: the parsed mapping, or an empty dict when the file is
        missing, unreadable, or not valid JSON, so callers can treat
        "no mapping yet" uniformly.
    """
    try:
        with open(CLASS_MAPPING_FILE, 'r', encoding='utf-8') as f:
            return json.load(f)
    except (OSError, ValueError):
        # OSError: file missing/unreadable; ValueError covers
        # json.JSONDecodeError for a corrupt file. The previous bare
        # `except:` also swallowed KeyboardInterrupt/SystemExit.
        return {}
62
+
63
def save_class_mapping(class_mapping):
    """Persist the class-id -> class-name mapping to CLASS_MAPPING_FILE as UTF-8 JSON."""
    with open(CLASS_MAPPING_FILE, 'w', encoding='utf-8') as mapping_file:
        json.dump(class_mapping, mapping_file, indent=2, ensure_ascii=False)
67
+
68
def generate_yolo_annotation(results, detection_id, image_width, image_height, class_name):
    """Build a YOLO-format annotation line for one detection.

    Resolves *class_name* to a numeric class id via the persisted class
    mapping (registering a new id on first sight), then normalises the
    detection's xyxy box to center/size coordinates.

    Returns:
        str: "class_id x_center y_center width height confidence", or ""
        when there is no detection at *detection_id*.
    """
    if not results or detection_id >= len(results[0].boxes):
        return ""

    # Resolve the class name to a numeric id, registering it if new.
    class_mapping = load_class_mapping()
    name_to_id = {name: int(id_str) for id_str, name in class_mapping.items()}
    class_id = name_to_id.get(class_name)
    if class_id is None:
        existing_ids = [int(k) for k in class_mapping]
        class_id = (max(existing_ids) + 1) if existing_ids else 0
        class_mapping[str(class_id)] = class_name
        save_class_mapping(class_mapping)

    # Normalise the xyxy box to YOLO center/size form (relative to image size).
    x1, y1, x2, y2 = results[0].boxes.xyxy[detection_id].tolist()
    x_center = (x1 + x2) / 2 / image_width
    y_center = (y1 + y2) / 2 / image_height
    width = (x2 - x1) / image_width
    height = (y2 - y1) / image_height
    confidence = results[0].boxes.conf[detection_id].item()

    # NOTE(review): standard YOLO label files end after height; the trailing
    # confidence column is kept because downstream tooling may expect it —
    # confirm before training directly on these labels.
    return f"{class_id} {x_center:.6f} {y_center:.6f} {width:.6f} {height:.6f} {confidence:.6f}"
104
+
105
def save_annotated_image(image, results, detection_id, filename):
    """Draw all detections onto *image* and save it under ANNOTATED_FOLDER.

    Args:
        image: BGR image array as loaded by cv2.imread.
        results: YOLOv8 prediction results (first element is used).
        detection_id: kept for signature parity; all boxes are drawn.
        filename: base name used for the saved annotated file.

    Returns:
        str | None: path of the saved annotated image, or None when the
        results contain no boxes.
    """
    if not results or not results[0].boxes:
        return None

    bounding_box_annotator = sv.BoundingBoxAnnotator()
    label_annotator = sv.LabelAnnotator()

    # Convert YOLOv8 results into supervision's Detections format.
    boxes = results[0].boxes.xyxy.cpu().numpy()
    confidences = results[0].boxes.conf.cpu().numpy()
    class_ids = results[0].boxes.cls.cpu().numpy().astype(int)
    class_names = [results[0].names[class_id] for class_id in class_ids]

    detections = sv.Detections(
        xyxy=boxes,
        confidence=confidences,
        class_id=class_ids
    )

    # Draw boxes, then "name confidence" labels, on a copy of the input.
    annotated_image = bounding_box_annotator.annotate(scene=image.copy(), detections=detections)
    annotated_image = label_annotator.annotate(
        scene=annotated_image,
        detections=detections,
        labels=[f"{class_names[i]} {confidences[i]:.2f}" for i in range(len(class_names))]
    )

    # BUG FIX: the output name was the literal "annotated_(unknown)" and
    # ignored the *filename* parameter entirely, so every upload overwrote
    # the same file. Use the caller-supplied filename instead.
    annotated_path = os.path.join(ANNOTATED_FOLDER, f"annotated_{filename}")
    cv2.imwrite(annotated_path, annotated_image)

    return annotated_path
142
+
143
def process_file(file_path, direction, task_type):
    """Run prediction on an uploaded image and archive it into the dataset.

    When a class is recognised, saves a copy of the image, a YOLO label
    file, and an annotated preview under the dataset folders.

    Returns:
        str | None: predicted class id as a string ("-1" when the class
        name is not in the id mapping), or None if the image is unreadable.
    """
    print("File received and saved successfully.")
    print(f"Direction received: {direction}")
    print(f"Task type received: {task_type}")

    startTime = datetime.now()

    image = cv2.imread(file_path)
    if image is None:
        # cv2.imread returns None for missing/corrupt image files.
        return None

    class_name, results, detection_id = predictor.predict_id(file_path, task_type)
    class_id = str(mapping.get(class_name, -1))

    if class_name and results:
        # Archive image + label under a unique, class-tagged base name.
        base_filename = f"{class_name}_{int(time.time())}"
        image_filename = f"{base_filename}.jpg"
        shutil.copy2(file_path, os.path.join(IMAGES_FOLDER, image_filename))

        h, w = image.shape[:2]
        yolo_annotation = generate_yolo_annotation(results, detection_id, w, h, class_name)
        if yolo_annotation:
            label_path = os.path.join(LABELS_FOLDER, f"{base_filename}.txt")
            with open(label_path, 'w') as f:
                f.write(yolo_annotation)

        save_annotated_image(image, results, detection_id, image_filename)

    totalTime = (datetime.now() - startTime).total_seconds()
    print(f"Predicted ID: {class_id}")
    print(f"Time taken for Predicting Image = {totalTime} s")

    return class_id
188
+
189
+ # HTML template for the frontend
190
+ HTML_TEMPLATE = """
191
+ <!DOCTYPE html>
192
+ <html lang="en">
193
+ <head>
194
+ <meta charset="UTF-8">
195
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
196
+ <title>YOLO Image Recognition System</title>
197
+ <style>
198
+ body {
199
+ font-family: Arial, sans-serif;
200
+ max-width: 1200px;
201
+ margin: 0 auto;
202
+ padding: 20px;
203
+ background-color: #f5f5f5;
204
+ }
205
+ .header {
206
+ text-align: center;
207
+ margin-bottom: 30px;
208
+ }
209
+ .container {
210
+ display: grid;
211
+ grid-template-columns: 1fr 1fr;
212
+ gap: 20px;
213
+ margin-bottom: 30px;
214
+ }
215
+ .card {
216
+ background-color: white;
217
+ padding: 20px;
218
+ border-radius: 10px;
219
+ box-shadow: 0 2px 10px rgba(0,0,0,0.1);
220
+ }
221
+ .image-display {
222
+ text-align: center;
223
+ }
224
+ .image-display img {
225
+ max-width: 100%;
226
+ max-height: 400px;
227
+ border: 2px solid #ddd;
228
+ border-radius: 8px;
229
+ }
230
+ .status {
231
+ padding: 10px;
232
+ border-radius: 5px;
233
+ margin-bottom: 15px;
234
+ font-weight: bold;
235
+ }
236
+ .status.waiting {
237
+ background-color: #fff3cd;
238
+ color: #856404;
239
+ }
240
+ .status.updated {
241
+ background-color: #d4edda;
242
+ color: #155724;
243
+ }
244
+ .upload-section {
245
+ text-align: center;
246
+ }
247
+ .upload-form {
248
+ display: inline-block;
249
+ text-align: left;
250
+ }
251
+ .form-group {
252
+ margin-bottom: 15px;
253
+ }
254
+ .form-group label {
255
+ display: block;
256
+ margin-bottom: 5px;
257
+ font-weight: bold;
258
+ }
259
+ .form-group input, .form-group select {
260
+ width: 300px;
261
+ padding: 8px;
262
+ border: 1px solid #ddd;
263
+ border-radius: 4px;
264
+ }
265
+ .btn {
266
+ background-color: #007bff;
267
+ color: white;
268
+ border: none;
269
+ padding: 12px 24px;
270
+ border-radius: 5px;
271
+ font-size: 16px;
272
+ cursor: pointer;
273
+ transition: background-color 0.3s;
274
+ }
275
+ .btn:hover {
276
+ background-color: #0056b3;
277
+ }
278
+ .btn:disabled {
279
+ background-color: #6c757d;
280
+ cursor: not-allowed;
281
+ }
282
+ .dataset-info {
283
+ grid-column: span 2;
284
+ }
285
+ .stats-grid {
286
+ display: grid;
287
+ grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
288
+ gap: 15px;
289
+ margin-top: 15px;
290
+ }
291
+ .stat-card {
292
+ background-color: #f8f9fa;
293
+ padding: 15px;
294
+ border-radius: 5px;
295
+ text-align: center;
296
+ }
297
+ .stat-number {
298
+ font-size: 24px;
299
+ font-weight: bold;
300
+ color: #007bff;
301
+ }
302
+ .timestamp {
303
+ color: #666;
304
+ font-size: 12px;
305
+ margin-top: 10px;
306
+ }
307
+ .class-mapping {
308
+ max-height: 200px;
309
+ overflow-y: auto;
310
+ background-color: #f8f9fa;
311
+ padding: 10px;
312
+ border-radius: 5px;
313
+ font-family: monospace;
314
+ font-size: 12px;
315
+ }
316
+ </style>
317
+ </head>
318
+ <body>
319
+ <div class="header">
320
+ <h1>YOLO Image Recognition System</h1>
321
+ <p>Real-time image processing with dataset management</p>
322
+ </div>
323
+
324
+ <div class="container">
325
+ <div class="card">
326
+ <h3>Latest Recognition Result</h3>
327
+ <div id="status" class="status waiting">Waiting for recognition results...</div>
328
+ <div class="image-display">
329
+ <div id="result" style="display: none;">
330
+ <img id="resultImage" />
331
+ <div id="timestamp" class="timestamp"></div>
332
+ </div>
333
+ <div id="noResult" style="color: #666; padding: 40px;">
334
+ No recognition results yet
335
+ </div>
336
+ </div>
337
+ </div>
338
+
339
+ <div class="card">
340
+ <h3>Upload Image for Recognition</h3>
341
+ <div class="upload-section">
342
+ <form id="uploadForm" class="upload-form" enctype="multipart/form-data">
343
+ <div class="form-group">
344
+ <label for="file">Select Image:</label>
345
+ <input type="file" id="file" name="file" accept="image/*" required>
346
+ </div>
347
+ <div class="form-group">
348
+ <label for="direction">Direction:</label>
349
+ <select id="direction" name="direction" required>
350
+ <option value="north">North</option>
351
+ <option value="south">South</option>
352
+ <option value="east">East</option>
353
+ <option value="west">West</option>
354
+ </select>
355
+ </div>
356
+ <div class="form-group">
357
+ <label for="task_type">Task Type:</label>
358
+ <select id="task_type" name="task_type" required>
359
+ <option value="TASK_1">Task 1</option>
360
+ <option value="TASK_2">Task 2</option>
361
+ </select>
362
+ </div>
363
+ <button type="submit" class="btn">Upload and Predict</button>
364
+ </form>
365
+ <div id="uploadResult" style="margin-top: 15px;"></div>
366
+ </div>
367
+ </div>
368
+
369
+ <div class="card dataset-info">
370
+ <h3>Dataset Information</h3>
371
+ <div class="stats-grid">
372
+ <div class="stat-card">
373
+ <div class="stat-number" id="totalImages">0</div>
374
+ <div>Total Images</div>
375
+ </div>
376
+ <div class="stat-card">
377
+ <div class="stat-number" id="totalClasses">0</div>
378
+ <div>Total Classes</div>
379
+ </div>
380
+ <div class="stat-card">
381
+ <div class="stat-number" id="annotatedImages">0</div>
382
+ <div>Annotated Images</div>
383
+ </div>
384
+ <div class="stat-card">
385
+ <button id="downloadBtn" class="btn" onclick="downloadDataset()">
386
+ Download Dataset
387
+ </button>
388
+ </div>
389
+ </div>
390
+ <div style="margin-top: 20px;">
391
+ <h4>Class Mapping:</h4>
392
+ <div id="classMapping" class="class-mapping">Loading...</div>
393
+ </div>
394
+ <div id="downloadInfo" style="margin-top: 10px; color: #666;"></div>
395
+ </div>
396
+ </div>
397
+
398
+ <script>
399
+ let lastImagePath = '';
400
+
401
+ // Check for latest results
402
+ async function checkLatestResult() {
403
+ try {
404
+ const response = await fetch('/latest-result');
405
+ const data = await response.json();
406
+
407
+ if (data.success && data.image_path) {
408
+ const newImagePath = data.image_path;
409
+
410
+ if (newImagePath !== lastImagePath) {
411
+ lastImagePath = newImagePath;
412
+
413
+ const resultDiv = document.getElementById('result');
414
+ const noResultDiv = document.getElementById('noResult');
415
+ const resultImg = document.getElementById('resultImage');
416
+ const timestampDiv = document.getElementById('timestamp');
417
+ const statusDiv = document.getElementById('status');
418
+
419
+ // Update image and timestamp
420
+ resultImg.src = '/annotated/' + newImagePath + '?t=' + new Date().getTime();
421
+ timestampDiv.textContent = 'Last updated: ' + new Date().toLocaleString();
422
+
423
+ // Show result
424
+ noResultDiv.style.display = 'none';
425
+ resultDiv.style.display = 'block';
426
+
427
+ // Update status
428
+ statusDiv.className = 'status updated';
429
+ statusDiv.textContent = 'New recognition result available';
430
+
431
+ setTimeout(() => {
432
+ statusDiv.className = 'status waiting';
433
+ statusDiv.textContent = 'Waiting for next result...';
434
+ }, 3000);
435
+ }
436
+ }
437
+ } catch (error) {
438
+ console.error('Failed to check latest result:', error);
439
+ }
440
+ }
441
+
442
+ // Update dataset statistics
443
+ async function updateDatasetStats() {
444
+ try {
445
+ const response = await fetch('/dataset-stats');
446
+ const data = await response.json();
447
+
448
+ document.getElementById('totalImages').textContent = data.total_images || 0;
449
+ document.getElementById('totalClasses').textContent = data.total_classes || 0;
450
+ document.getElementById('annotatedImages').textContent = data.annotated_images || 0;
451
+
452
+ // Update class mapping
453
+ const mappingDiv = document.getElementById('classMapping');
454
+ if (data.class_mapping) {
455
+ let mappingText = '';
456
+ for (const [id, name] of Object.entries(data.class_mapping)) {
457
+ mappingText += `${id}: ${name}\\n`;
458
+ }
459
+ mappingDiv.textContent = mappingText || 'No classes defined yet';
460
+ } else {
461
+ mappingDiv.textContent = 'No classes defined yet';
462
+ }
463
+ } catch (error) {
464
+ console.error('Failed to update dataset stats:', error);
465
+ }
466
+ }
467
+
468
+ // Handle file upload
469
+ document.getElementById('uploadForm').addEventListener('submit', async function(e) {
470
+ e.preventDefault();
471
+
472
+ const formData = new FormData(this);
473
+ const uploadResult = document.getElementById('uploadResult');
474
+ const submitBtn = this.querySelector('button[type="submit"]');
475
+
476
+ submitBtn.disabled = true;
477
+ submitBtn.textContent = 'Processing...';
478
+ uploadResult.innerHTML = '<div style="color: #007bff;">Processing image...</div>';
479
+
480
+ try {
481
+ const response = await fetch('/upload', {
482
+ method: 'POST',
483
+ body: formData
484
+ });
485
+
486
+ const result = await response.json();
487
+
488
+ if (response.ok) {
489
+ uploadResult.innerHTML = `
490
+ <div style="color: #28a745;">
491
+ <strong>Success!</strong><br>
492
+ Predicted ID: ${result.predicted_id}
493
+ </div>
494
+ `;
495
+ // Refresh dataset stats
496
+ updateDatasetStats();
497
+ } else {
498
+ uploadResult.innerHTML = `<div style="color: #dc3545;">Error: ${result.error}</div>`;
499
+ }
500
+ } catch (error) {
501
+ uploadResult.innerHTML = `<div style="color: #dc3545;">Error: ${error.message}</div>`;
502
+ } finally {
503
+ submitBtn.disabled = false;
504
+ submitBtn.textContent = 'Upload and Predict';
505
+ }
506
+ });
507
+
508
+ // Download dataset
509
+ async function downloadDataset() {
510
+ const downloadBtn = document.getElementById('downloadBtn');
511
+ const downloadInfo = document.getElementById('downloadInfo');
512
+
513
+ try {
514
+ downloadBtn.disabled = true;
515
+ downloadBtn.textContent = 'Preparing...';
516
+ downloadInfo.textContent = 'Creating ZIP file, please wait...';
517
+ downloadInfo.style.color = '#007bff';
518
+
519
+ const response = await fetch('/download-dataset');
520
+
521
+ if (response.ok) {
522
+ const blob = await response.blob();
523
+ const url = window.URL.createObjectURL(blob);
524
+ const a = document.createElement('a');
525
+ a.href = url;
526
+ a.download = `yolo_dataset_${new Date().toISOString().slice(0,10)}.zip`;
527
+ document.body.appendChild(a);
528
+ a.click();
529
+ window.URL.revokeObjectURL(url);
530
+ document.body.removeChild(a);
531
+
532
+ downloadInfo.textContent = 'Dataset downloaded successfully!';
533
+ downloadInfo.style.color = '#28a745';
534
+ } else {
535
+ const errorData = await response.json();
536
+ downloadInfo.textContent = 'Error: ' + (errorData.message || 'Failed to download');
537
+ downloadInfo.style.color = '#dc3545';
538
+ }
539
+ } catch (error) {
540
+ downloadInfo.textContent = 'Error: ' + error.message;
541
+ downloadInfo.style.color = '#dc3545';
542
+ } finally {
543
+ downloadBtn.disabled = false;
544
+ downloadBtn.textContent = 'Download Dataset';
545
+ setTimeout(() => { downloadInfo.textContent = ''; }, 5000);
546
+ }
547
+ }
548
+
549
+ // Initialize
550
+ checkLatestResult();
551
+ updateDatasetStats();
552
+
553
+ // Auto-refresh every 3 seconds
554
+ setInterval(checkLatestResult, 3000);
555
+ setInterval(updateDatasetStats, 10000);
556
+ </script>
557
+ </body>
558
+ </html>
559
+ """
560
+
561
+ # Routes
562
@app.route('/')
def index():
    """Serve the single-page web UI (rendered from the inline template)."""
    return render_template_string(HTML_TEMPLATE)
566
+
567
@app.route('/status', methods=['GET'])
def server_status():
    """Health-check endpoint used by clients to verify the server is up."""
    return jsonify({'status': 'OK'})
571
+
572
@app.route('/upload', methods=['POST'])
def upload_file():
    """Accept an image upload, run recognition, and report the result.

    Returns 400 for a missing/empty file part, 500 when prediction
    fails, and 200 with the predicted id on success.
    """
    if 'file' not in request.files:
        return jsonify({'error': 'No file part'}), 400

    file = request.files['file']
    direction = request.form.get('direction', 'north')
    task_type = request.form.get('task_type', 'TASK_1')

    if file.filename == '':
        return jsonify({'error': 'No selected file'}), 400

    # Strip any client-supplied directory components before saving.
    filename = os.path.basename(file.filename)
    file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
    file.save(file_path)

    # Process the saved file and predict its class id.
    class_id = process_file(file_path, direction, task_type)
    if class_id is None:
        return jsonify({'error': 'Failed to process image'}), 500

    return jsonify({
        'message': 'File successfully uploaded and processed',
        'predicted_id': class_id,
        'direction': direction,
        'task_type': task_type
    }), 200
602
+
603
@app.route('/latest-result')
def get_latest_result():
    """Return the filename and mtime of the newest annotated image."""
    if not os.path.exists(ANNOTATED_FOLDER):
        return jsonify({"success": False, "message": "Annotated folder not found"})

    image_suffixes = ('.png', '.jpg', '.jpeg')
    annotated_files = [
        (name, os.path.getmtime(os.path.join(ANNOTATED_FOLDER, name)))
        for name in os.listdir(ANNOTATED_FOLDER)
        if name.startswith('annotated_') and name.lower().endswith(image_suffixes)
    ]

    if not annotated_files:
        return jsonify({"success": False, "message": "No annotated images found"})

    # Newest by modification time wins.
    latest_name, latest_mtime = max(annotated_files, key=lambda entry: entry[1])
    return jsonify({
        "success": True,
        "image_path": latest_name,
        "timestamp": latest_mtime
    })
629
+
630
@app.route('/annotated/<filename>')
def serve_annotated_image(filename):
    """Serve one annotated image by name.

    send_from_directory rejects paths escaping ANNOTATED_FOLDER, so the
    raw URL segment is safe to pass through.
    """
    return send_from_directory(ANNOTATED_FOLDER, filename)
634
+
635
@app.route('/dataset-stats')
def get_dataset_stats():
    """Summarise dataset contents for the dashboard widgets."""
    image_suffixes = ('.png', '.jpg', '.jpeg')

    def _count_images(folder):
        # Count only image files; a missing folder counts as empty.
        if not os.path.exists(folder):
            return 0
        return sum(1 for name in os.listdir(folder)
                   if name.lower().endswith(image_suffixes))

    class_mapping = load_class_mapping()
    return jsonify({
        'total_images': _count_images(IMAGES_FOLDER),
        'annotated_images': _count_images(ANNOTATED_FOLDER),
        'class_mapping': class_mapping,
        'total_classes': len(class_mapping),
    })
658
+
659
@app.route('/download-dataset')
def download_dataset():
    """Download the complete YOLO dataset as a ZIP archive.

    The archive is assembled in memory (io.BytesIO) instead of being
    written to UPLOAD_FOLDER and deleted in a `finally` block. The old
    approach deleted the zip while Flask could still be streaming it:
    on Windows os.remove fails on a file being sent, the bare
    `except: pass` swallowed the error, and stray zips accumulated in
    the upload folder.

    Returns 404 when the dataset folder is missing or empty, 500 on an
    archiving error, otherwise the zip as an attachment.
    """
    if not os.path.exists(DATASET_FOLDER):
        return jsonify({"success": False, "message": "Dataset folder not found"}), 404

    # Refuse to build an empty archive.
    has_files = any(
        os.path.exists(folder) and os.listdir(folder)
        for folder in (IMAGES_FOLDER, LABELS_FOLDER, ANNOTATED_FOLDER)
    )
    if not has_files:
        return jsonify({"success": False, "message": "No files found in dataset"}), 404

    import io  # local import: io is not among this module's top-level imports

    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    zip_filename = f"yolo_dataset_{timestamp}.zip"

    try:
        buffer = io.BytesIO()
        with zipfile.ZipFile(buffer, 'w', zipfile.ZIP_DEFLATED) as zipf:
            # Mirror the dataset folder structure inside the archive.
            for root, dirs, files in os.walk(DATASET_FOLDER):
                for file in files:
                    file_path = os.path.join(root, file)
                    arcname = os.path.relpath(file_path, DATASET_FOLDER)
                    zipf.write(file_path, arcname)
        buffer.seek(0)
        return send_file(buffer, as_attachment=True,
                         download_name=zip_filename,
                         mimetype='application/zip')
    except Exception as e:
        return jsonify({"success": False, "message": f"Error creating zip: {str(e)}"}), 500
701
+
702
@app.route('/display_stitched', methods=['POST'])
def display_stitched():
    """Trigger display of the stitched annotated images on the server."""
    try:
        showAnnotatedStitched()
    except Exception as e:
        # Report the failure to the caller instead of a bare 500 page.
        return jsonify({'error': str(e)}), 500
    return jsonify({'display_stitched': 'OK'})
710
+
711
if __name__ == '__main__':
    print()
    print(f"UPLOAD FOLDER: {UPLOAD_FOLDER}")
    print(f"DATASET FOLDER: {DATASET_FOLDER}")
    print(f"Starting Enhanced Image Recognition Server...")
    print(f"Web interface available at: http://{HOST}:{PORT}")

    try:
        app.run(host=HOST, port=PORT, debug=False)
    except OSError:
        # Only bind/port failures should trigger the fallback; the previous
        # bare `except:` also hid KeyboardInterrupt and genuine crashes.
        print('Unable to connect to configured host and port. Switching to localhost:4000.')
        app.run(host='0.0.0.0', port=4000, debug=True)
http_client_ir.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import requests
2
+ import PC_CONFIG
3
+
4
class ImageRecognitionClient:
    """Small HTTP client for the image-recognition Flask server."""

    def __init__(self, host, port, timeout=None):
        """Args:
            host: server hostname or IP.
            port: server port (int or str).
            timeout: optional per-request timeout in seconds. The default
                of None matches the previous behaviour (wait indefinitely);
                pass a number to avoid hanging forever on a dead server.
        """
        self.server_url = f"http://{host}:{port}"
        self.timeout = timeout

    def check_status(self):
        """Check the status of the server; returns its JSON or None on error."""
        response = requests.get(f"{self.server_url}/status", timeout=self.timeout)
        if response.status_code == 200:
            return response.json()
        print(f"Error checking status: {response.status_code}")
        return None

    def send_file(self, file_path, direction, task_type):
        """Upload an image for recognition.

        Returns the server's JSON reply (including 'predicted_id') on
        success, or None on failure.
        """
        with open(file_path, 'rb') as f:
            files = {'file': f}
            data = {'direction': direction, 'task_type': task_type}
            response = requests.post(f"{self.server_url}/upload", files=files,
                                     data=data, timeout=self.timeout)
        if response.status_code == 200:
            print("File sent to the server successfully.")
            print(response.json())
            return response.json()
        print("Failed to send file to the server.")
        print(response.text)
        return None

    def display_stitched(self):
        """Ask the server to display the stitched annotated images."""
        response = requests.post(f"{self.server_url}/display_stitched",
                                 timeout=self.timeout)
        if response.status_code == 200:
            print("Stitched request sent to the server successfully.")
            print(response.json())
            return response.json()
        print("Failed to send stitched request to the server.")
        print(response.text)
        return None
41
+
42
if __name__ == "__main__":
    host = '192.168.100.199'
    port = '4000'
    client = ImageRecognitionClient(host, port)
    # client.check_status()
    # Adjust the file path and direction as needed.
    client.send_file('/Users/xot/Documents/UniversityWork/Y4S1/MDP/ImgRecAaron/data/image-rec/sample_images/IMG_9606.jpg', 'north', 'TASK_1')
    # client.display_stitched()
http_server_ir_task1.py ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from flask import Flask, request, jsonify
2
+ import os
3
+ import importlib.util
4
+ from datetime import datetime
5
+ from predict_task1 import Predictor
6
+ from id_mapping import mapping
7
+ #from show_annotation import start_annotation_process
8
+ #from multiprocessing import Process, Queue
9
+ #import cv2
10
+ from show_stitched import *
11
+
12
app = Flask(__name__)

# Load PC_CONFIG.py by explicit path (relative to this script, not the CWD)
# so the server starts correctly from any working directory.
config_dir = os.path.abspath(os.path.dirname(__file__))
config_path = os.path.join(config_dir, 'PC_CONFIG.py')
spec = importlib.util.spec_from_file_location("PC_CONFIG", config_path)
PC_CONFIG = importlib.util.module_from_spec(spec)
spec.loader.exec_module(PC_CONFIG)

HOST = PC_CONFIG.HOST
PORT = PC_CONFIG.IMAGE_REC_PORT
UPLOAD_FOLDER = os.path.join(PC_CONFIG.FILE_DIRECTORY, "image-rec", "images")
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
24
+
25
def process_file(file_path, direction, task_type):
    """Predict the class id for an uploaded image.

    Returns:
        str: predicted id as a string; "-1" when the class name is not
        present in the id mapping.
    """
    predictor = Predictor()
    print("File received and saved successfully.")
    print(f"Direction received: {direction}")
    print(f"Task type received: {task_type}")

    startTime = datetime.now()
    # Perform prediction; results/detection_id are unused here but kept
    # for parity with the annotation pipeline's predictor interface.
    class_name, results, detection_id = predictor.predict_id(file_path, task_type)
    class_id = str(mapping.get(class_name, -1))
    totalTime = (datetime.now() - startTime).total_seconds()

    print(f"Predicted ID: {class_id}")
    print(f"Time taken for Predicting Image = {totalTime} s")
    return class_id
40
+
41
@app.route('/status', methods=['GET'])
def server_status():
    """Health-check endpoint used by clients to verify the server is up."""
    return jsonify({'status': 'OK'})
44
+
45
@app.route('/upload', methods=['POST'])
def upload_file():
    """Accept an image upload and return the predicted class id.

    Returns 400 for a missing/empty file part, otherwise 200 with
    'predicted_id'.
    """
    if 'file' not in request.files:
        return jsonify({'error': 'No file part'}), 400

    file = request.files['file']
    # Use .get with defaults, consistent with the enhanced server (app.py);
    # the previous request.form['...'] raised an unhandled 400 when a
    # client omitted these fields.
    direction = request.form.get('direction', 'north')
    task_type = request.form.get('task_type', 'TASK_1')

    if file.filename == '':
        return jsonify({'error': 'No selected file'}), 400

    # Strip any client-supplied directory components before saving.
    filename = os.path.basename(file.filename)
    file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
    file.save(file_path)

    # Process the file and predict. The previous `if file:` guard could
    # fall off the end of the view returning None (a Flask 500); the
    # filename check above already guarantees the file is present.
    class_id = process_file(file_path, direction, task_type)
    return jsonify({'message': 'File successfully uploaded', 'predicted_id': class_id}), 200
66
+
67
@app.route('/display_stitched', methods=['POST'])
def display_stitched():
    """Trigger display of the stitched annotated images on the server."""
    showAnnotatedStitched()
    return jsonify({'display_stitched': 'OK'})
71
+
72
if __name__ == '__main__':
    print()
    print(f"UPLOAD FOLDER: {UPLOAD_FOLDER}")
    # NOTE: PC_CONFIG's HOST/PORT are deliberately not used here; this
    # server is pinned to 0.0.0.0:4000 (the configurable variant with a
    # fallback was disabled and has been removed for clarity).
    app.run(host='0.0.0.0', port=4000, debug=True)
http_server_ir_task2.py ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from flask import Flask, request, jsonify
2
+ import os
3
+ import importlib.util
4
+ from datetime import datetime
5
+ from predict_task2 import Predictor
6
+ from id_mapping import mapping
7
+ #from show_annotation import start_annotation_process
8
+ #from multiprocessing import Process, Queue
9
+ #import cv2
10
+ from show_stitched import *
11
+
12
+ app = Flask(__name__)
13
+
14
+ config_dir = os.path.abspath(os.path.dirname(__file__))
15
+ config_path = os.path.join(config_dir, 'PC_CONFIG.py')
16
+ spec = importlib.util.spec_from_file_location("PC_CONFIG", config_path)
17
+ PC_CONFIG = importlib.util.module_from_spec(spec)
18
+ spec.loader.exec_module(PC_CONFIG)
19
+
20
+ HOST = PC_CONFIG.HOST
21
+ PORT = PC_CONFIG.IMAGE_REC_PORT
22
+ UPLOAD_FOLDER = os.path.join(PC_CONFIG.FILE_DIRECTORY,"image-rec","images")
23
+ app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
24
+
25
+ def process_file(file_path, direction, task_type):
26
+ predictor = Predictor()
27
+ print("File received and saved successfully.")
28
+ print(f"Direction received: {direction}")
29
+ print(f"Task type received: {task_type}")
30
+
31
+ startTime = datetime.now()
32
+ class_name, results, detection_id = predictor.predict_id(file_path, task_type) # Perform prediction
33
+ #show_annotation_queue.put((file_path, results, detection_id))
34
+ class_id = str(mapping.get(class_name, -1))
35
+ endTime = datetime.now()
36
+ totalTime = (endTime - startTime).total_seconds()
37
+ print(f"Predicted ID: {class_id}")
38
+ print(f"Time taken for Predicting Image = {totalTime} s")
39
+ return class_id
40
+
41
+ @app.route('/status', methods=['GET'])
42
+ def server_status():
43
+ return jsonify({'status': 'OK'})
44
+
45
+ @app.route('/upload', methods=['POST'])
46
+ def upload_file():
47
+ if 'file' not in request.files:
48
+ return jsonify({'error': 'No file part'}), 400
49
+ file = request.files['file']
50
+ direction = request.form['direction']
51
+ task_type = request.form['task_type']
52
+ if file.filename == '':
53
+ return jsonify({'error': 'No selected file'}), 400
54
+ if file:
55
+ filename = os.path.basename(file.filename)
56
+ file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
57
+ file.save(file_path)
58
+ # image = cv2.imread(file_path)
59
+ # cv2.imshow("Uploaded Image", image)
60
+ # cv2.waitKey(0) # Wait until a key is pressed
61
+ # cv2.destroyAllWindows() # Close the window
62
+
63
+ # Process the file and predict
64
+ class_id = process_file(file_path, direction, task_type)
65
+ return jsonify({'message': 'File successfully uploaded', 'predicted_id': class_id}), 200
66
+
67
+ @app.route('/display_stitched', methods=['POST'])
68
+ def display_stitched():
69
+ showAnnotatedStitched()
70
+ return jsonify({'display_stitched': 'OK'})
71
+
72
+ if __name__ == '__main__':
73
+ # show_annotation_queue = Queue()
74
+ # process = Process(target=start_annotation_process, args=(show_annotation_queue,))
75
+ # process.start()
76
+
77
+ print()
78
+ print(f"UPLOAD FOLDER: {UPLOAD_FOLDER}")
79
+ # Port 5000 if free
80
+ '''
81
+ try:
82
+ app.run(host=HOST, port=PORT, debug=False)
83
+ except:
84
+ print('Unable to Connect to PC_CONFIG Host and Port. Switching to 0.0.0.0:4000.')
85
+ app.run(host='0.0.0.0', port=4000, debug=True)
86
+ '''
87
+
88
+ # Run on Port 4000
89
+ app.run(host='0.0.0.0', port=4000, debug=True)
90
+
91
+ #process.join()
id_mapping.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ mapping = {
2
+ "one": 11,
3
+ "two": 12,
4
+ "three": 13,
5
+ "four": 14,
6
+ "five": 15,
7
+ "six": 16,
8
+ "seven": 17,
9
+ "eight": 18,
10
+ "nine": 19,
11
+ "A": 20,
12
+ "B": 21,
13
+ "C": 22,
14
+ "D": 23,
15
+ "E": 24,
16
+ "F": 25,
17
+ "G": 26,
18
+ "H": 27,
19
+ "S": 28,
20
+ "T": 29,
21
+ "U": 30,
22
+ "V": 31,
23
+ "W": 32,
24
+ "X": 33,
25
+ "Y": 34,
26
+ "Z": 35,
27
+ "up": 36,
28
+ "down": 37,
29
+ "right": 38,
30
+ "left": 39,
31
+ "circle": 40,
32
+ "bullseye": -1,
33
+ None: -1
34
+ }
predict_task1.py ADDED
@@ -0,0 +1,181 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import os
3
+ import time
4
+ import importlib.util
5
+ import supervision as sv
6
+ from ultralytics import YOLO
7
+
8
+ config_dir = os.path.abspath(os.path.dirname(__file__))
9
+ config_path = os.path.join(config_dir, 'PC_CONFIG.py')
10
+ spec = importlib.util.spec_from_file_location("PC_CONFIG", config_path)
11
+ PC_CONFIG = importlib.util.module_from_spec(spec)
12
+ spec.loader.exec_module(PC_CONFIG)
13
+ dir = str(os.path.join(PC_CONFIG.BASE_DIR, "weights", "best_task1_E256E.pt"))
14
+
15
+ class Predictor:
16
+ def __init__(self):
17
+ # Load a pre-trained yolov8n model
18
+ print("dir:",dir)
19
+ self.model = YOLO(dir) # replace model here
20
+ # self.print_class_ids() # Print class IDs upon initialization
21
+
22
+ # def print_class_ids(self):
23
+ # # Print all class names and their corresponding IDs
24
+ # for id, name in enumerate(self.model.names):
25
+ # print(f"ID: {id}, Name: {name}")
26
+
27
+ # def predict_id(self, image_file_path, task_type):
28
+ # # Load the image
29
+ # image = cv2.imread(image_file_path)
30
+
31
+ # # Run inference on the image
32
+ # results = self.model(image)
33
+
34
+ # # Print results
35
+ # print(results)
36
+ # # Show annotation
37
+ # self.show_annotation(image, results)
38
+
39
+ # # Extract class name
40
+ # class_name, largest_size, detection_id = None, -1, None
41
+ # for result in results: # Assuming 'results' is a list
42
+ # print(f"task_type is {task_type}")
43
+
44
+ # if task_type == "TASK_2":
45
+ # for prediction in result.predictions:
46
+ # print(prediction)
47
+ # class_name = prediction.class_name
48
+ # detection_id = prediction.detection_id
49
+ # if class_name != "Bullseye":
50
+ # break
51
+ # else:
52
+ # for prediction in result.predictions:
53
+ # print(prediction)
54
+ # if largest_size == -1 or max(prediction.width, prediction.height) > largest_size:
55
+ # largest_size = max(prediction.width, prediction.height)
56
+ # class_name = prediction.class_name
57
+ # detection_id = prediction.detection_id
58
+
59
+ # if class_name:
60
+ # print("class_name = " + class_name)
61
+ # else:
62
+ # print("class_name = None")
63
+
64
+ # return class_name, results, detection_id
65
+
66
+ def predict_id(self, image_file_path, task_type):
67
+ # Load the image
68
+ image = cv2.imread(image_file_path)
69
+ # Validation for image existence
70
+ if image is None:
71
+ print(f"Error: Could not read image at {image_file_path}")
72
+ return None, None, None
73
+
74
+ # Check the image size and resize if necessary
75
+ if image.shape[0] != 640 or image.shape[1] != 640:
76
+ image = cv2.resize(image, (640, 640)) # Resize to 640x640
77
+
78
+ # Run inference on the image
79
+ results = self.model(image) # Directly pass the image
80
+
81
+ # Print results
82
+ print(results)
83
+
84
+ # Show annotation (using YOLOv8's plotting capabilities)
85
+ # results[0].show()
86
+
87
+ # Extract class name, largest size, and detection ID
88
+ class_name, largest_size, detection_id = None, -1, 0
89
+ boxes = results[0].boxes.xyxy # Get bounding boxes (x1, y1, x2, y2)
90
+ scores = results[0].boxes.conf # Get confidence scores
91
+ class_ids = results[0].boxes.cls # Get class IDs
92
+
93
+ for i in range(len(boxes)):
94
+ print(f"task_type is {task_type}")
95
+
96
+ if task_type == "TASK_2":
97
+ if int(class_ids[i]) != 16: # Replace with the ID for "Bulls Eye"
98
+ class_name = results[0].names[int(class_ids[i])] # Get class name
99
+ detection_id = i
100
+ break
101
+ else:
102
+ print("Bullseye detected")
103
+ class_name = "bullseye"
104
+ detection_id = i
105
+ else:
106
+ # Determine the largest bounding box
107
+ box_width = boxes[i][2] - boxes[i][0]
108
+ box_height = boxes[i][3] - boxes[i][1]
109
+ size = max(box_width, box_height)
110
+ detection_id = i
111
+
112
+ if largest_size == -1 or size > largest_size:
113
+ largest_size = size
114
+ class_name = results[0].names[int(class_ids[i])]
115
+ detection_id = i
116
+
117
+ if class_name:
118
+ print("class_name = " + class_name)
119
+ timestamp = int(time.time())
120
+ # Save the annotated image
121
+ try:
122
+ results[detection_id].save(f'../data/annotated_images/{class_name}_{timestamp}.jpg')
123
+ except:
124
+ print("error in saving photo!")
125
+ else:
126
+ print("class_name = None")
127
+
128
+ return class_name, results, detection_id
129
+
130
+
131
+ # def show_annotation(self, image, results):
132
+ # # Create supervision annotators
133
+ # bounding_box_annotator = sv.BoundingBoxAnnotator()
134
+ # label_annotator = sv.LabelAnnotator()
135
+
136
+ # # Process results from YOLOv8
137
+ # detections = []
138
+ # for result in results:
139
+ # for detection in result.boxes.data: # Accessing YOLOv8's box data
140
+ # class_id = int(detection[5]) # Class ID
141
+ # x1, y1, x2, y2 = map(int, detection[:4]) # Bounding box coordinates
142
+ # score = float(detection[4]) # Confidence score
143
+
144
+ # # Add to detections
145
+ # detections.append({
146
+ # "bbox": [x1, y1, x2, y2],
147
+ # "confidence": score,
148
+ # "class_id": class_id
149
+ # })
150
+
151
+ # # Convert detections to the expected format for supervision
152
+ # if detections:
153
+ # detections = sv.Detections(
154
+ # xyxy=[d["bbox"] for d in detections],
155
+ # confidence=[d["confidence"] for d in detections],
156
+ # class_id=[d["class_id"] for d in detections]
157
+ # )
158
+
159
+ # # Annotate the image with inference results
160
+ # annotated_image = bounding_box_annotator.annotate(scene=image, detections=detections)
161
+ # annotated_image = label_annotator.annotate(scene=annotated_image, detections=detections)
162
+
163
+ # # Display the annotated image
164
+ # try:
165
+ # cv2.imshow("Annotated Image", annotated_image)
166
+ # cv2.waitKey(0) # Wait indefinitely until a key is pressed
167
+ # except Exception as e:
168
+ # print(f"Error displaying image: {e}")
169
+ # finally:
170
+ # cv2.destroyAllWindows() # Close all OpenCV windows
171
+ # else:
172
+ # print("No detections found.")
173
+
174
+
175
+ if __name__ == "__main__":
176
+ # Example usage
177
+ predictor = Predictor()
178
+ # Specify the path to your image
179
+ image_file_path = os.path.join(PC_CONFIG.FILE_DIRECTORY, "image-rec", "sample_images", "IMG_9325.jpg")
180
+ # Predict and display the class name
181
+ predictor.predict_id(image_file_path, "TASK_1")
predict_task2.py ADDED
@@ -0,0 +1,181 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import os
3
+ import time
4
+ import importlib.util
5
+ import supervision as sv
6
+ from ultralytics import YOLO
7
+
8
+ config_dir = os.path.abspath(os.path.dirname(__file__))
9
+ config_path = os.path.join(config_dir, 'PC_CONFIG.py')
10
+ spec = importlib.util.spec_from_file_location("PC_CONFIG", config_path)
11
+ PC_CONFIG = importlib.util.module_from_spec(spec)
12
+ spec.loader.exec_module(PC_CONFIG)
13
+ dir = str(os.path.join(PC_CONFIG.BASE_DIR, "weights", "best_task2.pt"))
14
+
15
+ class Predictor:
16
+ def __init__(self):
17
+ # Load a pre-trained yolov8n model
18
+ print("dir:",dir)
19
+ self.model = YOLO(dir) # replace model here
20
+ # self.print_class_ids() # Print class IDs upon initialization
21
+
22
+ # def print_class_ids(self):
23
+ # # Print all class names and their corresponding IDs
24
+ # for id, name in enumerate(self.model.names):
25
+ # print(f"ID: {id}, Name: {name}")
26
+
27
+ # def predict_id(self, image_file_path, task_type):
28
+ # # Load the image
29
+ # image = cv2.imread(image_file_path)
30
+
31
+ # # Run inference on the image
32
+ # results = self.model(image)
33
+
34
+ # # Print results
35
+ # print(results)
36
+ # # Show annotation
37
+ # self.show_annotation(image, results)
38
+
39
+ # # Extract class name
40
+ # class_name, largest_size, detection_id = None, -1, None
41
+ # for result in results: # Assuming 'results' is a list
42
+ # print(f"task_type is {task_type}")
43
+
44
+ # if task_type == "TASK_2":
45
+ # for prediction in result.predictions:
46
+ # print(prediction)
47
+ # class_name = prediction.class_name
48
+ # detection_id = prediction.detection_id
49
+ # if class_name != "Bullseye":
50
+ # break
51
+ # else:
52
+ # for prediction in result.predictions:
53
+ # print(prediction)
54
+ # if largest_size == -1 or max(prediction.width, prediction.height) > largest_size:
55
+ # largest_size = max(prediction.width, prediction.height)
56
+ # class_name = prediction.class_name
57
+ # detection_id = prediction.detection_id
58
+
59
+ # if class_name:
60
+ # print("class_name = " + class_name)
61
+ # else:
62
+ # print("class_name = None")
63
+
64
+ # return class_name, results, detection_id
65
+
66
+ def predict_id(self, image_file_path, task_type):
67
+ # Load the image
68
+ image = cv2.imread(image_file_path)
69
+ # Validation for image existence
70
+ if image is None:
71
+ print(f"Error: Could not read image at {image_file_path}")
72
+ return None, None, None
73
+
74
+ # Check the image size and resize if necessary
75
+ if image.shape[0] != 640 or image.shape[1] != 640:
76
+ image = cv2.resize(image, (640, 640)) # Resize to 640x640
77
+
78
+ # Run inference on the image
79
+ results = self.model(image) # Directly pass the image
80
+
81
+ # Print results
82
+ print(results)
83
+
84
+ # Show annotation (using YOLOv8's plotting capabilities)
85
+ # results[0].show()
86
+
87
+ # Extract class name, largest size, and detection ID
88
+ class_name, largest_size, detection_id = None, -1, 0
89
+ boxes = results[0].boxes.xyxy # Get bounding boxes (x1, y1, x2, y2)
90
+ scores = results[0].boxes.conf # Get confidence scores
91
+ class_ids = results[0].boxes.cls # Get class IDs
92
+
93
+ for i in range(len(boxes)):
94
+ print(f"task_type is {task_type}")
95
+
96
+ if task_type == "TASK_2":
97
+ if int(class_ids[i]) != 16: # Replace with the ID for "Bulls Eye"
98
+ class_name = results[0].names[int(class_ids[i])] # Get class name
99
+ detection_id = i
100
+ break
101
+ else:
102
+ print("Bullseye detected")
103
+ class_name = "bullseye"
104
+ detection_id = i
105
+ else:
106
+ # Determine the largest bounding box
107
+ box_width = boxes[i][2] - boxes[i][0]
108
+ box_height = boxes[i][3] - boxes[i][1]
109
+ size = max(box_width, box_height)
110
+ detection_id = i
111
+
112
+ if largest_size == -1 or size > largest_size:
113
+ largest_size = size
114
+ class_name = results[0].names[int(class_ids[i])]
115
+ detection_id = i
116
+
117
+ if class_name:
118
+ print("class_name = " + class_name)
119
+ timestamp = int(time.time())
120
+ # Save the annotated image
121
+ try:
122
+ results[detection_id].save(f'../data/annotated_images/{class_name}_{timestamp}.jpg')
123
+ except:
124
+ print("error in saving photo!")
125
+ else:
126
+ print("class_name = None")
127
+
128
+ return class_name, results, detection_id
129
+
130
+
131
+ # def show_annotation(self, image, results):
132
+ # # Create supervision annotators
133
+ # bounding_box_annotator = sv.BoundingBoxAnnotator()
134
+ # label_annotator = sv.LabelAnnotator()
135
+
136
+ # # Process results from YOLOv8
137
+ # detections = []
138
+ # for result in results:
139
+ # for detection in result.boxes.data: # Accessing YOLOv8's box data
140
+ # class_id = int(detection[5]) # Class ID
141
+ # x1, y1, x2, y2 = map(int, detection[:4]) # Bounding box coordinates
142
+ # score = float(detection[4]) # Confidence score
143
+
144
+ # # Add to detections
145
+ # detections.append({
146
+ # "bbox": [x1, y1, x2, y2],
147
+ # "confidence": score,
148
+ # "class_id": class_id
149
+ # })
150
+
151
+ # # Convert detections to the expected format for supervision
152
+ # if detections:
153
+ # detections = sv.Detections(
154
+ # xyxy=[d["bbox"] for d in detections],
155
+ # confidence=[d["confidence"] for d in detections],
156
+ # class_id=[d["class_id"] for d in detections]
157
+ # )
158
+
159
+ # # Annotate the image with inference results
160
+ # annotated_image = bounding_box_annotator.annotate(scene=image, detections=detections)
161
+ # annotated_image = label_annotator.annotate(scene=annotated_image, detections=detections)
162
+
163
+ # # Display the annotated image
164
+ # try:
165
+ # cv2.imshow("Annotated Image", annotated_image)
166
+ # cv2.waitKey(0) # Wait indefinitely until a key is pressed
167
+ # except Exception as e:
168
+ # print(f"Error displaying image: {e}")
169
+ # finally:
170
+ # cv2.destroyAllWindows() # Close all OpenCV windows
171
+ # else:
172
+ # print("No detections found.")
173
+
174
+
175
+ if __name__ == "__main__":
176
+ # Example usage
177
+ predictor = Predictor()
178
+ # Specify the path to your image
179
+ image_file_path = os.path.join(PC_CONFIG.FILE_DIRECTORY, "image-rec", "sample_images", "IMG_9325.jpg")
180
+ # Predict and display the class name
181
+ predictor.predict_id(image_file_path, "TASK_1")
requirements.txt CHANGED
@@ -1,17 +1,51 @@
1
- ipython
2
- matplotlib>=3.2.2
3
- numpy>=1.18.5
4
- opencv-python>=4.1.2
5
- Pillow>=7.1.2
6
- PyYAML>=5.3.1
7
- requests>=2.23.0
8
- scipy>=1.4.1
9
- torch>=1.7.0,<2.6.0
10
- torchvision>=0.8.1
11
- tqdm>=4.41.0
12
- tensorboard>=2.4.1
13
- pandas>=1.1.4
14
- seaborn>=0.11.0
15
- imutils~=0.5.4
16
- Flask
17
- flask_cors
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ blinker==1.8.2
2
+ certifi==2024.8.30
3
+ charset-normalizer==3.3.2
4
+ click==8.1.7
5
+ contourpy==1.1.1
6
+ cycler==0.12.1
7
+ defusedxml==0.7.1
8
+ filelock==3.16.1
9
+ flask==3.0.3
10
+ fonttools==4.53.1
11
+ fsspec==2024.9.0
12
+ idna==3.10
13
+ importlib-metadata==8.5.0
14
+ importlib-resources==6.4.5
15
+ itsdangerous==2.2.0
16
+ jinja2==3.1.4
17
+ kiwisolver==1.4.7
18
+ MarkupSafe==2.1.5
19
+ matplotlib==3.7.5
20
+ mpmath==1.3.0
21
+ networkx==3.1
22
+ numpy==1.23.5
23
+ opencv-python==4.10.0.84
24
+ opencv-python-headless==4.10.0.84
25
+ packaging==24.1
26
+ pandas==2.0.3
27
+ pillow==10.4.0
28
+ psutil==6.0.0
29
+ py-cpuinfo==9.0.0
30
+ pyparsing==3.1.4
31
+ python-dateutil==2.9.0.post0
32
+ pytz==2024.2
33
+ PyYAML==6.0.2
34
+ requests==2.32.3
35
+ scipy==1.10.0
36
+ seaborn==0.13.2
37
+ six==1.16.0
38
+ supervision==0.23.0
39
+ sympy==1.13.3
40
+ torch==2.2.2
41
+ torchvision==0.17.2
42
+ tqdm==4.66.5
43
+ typing-extensions==4.12.2
44
+ tzdata==2024.1
45
+ ultralytics==8.2.97
46
+ ultralytics-thop==2.0.6
47
+ urllib3==2.2.3
48
+ werkzeug==3.0.4
49
+ zipp==3.20.2
50
+ flask-cors==4.0.0
51
+ supervision==0.21.0
requirements_enhanced.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ # Enhanced Image Recognition Server Requirements
2
+ # Additional dependencies for the enhanced server functionality
3
+
4
+ flask-cors==4.0.0
5
+ supervision==0.21.0
show_annotation.py ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import supervision as sv
3
+ import time
4
+ import os
5
+ import importlib.util
6
+ from id_mapping import mapping
7
+ from multiprocessing import Process, Queue
8
+ from predict import Predictor
9
+ from show_stitched import showAnnotatedStitched
10
+ import shutil
11
+ from datetime import datetime
12
+
13
+
14
+ config_dir = os.path.abspath(os.path.dirname(__file__))
15
+ config_path = os.path.join(config_dir, 'PC_CONFIG.py')
16
+ spec = importlib.util.spec_from_file_location("PC_CONFIG", config_path)
17
+ PC_CONFIG = importlib.util.module_from_spec(spec)
18
+ spec.loader.exec_module(PC_CONFIG)
19
+
20
+
21
+ def start_annotation_process(queue):
22
+ image_count = 0
23
+ file_dir = os.path.join(PC_CONFIG.FILE_DIRECTORY,"image-rec","annotated_images")
24
+
25
+ archive_directory_content(file_dir)
26
+
27
+ while True:
28
+ if not queue.empty():
29
+
30
+ item = queue.get() # Wait for an item from the queue
31
+ if item[0] == "STOP": # Check for the termination signal
32
+ print("Stopping annotation process.")
33
+ break # Exit the loop to end the process
34
+ print(f"item: {item}")
35
+ image_file_path, results, detection_id = item
36
+ image = cv2.imread(image_file_path)
37
+ image_count += 1
38
+ show_annotation(image, results, detection_id, image_count)
39
+
40
+ showAnnotatedStitched()
41
+
42
+ def archive_directory_content(directory_path):
43
+ # Moves files with the specified extension from the given directory to an archive directory,
44
+ # except for files named .gitkeep.
45
+
46
+ archive_dir= os.path.join(PC_CONFIG.FILE_DIRECTORY,"image-rec","annotated_archive")
47
+ file_extension=".jpg"
48
+ os.makedirs(archive_dir, exist_ok=True) # Create the archive directory if it doesn't exist
49
+
50
+ for filename in os.listdir(directory_path):
51
+ if filename.endswith(file_extension) and filename != ".gitkeep":
52
+ timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
53
+ new_filename = f"{filename.rsplit('.', 1)[0]}_{timestamp}.{filename.rsplit('.', 1)[1]}"
54
+ src_path = os.path.join(directory_path, filename)
55
+ dest_path = os.path.join(archive_dir, new_filename)
56
+ shutil.move(src_path, dest_path)
57
+ print(f"Moved and renamed: {src_path} to {dest_path}")
58
+
59
+ def show_annotation(image, results, detection_id, image_count):
60
+ print(f"results[0]: {results[0]}")
61
+ predictions = results[0].predictions
62
+ filtered_predictions = [pred for pred in predictions if pred.detection_id == detection_id]
63
+
64
+ if not filtered_predictions:
65
+ print("No matching detection_id found.")
66
+
67
+ # Update the predictions list with only the filtered predictions
68
+ results[0].predictions = filtered_predictions
69
+ # Load the results into the supervision Detections API
70
+ detections = sv.Detections.from_inference(results[0].dict(by_alias=True, exclude_none=True))
71
+ print(f"detections : {detections}")
72
+
73
+ class_name = "None"
74
+ annotated_image= image
75
+
76
+ if detections and detections.data and detections.data["class_name"]:
77
+ class_name = detections.data["class_name"][0]
78
+
79
+ class_id = str(mapping.get(class_name, -1))
80
+ updated_label = class_name + ", id=" + class_id
81
+ # updated_label = class_name + ", " + class_id
82
+
83
+ # Create supervision annotators
84
+ bounding_box_annotator = sv.BoundingBoxAnnotator()
85
+ label_annotator = sv.LabelAnnotator()
86
+
87
+ # Annotate the image with inference results
88
+ annotated_image = bounding_box_annotator.annotate(scene=image, detections=detections)
89
+ annotated_image = label_annotator.annotate(scene=annotated_image, detections=detections, labels=[updated_label])
90
+
91
+ # Save the annotated image with a unique name
92
+ file_name = f"annotated_image{image_count}.jpg"
93
+ file_path = os.path.join(PC_CONFIG.FILE_DIRECTORY,"image-rec","annotated_images",file_name)
94
+
95
+ cv2.imwrite(file_path, annotated_image)
96
+ print(f"Image saved as {file_path}")
97
+
98
+ # Create a named window
99
+ window_name = f"Annotated Image {image_count}"
100
+ cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
101
+
102
+ # Move the window to a specific position on the screen
103
+ cv2.moveWindow(window_name, 0, 0)
104
+
105
+ # Display the saved annotated image
106
+ cv2.imshow(f"Annotated Image {image_count}", annotated_image)
107
+ cv2.waitKey(3000) # Display the image for 3 seconds
108
+ cv2.destroyAllWindows() # Close all OpenCV windows
109
+
110
+
111
+ if __name__ == '__main__':
112
+ image_folder_path = os.path.join(PC_CONFIG.FILE_DIRECTORY, "image-rec", "sample_images")
113
+ show_annotation_queue = Queue()
114
+ process = Process(target=start_annotation_process, args=(show_annotation_queue,))
115
+ process.start()
116
+
117
+ predictor = Predictor()
118
+
119
+ # List all .jpg files in the specified directory
120
+ file_path_list = [os.path.join(image_folder_path, f) for f in os.listdir(image_folder_path) if f.endswith('.jpg')]
121
+
122
+ for file_path in file_path_list:
123
+ class_name, results, detection_id = predictor.predict_id(file_path) # Perform prediction
124
+
125
+ print(f"result: {results}")
126
+ show_annotation_queue.put((file_path, results, detection_id))
127
+
128
+ # Send the termination signal to the process
129
+ show_annotation_queue.put(("STOP",))
130
+
131
+ # Wait for the process to finish
132
+ process.join()
show_stitched.py ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import matplotlib
2
+ matplotlib.use('TkAgg') # Use the TkAgg backend
3
+ import matplotlib.pyplot as plt
4
+ import matplotlib.image as mpimg
5
+ import os
6
+ import importlib.util
7
+ import math
8
+ from datetime import datetime
9
+ import shutil
10
+
11
+ config_dir = os.path.abspath(os.path.dirname(__file__))
12
+ config_path = os.path.join(config_dir, 'PC_CONFIG.py')
13
+ spec = importlib.util.spec_from_file_location("PC_CONFIG", config_path)
14
+ PC_CONFIG = importlib.util.module_from_spec(spec)
15
+ spec.loader.exec_module(PC_CONFIG)
16
+
17
+ def showAnnotatedStitched():
18
+ image_dir = os.path.join(PC_CONFIG.FILE_DIRECTORY,"image-rec","annotated_images")
19
+ image_files = [os.path.join(image_dir, filename) for filename in os.listdir(image_dir) if filename.endswith(".jpg")]
20
+
21
+ # Adjust the layout to 2 by 4
22
+ num_cols = 4 # Number of columns
23
+ num_rows = 2 # Number of rows
24
+
25
+ # Ensure there is at least one row if there are images
26
+ num_rows = max(1, math.ceil(len(image_files) / num_cols)) if image_files else 1
27
+
28
+ fig, axes = plt.subplots(num_rows, num_cols, figsize=(num_cols * 3, num_rows * 3), dpi=150)
29
+
30
+ # Check if there are any images to display
31
+ if image_files:
32
+ for i, ax in enumerate(axes.flat):
33
+ if i < len(image_files):
34
+ img = mpimg.imread(image_files[i])
35
+ ax.imshow(img)
36
+ ax.axis("off")
37
+ else:
38
+ ax.axis("off")
39
+ else:
40
+ # If no images, just hide all axes as there's nothing to display
41
+ for ax in axes.flat:
42
+ ax.axis("off")
43
+
44
+ plt.subplots_adjust(wspace=0, hspace=0)
45
+ plt.tight_layout(pad=0, h_pad=0, w_pad=0)
46
+
47
+ stitched_image_dir = os.path.join(PC_CONFIG.FILE_DIRECTORY, "image-rec", "stitched_images")
48
+ stitched_file_name = generate_filename_with_timestamp()
49
+ save_path = os.path.join(stitched_image_dir, stitched_file_name)
50
+ plt.savefig(save_path, bbox_inches='tight', pad_inches=0)
51
+ print(f"Figure saved to {save_path}")
52
+
53
+ manager = plt.get_current_fig_manager()
54
+ manager.window.wm_geometry("+0+0") # Move the window to position (0, 0)
55
+
56
+ # plt.show()
57
+ archive_directory_content(image_dir)
58
+
59
+ def generate_filename_with_timestamp(prefix="stitched_image", extension=".jpg"):
60
+ # Get the current date and time
61
+ now = datetime.now()
62
+ # Format the date and time as a string
63
+ timestamp = now.strftime("%Y-%m-%d_%H-%M-%S")
64
+ # Combine prefix, timestamp, and extension to form the filename
65
+ filename = f"{prefix}_{timestamp}{extension}"
66
+ return filename
67
+
68
+ def archive_directory_content(directory_path):
69
+ # Moves files with the specified extension from the given directory to an archive directory,
70
+ # except for files named .gitkeep.
71
+
72
+ archive_dir= os.path.join(PC_CONFIG.FILE_DIRECTORY,"image-rec","annotated_archive")
73
+ file_extension=".jpg"
74
+ os.makedirs(archive_dir, exist_ok=True) # Create the archive directory if it doesn't exist
75
+
76
+ for filename in os.listdir(directory_path):
77
+ if filename.endswith(file_extension) and filename != ".gitkeep":
78
+ timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
79
+ new_filename = f"{filename.rsplit('.', 1)[0]}_{timestamp}.{filename.rsplit('.', 1)[1]}"
80
+ src_path = os.path.join(directory_path, filename)
81
+ dest_path = os.path.join(archive_dir, new_filename)
82
+ shutil.move(src_path, dest_path)
83
+ print(f"Moved and renamed: {src_path} to {dest_path}")
84
+
85
+ if __name__ == "__main__":
86
+ # Example usage
87
+ showAnnotatedStitched()
test_processAlgo.py ADDED
@@ -0,0 +1,245 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ def map_commands_to_paths(data):
2
+ transformed_commands = data["commands"]
3
+ paths = data["path"]
4
+ mapped_commands = []
5
+
6
+ # Initialize variables
7
+ non_snap_path_index = 0 # Tracks which path we're on
8
+
9
+ segment_start = True # Flag to indicate if we are at the start of a new segment
10
+
11
+ for command in transformed_commands:
12
+ if command in ["FW003", "FW002", "BW002"]:
13
+ # At the start of a segment that requires a path
14
+ if segment_start and non_snap_path_index < len(paths):
15
+ mapped_commands.append((command, [paths[non_snap_path_index]]))
16
+ non_snap_path_index += 1
17
+ else:
18
+ mapped_commands.append((command, None))
19
+ segment_start = False # Not at the start anymore
20
+ elif command in ["FL090", "FR090", "BL090", "BR090"]:
21
+ # These commands are continuation of a segment, no path assigned
22
+ mapped_commands.append((command, None))
23
+ segment_start = False # Still not the start of a new segment
24
+ elif command.startswith("SNAP") or command == "FIN":
25
+ # SNAP and FIN are treated as their own segments but don't have paths
26
+ mapped_commands.append((command, None))
27
+ segment_start = True # Next command could be the start of a new segment
28
+ else:
29
+ # For any command not explicitly handled above, assign the next path if available
30
+ if non_snap_path_index < len(paths):
31
+ mapped_commands.append((command, [paths[non_snap_path_index]]))
32
+ non_snap_path_index += 1
33
+ else:
34
+ mapped_commands.append((command, []))
35
+ segment_start = True # Next command could be the start of a new segment
36
+
37
+ return mapped_commands
38
+
39
+ if __name__ == "__main__":
40
+ # Provided input data
41
+
42
+ data = {
43
+ "commands": [
44
+ "FW090",
45
+ "FW003",
46
+ "BR090",
47
+ "BW002",
48
+ "BW020",
49
+ "FW003",
50
+ "BR090",
51
+ "BW002",
52
+ "SNAP6_C",
53
+ "BW040",
54
+ "FW003",
55
+ "BL090",
56
+ "BW002",
57
+ "FW050",
58
+ "SNAP1_C",
59
+ "BW090",
60
+ "FW003",
61
+ "BL090",
62
+ "BW002",
63
+ "FW010",
64
+ "SNAP4_C",
65
+ "BW020",
66
+ "FW003",
67
+ "BL090",
68
+ "BW002",
69
+ "SNAP5_C",
70
+ "BW070",
71
+ "FW003",
72
+ "BR090",
73
+ "BW002",
74
+ "SNAP2_C",
75
+ "BW040",
76
+ "FW003",
77
+ "BL090",
78
+ "BW002",
79
+ "FW090",
80
+ "FW010",
81
+ "FW003",
82
+ "FL090",
83
+ "FW090",
84
+ "FW030",
85
+ "FW003",
86
+ "BR090",
87
+ "BW002",
88
+ "FW020",
89
+ "SNAP3_C",
90
+ "FIN"
91
+ ],
92
+ "distance": 184.0,
93
+ "path": [
94
+ {
95
+ "d": 0,
96
+ "s": -1,
97
+ "x": 1,
98
+ "y": 1
99
+ },
100
+ {
101
+ "d": 0,
102
+ "s": -1,
103
+ "x": 1,
104
+ "y": 10
105
+ },
106
+ {
107
+ "d": 6,
108
+ "s": -1,
109
+ "x": 3,
110
+ "y": 7
111
+ },
112
+ {
113
+ "d": 6,
114
+ "s": -1,
115
+ "x": 5,
116
+ "y": 7
117
+ },
118
+ {
119
+ "d": 4,
120
+ "s": 6,
121
+ "x": 8,
122
+ "y": 9
123
+ },
124
+ {
125
+ "d": 4,
126
+ "s": -1,
127
+ "x": 8,
128
+ "y": 13
129
+ },
130
+ {
131
+ "d": 6,
132
+ "s": -1,
133
+ "x": 10,
134
+ "y": 16
135
+ },
136
+ {
137
+ "d": 6,
138
+ "s": 1,
139
+ "x": 5,
140
+ "y": 16
141
+ },
142
+ {
143
+ "d": 6,
144
+ "s": -1,
145
+ "x": 14,
146
+ "y": 16
147
+ },
148
+ {
149
+ "d": 0,
150
+ "s": -1,
151
+ "x": 17,
152
+ "y": 14
153
+ },
154
+ {
155
+ "d": 0,
156
+ "s": 4,
157
+ "x": 17,
158
+ "y": 15
159
+ },
160
+ {
161
+ "d": 0,
162
+ "s": -1,
163
+ "x": 17,
164
+ "y": 13
165
+ },
166
+ {
167
+ "d": 2,
168
+ "s": 5,
169
+ "x": 15,
170
+ "y": 10
171
+ },
172
+ {
173
+ "d": 2,
174
+ "s": -1,
175
+ "x": 8,
176
+ "y": 10
177
+ },
178
+ {
179
+ "d": 0,
180
+ "s": 2,
181
+ "x": 5,
182
+ "y": 8
183
+ },
184
+ {
185
+ "d": 0,
186
+ "s": -1,
187
+ "x": 5,
188
+ "y": 4
189
+ },
190
+ {
191
+ "d": 2,
192
+ "s": -1,
193
+ "x": 3,
194
+ "y": 1
195
+ },
196
+ {
197
+ "d": 2,
198
+ "s": -1,
199
+ "x": 12,
200
+ "y": 1
201
+ },
202
+ {
203
+ "d": 2,
204
+ "s": 6,
205
+ "x": 13,
206
+ "y": 1
207
+ },
208
+ {
209
+ "d": 0,
210
+ "s": -1,
211
+ "x": 15,
212
+ "y": 4
213
+ },
214
+ {
215
+ "d": 0,
216
+ "s": -1,
217
+ "x": 15,
218
+ "y": 13
219
+ },
220
+ {
221
+ "d": 0,
222
+ "s": -1,
223
+ "x": 15,
224
+ "y": 16
225
+ },
226
+ {
227
+ "d": 6,
228
+ "s": -1,
229
+ "x": 17,
230
+ "y": 13
231
+ },
232
+ {
233
+ "d": 6,
234
+ "s": 3,
235
+ "x": 15,
236
+ "y": 13
237
+ }
238
+ ]
239
+ }
240
+ # Correct function call
241
+ command_path_mapping = map_commands_to_paths(data)
242
+
243
+ # Output the mapping for review
244
+ for mapping in command_path_mapping:
245
+ print(mapping)
test_yolocam.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import numpy as np
3
+ from ultralytics import YOLO
4
+
5
def get_camera_index():
    """Probe consecutive camera indices and return those that deliver a frame.

    Opens indices 0, 1, 2, ... with ``cv2.VideoCapture`` and stops at the
    first index that fails to read a frame.  NOTE: probing stops at the first
    failure, so cameras on non-contiguous indices are not detected.

    Returns:
        list[int]: camera indices that successfully produced a frame.
    """
    available = []
    index = 0
    while True:
        cap = cv2.VideoCapture(index)
        try:
            ok = cap.read()[0]
        finally:
            # Always release the handle -- the original leaked the capture
            # object of the first failing index (release() was only called
            # on the success branch).
            cap.release()
        if not ok:
            break
        available.append(index)
        index += 1
    return available
17
# --- Live YOLOv8 inference on a webcam feed ---------------------------------

# Probe for available cameras (see get_camera_index above).
camera_indexes = get_camera_index()
print(f"Available cameras: {camera_indexes}")

# Prefer the last available camera (often an external or Continuity Camera);
# fall back to index 0 if probing found nothing.
camera_index = camera_indexes[-1] if camera_indexes else 0

# Initialize the webcam.
cap = cv2.VideoCapture(camera_index)

# Initialize the YOLOv8 model from a local weights file.
model = YOLO('../weights/best_task2.pt')

# Square resolution frames are resized to before inference.
INPUT_SIZE = (720, 720)

try:
    while True:
        # Read a frame from the webcam; stop on read failure (camera gone).
        ret, frame = cap.read()
        if not ret:
            break

        # Resize the frame to the model input size (720x720).  The original
        # comment claimed 640x640, which contradicted INPUT_SIZE.
        resized_frame = cv2.resize(frame, INPUT_SIZE)

        # Run YOLOv8 inference on the resized frame.
        results = model(resized_frame)

        # Draw the detections; plot() returns an annotated copy of the input.
        annotated_frame = results[0].plot()

        # Scale the annotated frame back to the camera's native resolution
        # so the display window matches the raw feed.
        annotated_frame = cv2.resize(annotated_frame, (frame.shape[1], frame.shape[0]))

        # Display the annotated frame.
        cv2.imshow("YOLOv8 Inference", annotated_frame)

        # Break the loop if 'q' is pressed.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
finally:
    # Release the webcam and close windows even if inference raises --
    # the original left the camera held on any exception in the loop.
    cap.release()
    cv2.destroyAllWindows()
weights/best_FullDS150E.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3cfa3c6d1fdf43160f08bac9d121c4cde5daf5f8a827920e5dbfaf4d25351842
3
+ size 6248483
weights/best_FullDS200E.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:68442001cb9008aaa6c84b9fedc2b5773a437fab739b5221d0e34d138150da70
3
+ size 6248483
weights/best_task1_E128E.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ad82a2d814241cee5659c24a1d55bdfc5ad884608f4361019f6c8e36171dd10b
3
+ size 6257763
weights/best_task1_E256E.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:632ed6138802fa71037494ef2aa707b5a9929f57c0743f4529b8f5cf013a5f42
3
+ size 6257763
weights/best_task1_R128E.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3342982d0c13fb05b4cd0cc0c117eb09c93f8056e8c3a011b370f54c5d3b49ca
3
+ size 6257699
weights/best_task2.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:16085d85fbb216167840cd2b132fce9db0775ccbdf1bd8d107ad8f11f5bfa19e
3
+ size 6229475