Zynaly committed on
Commit
4882220
·
verified ·
1 Parent(s): 41c9e36

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +439 -436
app.py CHANGED
@@ -1,437 +1,440 @@
1
- # -*- coding: utf-8 -*-
2
- """app.py
3
- Automated Fire and Accident Detection for CCTV with Freshdesk Ticket Creation
4
- """
5
-
6
- import cv2
7
- import os
8
- import PIL.Image as Image
9
- import gradio as gr
10
- import numpy as np
11
- from ultralytics import YOLO
12
- import requests
13
- import json
14
- from datetime import datetime
15
- import tempfile
16
- import torch
17
- import uuid # Added for unique filenames
18
-
19
- # Freshdesk Configuration
20
- FRESHDESK_DOMAIN = "7kctech-supportdesk.freshdesk.com"
21
- API_KEY = os.getenv("FRESHDESK_API_KEY", "JoJNI8nIY3hWQsk87e") # Fallback for local testing
22
-
23
- # Base URL for Hugging Face Space
24
- BASE_URL = "https://huggingface.co/spaces/Zynaly/Surveillance-Intelligent-Camera/tree/main"
25
-
26
- # Directory for saving images
27
- MEDIA_DIR = "media"
28
- FIRE_DIR = os.path.join(MEDIA_DIR, "fire")
29
- ACCIDENT_DIR = os.path.join(MEDIA_DIR, "accidents")
30
-
31
- # Create directories if they don't exist
32
- os.makedirs(FIRE_DIR, exist_ok=True)
33
- os.makedirs(ACCIDENT_DIR, exist_ok=True)
34
-
35
- # Fixed thresholds for automated detection
36
- FIRE_CONF_THRESHOLD = 0.25
37
- FIRE_IOU_THRESHOLD = 0.45
38
- ACCIDENT_CONF_THRESHOLD = 0.3
39
- ACCIDENT_IOU_THRESHOLD = 0.55
40
-
41
- # Load models with explicit task definition
42
- fire_model = YOLO("fire.pt", task="detect") # Fire detection model
43
- accident_model = YOLO("best.pt", task="detect") # Accident detection model
44
-
45
- # Function to save image and return its URL
46
- def save_image(image, incident_type):
47
- try:
48
- # Generate unique filename
49
- timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
50
- unique_id = str(uuid.uuid4())[:8]
51
- filename = f"{incident_type}_{timestamp}_{unique_id}.jpg"
52
- save_path = os.path.join(MEDIA_DIR, incident_type, filename)
53
-
54
- # Save the image
55
- image.save(save_path)
56
-
57
- # Construct the URL
58
- url = f"{BASE_URL}/{save_path}"
59
- return url
60
- except Exception as e:
61
- print(f"Error saving image: {str(e)}")
62
- return None
63
-
64
- # Function to create Freshdesk ticket
65
- # Function to create Freshdesk ticket with image attachment
66
- # Function to create Freshdesk ticket with image attachment
67
- def create_freshdesk_ticket(incident_type, confidence_score, img):
68
- # Save the image to the appropriate directory and get its local path
69
- image_url = None
70
- image_path = None
71
- if incident_type.lower() == "fire incident":
72
- image_url = save_image(img, "fire")
73
- image_path = os.path.join(MEDIA_DIR, "fire", os.path.basename(image_url.split("/")[-1]))
74
- elif incident_type.lower() == "accident incident":
75
- image_url = save_image(img, "accidents")
76
- image_path = os.path.join(MEDIA_DIR, "accidents", os.path.basename(image_url.split("/")[-1]))
77
- elif incident_type.lower() == "fire and accident incident":
78
- fire_url = save_image(img, "fire")
79
- accident_url = save_image(img, "accidents")
80
- image_url = fire_url or accident_url
81
- image_path = os.path.join(MEDIA_DIR, "fire" if fire_url else "accidents", os.path.basename(image_url.split("/")[-1]))
82
-
83
- # Shortened subject
84
- subject = f"{incident_type} Detected - Confidence: {confidence_score*100:.1f}%"
85
-
86
- # Detailed description
87
- description = f"""
88
- {incident_type} is critical.
89
- Details:
90
- 1. Address: 123 Main Street, Lahore
91
- 2. Phone: 923013225853
92
- 3. Confidence Score: {confidence_score*100:.1f}%
93
- 4. Image URL: {image_url or 'https://example.com/roboi.jpg'}
94
- 5. Incident Time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
95
- """
96
-
97
- ticket_data = {
98
- "email": "safe.city@example.com",
99
- "subject": subject,
100
- "description": description,
101
- "priority": 4, # Urgent
102
- "status": 2 # Open
103
- }
104
-
105
- # Create ticket
106
- url = f"https://{FRESHDESK_DOMAIN}/api/v2/tickets"
107
- headers = {"Content-Type": "application/json"}
108
-
109
- response = requests.post(
110
- url,
111
- auth=(API_KEY, "X"),
112
- headers=headers,
113
- data=json.dumps(ticket_data)
114
- )
115
-
116
- if response.status_code == 201:
117
- ticket = response.json()
118
- ticket_id = ticket.get('id')
119
- print(f"✅ Ticket created successfully: Ticket ID {ticket_id}")
120
- print(json.dumps(ticket, indent=2))
121
-
122
- # Attach image to ticket if image_path exists
123
- if image_path and os.path.exists(image_path):
124
- attachment_url = f"https://{FRESHDESK_DOMAIN}/api/v2/tickets/{ticket_id}/attachments"
125
- try:
126
- with open(image_path, 'rb') as f:
127
- files = {'attachments[]': (os.path.basename(image_path), f, 'image/jpeg')}
128
- # Do not set Content-Type header; let requests handle it
129
- attachment_response = requests.post(
130
- attachment_url,
131
- auth=(API_KEY, "X"),
132
- files=files
133
- )
134
- if attachment_response.status_code == 201:
135
- print(f"✅ Image attached to ticket {ticket_id}")
136
- else:
137
- print(f"❌ Failed to attach image: {attachment_response.status_code} - {attachment_response.text}")
138
- except Exception as e:
139
- print(f"❌ Error accessing image file {image_path}: {str(e)}")
140
- else:
141
- print(f"❌ Image file not found: {image_path}")
142
-
143
- return f"Ticket created for {incident_type} with ID {ticket_id}"
144
- else:
145
- print(f"❌ Failed to create ticket: {response.status_code} - {response.text}")
146
- return f"Failed to create ticket for {incident_type}: {response.status_code} - {response.text}"
147
- # Image inference function
148
- def detect_image(image):
149
- try:
150
- pil_img = image
151
-
152
- # Fire detection
153
- fire_results = fire_model.predict(
154
- source=pil_img,
155
- conf=FIRE_CONF_THRESHOLD,
156
- iou=FIRE_IOU_THRESHOLD,
157
- show_labels=True,
158
- show_conf=True,
159
- imgsz=640,
160
- verbose=False
161
- )
162
-
163
- fire_detected = False
164
- fire_confidence = 0.0
165
- fire_annotated_img = fire_results[0].plot()
166
- fire_confidences = []
167
- fire_classes = []
168
- for r in fire_results:
169
- if r.boxes:
170
- for box in r.boxes:
171
- confidence = box.conf[0].item()
172
- class_id = int(box.cls[0].item())
173
- fire_confidences.append(confidence)
174
- fire_classes.append(class_id)
175
- if confidence >= FIRE_CONF_THRESHOLD:
176
- fire_detected = True
177
- fire_confidence = max(fire_confidence, confidence)
178
- print(f"Fire model raw confidences: {fire_confidences}, classes: {fire_classes}")
179
-
180
- # Accident detection
181
- accident_results = accident_model.predict(
182
- source=pil_img,
183
- conf=ACCIDENT_CONF_THRESHOLD,
184
- iou=ACCIDENT_IOU_THRESHOLD,
185
- show_labels=True,
186
- show_conf=True,
187
- imgsz=640,
188
- verbose=False
189
- )
190
-
191
- accident_detected = False
192
- accident_confidence = 0.0
193
- accident_annotated_img = accident_results[0].plot()
194
- accident_confidences = []
195
- accident_classes = []
196
- accident_boxes = accident_results[0].boxes
197
- if accident_boxes:
198
- for box in accident_boxes:
199
- confidence = box.conf[0].item()
200
- class_id = int(box.cls[0].item())
201
- accident_confidences.append(confidence)
202
- accident_classes.append(class_id)
203
- if confidence >= ACCIDENT_CONF_THRESHOLD:
204
- accident_detected = True
205
- accident_confidence = max(accident_confidence, confidence)
206
- print(f"Accident model raw confidences: {accident_confidences}, classes: {accident_classes}")
207
-
208
- # Combine annotated images
209
- fire_annotated_img = np.array(fire_annotated_img)
210
- accident_annotated_img = np.array(accident_annotated_img)
211
- combined_img = Image.fromarray(np.maximum(fire_annotated_img, accident_annotated_img))
212
-
213
- # Detection info
214
- detection_info = "Detection Results:\n"
215
- if fire_detected:
216
- detection_info += f"Fire detected with confidence: {fire_confidence*100:.1f}%\n"
217
- else:
218
- detection_info += f"No fire detected. Raw confidences: {fire_confidences}, Classes: {fire_classes}\n"
219
- if accident_detected:
220
- detection_info += f"Accident detected with confidence: {accident_confidence*100:.1f}%\n"
221
- else:
222
- detection_info += f"No accident detected. Raw confidences: {accident_confidences}, Classes: {accident_classes}\n"
223
-
224
- # Create a single Freshdesk ticket
225
- ticket_info = ""
226
- if fire_detected and not accident_detected:
227
- ticket_info = create_freshdesk_ticket("Fire Incident", fire_confidence, pil_img)
228
- elif accident_detected and not fire_detected:
229
- ticket_info = create_freshdesk_ticket("Accident Incident", accident_confidence, pil_img)
230
- elif fire_detected and accident_detected:
231
- ticket_info = create_freshdesk_ticket("Fire and Accident Incident", max(fire_confidence, accident_confidence), pil_img)
232
- else:
233
- ticket_info = "No ticket created: No incidents detected"
234
-
235
- return combined_img, detection_info, ticket_info
236
-
237
- except Exception as e:
238
- return image, f"Error during detection: {str(e)}\nRaw confidences: Fire {fire_confidences}, Accident {accident_confidences}", "No ticket created due to error"
239
-
240
- # Video processing function
241
- def detect_video(video_path):
242
- try:
243
- cap = cv2.VideoCapture(video_path)
244
-
245
- frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
246
- frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
247
- fps = int(cap.get(cv2.CAP_PROP_FPS))
248
-
249
- output_path = tempfile.mktemp(suffix='.mp4')
250
- fourcc = cv2.VideoWriter_fourcc(*'mp4v')
251
- out = cv2.VideoWriter(output_path, fourcc, fps, (frame_width, frame_height))
252
-
253
- frame_count = 0
254
- fire_detected_once = False
255
- accident_detected_once = False
256
- fire_detection_frames = []
257
- accident_detection_frames = []
258
- fire_confidences_all = []
259
- accident_confidences_all = []
260
- fire_classes_all = []
261
- accident_classes_all = []
262
-
263
- while cap.isOpened():
264
- ret, frame = cap.read()
265
- if not ret:
266
- break
267
-
268
- frame_count += 1
269
- pil_img = Image.fromarray(frame[..., ::-1]) # Convert BGR to RGB
270
-
271
- # Fire detection
272
- fire_results = fire_model.predict(
273
- source=pil_img,
274
- conf=FIRE_CONF_THRESHOLD,
275
- iou=FIRE_IOU_THRESHOLD,
276
- show_labels=True,
277
- show_conf=True,
278
- imgsz=640,
279
- verbose=False
280
- )
281
-
282
- fire_detected = False
283
- fire_confidence = 0.0
284
- fire_annotated_frame = fire_results[0].plot()
285
- fire_confidences = []
286
- fire_classes = []
287
- for r in fire_results:
288
- if r.boxes:
289
- for box in r.boxes:
290
- confidence = box.conf[0].item()
291
- class_id = int(box.cls[0].item())
292
- fire_confidences.append(confidence)
293
- fire_classes.append(class_id)
294
- if confidence >= FIRE_CONF_THRESHOLD:
295
- fire_detected = True
296
- fire_confidence = max(fire_confidence, confidence)
297
- fire_detection_frames.append(frame_count)
298
- fire_confidences_all.extend(fire_confidences)
299
- fire_classes_all.extend(fire_classes)
300
-
301
- # Accident detection
302
- accident_results = accident_model.predict(
303
- source=pil_img,
304
- conf=ACCIDENT_CONF_THRESHOLD,
305
- iou=ACCIDENT_IOU_THRESHOLD,
306
- show_labels=True,
307
- show_conf=True,
308
- imgsz=640,
309
- verbose=False
310
- )
311
-
312
- accident_detected = False
313
- accident_confidence = 0.0
314
- accident_annotated_frame = accident_results[0].plot()
315
- accident_confidences = []
316
- accident_classes = []
317
- accident_boxes = accident_results[0].boxes
318
- if accident_boxes:
319
- for box in accident_boxes:
320
- confidence = box.conf[0].item()
321
- class_id = int(box.cls[0].item())
322
- accident_confidences.append(confidence)
323
- accident_classes.append(class_id)
324
- if confidence >= ACCIDENT_CONF_THRESHOLD:
325
- accident_detected = True
326
- accident_confidence = max(accident_confidence, confidence)
327
- accident_detection_frames.append(frame_count)
328
- accident_confidences_all.extend(accident_confidences)
329
- accident_classes_all.extend(accident_classes)
330
-
331
- # Combine annotated frames
332
- fire_annotated_frame = np.array(fire_annotated_frame)
333
- accident_annotated_frame = np.array(accident_annotated_frame)
334
- combined_frame = np.maximum(fire_annotated_frame, accident_annotated_frame)
335
- out.write(combined_frame)
336
-
337
- # Create a single ticket for the first detection of each incident type
338
- if fire_detected and not fire_detected_once:
339
- create_freshdesk_ticket("Fire Incident", fire_confidence, pil_img)
340
- fire_detected_once = True
341
- if accident_detected and not accident_detected_once:
342
- create_freshdesk_ticket("Accident Incident", accident_confidence, pil_img)
343
- accident_detected_once = True
344
-
345
- cap.release()
346
- out.release()
347
-
348
- detection_info = f"Video processed successfully!\n"
349
- detection_info += f"Total frames: {frame_count}\n"
350
- detection_info += f"Frames with fire detections: {len(set(fire_detection_frames))}\n"
351
- detection_info += f"Frames with accident detections: {len(set(accident_detection_frames))}\n"
352
- if fire_detection_frames:
353
- detection_info += f"Fire detection frames: {sorted(set(fire_detection_frames))[:10]}...\n"
354
- else:
355
- detection_info += f"No fire detections. Raw confidences (sample): {fire_confidences_all[:10]}, Classes: {fire_classes_all[:10]}...\n"
356
- if accident_detection_frames:
357
- detection_info += f"Accident detection frames: {sorted(set(accident_detection_frames))[:10]}...\n"
358
- else:
359
- detection_info += f"No accident detections. Raw confidences (sample): {accident_confidences_all[:10]}, Classes: {accident_classes_all[:10]}...\n"
360
-
361
- ticket_info = f"Tickets created: {'Fire' if fire_detected_once else ''}{' and ' if fire_detected_once and accident_detected_once else ''}{'Accident' if accident_detected_once else ''}." if fire_detected_once or accident_detected_once else "No tickets created: No incidents detected"
362
-
363
- return output_path, detection_info, ticket_info
364
-
365
- except Exception as e:
366
- return None, f"Error processing video: {str(e)}\nRaw confidences: Fire {fire_confidences_all[:10]}, Accident {accident_confidences_all[:10]}", "No ticket created due to error"
367
-
368
- # Create Gradio interface for CCTV automation
369
- with gr.Blocks(title="Rapid Rescue - Automated CCTV Fire and Accident Detection") as iface:
370
- gr.Markdown("""
371
- # 🚨 Rapid Rescue - Automated CCTV Fire and Accident Detection System
372
-
373
- This AI system automatically detects fires and accidents in images and videos from CCTV feeds using two YOLO models:
374
- - YOLOv8n for fire detection (Confidence: 0.25, IoU: 0.45)
375
- - YOLOv8m for accident detection (Confidence: 0.3, IoU: 0.55)
376
-
377
- **Features:**
378
- - Fully automated detection with fixed thresholds
379
- - Creates Freshdesk tickets for detected incidents with saved image URLs
380
- - Supports both images and videos from CCTV feeds
381
- - Images saved in media/fire and media/accidents directories
382
- - Optimized for deployment on Hugging Face Spaces
383
-
384
- **Usage:**
385
- 1. Upload an image or video from a CCTV feed
386
- 2. Click process to run detection
387
- 3. View results with bounding boxes, confidence scores, class labels, and ticket creation status
388
- """)
389
-
390
- with gr.Tabs():
391
- with gr.Tab("Image Detection"):
392
- with gr.Row():
393
- with gr.Column():
394
- image_input = gr.Image(type="pil", label="Upload CCTV Image")
395
- image_button = gr.Button("Detect Fire and Accidents", variant="primary")
396
-
397
- with gr.Column():
398
- image_output = gr.Image(label="Detection Results")
399
- image_info = gr.Textbox(label="Detection Information", lines=8)
400
- ticket_info = gr.Textbox(label="Ticket Creation Status", lines=2)
401
-
402
- image_button.click(
403
- fn=detect_image,
404
- inputs=[image_input],
405
- outputs=[image_output, image_info, ticket_info]
406
- )
407
-
408
- with gr.Tab("Video Detection"):
409
- with gr.Row():
410
- with gr.Column():
411
- video_input = gr.Video(label="Upload Video")
412
- video_button = gr.Button("Process Video", variant="primary")
413
-
414
- with gr.Column():
415
- video_output = gr.Video(label="Processed Video")
416
- video_info = gr.Textbox(label="Processing Information", lines=8)
417
- ticket_info = gr.Textbox(label="Ticket Creation Status", lines=2)
418
-
419
- video_button.click(
420
- fn=detect_video,
421
- inputs=[video_input],
422
- outputs=[video_output, video_info, ticket_info]
423
- )
424
-
425
- gr.Markdown("""
426
- ### Notes
427
- - Freshdesk tickets are created automatically when fire or accident is detected (one per incident type).
428
- - Images are saved in media/fire or media/accidents directories with unique filenames.
429
- - Ticket includes URL to the saved image.
430
- - For videos, one ticket is created per incident type with the first detected frame saved.
431
- - Deploy on Hugging Face Spaces with `requirements.txt` and model files (`fire.pt`, `best.pt`).
432
- - Debug info includes raw confidence scores and class labels to verify detection performance.
433
- """)
434
-
435
- # Launch the app
436
- if __name__ == "__main__":
 
 
 
437
  iface.launch()
 
1
+ # -*- coding: utf-8 -*-
2
+ """app.py
3
+ Automated Fire and Accident Detection for CCTV with Freshdesk Ticket Creation
4
+ """
5
+ # Near the top of app.py, after imports
6
+ import ultralytics.nn.modules.block
7
+ from custom_blocks import SCDown
8
+ ultralytics.nn.modules.block.SCDown = SCDown
9
+ import cv2
10
+ import os
11
+ import PIL.Image as Image
12
+ import gradio as gr
13
+ import numpy as np
14
+ from ultralytics import YOLO
15
+ import requests
16
+ import json
17
+ from datetime import datetime
18
+ import tempfile
19
+ import torch
20
+ import uuid # Added for unique filenames
21
+
22
# Freshdesk Configuration
FRESHDESK_DOMAIN = "7kctech-supportdesk.freshdesk.com"
# SECURITY NOTE(review): a live-looking API key is committed as the fallback
# value below. Rotate this key and rely solely on the FRESHDESK_API_KEY
# environment variable; committed credentials are harvestable from git history.
API_KEY = os.getenv("FRESHDESK_API_KEY", "JoJNI8nIY3hWQsk87e")  # Fallback for local testing

# Base URL for Hugging Face Space
# NOTE(review): this points at the repo "tree" browsing view; direct file
# links on Spaces normally use ".../resolve/main" — confirm the intended
# target before relying on the generated image URLs.
BASE_URL = "https://huggingface.co/spaces/Zynaly/Surveillance-Intelligent-Camera/tree/main"

# Directory layout for saved incident snapshots
MEDIA_DIR = "media"
FIRE_DIR = os.path.join(MEDIA_DIR, "fire")
ACCIDENT_DIR = os.path.join(MEDIA_DIR, "accidents")

# Create directories if they don't exist
os.makedirs(FIRE_DIR, exist_ok=True)
os.makedirs(ACCIDENT_DIR, exist_ok=True)

# Fixed thresholds for automated detection (one pair per model)
FIRE_CONF_THRESHOLD = 0.25
FIRE_IOU_THRESHOLD = 0.45
ACCIDENT_CONF_THRESHOLD = 0.3
ACCIDENT_IOU_THRESHOLD = 0.55

# Load models with explicit task definition
fire_model = YOLO("fire.pt", task="detect")  # Fire detection model
accident_model = YOLO("best.pt", task="detect")  # Accident detection model
47
+
48
# Function to save image and return its URL
def save_image(image, incident_type):
    """Save *image* under media/<incident_type>/ and return its public URL.

    Parameters
    ----------
    image : PIL.Image.Image
        Image to persist (anything exposing ``.save(path)`` works).
    incident_type : str
        Sub-directory name, e.g. "fire" or "accidents".

    Returns
    -------
    str | None
        URL of the saved file, or ``None`` if saving failed.
    """
    try:
        # Unique filename: timestamp plus a short random suffix avoids
        # collisions when several detections happen within the same second.
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        unique_id = str(uuid.uuid4())[:8]
        filename = f"{incident_type}_{timestamp}_{unique_id}.jpg"
        target_dir = os.path.join(MEDIA_DIR, incident_type)
        # Robustness: only "fire" and "accidents" are pre-created at import
        # time; create the directory on demand so other types also work.
        os.makedirs(target_dir, exist_ok=True)
        save_path = os.path.join(target_dir, filename)

        # Save the image
        image.save(save_path)

        # Bug fix: build the URL with explicit forward slashes. The original
        # embedded os.path.join output, which yields backslashes on Windows —
        # invalid in a URL. On POSIX the result is byte-identical.
        return f"{BASE_URL}/{MEDIA_DIR}/{incident_type}/{filename}"
    except Exception as e:
        print(f"Error saving image: {str(e)}")
        return None
66
+
67
# Function to create a Freshdesk ticket with image attachment
def create_freshdesk_ticket(incident_type, confidence_score, img):
    """Create an urgent Freshdesk ticket for *incident_type* and attach *img*.

    Parameters
    ----------
    incident_type : str
        One of "Fire Incident", "Accident Incident",
        "Fire and Accident Incident" (case-insensitive).
    confidence_score : float
        Detection confidence in [0, 1]; rendered as a percentage.
    img : PIL.Image.Image
        Frame that triggered the detection; saved locally and attached.

    Returns
    -------
    str
        Human-readable status message (success with ticket id, or failure).
    """
    # Save the image to the appropriate directory and derive its local path.
    image_url = None
    image_path = None
    subdir = None
    kind = incident_type.lower()
    if kind == "fire incident":
        image_url = save_image(img, "fire")
        subdir = "fire"
    elif kind == "accident incident":
        image_url = save_image(img, "accidents")
        subdir = "accidents"
    elif kind == "fire and accident incident":
        fire_url = save_image(img, "fire")
        accident_url = save_image(img, "accidents")
        image_url = fire_url or accident_url
        subdir = "fire" if fire_url else "accidents"
    # Bug fix: the original called image_url.split(...) unconditionally, which
    # raised AttributeError whenever save_image failed and returned None.
    if image_url and subdir:
        # basename(url) is equivalent to the original
        # basename(url.split("/")[-1]) — the inner split was redundant.
        image_path = os.path.join(MEDIA_DIR, subdir, os.path.basename(image_url))

    # Shortened subject
    subject = f"{incident_type} Detected - Confidence: {confidence_score*100:.1f}%"

    # Detailed description
    description = f"""
{incident_type} is critical.
Details:
1. Address: 123 Main Street, Lahore
2. Phone: 923013225853
3. Confidence Score: {confidence_score*100:.1f}%
4. Image URL: {image_url or 'https://example.com/roboi.jpg'}
5. Incident Time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
"""

    ticket_data = {
        "email": "safe.city@example.com",
        "subject": subject,
        "description": description,
        "priority": 4,  # Urgent
        "status": 2  # Open
    }

    # Create ticket
    url = f"https://{FRESHDESK_DOMAIN}/api/v2/tickets"
    headers = {"Content-Type": "application/json"}

    response = requests.post(
        url,
        auth=(API_KEY, "X"),
        headers=headers,
        data=json.dumps(ticket_data)
    )

    if response.status_code != 201:
        print(f"❌ Failed to create ticket: {response.status_code} - {response.text}")
        return f"Failed to create ticket for {incident_type}: {response.status_code} - {response.text}"

    ticket = response.json()
    ticket_id = ticket.get('id')
    print(f"✅ Ticket created successfully: Ticket ID {ticket_id}")
    print(json.dumps(ticket, indent=2))

    # Attach the snapshot to the ticket when the saved file exists.
    if image_path and os.path.exists(image_path):
        attachment_url = f"https://{FRESHDESK_DOMAIN}/api/v2/tickets/{ticket_id}/attachments"
        try:
            with open(image_path, 'rb') as f:
                files = {'attachments[]': (os.path.basename(image_path), f, 'image/jpeg')}
                # Do not set Content-Type here; requests must generate the
                # multipart boundary itself.
                attachment_response = requests.post(
                    attachment_url,
                    auth=(API_KEY, "X"),
                    files=files
                )
            if attachment_response.status_code == 201:
                print(f"✅ Image attached to ticket {ticket_id}")
            else:
                print(f"❌ Failed to attach image: {attachment_response.status_code} - {attachment_response.text}")
        except Exception as e:
            print(f"❌ Error accessing image file {image_path}: {str(e)}")
    else:
        print(f"❌ Image file not found: {image_path}")

    return f"Ticket created for {incident_type} with ID {ticket_id}"
150
# Image inference function
def _scan_boxes(results, threshold):
    """Scan YOLO results; return (detected, best_conf, confidences, classes)."""
    detected = False
    best_conf = 0.0
    confidences = []
    classes = []
    for r in results:
        if r.boxes:
            for box in r.boxes:
                confidence = box.conf[0].item()
                class_id = int(box.cls[0].item())
                confidences.append(confidence)
                classes.append(class_id)
                if confidence >= threshold:
                    detected = True
                    best_conf = max(best_conf, confidence)
    return detected, best_conf, confidences, classes


def detect_image(image):
    """Detect fire and accidents in one image and file a Freshdesk ticket.

    Parameters
    ----------
    image : PIL.Image.Image
        Input frame from a CCTV feed.

    Returns
    -------
    tuple
        (annotated image, detection-info text, ticket-status text).
    """
    # Bug fix: initialise these before the try-block. The original assigned
    # them only inside it, so the except-handler raised NameError (masking
    # the real error) whenever predict() failed early.
    fire_confidences = []
    fire_classes = []
    accident_confidences = []
    accident_classes = []
    try:
        pil_img = image

        # Fire detection
        fire_results = fire_model.predict(
            source=pil_img,
            conf=FIRE_CONF_THRESHOLD,
            iou=FIRE_IOU_THRESHOLD,
            show_labels=True,
            show_conf=True,
            imgsz=640,
            verbose=False
        )
        fire_annotated_img = fire_results[0].plot()
        fire_detected, fire_confidence, fire_confidences, fire_classes = _scan_boxes(
            fire_results, FIRE_CONF_THRESHOLD
        )
        print(f"Fire model raw confidences: {fire_confidences}, classes: {fire_classes}")

        # Accident detection
        accident_results = accident_model.predict(
            source=pil_img,
            conf=ACCIDENT_CONF_THRESHOLD,
            iou=ACCIDENT_IOU_THRESHOLD,
            show_labels=True,
            show_conf=True,
            imgsz=640,
            verbose=False
        )
        accident_annotated_img = accident_results[0].plot()
        accident_detected, accident_confidence, accident_confidences, accident_classes = _scan_boxes(
            accident_results, ACCIDENT_CONF_THRESHOLD
        )
        print(f"Accident model raw confidences: {accident_confidences}, classes: {accident_classes}")

        # Combine annotated images: pixel-wise max keeps both overlays visible.
        fire_annotated_img = np.array(fire_annotated_img)
        accident_annotated_img = np.array(accident_annotated_img)
        combined_img = Image.fromarray(np.maximum(fire_annotated_img, accident_annotated_img))

        # Detection info
        detection_info = "Detection Results:\n"
        if fire_detected:
            detection_info += f"Fire detected with confidence: {fire_confidence*100:.1f}%\n"
        else:
            detection_info += f"No fire detected. Raw confidences: {fire_confidences}, Classes: {fire_classes}\n"
        if accident_detected:
            detection_info += f"Accident detected with confidence: {accident_confidence*100:.1f}%\n"
        else:
            detection_info += f"No accident detected. Raw confidences: {accident_confidences}, Classes: {accident_classes}\n"

        # Create a single Freshdesk ticket covering everything detected.
        if fire_detected and accident_detected:
            ticket_info = create_freshdesk_ticket("Fire and Accident Incident", max(fire_confidence, accident_confidence), pil_img)
        elif fire_detected:
            ticket_info = create_freshdesk_ticket("Fire Incident", fire_confidence, pil_img)
        elif accident_detected:
            ticket_info = create_freshdesk_ticket("Accident Incident", accident_confidence, pil_img)
        else:
            ticket_info = "No ticket created: No incidents detected"

        return combined_img, detection_info, ticket_info

    except Exception as e:
        return image, f"Error during detection: {str(e)}\nRaw confidences: Fire {fire_confidences}, Accident {accident_confidences}", "No ticket created due to error"
242
+
243
# Video processing function
def detect_video(video_path):
    """Annotate a video frame-by-frame and create tickets for incidents.

    One Freshdesk ticket is created per incident type, on its first
    detection only.

    Parameters
    ----------
    video_path : str
        Path to the uploaded video file.

    Returns
    -------
    tuple
        (annotated video path or None, processing-info text, ticket-status text).
    """
    # Bug fix: initialise the accumulators before the try-block. The original
    # referenced them in the except-handler, which raised NameError when an
    # error occurred before they were assigned.
    fire_confidences_all = []
    accident_confidences_all = []
    try:
        cap = cv2.VideoCapture(video_path)

        frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        # Robustness: some containers report 0 FPS; fall back to 25 so the
        # VideoWriter still produces a playable file.
        fps = int(cap.get(cv2.CAP_PROP_FPS)) or 25

        # tempfile.mktemp is deprecated and race-prone; mkstemp creates the
        # file atomically. Close our descriptor so VideoWriter owns the path.
        fd, output_path = tempfile.mkstemp(suffix='.mp4')
        os.close(fd)
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        out = cv2.VideoWriter(output_path, fourcc, fps, (frame_width, frame_height))

        frame_count = 0
        fire_detected_once = False
        accident_detected_once = False
        fire_detection_frames = []
        accident_detection_frames = []
        fire_classes_all = []
        accident_classes_all = []

        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break

            frame_count += 1
            pil_img = Image.fromarray(frame[..., ::-1])  # Convert BGR to RGB

            # Fire detection
            fire_results = fire_model.predict(
                source=pil_img,
                conf=FIRE_CONF_THRESHOLD,
                iou=FIRE_IOU_THRESHOLD,
                show_labels=True,
                show_conf=True,
                imgsz=640,
                verbose=False
            )

            fire_detected = False
            fire_confidence = 0.0
            fire_annotated_frame = fire_results[0].plot()
            fire_confidences = []
            fire_classes = []
            for r in fire_results:
                if r.boxes:
                    for box in r.boxes:
                        confidence = box.conf[0].item()
                        class_id = int(box.cls[0].item())
                        fire_confidences.append(confidence)
                        fire_classes.append(class_id)
                        if confidence >= FIRE_CONF_THRESHOLD:
                            fire_detected = True
                            fire_confidence = max(fire_confidence, confidence)
                            fire_detection_frames.append(frame_count)
            fire_confidences_all.extend(fire_confidences)
            fire_classes_all.extend(fire_classes)

            # Accident detection
            accident_results = accident_model.predict(
                source=pil_img,
                conf=ACCIDENT_CONF_THRESHOLD,
                iou=ACCIDENT_IOU_THRESHOLD,
                show_labels=True,
                show_conf=True,
                imgsz=640,
                verbose=False
            )

            accident_detected = False
            accident_confidence = 0.0
            accident_annotated_frame = accident_results[0].plot()
            accident_confidences = []
            accident_classes = []
            accident_boxes = accident_results[0].boxes
            if accident_boxes:
                for box in accident_boxes:
                    confidence = box.conf[0].item()
                    class_id = int(box.cls[0].item())
                    accident_confidences.append(confidence)
                    accident_classes.append(class_id)
                    if confidence >= ACCIDENT_CONF_THRESHOLD:
                        accident_detected = True
                        accident_confidence = max(accident_confidence, confidence)
                        accident_detection_frames.append(frame_count)
            accident_confidences_all.extend(accident_confidences)
            accident_classes_all.extend(accident_classes)

            # Combine annotated frames: pixel-wise max keeps both overlays.
            fire_annotated_frame = np.array(fire_annotated_frame)
            accident_annotated_frame = np.array(accident_annotated_frame)
            combined_frame = np.maximum(fire_annotated_frame, accident_annotated_frame)
            out.write(combined_frame)

            # Create a single ticket for the first detection of each incident type
            if fire_detected and not fire_detected_once:
                create_freshdesk_ticket("Fire Incident", fire_confidence, pil_img)
                fire_detected_once = True
            if accident_detected and not accident_detected_once:
                create_freshdesk_ticket("Accident Incident", accident_confidence, pil_img)
                accident_detected_once = True

        cap.release()
        out.release()

        detection_info = f"Video processed successfully!\n"
        detection_info += f"Total frames: {frame_count}\n"
        detection_info += f"Frames with fire detections: {len(set(fire_detection_frames))}\n"
        detection_info += f"Frames with accident detections: {len(set(accident_detection_frames))}\n"
        if fire_detection_frames:
            detection_info += f"Fire detection frames: {sorted(set(fire_detection_frames))[:10]}...\n"
        else:
            detection_info += f"No fire detections. Raw confidences (sample): {fire_confidences_all[:10]}, Classes: {fire_classes_all[:10]}...\n"
        if accident_detection_frames:
            detection_info += f"Accident detection frames: {sorted(set(accident_detection_frames))[:10]}...\n"
        else:
            detection_info += f"No accident detections. Raw confidences (sample): {accident_confidences_all[:10]}, Classes: {accident_classes_all[:10]}...\n"

        ticket_info = f"Tickets created: {'Fire' if fire_detected_once else ''}{' and ' if fire_detected_once and accident_detected_once else ''}{'Accident' if accident_detected_once else ''}." if fire_detected_once or accident_detected_once else "No tickets created: No incidents detected"

        return output_path, detection_info, ticket_info

    except Exception as e:
        return None, f"Error processing video: {str(e)}\nRaw confidences: Fire {fire_confidences_all[:10]}, Accident {accident_confidences_all[:10]}", "No ticket created due to error"
370
+
371
# Create Gradio interface for CCTV automation
with gr.Blocks(title="Rapid Rescue - Automated CCTV Fire and Accident Detection") as iface:
    gr.Markdown("""
    # 🚨 Rapid Rescue - Automated CCTV Fire and Accident Detection System

    This AI system automatically detects fires and accidents in images and videos from CCTV feeds using two YOLO models:
    - YOLOv8n for fire detection (Confidence: 0.25, IoU: 0.45)
    - YOLOv8m for accident detection (Confidence: 0.3, IoU: 0.55)

    **Features:**
    - Fully automated detection with fixed thresholds
    - Creates Freshdesk tickets for detected incidents with saved image URLs
    - Supports both images and videos from CCTV feeds
    - Images saved in media/fire and media/accidents directories
    - Optimized for deployment on Hugging Face Spaces

    **Usage:**
    1. Upload an image or video from a CCTV feed
    2. Click process to run detection
    3. View results with bounding boxes, confidence scores, class labels, and ticket creation status
    """)

    with gr.Tabs():
        with gr.Tab("Image Detection"):
            with gr.Row():
                with gr.Column():
                    image_input = gr.Image(type="pil", label="Upload CCTV Image")
                    image_button = gr.Button("Detect Fire and Accidents", variant="primary")

                with gr.Column():
                    image_output = gr.Image(label="Detection Results")
                    image_info = gr.Textbox(label="Detection Information", lines=8)
                    # Renamed from `ticket_info`: the original reused one name
                    # for two different components (image and video tabs),
                    # which made the event wiring easy to misread.
                    image_ticket_info = gr.Textbox(label="Ticket Creation Status", lines=2)

            image_button.click(
                fn=detect_image,
                inputs=[image_input],
                outputs=[image_output, image_info, image_ticket_info]
            )

        with gr.Tab("Video Detection"):
            with gr.Row():
                with gr.Column():
                    video_input = gr.Video(label="Upload Video")
                    video_button = gr.Button("Process Video", variant="primary")

                with gr.Column():
                    video_output = gr.Video(label="Processed Video")
                    video_info = gr.Textbox(label="Processing Information", lines=8)
                    # Separate component for the video tab (see note above in
                    # this block on the rename).
                    video_ticket_info = gr.Textbox(label="Ticket Creation Status", lines=2)

            video_button.click(
                fn=detect_video,
                inputs=[video_input],
                outputs=[video_output, video_info, video_ticket_info]
            )

    gr.Markdown("""
    ### Notes
    - Freshdesk tickets are created automatically when fire or accident is detected (one per incident type).
    - Images are saved in media/fire or media/accidents directories with unique filenames.
    - Ticket includes URL to the saved image.
    - For videos, one ticket is created per incident type with the first detected frame saved.
    - Deploy on Hugging Face Spaces with `requirements.txt` and model files (`fire.pt`, `best.pt`).
    - Debug info includes raw confidence scores and class labels to verify detection performance.
    """)

# Launch the app
if __name__ == "__main__":
    iface.launch()