Abubakar740 committed on
Commit
021aa52
·
1 Parent(s): a5bb7e3

add update and delete stream endpoints

Browse files
Files changed (1) hide show
  1. main.py +44 -68
main.py CHANGED
@@ -37,57 +37,48 @@ description = """
37
 
38
  <br>
39
 
40
- ### 📽️ Section 1: Live Stream Monitoring
41
  *Use these endpoints to manage and watch live AI security feeds.*
42
 
43
  1. **`POST /stream/create` (Register Camera)**
44
- - **What it does:** Saves your camera's RTSP address and location into the system.
45
- - **How to use:** Enter a name (e.g., "Cashier-1"), the RTSP link from your camera, and the location.
46
 
47
  2. **`POST /stream/start/{id}` (Activate AI)**
48
- - **What it does:** Powers up the AI "Brain" for a specific camera.
49
- - **How to use:** Enter the ID (like `cam-1`). It returns a **view_url**. Paste this URL into a new browser tab to see the live AI.
50
 
51
- 3. **`GET /cameras/{id}/frame` (Main Video Feed)**
52
- - **What it does:** The actual live video stream with AI boxes and the security card.
53
- - **How to use:** This is the URL used by your browser or dashboard to display the video.
54
 
55
- 4. **`GET /stream/list_cameras` (Camera Directory)**
56
- - **What it does:** Displays a list of all added cameras.
57
- - **How to use:** Use this to see which cameras are currently "Online" or to find a Camera ID.
58
 
59
- 5. **`POST /stream/stop/{id}` (Deactivate AI)**
60
- - **What it does:** Shuts down the AI processing for a camera to save computer resources.
61
- - **How to use:** Enter the ID and click Execute. The camera will move to "Offline" status.
 
 
 
 
 
62
 
63
  <br>
64
 
65
- ### 📁 Section 2: Recorded Video Analysis
66
  *Use these endpoints to scan uploaded files for theft incidents.*
67
 
68
  1. **`POST /video/detect` (Upload for Analysis)**
69
- - **What it does:** Uploads a video file (MP4/MOV) to the server for a full AI scan.
70
- - **How to use:** Select your video file and click Execute. You will receive a **job_id**.
71
 
72
  2. **`GET /video/status/{job_id}` (Check Progress)**
73
- - **What it does:** Provides the scan percentage (0% to 100%) and current state.
74
- - **How to use:** Enter your **job_id**. Once it reaches 100%, your file is ready.
75
 
76
  3. **`GET /video/jobs` (Task History)**
77
- - **What it does:** Lists every video ever uploaded and whether the scan is finished or failed.
78
- - **How to use:** Helpful for finding old `job_id`s or checking multiple scans at once.
79
 
80
  4. **`GET /video/download/{job_id}` (Get Result)**
81
- - **What it does:** Allows you to download the final processed video.
82
- - **How to use:** Once status is 100%, enter the `job_id` here to save the analyzed video to your PC.
83
-
84
- <br>
85
-
86
- ### 🔔 Section 3: Discord Notifications
87
- *Automatic alerts for security teams.*
88
-
89
- - **How it works:** Whenever the AI detects a theft (Live or Recorded), it takes a snapshot of the person and sends a high-priority alert card to your Discord channel.
90
- - **Requirement:** Ensure your `DISCORD_WEBHOOK_URL` is set in the system settings.
91
 
92
  </details>
93
 
@@ -110,6 +101,7 @@ os.makedirs(OUTPUT_DIR, exist_ok=True)
110
 
111
  app = FastAPI(title="AI Theft Detection Backend",
112
  description=description,
 
113
  version="1.0.0")
114
 
115
  # --- DATABASE & STATE ---
@@ -201,51 +193,34 @@ def draw_corner_rect(img, pt1, pt2, color, thickness=2, r=15, d=25):
201
  cv2.ellipse(img, (x2-r, y2-r), (r,r), 0, 0, 90, color, thickness)
202
 
203
  def draw_security_card(frame, avg_prob, theft_flag, title="AI ANALYZER"):
204
- # 1. Card Layout Configuration
205
  card_x, card_y = 35, 35
206
  padding = 30
207
  line_spacing = 45
208
- card_w, card_h = 620, 260 # Increased size for better spacing
209
-
210
  orange = (0, 165, 255)
211
- bg_color = (25, 25, 25) # Slightly darker for better contrast
212
-
213
- # 2. Draw Background with Transparency
214
  overlay = frame.copy()
215
  cv2.rectangle(overlay, (card_x, card_y), (card_x + card_w, card_y + card_h), bg_color, -1)
216
  cv2.addWeighted(overlay, 0.85, frame, 0.15, 0, frame)
217
-
218
- # 3. Draw Bold Orange Border
219
  cv2.rectangle(frame, (card_x, card_y), (card_x + card_w, card_y + card_h), orange, 3)
220
-
221
- # 4. Prepare Content
222
  status_label = "ALERT: THEFT DETECTED" if theft_flag else "STATUS: SYSTEM NORMAL"
223
  status_color = (0, 0, 255) if theft_flag else (0, 255, 0)
224
  now = datetime.datetime.now().strftime("%b %d, %Y | %I:%M:%S %p")
225
  confidence = f"AI CONFIDENCE: {int(avg_prob * 100)}%"
226
-
227
- # 5. Render Text with Padding and Anti-Aliasing (cv2.LINE_AA)
228
  curr_y = card_y + padding + 15
229
-
230
- # Header - Centered Bold
231
  header_text = "THEFT DETECTION LIVE MONITORING"
232
  (tw, th), _ = cv2.getTextSize(header_text, cv2.FONT_HERSHEY_DUPLEX, 0.9, 2)
233
  cv2.putText(frame, header_text, (card_x + (card_w - tw) // 2, curr_y),
234
  cv2.FONT_HERSHEY_DUPLEX, 0.9, (255, 255, 255), 2, cv2.LINE_AA)
235
-
236
- # Rows with Spacing
237
  curr_y += line_spacing + 5
238
  cv2.putText(frame, f"SOURCE: {title.upper()}", (card_x + padding, curr_y),
239
  cv2.FONT_HERSHEY_DUPLEX, 0.8, (200, 200, 200), 1, cv2.LINE_AA)
240
-
241
  curr_y += line_spacing
242
  cv2.putText(frame, status_label, (card_x + padding, curr_y),
243
  cv2.FONT_HERSHEY_DUPLEX, 1.0, status_color, 2, cv2.LINE_AA)
244
-
245
  curr_y += line_spacing
246
  cv2.putText(frame, confidence, (card_x + padding, curr_y),
247
  cv2.FONT_HERSHEY_DUPLEX, 0.8, (255, 255, 255), 1, cv2.LINE_AA)
248
-
249
  curr_y += line_spacing
250
  cv2.putText(frame, now, (card_x + padding, curr_y),
251
  cv2.FONT_HERSHEY_SIMPLEX, 0.6, (150, 150, 150), 1, cv2.LINE_AA)
@@ -368,7 +343,7 @@ class CameraPipeline:
368
  with torch.no_grad():
369
  probs = torch.softmax(slowfast_model([clip_ts[:,:,::4,:,:], clip_ts]), dim=1)
370
  self.prediction_buffer.append(probs[0][1].item())
371
- avg_prob = np.mean(self.prediction_buffer)
372
  is_theft = avg_prob > THEFT_THRESHOLD
373
  draw_corner_rect(frame, (x1,y1), (x2,y2), (0,0,255) if is_theft else (0,255,0))
374
  if is_theft:
@@ -429,18 +404,32 @@ async def start_camera(id: str, request: Request):
429
 
430
  # Generate URLs for the user
431
  local_url = f"{request.base_url}cameras/{id}/frame"
432
- hf_host = request.headers.get("x-forwarded-host")
433
- hf_url = f"https://{hf_host}/cameras/{id}/frame" if hf_host else "Not running on Hugging Face"
434
 
435
  return {
436
  "success": started,
437
  "job_id": id,
438
  "view_urls": {
439
- "huggingface_url": hf_url,
440
- "local_machine_url": str(local_url)
441
  }
442
  }
443
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
444
  @app.post("/stream/stop/{id}", tags=["Live Stream Monitoring"])
445
  def stop_camera(id: str):
446
  if id not in MOCK_DB["cameras"]:
@@ -451,19 +440,6 @@ def stop_camera(id: str):
451
  MOCK_DB["cameras"][id]["isStreaming"] = False
452
  return {"success": stopped}
453
 
454
- # @app.get("/stream/frame/{id}", tags=["Live Stream Monitoring"])
455
- # def get_frame(id: str):
456
- # if id not in manager.active_pipelines:
457
- # raise HTTPException(404)
458
- # def generate():
459
- # while id in manager.active_pipelines:
460
- # frame = manager.active_pipelines[id].latest_frame
461
- # if frame is not None:
462
- # _, buffer = cv2.imencode('.jpg', frame)
463
- # yield (b'--frame\r\nContent-Type: image/jpeg\r\n\r\n' + buffer.tobytes() + b'\r\n')
464
- # time.sleep(0.05)
465
- # return StreamingResponse(generate(), media_type="multipart/x-mixed-replace; boundary=frame")
466
-
467
  @app.get("/cameras/{id}/frame", tags=["Live Stream Monitoring"])
468
  def get_frame_legacy(id: str):
469
  if id not in manager.active_pipelines:
 
37
 
38
  <br>
39
 
40
+ ### Section 1: Live Stream Monitoring
41
  *Use these endpoints to manage and watch live AI security feeds.*
42
 
43
  1. **`POST /stream/create` (Register Camera)**
44
+ - **What it does:** Saves your camera's RTSP address into the system.
45
+ - **How to use:** Enter a name, the RTSP link, and the location.
46
 
47
  2. **`POST /stream/start/{id}` (Activate AI)**
48
+ - **What it does:** Starts the AI processing.
49
+ - **How to use:** Returns stream URL to view the stream.
50
 
51
+ 3. **`PUT /stream/update/{id}` (Change Settings)**
52
+ - **What it does:** Updates the name or RTSP link of an existing camera.
 
53
 
54
+ 4. **`DELETE /stream/delete/{id}` (Remove Camera)**
55
+ - **What it does:** Stops the stream and removes the camera from the list.
 
56
 
57
+ 5. **`GET /cameras/{id}/frame` (Main Video Feed)**
58
+ - **What it does:** The actual live video stream.
59
+
60
+ 6. **`GET /stream/list_cameras` (Camera Directory)**
61
+ - **What it does:** Displays all added cameras and their status.
62
+
63
+ 7. **`POST /stream/stop/{id}` (Deactivate AI)**
64
+ - **What it does:** Shuts down the AI for a camera.
65
 
66
  <br>
67
 
68
+ ### Section 2: Recorded Video Analysis
69
  *Use these endpoints to scan uploaded files for theft incidents.*
70
 
71
  1. **`POST /video/detect` (Upload for Analysis)**
72
+ - **What it does:** Uploads a video file for a full AI scan. Returns a **job_id**.
 
73
 
74
  2. **`GET /video/status/{job_id}` (Check Progress)**
75
+ - **What it does:** Provides the scan percentage (0% to 100%).
 
76
 
77
  3. **`GET /video/jobs` (Task History)**
78
+ - **What it does:** Lists every video ever uploaded.
 
79
 
80
  4. **`GET /video/download/{job_id}` (Get Result)**
81
+ - **What it does:** Download the final processed video once status is 100%.
 
 
 
 
 
 
 
 
 
82
 
83
  </details>
84
 
 
101
 
102
  app = FastAPI(title="AI Theft Detection Backend",
103
  description=description,
104
+ openapi_tags=tags_metadata,
105
  version="1.0.0")
106
 
107
  # --- DATABASE & STATE ---
 
193
  cv2.ellipse(img, (x2-r, y2-r), (r,r), 0, 0, 90, color, thickness)
194
 
195
  def draw_security_card(frame, avg_prob, theft_flag, title="AI ANALYZER"):
 
196
  card_x, card_y = 35, 35
197
  padding = 30
198
  line_spacing = 45
199
+ card_w, card_h = 620, 260
 
200
  orange = (0, 165, 255)
201
+ bg_color = (25, 25, 25)
 
 
202
  overlay = frame.copy()
203
  cv2.rectangle(overlay, (card_x, card_y), (card_x + card_w, card_y + card_h), bg_color, -1)
204
  cv2.addWeighted(overlay, 0.85, frame, 0.15, 0, frame)
 
 
205
  cv2.rectangle(frame, (card_x, card_y), (card_x + card_w, card_y + card_h), orange, 3)
 
 
206
  status_label = "ALERT: THEFT DETECTED" if theft_flag else "STATUS: SYSTEM NORMAL"
207
  status_color = (0, 0, 255) if theft_flag else (0, 255, 0)
208
  now = datetime.datetime.now().strftime("%b %d, %Y | %I:%M:%S %p")
209
  confidence = f"AI CONFIDENCE: {int(avg_prob * 100)}%"
 
 
210
  curr_y = card_y + padding + 15
 
 
211
  header_text = "THEFT DETECTION LIVE MONITORING"
212
  (tw, th), _ = cv2.getTextSize(header_text, cv2.FONT_HERSHEY_DUPLEX, 0.9, 2)
213
  cv2.putText(frame, header_text, (card_x + (card_w - tw) // 2, curr_y),
214
  cv2.FONT_HERSHEY_DUPLEX, 0.9, (255, 255, 255), 2, cv2.LINE_AA)
 
 
215
  curr_y += line_spacing + 5
216
  cv2.putText(frame, f"SOURCE: {title.upper()}", (card_x + padding, curr_y),
217
  cv2.FONT_HERSHEY_DUPLEX, 0.8, (200, 200, 200), 1, cv2.LINE_AA)
 
218
  curr_y += line_spacing
219
  cv2.putText(frame, status_label, (card_x + padding, curr_y),
220
  cv2.FONT_HERSHEY_DUPLEX, 1.0, status_color, 2, cv2.LINE_AA)
 
221
  curr_y += line_spacing
222
  cv2.putText(frame, confidence, (card_x + padding, curr_y),
223
  cv2.FONT_HERSHEY_DUPLEX, 0.8, (255, 255, 255), 1, cv2.LINE_AA)
 
224
  curr_y += line_spacing
225
  cv2.putText(frame, now, (card_x + padding, curr_y),
226
  cv2.FONT_HERSHEY_SIMPLEX, 0.6, (150, 150, 150), 1, cv2.LINE_AA)
 
343
  with torch.no_grad():
344
  probs = torch.softmax(slowfast_model([clip_ts[:,:,::4,:,:], clip_ts]), dim=1)
345
  self.prediction_buffer.append(probs[0][1].item())
346
+ avg_prob = np.mean(self.prediction_buffer)
347
  is_theft = avg_prob > THEFT_THRESHOLD
348
  draw_corner_rect(frame, (x1,y1), (x2,y2), (0,0,255) if is_theft else (0,255,0))
349
  if is_theft:
 
404
 
405
  # Generate URLs for the user
406
  local_url = f"{request.base_url}cameras/{id}/frame"
 
 
407
 
408
  return {
409
  "success": started,
410
  "job_id": id,
411
  "view_urls": {
412
+ "stream_url": str(local_url)
 
413
  }
414
  }
415
 
416
+ @app.put("/stream/update/{id}", tags=["Live Stream Monitoring"])
417
+ def update_camera(id: str, cam_data: CameraUpdate):
418
+ if id not in MOCK_DB["cameras"]:
419
+ raise HTTPException(status_code=404, detail="Camera not found")
420
+ current_cam = MOCK_DB["cameras"][id]
421
+ update_dict = cam_data.dict(exclude_unset=True)
422
+ current_cam.update(update_dict)
423
+ return {"message": "Camera updated successfully", "camera": current_cam}
424
+
425
+ @app.delete("/stream/delete/{id}", tags=["Live Stream Monitoring"])
426
+ def delete_camera(id: str):
427
+ if id not in MOCK_DB["cameras"]:
428
+ raise HTTPException(status_code=404, detail="Camera not found")
429
+ manager.stop_camera(id)
430
+ del MOCK_DB["cameras"][id]
431
+ return {"message": f"Camera {id} deleted successfully"}
432
+
433
  @app.post("/stream/stop/{id}", tags=["Live Stream Monitoring"])
434
  def stop_camera(id: str):
435
  if id not in MOCK_DB["cameras"]:
 
440
  MOCK_DB["cameras"][id]["isStreaming"] = False
441
  return {"success": stopped}
442
 
 
 
 
 
 
 
 
 
 
 
 
 
 
443
  @app.get("/cameras/{id}/frame", tags=["Live Stream Monitoring"])
444
  def get_frame_legacy(id: str):
445
  if id not in manager.active_pipelines: