# main.py
from fastapi import FastAPI, WebSocket, WebSocketDisconnect
from inference import VideoProcessor
import threading
import queue
import asyncio
import json

app = FastAPI()


@app.websocket("/ws/inference")
async def websocket_endpoint(websocket: WebSocket) -> None:
    """Stream video-inference results for an RTSP feed over a WebSocket.

    Protocol:
        1. Client sends one JSON text message: ``{"url": "<rtsp url>"}``.
        2. The server runs ``VideoProcessor.start_processing`` in a background
           thread so the event loop is never blocked, and relays each result
           placed on the thread's queue back to the client as JSON.
        3. When processing finishes, the server sends
           ``{"status": "Processing finished"}`` and closes the loop.

    Cleanup (stopping the model and joining the thread) runs in ``finally``
    regardless of whether we exit via disconnect, error, or completion.
    """
    await websocket.accept()
    processor = VideoProcessor()
    result_queue: queue.Queue = queue.Queue()
    # Defined before the try-block so the finally-clause can always
    # reference it safely (fixes NameError on early disconnect).
    process_thread: threading.Thread | None = None

    try:
        # 1. Receive the RTSP link from the client.
        data = await websocket.receive_text()
        config = json.loads(data)
        rtsp_url = config.get("url")
        if not rtsp_url:
            # Fail fast on a malformed request instead of starting the model.
            await websocket.send_json({"error": "missing 'url' in request"})
            return
        print(f"Received request for: {rtsp_url}")

        # 2. Run the model in a background thread so this coroutine
        #    (and the rest of the server) stays responsive.
        process_thread = threading.Thread(
            target=processor.start_processing,
            args=(rtsp_url, result_queue),
            daemon=True,  # do not keep the server process alive on shutdown
        )
        process_thread.start()

        # 3. Relay results back to the client as they arrive.
        while True:
            try:
                # Non-blocking poll of the cross-thread queue.
                result = result_queue.get_nowait()
                await websocket.send_json(result)
            except queue.Empty:
                # Nothing pending: yield briefly to avoid a busy-wait.
                await asyncio.sleep(0.1)

            # Stop once the model thread has finished, but only after the
            # queue is drained so no late results are dropped.
            if not process_thread.is_alive() and result_queue.empty():
                await websocket.send_json({"status": "Processing finished"})
                break
    except WebSocketDisconnect:
        print("Client disconnected")
    except Exception as e:
        print(f"Error: {e}")
    finally:
        # Ask the model to stop and wait for the worker thread in every
        # exit path (the original skipped the join on generic errors).
        processor.running = False
        if process_thread is not None:
            process_thread.join()