# main.py
from fastapi import FastAPI, WebSocket, WebSocketDisconnect
from inference import VideoProcessor
import threading
import queue
import asyncio
import json
# Module-level FastAPI application instance; the route decorators below attach to it.
app = FastAPI()
@app.websocket("/ws/inference")
async def websocket_endpoint(websocket: WebSocket):
    """Stream video-inference results to a client over a WebSocket.

    Protocol:
      1. The client sends one JSON text message: ``{"url": "<rtsp url>"}``.
      2. The (blocking) model loop runs in a background thread via
         ``VideoProcessor.start_processing`` so it never stalls the event loop.
      3. Every result the worker puts on the queue is forwarded to the client
         as a JSON message, until the worker finishes or the client disconnects.
    """
    await websocket.accept()
    processor = VideoProcessor()
    result_queue = queue.Queue()
    # Bound up front so the cleanup path below is safe even when the client
    # disconnects before the worker thread is ever created (the original code
    # raised UnboundLocalError in that case).
    process_thread = None
    try:
        # 1. Receive the RTSP link from the client.
        data = await websocket.receive_text()
        config = json.loads(data)
        rtsp_url = config.get("url")
        print(f"Received request for: {rtsp_url}")
        # 2. Run the model in a background thread so the (blocking) inference
        #    loop does not block the WebSocket event loop.
        process_thread = threading.Thread(
            target=processor.start_processing,
            args=(rtsp_url, result_queue),
            daemon=True,  # never keep the server process alive on shutdown
        )
        process_thread.start()
        # 3. Forward results to the client as they arrive.
        while True:
            try:
                # Non-blocking get so the coroutine never stalls the loop.
                result = result_queue.get_nowait()
                await websocket.send_json(result)
            except queue.Empty:
                # No result yet — yield briefly instead of busy-spinning.
                await asyncio.sleep(0.1)
            # Stop only once the worker is done AND the queue is drained;
            # checking is_alive() alone could drop results still queued.
            if not process_thread.is_alive() and result_queue.empty():
                await websocket.send_json({"status": "Processing finished"})
                break
    except WebSocketDisconnect:
        # send_json/receive_text raise this when the client goes away.
        print("Client disconnected")
    except Exception as e:
        print(f"Error: {e}")
    finally:
        # Single cleanup path for every exit: signal the worker to stop, then
        # wait for it off-loop so a slow shutdown cannot freeze the server.
        processor.running = False
        if process_thread is not None and process_thread.is_alive():
            await asyncio.get_running_loop().run_in_executor(
                None, lambda: process_thread.join(timeout=5.0)
            )