Hussein El-Hadidy committed on
Commit
408d491
·
1 Parent(s): fe147ec

Trial for sockets

Browse files
Files changed (2) hide show
  1. app.py +47 -1
  2. requirements.txt +2 -1
app.py CHANGED
@@ -18,6 +18,10 @@ from ECG import classify_new_ecg
18
  from ultralytics import YOLO
19
  import tensorflow as tf
20
  from fastapi import HTTPException
 
 
 
 
21
 
22
 
23
  app = FastAPI()
@@ -245,4 +249,46 @@ async def process_image(file: UploadFile = File(...)):
245
  "message": "Image processed successfully",
246
  "KeypointsXY": keypoints.tolist(),
247
  "confidences": confidences.tolist()
248
- })
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18
  from ultralytics import YOLO
19
  import tensorflow as tf
20
  from fastapi import HTTPException
21
+ from fastapi import WebSocket, WebSocketDisconnect
22
+ import base64
23
+ import cv2
24
+
25
 
26
 
27
  app = FastAPI()
 
249
  "message": "Image processed successfully",
250
  "KeypointsXY": keypoints.tolist(),
251
  "confidences": confidences.tolist()
252
+ })
253
+
254
+
255
# WebSocket endpoint to handle image processing
@app.websocket("/ws/image")
async def websocket_endpoint(websocket: WebSocket):
    """Stream YOLO pose-estimation results over a WebSocket.

    Protocol: the client sends each frame as a text message containing a
    base64-encoded image; the server replies per frame with either a JSON
    object (``message``, ``KeypointsXY``, ``Confidences`` — both truncated
    to the first 5 entries) or a plain-text status/error message.
    """
    # NOTE(review): the model is re-loaded for every connection; consider
    # loading it once at startup to cut per-client latency — confirm intent.
    model = YOLO("yolo11n-pose_float16.tflite")
    print("Model loaded successfully")
    await websocket.accept()
    try:
        while True:
            data = await websocket.receive_text()  # base64-encoded image

            # Fix: malformed base64 used to raise out of the loop and kill
            # the whole connection; report it and keep serving frames.
            # (binascii.Error is a ValueError subclass.)
            try:
                image_data = base64.b64decode(data)
            except ValueError:
                await websocket.send_text("⚠️ Invalid base64 image data.")
                continue

            # Convert image bytes to numpy array and decode with OpenCV.
            np_arr = np.frombuffer(image_data, np.uint8)
            frame = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
            # Fix: imdecode returns None (no exception) for undecodable
            # bytes — check explicitly instead of letting predict() fail.
            if frame is None:
                await websocket.send_text("⚠️ Could not decode image.")
                continue

            if model is None:
                await websocket.send_text("⚠️ Model not loaded.")
                continue

            # Run YOLO pose estimation on the decoded frame.
            try:
                results = model.predict(frame, save=False, conf=0.3)
                if results and results[0].keypoints:
                    keypoints = results[0].keypoints.xy.cpu().numpy().tolist()
                    confidences = (
                        results[0].boxes.conf.cpu().numpy().tolist()
                        if results[0].boxes
                        else []
                    )
                    # Fix: the original sent str(dict) — Python repr with
                    # single quotes, not valid JSON. send_json emits a
                    # payload the client can actually parse.
                    await websocket.send_json(
                        {
                            "message": "Pose detected",
                            # Limit to first 5 entries for brevity.
                            "KeypointsXY": keypoints[:5],
                            "Confidences": confidences[:5],
                        }
                    )
                else:
                    await websocket.send_text("❌ No keypoints detected.")
            except Exception as e:
                # Boundary handler: keep the stream alive on bad frames.
                await websocket.send_text(f"⚠️ Error processing image: {str(e)}")
    except WebSocketDisconnect:
        print("🔌 Client disconnected")
requirements.txt CHANGED
@@ -100,4 +100,5 @@ uvicorn==0.34.2
100
  Werkzeug==3.1.3
101
  wfdb==4.3.0
102
  wrapt==1.17.2
103
- yarl==1.20.0
 
 
100
  Werkzeug==3.1.3
101
  wfdb==4.3.0
102
  wrapt==1.17.2
103
+ yarl==1.20.0
104
+ websockets