Spaces:
Sleeping
Sleeping
# app.py
import os

# Must run before cv2/Qt is imported: force off-screen rendering on a
# headless server.
os.environ['QT_QPA_PLATFORM'] = 'offscreen'

import base64
import io
from contextlib import asynccontextmanager

import cv2
import numpy as np
import uvicorn
from fastapi import FastAPI, UploadFile, File, Form, HTTPException
from fastapi.responses import JSONResponse, HTMLResponse
from PIL import Image
# Model handle; populated by the lifespan hook at startup, left as None
# when loading fails (endpoints then answer 503).
model = None

# Class index -> label for the 8 emotions the detector was trained on.
EMOTIONS = dict(enumerate([
    "angry", "contempt", "disgust", "fear",
    "happy", "neutral", "sad", "surprise",
]))
@asynccontextmanager
async def lifespan(app: "FastAPI"):
    """Application lifespan hook: load the YOLO emotion model at startup.

    Fix: FastAPI's ``lifespan=`` parameter expects an async context
    manager factory — the ``@asynccontextmanager`` decorator was imported
    but never applied, so this bare async generator could not serve as a
    lifespan. On load failure ``model`` stays None and the predict
    endpoints respond 503 instead of crashing the app.
    """
    global model
    print("🚀 بدء تحميل موديل التعرف على المشاعر...")
    try:
        # Imported lazily so the app still starts (degraded) when
        # ultralytics or the weights file is unavailable.
        from ultralytics import YOLO
        model = YOLO('best.pt')
        print("✅ تم تحميل الموديل بنجاح!")
        print(f"📊 الموديل يدعم {len(EMOTIONS)} مشاعر: {list(EMOTIONS.values())}")
    except Exception as e:
        print(f"❌ خطأ في تحميل الموديل: {e}")
        model = None
    yield
    print("🔄 إيقاف التطبيق...")
# FastAPI application instance; `lifespan` loads/releases the model.
app = FastAPI(
    title="Face Emotion Recognition API",
    description="API للتعرف على 8 مشاعر من تعبيرات الوجه",
    version="1.0.0",
    lifespan=lifespan
)
@app.get("/", response_class=HTMLResponse)
async def root():
    """Serve the landing page: an HTML upload form, the list of supported
    emotions, and the advertised API endpoints.

    NOTE(review): the route decorator was missing, so this handler was
    never registered; restored as GET / (the page it renders links to the
    other endpoints, confirming intent).
    """
    html_content = """
    <!DOCTYPE html>
    <html>
    <head>
        <title>Face Emotion Recognition</title>
        <style>
            body { font-family: Arial, sans-serif; margin: 40px; }
            .container { max-width: 600px; margin: 0 auto; }
            .upload-form { border: 2px dashed #ccc; padding: 20px; text-align: center; }
            .emotions { display: grid; grid-template-columns: repeat(4, 1fr); gap: 10px; margin: 20px 0; }
            .emotion { padding: 8px; background: #f0f0f0; border-radius: 5px; text-align: center; }
        </style>
    </head>
    <body>
        <div class="container">
            <h1>🎭 التعرف على مشاعر الوجه</h1>
            <p>ارفع صورة للتعرف على المشاعر الموجودة فيها</p>
            <div class="emotions">
                <div class="emotion">😠 Angry</div>
                <div class="emotion">😤 Contempt</div>
                <div class="emotion">🤢 Disgust</div>
                <div class="emotion">😨 Fear</div>
                <div class="emotion">😊 Happy</div>
                <div class="emotion">😐 Neutral</div>
                <div class="emotion">😢 Sad</div>
                <div class="emotion">😲 Surprise</div>
            </div>
            <form class="upload-form" action="/predict" method="post" enctype="multipart/form-data">
                <input type="file" name="file" accept="image/*" required>
                <br><br>
                <button type="submit">🔍 تحليل المشاعر</button>
            </form>
            <div style="margin-top: 20px;">
                <h3>API Endpoints:</h3>
                <ul>
                    <li><strong>POST /predict</strong> - رفع صورة للتنبؤ</li>
                    <li><strong>GET /emotions</strong> - قائمة المشاعر المدعومة</li>
                    <li><strong>GET /health</strong> - فحص حالة الخدمة</li>
                </ul>
            </div>
        </div>
    </body>
    </html>
    """
    return HTMLResponse(content=html_content)
@app.get("/health")
async def health_check():
    """Liveness/readiness probe: report service status and whether the
    model finished loading.

    NOTE(review): route decorator was missing (endpoint is advertised on
    the landing page); restored as GET /health.
    """
    return {
        "status": "healthy",
        "model_loaded": model is not None,
        "supported_emotions": len(EMOTIONS)
    }
@app.get("/emotions")
async def get_emotions():
    """Return the class-id -> emotion mapping supported by the model.

    NOTE(review): route decorator was missing (endpoint is advertised on
    the landing page); restored as GET /emotions.
    """
    return {
        "emotions": EMOTIONS,
        "count": len(EMOTIONS),
        "description": "8 مشاعر أساسية يمكن التعرف عليها"
    }
@app.post("/predict")
async def predict_emotion(file: UploadFile = File(...)):
    """Detect faces in an uploaded image and classify one of 8 emotions.

    Returns JSON with per-face emotion, confidence, bounding box and
    (when the model exposes them) per-class probabilities, sorted by
    confidence descending. Responds 503 when the model is not loaded and
    500 (JSON body) on processing failure.

    NOTE(review): route decorator was missing; restored as POST /predict
    — the path the landing-page form submits to.
    """
    if model is None:
        raise HTTPException(status_code=503, detail="Model not loaded")
    try:
        contents = await file.read()
        # convert("RGB") normalizes grayscale/RGBA/palette uploads to
        # 3-channel; the previous shape check let them through unconverted.
        image = Image.open(io.BytesIO(contents)).convert("RGB")
        # PIL gives RGB; OpenCV/ultralytics expect BGR arrays.
        image_np = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
        results = model.predict(image_np, verbose=False, conf=0.25)
        predictions = []
        for result in results:
            if result.boxes is None:
                continue
            for box in result.boxes:
                conf = float(box.conf[0])
                cls = int(box.cls[0])
                bbox = box.xyxy[0].tolist()
                emotion_name = EMOTIONS.get(cls, f"unknown_{cls}")
                # Full per-class probabilities when available; otherwise
                # fall back to the single detected class confidence.
                if getattr(box, 'probs', None) is not None:
                    probs_dict = {
                        EMOTIONS.get(i, f"unknown_{i}"): round(p, 4)
                        for i, p in enumerate(box.probs[0].tolist())
                    }
                else:
                    probs_dict = {emotion_name: round(conf, 4)}
                predictions.append({
                    "emotion": emotion_name,
                    "confidence": round(conf, 4),
                    "bbox": {
                        "x1": int(bbox[0]),
                        "y1": int(bbox[1]),
                        "x2": int(bbox[2]),
                        "y2": int(bbox[3])
                    },
                    "class_id": cls,
                    "probabilities": probs_dict
                })
        predictions.sort(key=lambda x: x["confidence"], reverse=True)
        return JSONResponse(content={
            "success": True,
            "image_name": file.filename,
            "predictions": predictions,
            "faces_detected": len(predictions),
            "model_info": {
                "model_file": "best.pt",
                "supported_emotions": list(EMOTIONS.values())
            }
        }, status_code=200)
    except Exception as e:
        # Service boundary: report failure as a JSON 500 instead of
        # propagating an unhandled exception.
        return JSONResponse(content={
            "success": False,
            "error": str(e),
            "message": "فشل في تحليل الصورة"
        }, status_code=500)
@app.post("/predict/base64")
async def predict_emotion_base64(image_data: str = Form(...)):
    """Same as /predict but accepts a base64-encoded image as form data.

    Returns per-face emotion, confidence, bbox and probabilities sorted
    by confidence. Responds 503 when the model is not loaded; raises a
    500 HTTPException on decode/processing failure.

    NOTE(review): route decorator was missing; restored as POST
    /predict/base64 — confirm the original path with API consumers.
    """
    if model is None:
        raise HTTPException(status_code=503, detail="Model not loaded")
    try:
        # Accept raw base64 as well as data URLs
        # ("data:image/png;base64,...."): ',' never occurs in the base64
        # alphabet, so splitting on it is safe for raw input.
        if "," in image_data:
            image_data = image_data.split(",", 1)[1]
        image_bytes = base64.b64decode(image_data)
        # convert("RGB") normalizes grayscale/RGBA/palette inputs, kept
        # consistent with /predict; then RGB -> BGR for OpenCV/ultralytics.
        image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
        image_np = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
        results = model.predict(image_np, verbose=False, conf=0.25)
        predictions = []
        for result in results:
            if result.boxes is None:
                continue
            for box in result.boxes:
                conf = float(box.conf[0])
                cls = int(box.cls[0])
                bbox = box.xyxy[0].tolist()
                emotion_name = EMOTIONS.get(cls, f"unknown_{cls}")
                # Full per-class probabilities when available; otherwise
                # fall back to the detected class confidence.
                if getattr(box, 'probs', None) is not None:
                    probs_dict = {
                        EMOTIONS.get(i, f"unknown_{i}"): round(p, 4)
                        for i, p in enumerate(box.probs[0].tolist())
                    }
                else:
                    probs_dict = {emotion_name: round(conf, 4)}
                predictions.append({
                    "emotion": emotion_name,
                    "confidence": round(conf, 4),
                    "bbox": {
                        "x1": int(bbox[0]),
                        "y1": int(bbox[1]),
                        "x2": int(bbox[2]),
                        "y2": int(bbox[3])
                    },
                    "probabilities": probs_dict
                })
        predictions.sort(key=lambda x: x["confidence"], reverse=True)
        return {
            "success": True,
            "predictions": predictions,
            "faces_detected": len(predictions)
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Base64 prediction error: {str(e)}")
# Script entry point. Port 7860 is the one Hugging Face Spaces exposes —
# presumably this app is deployed there (see the "Spaces" header); confirm
# before changing.
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)