Spaces:
Paused
Paused
Upload 3 files
Browse files- README.md +3 -2
- app.py +124 -61
- requirements.txt +2 -3
README.md
CHANGED
|
@@ -3,8 +3,9 @@ title: Emtithal PPE Detection Demo
|
|
| 3 |
emoji: 🛡️
|
| 4 |
colorFrom: green
|
| 5 |
colorTo: blue
|
| 6 |
-
sdk:
|
| 7 |
-
|
|
|
|
| 8 |
pinned: false
|
| 9 |
---
|
| 10 |
|
|
|
|
| 3 |
emoji: 🛡️
|
| 4 |
colorFrom: green
|
| 5 |
colorTo: blue
|
| 6 |
+
sdk: gradio
|
| 7 |
+
sdk_version: 4.44.0
|
| 8 |
+
app_file: app.py
|
| 9 |
pinned: false
|
| 10 |
---
|
| 11 |
|
app.py
CHANGED
|
@@ -1,76 +1,139 @@
|
|
| 1 |
-
|
| 2 |
-
from fastapi.middleware.cors import CORSMiddleware
|
| 3 |
from ultralytics import YOLO
|
| 4 |
import cv2
|
| 5 |
import numpy as np
|
| 6 |
-
|
| 7 |
-
import io
|
| 8 |
|
| 9 |
# Load YOLO model
|
| 10 |
model = YOLO("best.pt")
|
| 11 |
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
# Enable CORS for your website
|
| 15 |
-
app.add_middleware(
|
| 16 |
-
CORSMiddleware,
|
| 17 |
-
allow_origins=["*"], # In production, specify your domain
|
| 18 |
-
allow_credentials=True,
|
| 19 |
-
allow_methods=["*"],
|
| 20 |
-
allow_headers=["*"],
|
| 21 |
-
)
|
| 22 |
-
|
| 23 |
-
@app.get("/")
|
| 24 |
-
def root():
|
| 25 |
-
return {
|
| 26 |
-
"message": "Emtithal PPE Detection API",
|
| 27 |
-
"status": "running",
|
| 28 |
-
"endpoints": {
|
| 29 |
-
"/detect": "POST - Upload image for PPE detection"
|
| 30 |
-
}
|
| 31 |
-
}
|
| 32 |
-
|
| 33 |
-
@app.post("/detect")
|
| 34 |
-
async def detect_ppe(file: UploadFile = File(...)):
|
| 35 |
"""
|
| 36 |
-
Detect PPE violations in
|
| 37 |
-
Returns detection results as JSON
|
| 38 |
"""
|
| 39 |
-
# Read image
|
| 40 |
-
contents = await file.read()
|
| 41 |
-
nparr = np.frombuffer(contents, np.uint8)
|
| 42 |
-
image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
|
| 43 |
-
|
| 44 |
if image is None:
|
| 45 |
-
return
|
| 46 |
|
| 47 |
# Run YOLO inference
|
| 48 |
results = model(image, conf=0.4)
|
| 49 |
|
| 50 |
-
#
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
detection = {
|
| 56 |
-
"class_id": int(box.cls[0]),
|
| 57 |
-
"class_name": result.names[int(box.cls[0])],
|
| 58 |
-
"confidence": float(box.conf[0]),
|
| 59 |
-
"bbox": {
|
| 60 |
-
"x1": float(box.xyxy[0][0]),
|
| 61 |
-
"y1": float(box.xyxy[0][1]),
|
| 62 |
-
"x2": float(box.xyxy[0][2]),
|
| 63 |
-
"y2": float(box.xyxy[0][3])
|
| 64 |
-
}
|
| 65 |
-
}
|
| 66 |
-
detections.append(detection)
|
| 67 |
|
| 68 |
-
return
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
|
|
|
| 2 |
from ultralytics import YOLO
|
| 3 |
import cv2
|
| 4 |
import numpy as np
|
| 5 |
+
import spaces
|
|
|
|
| 6 |
|
| 7 |
# Load YOLO model
|
| 8 |
model = YOLO("best.pt")
|
| 9 |
|
| 10 |
+
@spaces.GPU
def detect_ppe(image):
    """Run PPE detection on one frame and return the annotated image.

    Parameters
    ----------
    image : numpy.ndarray or None
        Frame supplied by the Gradio image component (``type="numpy"``).

    Returns
    -------
    numpy.ndarray or None
        Annotated frame in RGB channel order, or ``None`` when no frame
        was provided.
    """
    # Guard clause: the webcam/upload widget may hand us nothing.
    if image is None:
        return None

    # NOTE(review): Gradio delivers RGB arrays, while Ultralytics assumes
    # BGR for raw numpy input — confirm channel order matches how the
    # model was trained before relying on color-sensitive classes.
    predictions = model(image, conf=0.4)

    # Draw boxes and labels onto a copy of the frame (OpenCV/BGR output).
    rendered = predictions[0].plot()

    # Gradio expects RGB, so flip the channel order before returning.
    return cv2.cvtColor(rendered, cv2.COLOR_BGR2RGB)
|
| 28 |
+
|
| 29 |
+
# Page-wide styling: Cairo font, soft gradient backdrop, branded green
# buttons, and the default Gradio footer hidden.
custom_css = """
@import url('https://fonts.googleapis.com/css2?family=Cairo:wght@400;500;600;700&display=swap');

* {
    font-family: 'Cairo', sans-serif !important;
}

.gradio-container {
    background: linear-gradient(135deg, #f5f7fa 0%, #c3cfe2 100%) !important;
}

#component-0 {
    background: white !important;
    border-radius: 16px !important;
    padding: 2rem !important;
    box-shadow: 0 8px 32px rgba(0, 0, 0, 0.1) !important;
}

.gr-button-primary {
    background: linear-gradient(135deg, #267649, #339966) !important;
    border: none !important;
    font-size: 18px !important;
    font-weight: 600 !important;
    padding: 12px 32px !important;
    border-radius: 8px !important;
    transition: all 0.3s !important;
}

.gr-button-primary:hover {
    transform: translateY(-2px) !important;
    box-shadow: 0 4px 12px rgba(38, 118, 73, 0.3) !important;
}

h1 {
    color: #267649 !important;
    font-size: 32px !important;
    font-weight: 700 !important;
    text-align: center !important;
    margin-bottom: 0.5rem !important;
}

.gr-prose p {
    color: #555 !important;
    font-size: 16px !important;
    text-align: center !important;
}

.gr-box {
    border-radius: 12px !important;
    border: 2px solid #e0e0e0 !important;
}

.gr-input-label {
    color: #267649 !important;
    font-weight: 600 !important;
    font-size: 16px !important;
}

footer {
    display: none !important;
}
"""

# Assemble the demo UI: Arabic header, a side-by-side input/result pair,
# a usage-hint banner, and the click wiring to detect_ppe.
with gr.Blocks(css=custom_css, theme=gr.themes.Soft(primary_hue="green")) as demo:
    # Title banner (Arabic: "Emtithal system for PPE detection").
    gr.HTML("""
    <div style="text-align: center; padding: 20px 0;">
        <h1>🛡️ نظام امتثال لكشف معدات السلامة</h1>
        <p style="font-size: 18px; color: #666; margin-top: 10px;">
            اختبر نظام الذكاء الاصطناعي لكشف عدم ارتداء القفازات، الكمامات، والقبعات
        </p>
    </div>
    """)

    with gr.Row():
        # Left column: live webcam or uploaded image plus the trigger button.
        with gr.Column():
            input_image = gr.Image(
                sources=["webcam", "upload"],
                type="numpy",
                label="📷 الكاميرا أو رفع صورة",
                height=400,
            )
            submit_btn = gr.Button(
                "🔍 ابدأ الكشف", variant="primary", size="lg"
            )

        # Right column: annotated detection output.
        with gr.Column():
            output_image = gr.Image(
                label="✅ نتائج الكشف",
                type="numpy",
                height=400,
            )

    # Usage hints (Arabic: allow camera access, face the camera,
    # violations are detected automatically).
    gr.HTML("""
    <div style="text-align: center; padding: 20px; background: #f5f5f5; border-radius: 12px; margin-top: 20px;">
        <p style="margin: 0; color: #666; font-size: 14px;">
            ✅ اسمح بالوصول للكاميرا عند الطلب •
            👤 ضع نفسك أمام الكاميرا •
            🎯 سيقوم النظام بكشف المخالفات تلقائياً
        </p>
    </div>
    """)

    # Route the button click through the detector.
    submit_btn.click(
        fn=detect_ppe,
        inputs=input_image,
        outputs=output_image,
    )

# Standard script entry point: only launch when executed directly.
if __name__ == "__main__":
    demo.launch()
|
requirements.txt
CHANGED
|
@@ -1,9 +1,8 @@
|
|
| 1 |
-
|
| 2 |
-
uvicorn[standard]>=0.27.0
|
| 3 |
ultralytics>=8.0.0
|
| 4 |
opencv-python-headless>=4.8.0
|
| 5 |
numpy>=1.24.0
|
| 6 |
pillow>=10.0.0
|
| 7 |
torch>=2.0.0
|
| 8 |
torchvision>=0.15.0
|
| 9 |
-
|
|
|
|
| 1 |
+
gradio>=4.44.0
|
|
|
|
| 2 |
ultralytics>=8.0.0
|
| 3 |
opencv-python-headless>=4.8.0
|
| 4 |
numpy>=1.24.0
|
| 5 |
pillow>=10.0.0
|
| 6 |
torch>=2.0.0
|
| 7 |
torchvision>=0.15.0
|
| 8 |
+
spaces
|