Spaces:
Paused
Paused
File size: 4,596 Bytes
from ultralytics import YOLO
from huggingface_hub import hf_hub_download
from PIL import Image
import numpy as np
_model_general = None
_model_litter = None
WASTE_KEYWORDS = [
"bottle", "cup", "bag", "wrapper", "can", "trash", "bin",
"cardboard", "paper", "plastic", "litter", "garbage", "waste",
"box", "container", "food", "fruit", "banana", "orange",
"fork", "knife", "spoon", "bowl", "sandwich", "hotdog",
"pizza", "donut", "cake", "suitcase", "backpack", "handbag"
]
def get_general_model():
global _model_general
if _model_general is None:
_model_general = YOLO("yolov8n.pt")
return _model_general
def get_litter_model():
global _model_litter
if _model_litter is None:
try:
path = hf_hub_download(
repo_id="BowerApp/bowie-yolov8-multihead-trash-detection",
filename="best.pt"
)
_model_litter = YOLO(path)
except Exception:
_model_litter = None
return _model_litter
def analyze_image(image) -> tuple[str, float]:
if image is None:
return "Please upload an image.", 0.5
detected_all = []
methods_used = []
# ββ Model 1: YOLOv8n COCO βββββββββββββββββββββββββββββββββββββββββ
try:
model_g = get_general_model()
results = model_g.predict(image, conf=0.05, verbose=False)
for box in results[0].boxes:
cls_name = model_g.names[int(box.cls[0])]
conf = float(box.conf[0])
detected_all.append({
"object": cls_name,
"confidence": f"{conf:.1%}",
"source": "YOLOv8-COCO"
})
methods_used.append("YOLOv8n-COCO")
except Exception as e:
methods_used.append(f"YOLOv8n failed: {e}")
# ββ Model 2: Litter-specific ββββββββββββββββββββββββββββββββββββββ
try:
model_l = get_litter_model()
if model_l:
results2 = model_l.predict(image, conf=0.05, verbose=False)
for box in results2[0].boxes:
cls_name = model_l.names[int(box.cls[0])]
conf = float(box.conf[0])
detected_all.append({
"object": cls_name,
"confidence": f"{conf:.1%}",
"source": "Litter-Model"
})
methods_used.append("Litter-Detection")
except Exception as e:
methods_used.append(f"Litter model failed: {e}")
# ββ Pixel heuristic βββββββββββββββββββββββββββββββββββββββββββββββ
try:
arr = np.array(Image.fromarray(np.array(image)).convert("RGB"))
r, g, b = arr[:,:,0], arr[:,:,1], arr[:,:,2]
dirty_brown = float(np.mean((r>120)&(g>80)&(g<140)&(b<80)))
dirty_grey = float(np.mean((r>100)&(g>100)&(b>100)&
(np.abs(r.astype(int)-g.astype(int))<20)))
pixel_risk = round(min((dirty_brown*0.6 + dirty_grey*0.4)*4, 1.0), 3)
except Exception:
pixel_risk = 0.0
# ββ TΓnh risk βββββββββββββββββββββββββββββββββββββββββββββββββββββ
waste_items = [
d for d in detected_all
if any(kw in d["object"].lower() for kw in WASTE_KEYWORDS)
]
seen = set()
unique_waste = []
for item in waste_items:
if item["object"] not in seen:
seen.add(item["object"])
unique_waste.append(item)
waste_count = len(unique_waste)
yolo_risk = round(min(waste_count / 5, 1.0), 3)
img_risk = round(min(yolo_risk * 0.7 + pixel_risk * 0.3, 1.0), 3)
# ββ Verdict βββββββββββββββββββββββββββββββββββββββββββββββββββββββ
if img_risk > 0.5:
verdict = "π¨ Visible waste detected"
elif img_risk > 0.2:
verdict = "β οΈ Some waste indicators found"
else:
verdict = "β
Image appears clean"
output = f"""## {verdict}
| Metric | Value |
|--------|-------|
| Models used | {', '.join(methods_used)} |
| Waste objects found | {waste_count} |
| **Image Risk Score** | **{img_risk:.3f} / 1.0** |
"""
return output, img_risk |