Spaces:
Paused
Paused
| from ultralytics import YOLO | |
| from huggingface_hub import hf_hub_download | |
| from PIL import Image | |
| import numpy as np | |
# Lazily-initialised model singletons; populated on first use by
# get_general_model() / get_litter_model().
_model_general = None
_model_litter = None

# Detection class names that count as "waste". A detection matches when any
# of these appears as a substring of its lower-cased label, so e.g. both
# "bottle" and "plastic bottle" match "bottle".
WASTE_KEYWORDS = [
    "bottle", "cup", "bag", "wrapper", "can", "trash", "bin",
    "cardboard", "paper", "plastic", "litter", "garbage", "waste",
    "box", "container", "food", "fruit", "banana", "orange",
    "fork", "knife", "spoon", "bowl", "sandwich", "hotdog",
    "pizza", "donut", "cake", "suitcase", "backpack", "handbag"
]
def get_general_model():
    """Return the process-wide YOLOv8n (COCO-pretrained) detector.

    The model is loaded lazily on first call and cached in the module
    global `_model_general` for all later calls.
    """
    global _model_general
    if _model_general is not None:
        return _model_general
    _model_general = YOLO("yolov8n.pt")
    return _model_general
def get_litter_model():
    """Return the litter-specific YOLO model, or None if it cannot be loaded.

    Downloads the weights from the Hugging Face Hub on first use and caches
    the loaded model in `_model_litter`. On any failure the cache slot is
    left as None, so the next call retries the download.
    """
    global _model_litter
    if _model_litter is not None:
        return _model_litter
    try:
        weights = hf_hub_download(
            repo_id="BowerApp/bowie-yolov8-multihead-trash-detection",
            filename="best.pt"
        )
        _model_litter = YOLO(weights)
    except Exception:
        # Best-effort: the caller treats a None model as "skip this stage".
        _model_litter = None
    return _model_litter
def _collect_detections(model, image, source):
    """Run *model* on *image* and return one result dict per detected box.

    conf=0.05 deliberately casts a very wide net; filtering down to waste
    classes happens later via WASTE_KEYWORDS matching in analyze_image.
    """
    results = model.predict(image, conf=0.05, verbose=False)
    return [
        {
            "object": model.names[int(box.cls[0])],
            "confidence": f"{float(box.conf[0]):.1%}",
            "source": source,
        }
        for box in results[0].boxes
    ]


def _pixel_risk(image):
    """Colour-heuristic waste risk in [0, 1].

    Measures the fraction of brown-ish and grey-ish pixels (weak indicators
    of dirt/litter) and scales the weighted mix by 4, capped at 1.0. Raises
    if *image* cannot be converted to an RGB array; the caller maps any
    failure to risk 0.
    """
    arr = np.array(Image.fromarray(np.array(image)).convert("RGB"))
    r = arr[:, :, 0].astype(int)
    g = arr[:, :, 1].astype(int)
    b = arr[:, :, 2].astype(int)
    dirty_brown = float(np.mean((r > 120) & (g > 80) & (g < 140) & (b < 80)))
    # Grey requires all three channels to be both high AND close together.
    # Bug fix: the previous version never compared the blue channel, so
    # bright bluish pixels such as (110, 110, 255) were counted as grey.
    dirty_grey = float(np.mean(
        (r > 100) & (g > 100) & (b > 100)
        & (np.abs(r - g) < 20) & (np.abs(g - b) < 20)
    ))
    return round(min((dirty_brown * 0.6 + dirty_grey * 0.4) * 4, 1.0), 3)


def analyze_image(image) -> tuple[str, float]:
    """Score an image for visible waste and build a markdown report.

    Combines two YOLO detectors (general COCO + litter-specific) with a
    colour heuristic. Every stage is best-effort: a failing stage is
    recorded in the report's "Models used" row instead of aborting.

    Args:
        image: a PIL image or HxWx3 uint8 array (anything the predictors
            and PIL accept); None is tolerated.

    Returns:
        (markdown_report, risk_score) with risk_score in [0, 1];
        0.5 is returned when no image was provided.
    """
    if image is None:
        return "Please upload an image.", 0.5

    detected_all = []
    methods_used = []

    # -- Model 1: YOLOv8n pretrained on COCO -------------------------------
    try:
        model_g = get_general_model()
        detected_all.extend(_collect_detections(model_g, image, "YOLOv8-COCO"))
        methods_used.append("YOLOv8n-COCO")
    except Exception as e:
        methods_used.append(f"YOLOv8n failed: {e}")

    # -- Model 2: litter-specific model (may be unavailable) ---------------
    try:
        model_l = get_litter_model()
        if model_l:
            detected_all.extend(_collect_detections(model_l, image, "Litter-Model"))
            methods_used.append("Litter-Detection")
    except Exception as e:
        methods_used.append(f"Litter model failed: {e}")

    # -- Pixel heuristic (any failure means "no colour signal") ------------
    try:
        pixel_risk = _pixel_risk(image)
    except Exception:
        pixel_risk = 0.0

    # -- Risk computation --------------------------------------------------
    waste_items = [
        d for d in detected_all
        if any(kw in d["object"].lower() for kw in WASTE_KEYWORDS)
    ]
    # De-duplicate by class name so each waste class counts once.
    seen = set()
    unique_waste = []
    for item in waste_items:
        if item["object"] not in seen:
            seen.add(item["object"])
            unique_waste.append(item)
    waste_count = len(unique_waste)

    # 5+ distinct waste classes saturates the detector-based risk.
    yolo_risk = round(min(waste_count / 5, 1.0), 3)
    img_risk = round(min(yolo_risk * 0.7 + pixel_risk * 0.3, 1.0), 3)

    # -- Verdict -----------------------------------------------------------
    # NOTE(review): these emoji were reconstructed from mojibake in the
    # extracted source — confirm against the originally deployed strings.
    if img_risk > 0.5:
        verdict = "🚨 Visible waste detected"
    elif img_risk > 0.2:
        verdict = "⚠️ Some waste indicators found"
    else:
        verdict = "✅ Image appears clean"

    output = f"""## {verdict}

| Metric | Value |
|--------|-------|
| Models used | {', '.join(methods_used)} |
| Waste objects found | {waste_count} |
| **Image Risk Score** | **{img_risk:.3f} / 1.0** |
"""
    return output, img_risk