"""Plant Disease Detection API.

FastAPI service wrapping a YOLO model: POST an image to /predict and receive
bounding boxes, confidences, and class labels for each detection.
"""

import io

from fastapi import FastAPI, File, HTTPException, UploadFile
from PIL import Image, UnidentifiedImageError
from ultralytics import YOLO
import torch  # noqa: F401 -- ultralytics requires torch at runtime

app = FastAPI(title="Plant Disease Detection API")

# Load the model strictly once on startup.
# Ensure you have uploaded your 'best.pt' renamed as 'model.pt' to the root directory.
try:
    model = YOLO("model.pt")
except Exception as e:
    print(f"Error loading model: {e}")
    # Fallback to a tiny model just so the app doesn't crash if the file is
    # missing during build.
    print("Warning: 'model.pt' not found, loading 'yolov8n.pt' as placeholder.")
    model = YOLO("yolov8n.pt")


@app.get("/")
def home():
    """Health-check endpoint confirming the service is up."""
    return {"message": "Plant Disease Detection API is running. POST to /predict with an image."}


@app.post("/predict")
async def predict_image(file: UploadFile = File(...)):
    """
    Receives an image file, runs inference, and returns bounding boxes and confidence.

    Returns a JSON object with the original filename, the number of
    detections, and a list of predictions (bbox, confidence, class id/name).

    Raises:
        HTTPException 400: non-image content type, empty body, or bytes
            Pillow cannot decode as an image.
        HTTPException 500: unexpected inference/processing failure.
    """
    # 1. Validate Input
    if not file.content_type or not file.content_type.startswith("image/"):
        raise HTTPException(status_code=400, detail="File provided is not an image.")

    try:
        # 2. Read Image
        image_data = await file.read()
        if not image_data:
            # An empty body is a client error, not a server fault.
            raise HTTPException(status_code=400, detail="Uploaded file is empty.")
        try:
            image = Image.open(io.BytesIO(image_data)).convert("RGB")
        except UnidentifiedImageError:
            # Bytes claimed to be an image but Pillow can't decode them:
            # report 400 to the client instead of a generic 500.
            raise HTTPException(status_code=400, detail="Could not decode image data.")

        # 3. Run Inference
        # conf=0.25 is standard, you can lower it if you need more sensitivity
        results = model.predict(image, conf=0.25)

        # 4. Process Results
        detections = []
        result = results[0]  # We only processed one image

        for box in result.boxes:
            # Extract coordinates (x1, y1, x2, y2)
            coords = box.xyxy[0].tolist()
            # Extract confidence
            confidence = float(box.conf[0])
            # Extract class (should be 0 for 'diseased' based on your training)
            cls_id = int(box.cls[0])
            cls_name = result.names[cls_id]

            detections.append({
                "bbox": coords,  # [x_min, y_min, x_max, y_max]
                "confidence": confidence,
                "class_id": cls_id,
                "class_name": cls_name,
            })

        return {
            "filename": file.filename,
            "count": len(detections),
            "predictions": detections,
        }

    except HTTPException:
        # Preserve deliberate 4xx responses raised above instead of
        # masking them as 500 in the generic handler below.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))