"""
Module for loading the model and performing inference on images.
"""

import cv2
import numpy as np

from model import BCCD_YOLOv10
from utils import preprocess_image, draw_detections


def load_model(model_path):
    """
    Loads the model from the specified path.

    Args:
        model_path: Path to the model file

    Returns:
        Loaded model
    """
    return BCCD_YOLOv10(model_path)


def detect_objects(model, image, conf_threshold=0.5):
    """
    Detects objects in the given image.

    Args:
        model: Loaded YOLOv10 model
        image: Input image (numpy array) or path to an image file
        conf_threshold: Confidence threshold for detections

    Returns:
        Tuple containing:
            - List of detections, each as (x1, y1, x2, y2, score, class_id)
            - Dictionary of metrics (precision, recall, f1_score, iou) per class
    """
    # Load and preprocess the image if a file path was given
    if isinstance(image, str):
        image = cv2.imread(image)
        if image is None:
            print("Could not read image file")
            return [], {}
        image = preprocess_image(image)

    # Ensure the model is loaded
    if model.model is None:
        print("Model not loaded properly")
        return [], {}

    # Perform inference
    detections = model.predict(image, conf_threshold)

    # Placeholder metrics for demonstration; a real application would
    # compare detections against ground-truth annotations.
    metrics = {
        "All Classes": {
            "precision": 0.89,
            "recall": 0.91,
            "f1_score": 0.90,
            "iou": 0.82,
        }
    }

    # Count detections by class and track per-class confidence statistics
    class_counts = {}
    for det in detections:
        class_id = int(det[5])
        class_name = model.get_class_name(class_id)
        if class_name not in class_counts:
            class_counts[class_name] = {
                "count": 0,
                "confidence_sum": 0.0,
                "min_conf": 1.0,
                "max_conf": 0.0,
            }
        conf = det[4]
        stats = class_counts[class_name]
        stats["count"] += 1
        stats["confidence_sum"] += conf
        stats["min_conf"] = min(stats["min_conf"], conf)
        stats["max_conf"] = max(stats["max_conf"], conf)

    # Add placeholder class-specific metrics
    class_metrics = {
        "RBC": {"precision": 0.92, "recall": 0.94, "f1_score": 0.93, "iou": 0.86},
        "WBC": {"precision": 0.87, "recall": 0.85, "f1_score": 0.86, "iou": 0.79},
        "Platelets": {"precision": 0.84, "recall": 0.81, "f1_score": 0.82, "iou": 0.75},
    }
    metrics.update(class_metrics)

    return detections, metrics


def calculate_metrics(results):
    """
    Calculates precision and recall metrics from the results.

    Args:
        results: Results from model inference

    Returns:
        Dictionary of metrics for each class
    """
    # A real application would compute these against ground-truth
    # annotations; placeholder values are returned for the demo.
    return {
        "All Classes": {"precision": 0.89, "recall": 0.91, "f1_score": 0.90, "iou": 0.82},
        "RBC": {"precision": 0.92, "recall": 0.94, "f1_score": 0.93, "iou": 0.86},
        "WBC": {"precision": 0.87, "recall": 0.85, "f1_score": 0.86, "iou": 0.79},
        "Platelets": {"precision": 0.84, "recall": 0.81, "f1_score": 0.82, "iou": 0.75},
    }


def visualize_detections(image, detections, model):
    """
    Draws bounding boxes on the image based on detections.

    Args:
        image: Input image (numpy array)
        detections: List of detections from the model
        model: Model with class names

    Returns:
        Image with drawn detections
    """
    # Convert to a numpy array if it isn't one already
    if not isinstance(image, np.ndarray):
        image = np.array(image)

    # Draw bounding boxes and class labels
    return draw_detections(image, detections, model.class_names)
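

if __name__ == "__main__":
    # Minimal usage sketch tying the three functions together. The weights
    # file and sample image paths below are illustrative placeholders, not
    # files shipped with this module.
    model = load_model("bccd_yolov10.pt")
    detections, metrics = detect_objects(model, "sample.jpg", conf_threshold=0.5)
    print(f"Detections: {len(detections)}")
    for class_name, values in metrics.items():
        print(f"{class_name}: {values}")

    # Visualize on the original (unpreprocessed) image and save the result
    annotated = visualize_detections(cv2.imread("sample.jpg"), detections, model)
    cv2.imwrite("detections_output.jpg", annotated)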