planogram updated
Browse files- BAT.ipynb +0 -0
- Data/__pycache__/config.cpython-311.pyc +0 -0
- Data/__pycache__/config.cpython-312.pyc +0 -0
- Data/config.py +11 -0
- Models/best.pt +3 -0
- Models/blankDet.pt +3 -0
- Models/blankDet_v1.1.pt +3 -0
- Models/seg_v1.1.pt +3 -0
- Utils/__pycache__/demo.cpython-311.pyc +0 -0
- Utils/__pycache__/segment.cpython-311.pyc +0 -0
- Utils/__pycache__/segment.cpython-312.pyc +0 -0
- Utils/__pycache__/sorting.cpython-311.pyc +0 -0
- Utils/__pycache__/sorting.cpython-312.pyc +0 -0
- Utils/segment.py +130 -0
- Utils/sorting.py +134 -0
- __pycache__/main.cpython-311.pyc +0 -0
- __pycache__/main.cpython-312.pyc +0 -0
- api.py +76 -0
- demo.jpg +0 -0
- drinksLog.log +0 -0
- main.py +71 -0
- note.py +122 -0
- store/image_131e9995.jpg +0 -0
- store/image_2a48cd6d.jpg +0 -0
- store/image_4c3aadd5.jpg +0 -0
- store/image_77ee6cae.jpg +0 -0
- store/image_821a5d43.jpg +0 -0
- store/image_86670086.jpg +0 -0
- store/image_8fb305d2.jpg +0 -0
- store/image_9571455b.jpg +0 -0
- store/image_ceb48629.jpg +0 -0
- store/image_e3c6d95f.jpg +0 -0
- store/image_f05387dd.jpg +0 -0
BAT.ipynb
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Data/__pycache__/config.cpython-311.pyc
ADDED
|
Binary file (607 Bytes). View file
|
|
|
Data/__pycache__/config.cpython-312.pyc
ADDED
|
Binary file (572 Bytes). View file
|
|
|
Data/config.py
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Central configuration for the planogram pipeline.

Every module does ``from Data.config import *``, so the names below act as
project-wide settings.  NOTE: both YOLO models are loaded eagerly at import
time, so importing this module is expensive.
"""
from ultralytics import YOLO

# Detection model for blank slots (used by Utils/sorting.py).
blankModel = YOLO('Models/blankDet_v1.1.pt')
# Segmentation model that isolates the display frame (used by Utils/segment.py).
frameModel = YOLO('Models/seg_v1.1.pt')
frame_shape = (416, 416) # Set the input frame shape
EXPAND_RATIO = 0.1 # Inner padding
OUTER_PADDING_SIZE = 5 # Outer padding size
seg_conf = 0.60  # confidence threshold for the segmentation model
det_conf = 0.50  # confidence threshold for the blank detector
classes_to_delete = ['ghw']  # classes counted separately and excluded from the grid
expected_segments = ['ff', 'bg', 'sw', 'fns']  # required segment order for a valid planogram
|
Models/best.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ead1f561a024f3d416b360a008ec196aa56a29e6a57dd2932c8fb9e5515f90e3
|
| 3 |
+
size 87689203
|
Models/blankDet.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:6b0d01752063565222f6ff4d9c656d2700bf60c1ee22b1713ce8c6abd703f0cb
|
| 3 |
+
size 87685289
|
Models/blankDet_v1.1.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:4e44928ff0203bded268c72d8e05fb6b9f827e6a5c759e4ab0b74514e3520a78
|
| 3 |
+
size 52253474
|
Models/seg_v1.1.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:6c7ea9a6887702e4998e222100a3d4e3a04d4838f24f42db587bff6680d5ae2c
|
| 3 |
+
size 92340083
|
Utils/__pycache__/demo.cpython-311.pyc
ADDED
|
Binary file (6.97 kB). View file
|
|
|
Utils/__pycache__/segment.cpython-311.pyc
ADDED
|
Binary file (6.96 kB). View file
|
|
|
Utils/__pycache__/segment.cpython-312.pyc
ADDED
|
Binary file (5.51 kB). View file
|
|
|
Utils/__pycache__/sorting.cpython-311.pyc
ADDED
|
Binary file (6.49 kB). View file
|
|
|
Utils/__pycache__/sorting.cpython-312.pyc
ADDED
|
Binary file (5.33 kB). View file
|
|
|
Utils/segment.py
ADDED
|
@@ -0,0 +1,130 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import cv2
|
| 2 |
+
import os
|
| 3 |
+
import uuid
|
| 4 |
+
import numpy as np
|
| 5 |
+
from Data.config import *
|
| 6 |
+
|
| 7 |
+
class CropModel:
    """Segments the display frame out of a photo and warps it to a flat crop.

    Wraps an ultralytics YOLO segmentation model: predicts a mask for the
    frame, finds its four corners, applies a perspective transform, and pads
    the result.
    """

    def __init__(self, model, model_input_shape=(frame_shape[0], frame_shape[1], 3)):
        # model: an ultralytics YOLO segmentation model (see Data/config.py).
        self.model = model
        self.model_input_shape = model_input_shape
        self.conf_threshold = seg_conf

    def image_prediction_mask(self, image):
        """Run the segmentation model and return ``(mask, class_name)``.

        ``mask`` is a uint8 array scaled to 0-255, or None when the model
        segmented nothing; ``class_name`` is the first detected class name or
        ``"Not Found"``.
        """
        # Pass the configured confidence threshold so low-confidence masks are
        # filtered by the model itself (it was previously assigned but unused).
        predict = self.model.predict(image, conf=self.conf_threshold)
        result = predict[0]

        # result.masks is None when nothing was segmented; the original code
        # crashed here with an AttributeError.
        if result.masks is None or len(result.masks.data) == 0:
            return None, "Not Found"

        # Tensor.cpu() is a no-op for tensors already on the CPU, so no
        # explicit device branch is needed.
        mask = (result.masks.data[0].cpu().numpy() * 255).astype("uint8")

        class_ids = result.boxes.cls.cpu().numpy()
        class_names = [self.model.names[int(cls_id)] for cls_id in class_ids]
        class_name = class_names[0] if class_names else "Not Found"

        return mask, class_name

    @staticmethod
    def get_mask_corner_points(mask):
        """Return the 4 corner points of the largest contour in *mask*.

        Returns None when *mask* is None, no contour exists, or the largest
        contour does not approximate to exactly four corners.
        """
        if mask is None:
            return None

        _, thresh = cv2.threshold(mask, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

        if not contours:
            return None

        cnt = max(contours, key=cv2.contourArea)
        # 3% of the arc length keeps the polygon approximation coarse enough
        # to collapse the frame outline into exactly four corners.
        cnt_approx = cv2.approxPolyDP(cnt, 0.03 * cv2.arcLength(cnt, True), True)

        return cnt_approx.reshape((4, 2)) if len(cnt_approx) == 4 else None

    @staticmethod
    def get_order_points(points):
        """Order 4 points as top-left, top-right, bottom-right, bottom-left."""
        rect = np.zeros((4, 2), dtype="float32")
        s = points.sum(axis=1)
        diff = np.diff(points, axis=1)

        rect[0] = points[np.argmin(s)]     # top-left: smallest x + y
        rect[2] = points[np.argmax(s)]     # bottom-right: largest x + y
        rect[1] = points[np.argmin(diff)]  # top-right: smallest y - x
        rect[3] = points[np.argmax(diff)]  # bottom-left: largest y - x

        return rect

    @staticmethod
    def expand_bounding_box(points, expand_ratio):
        """Push each point away from the centroid by *expand_ratio* (inner padding)."""
        center = np.mean(points, axis=0)
        expanded_points = points + (points - center) * expand_ratio
        return expanded_points.astype("float32")

    def point_transform(self, image, points):
        """Perspective-warp *image* so the quadrilateral *points* fills the frame."""
        ordered_points = self.get_order_points(points)
        expanded_points = self.expand_bounding_box(ordered_points, EXPAND_RATIO)
        height, width = image.shape[:2]

        dst = np.array([[0, 0], [width - 1, 0], [width - 1, height - 1], [0, height - 1]], dtype="float32")
        M = cv2.getPerspectiveTransform(expanded_points, dst)
        warped_image = cv2.warpPerspective(image, M, (width, height))

        return warped_image

    @staticmethod
    def add_padding_to_image(image, padding_size):
        """Add a constant black border of *padding_size* pixels on every side."""
        return cv2.copyMakeBorder(image, padding_size, padding_size, padding_size, padding_size, cv2.BORDER_CONSTANT, value=[0, 0, 0])

    def get_predicted_warped_image(self, image, save_dir="store"):
        """Segment, warp, and pad *image*; save a debug copy under *save_dir*.

        Returns ``(padded_image, class_name)``; ``padded_image`` is None when
        no usable four-corner mask was found.
        """
        mask, class_name = self.image_prediction_mask(image)
        corner_points = self.get_mask_corner_points(mask)

        if corner_points is None:
            return None, class_name

        # Perform the transformation, then pad the result.
        warped_image = self.point_transform(image, corner_points)
        padded_image = self.add_padding_to_image(warped_image, OUTER_PADDING_SIZE)

        # Persist a debug copy under a random name so repeated calls never
        # clobber each other.
        os.makedirs(save_dir, exist_ok=True)
        random_name = f"image_{uuid.uuid4().hex[:8]}.jpg"  # first 8 chars of a UUID
        save_path = os.path.join(save_dir, random_name)

        cv2.imwrite(save_path, padded_image)
        print(f"Image saved at: {save_path}")

        return padded_image, class_name
Utils/sorting.py
ADDED
|
@@ -0,0 +1,134 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from Data.config import *
|
| 2 |
+
import logging
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
class SortModel:
    """Sorts YOLO box detections into a grid and validates their order
    against the expected planogram segment sequence."""

    def __init__(self, model, classes_to_delete=classes_to_delete, conf_threshold=det_conf, expected_segments=expected_segments):
        self.model = model
        self.classes_to_delete = classes_to_delete    # classes counted separately (e.g. 'ghw') and excluded from the grid
        self.conf_threshold = conf_threshold          # minimum box confidence to keep a detection
        self.expected_segments = expected_segments    # required segment order for a valid sequence

    def get_center(self, detection):
        """Return the (x, y) center of a (class, x_min, y_min, x_max, y_max) tuple."""
        _, x_min, y_min, x_max, y_max = detection
        center_x = (x_min + x_max) / 2
        center_y = (y_min + y_max) / 2
        return center_x, center_y

    def sort_and_group_detections(self, detections):
        """Group detections into rows by x-center, sort each row by y-center,
        pad rows to equal length with '' and return the transposed class-name
        matrix.  Returns [] for empty input.
        """
        detections_with_centers = [(d[0], *self.get_center(d)) for d in detections]
        sorted_detections = sorted(detections_with_centers, key=lambda x: (x[1], x[2]))

        if not sorted_detections:
            return []

        # NOTE(review): the row-break threshold is half the *absolute* x-center
        # of the first detection, not a gap size -- confirm this is intentional.
        threshold_x = (sorted_detections[0][1] * 0.5)
        rows = []
        current_row = []
        current_x = sorted_detections[0][1]

        for detection in sorted_detections:
            class_name, center_x, center_y = detection

            # Start a new row when the x-center jumps past the threshold.
            if abs(center_x - current_x) > threshold_x:
                rows.append(sorted(current_row, key=lambda x: x[2]))
                current_row = []
                current_x = center_x

            current_row.append(detection)

        if current_row:
            rows.append(sorted(current_row, key=lambda x: x[2]))

        # Pad shorter rows with '' so the matrix is rectangular, then transpose
        # so each grid column becomes a sequence row.
        max_columns = max(len(row) for row in rows) if rows else 0
        grid_matrix = []
        for row in rows:
            grid_row = [d[0] for d in row]
            grid_row.extend([''] * (max_columns - len(row)))
            grid_matrix.append(grid_row)

        transposed_matrix = list(map(list, zip(*grid_matrix)))
        return transposed_matrix

    @staticmethod
    def sequence(matrix, expected_segments):
        """Return True when every row of *matrix* walks through
        *expected_segments* in order and reaches the last segment.

        Any unexpected item (including the '' padding produced by
        sort_and_group_detections) fails the check.
        """
        if not expected_segments:
            return False

        for sequence in matrix:
            segment_index = 0
            for item in sequence:
                if item == expected_segments[segment_index]:
                    continue
                elif segment_index < len(expected_segments) - 1 and item == expected_segments[segment_index + 1]:
                    segment_index += 1
                else:
                    return False
            if segment_index != len(expected_segments) - 1:
                return False
        return True

    def process_image(self, image_path, predicted_class):
        """Run detection on *image_path* and summarise the planogram layout.

        Returns a dict of counts and yes/no flags; POSM fields are
        placeholders.
        """
        results = self.model(image_path)

        planogram_ghw_count = 0
        yolo_detections = []

        for result in results:
            for box in result.boxes:
                # Skip low-confidence boxes once, instead of testing the same
                # threshold in two nested identical `if`s as the original did.
                if box.conf[0] < self.conf_threshold:
                    continue

                class_name = self.model.names[int(box.cls[0])]
                if class_name in self.classes_to_delete:
                    planogram_ghw_count += 1
                    continue

                x_min, y_min, x_max, y_max = box.xyxy[0]
                yolo_detections.append((class_name, x_min.item(), y_min.item(), x_max.item(), y_max.item()))

        planogram_blanks_count = len(yolo_detections)
        grid_matrix = self.sort_and_group_detections(yolo_detections)

        # Print the matrix (kept for parity with the original diagnostics).
        print("Grid Matrix:")
        for row in grid_matrix:
            print(row)
        print("\n")

        # "yes" when every kept detection was a classes_to_delete hit -- i.e.
        # blank count equals GHW count.
        planogram_ghw = "yes" if planogram_blanks_count == planogram_ghw_count else "no"
        planogram_valid_sequence = "yes" if self.sequence(grid_matrix, self.expected_segments) else "no"

        return {
            "Planogram Blanks Count": planogram_blanks_count,
            "Planogram GHW Count": planogram_ghw_count,
            "Planogram Valid Sequence": planogram_valid_sequence,
            "Planogram GHW": planogram_ghw,
            "Planogram Name": predicted_class,

            "POSM Blanks Count": "N/A",
            "POSM GHW Count": "N/A",
            "POSM Valid Sequence": "N/A",
            "POSM GHW": "N/A",
            "POSM Name": "N/A",
        }
__pycache__/main.cpython-311.pyc
ADDED
|
Binary file (5.19 kB). View file
|
|
|
__pycache__/main.cpython-312.pyc
ADDED
|
Binary file (4.4 kB). View file
|
|
|
api.py
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from fastapi import FastAPI
|
| 2 |
+
from pydantic import BaseModel
|
| 3 |
+
from typing import Union, List
|
| 4 |
+
import uvicorn
|
| 5 |
+
import logging
|
| 6 |
+
from datetime import datetime
|
| 7 |
+
import pytz
|
| 8 |
+
import torch
|
| 9 |
+
|
| 10 |
+
from main import ImageProcessor
|
| 11 |
+
image_processor = ImageProcessor()  # Initialize the image processor once at startup

# One handler writes to drinksLog.log.  The original code attached a second
# FileHandler for the same file on top of basicConfig, so every record was
# logged twice.
logging.basicConfig(filename="drinksLog.log", filemode='w')
logger = logging.getLogger("drinks")
logger.setLevel(logging.DEBUG)

app = FastAPI()


class RequestBody(BaseModel):
    """One detection request: image category and image URL."""
    cat: str
    img: str


class RequestData(BaseModel):
    """Request envelope: a single item or a list of items."""
    body: Union[RequestBody, List[RequestBody]]


@app.get("/status")
async def status():
    """Health-check endpoint."""
    return {"status": "AI Server is running"}


# Function to process the image based on the category
async def process_image(item: RequestBody):
    """Dispatch one request item to the image processor by category.

    "posm" and "planogram" were two byte-identical branches in the original,
    so they are merged here; unknown categories yield an error dict.
    """
    category = item.cat
    img_url = item.img

    if category in ("posm", "planogram"):
        result = await image_processor.process_image(img_url)
        return result
    return {"error": f"Unsupported category {category}"}


@app.post("/bats")
async def detect_items(request_data: RequestData):
    """Process one or many request items and return the list of results."""
    try:
        results = []

        # Normalise a single item to a one-element list so both request
        # shapes share one code path.
        items = request_data.body if isinstance(request_data.body, list) else [request_data.body]
        for item in items:
            processed_result = await process_image(item)
            results.append(processed_result)

        # Return the results as a JSON response
        return results

    except Exception as e:
        logger.error(f"Error during detection: {str(e)}")
        return {"error": "An error occurred during detection"}


if __name__ == "__main__":
    try:
        uvicorn.run(app, host="127.0.0.1", port=4444)
    finally:
        # Release any GPU memory held by the models on shutdown.
        torch.cuda.empty_cache()
|
demo.jpg
ADDED
|
drinksLog.log
ADDED
|
File without changes
|
main.py
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# main.py
|
| 2 |
+
import json
|
| 3 |
+
import pandas as pd
|
| 4 |
+
from PIL import Image
|
| 5 |
+
import cv2
|
| 6 |
+
import numpy as np
|
| 7 |
+
from aiohttp import ClientSession
|
| 8 |
+
from io import BytesIO
|
| 9 |
+
import asyncio
|
| 10 |
+
import logging
|
| 11 |
+
from Data.config import *
|
| 12 |
+
from Utils.segment import CropModel
|
| 13 |
+
from Utils.sorting import SortModel
|
| 14 |
+
from Data.config import frameModel
|
| 15 |
+
from Data.config import blankModel
|
| 16 |
+
import matplotlib.pyplot as plt
|
| 17 |
+
|
| 18 |
+
# Set up logging
|
| 19 |
+
logging.basicConfig(level=logging.INFO)
|
| 20 |
+
|
| 21 |
+
class ImageFetcher:
    """Downloads an image over HTTP and decodes it with PIL."""

    @staticmethod
    async def fetch_image(url, session):
        """Fetch *url* through *session*; return a PIL Image, or None on any failure."""
        try:
            async with session.get(url) as response:
                # Guard clause: anything but 200 is treated as a failed fetch.
                if response.status != 200:
                    logging.error(f"Failed to fetch image from {url}, status code: {response.status}")
                    return None
                payload = await response.read()
                return Image.open(BytesIO(payload))
        except Exception as e:
            logging.error(f"Exception during image fetching from {url}: {e}")
            return None
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
class ImageProcessor:
    """End-to-end pipeline: fetch an image, crop/warp the display frame,
    then sort and validate the detected blanks."""

    def __init__(self):
        self.frameModel = frameModel  # segmentation model (frame cropper)
        self.blankModel = blankModel  # detection model (blank sorter)

    async def process_image(self, img_url):
        """Download *img_url* and run the full pipeline.

        Returns ``{"result": ...}`` on success or ``{"error": ...}`` on any
        failure.
        """
        async with ClientSession() as session:
            image = await ImageFetcher.fetch_image(img_url, session)
        if image is None:
            return {"error": "Failed to fetch image"}

        # Force 3-channel RGB so grayscale or RGBA downloads don't break the
        # colour conversion below.
        image = np.array(image.convert("RGB"))
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image = cv2.resize(image, (frame_shape[0], frame_shape[1]))  # Resize to match the frame shape

        try:
            # Use the models stored on the instance (the original read the
            # module-level globals despite keeping copies on self).
            crop_model = CropModel(model=self.frameModel)
            padded_image, other_class_name = crop_model.get_predicted_warped_image(image)

            # NOTE(review): CropModel returns "Not Found" (truthy) when no
            # class was detected, so the else branch never runs -- confirm
            # the intended check.
            if other_class_name:
                print(f"Other class name: {other_class_name}")
            else:
                print("No other class detected.")

            # CropModel returns None when no four-corner frame was found; the
            # original passed that None straight into the sorter.
            if padded_image is None:
                return {"error": "Frame could not be detected in the image."}

            sort_model = SortModel(model=self.blankModel, classes_to_delete=classes_to_delete, conf_threshold=det_conf, expected_segments=expected_segments)
            sequence_str = sort_model.process_image(padded_image, other_class_name)

            return {"result": sequence_str}
        except Exception as e:
            logging.error(f"Exception during image processing: {e}")
            return {"error": "Image processing failed."}
|
note.py
ADDED
|
@@ -0,0 +1,122 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from Data.config import *
|
| 2 |
+
import logging
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
class SortModel:
    """Scratch copy of Utils/sorting.py's SortModel (note.py).

    Sorts YOLO box detections into a grid and validates their order against
    the expected planogram segment sequence.
    """

    def __init__(self, model, classes_to_delete=classes_to_delete, conf_threshold=det_conf, expected_segments=expected_segments):
        self.model = model
        self.classes_to_delete = classes_to_delete    # classes counted separately and excluded from the grid
        self.conf_threshold = conf_threshold          # minimum box confidence to keep a detection
        self.expected_segments = expected_segments    # required segment order for a valid sequence

    def get_center(self, detection):
        """Return the (x, y) center of a (class, x_min, y_min, x_max, y_max) tuple."""
        _, x_min, y_min, x_max, y_max = detection
        center_x = (x_min + x_max) / 2
        center_y = (y_min + y_max) / 2
        return center_x, center_y

    def sort_and_group_detections(self, detections):
        """Group detections into rows by x-center, sort each row by y-center,
        pad rows with '' and return the transposed class-name matrix."""
        detections_with_centers = [(d[0], *self.get_center(d)) for d in detections]
        sorted_detections = sorted(detections_with_centers, key=lambda x: (x[1], x[2]))

        if not sorted_detections:
            return []

        # NOTE(review): threshold is half the absolute x-center of the first
        # detection, not a gap size -- confirm this is intentional.
        threshold_x = (sorted_detections[0][1] * 0.5)
        rows = []
        current_row = []
        current_x = sorted_detections[0][1]

        for detection in sorted_detections:
            class_name, center_x, center_y = detection

            if abs(center_x - current_x) > threshold_x:
                rows.append(sorted(current_row, key=lambda x: x[2]))
                current_row = []
                current_x = center_x

            current_row.append(detection)

        if current_row:
            rows.append(sorted(current_row, key=lambda x: x[2]))

        max_columns = max(len(row) for row in rows) if rows else 0
        grid_matrix = []
        for row in rows:
            grid_row = [d[0] for d in row]
            grid_row.extend([''] * (max_columns - len(row)))
            grid_matrix.append(grid_row)

        transposed_matrix = list(map(list, zip(*grid_matrix)))
        return transposed_matrix

    @staticmethod
    def sequence(matrix, expected_segments):
        """Return True when every row of *matrix* walks through
        *expected_segments* in order and reaches the last segment."""
        if not expected_segments:
            return False

        for sequence in matrix:
            segment_index = 0
            for item in sequence:
                if item == expected_segments[segment_index]:
                    continue
                elif segment_index < len(expected_segments) - 1 and item == expected_segments[segment_index + 1]:
                    segment_index += 1
                else:
                    return False
            if segment_index != len(expected_segments) - 1:
                return False
        return True

    def process_image(self, image_path, predicted_class):
        """Run detection on *image_path* and summarise the planogram layout."""
        results = self.model(image_path)

        planogram_ghw_count = 0
        yolo_detections = []

        for result in results:
            for box in result.boxes:
                # Gate on confidence first, matching Utils/sorting.py.  The
                # original note.py counted GHW boxes regardless of confidence.
                if box.conf[0] < self.conf_threshold:
                    continue

                class_name = self.model.names[int(box.cls[0])]
                if class_name in self.classes_to_delete:
                    planogram_ghw_count += 1
                    continue

                x_min, y_min, x_max, y_max = box.xyxy[0]
                yolo_detections.append((class_name, x_min.item(), y_min.item(), x_max.item(), y_max.item()))

        planogram_blanks_count = len(yolo_detections)

        # Sort and group detections (leftover "ddddddddd" debug print removed).
        grid_matrix = self.sort_and_group_detections(yolo_detections)

        # Print the matrix
        print("Grid Matrix:")
        for row in grid_matrix:
            print(row)
        print("\n")

        planogram_ghw = "yes" if planogram_blanks_count == planogram_ghw_count else "no"
        # Use "yes"/"no" for consistency with Utils/sorting.py (note.py had
        # an inconsistent "y"/"n/a" pair here).
        planogram_valid_sequence = "yes" if self.sequence(grid_matrix, self.expected_segments) else "no"

        return {
            "Planogram Blanks Count": planogram_blanks_count,
            "Planogram GHW Count": planogram_ghw_count,
            "Planogram Valid Sequence": planogram_valid_sequence,
            "Planogram GHW": planogram_ghw,
            "Class Name": predicted_class
        }
|
store/image_131e9995.jpg
ADDED
|
store/image_2a48cd6d.jpg
ADDED
|
store/image_4c3aadd5.jpg
ADDED
|
store/image_77ee6cae.jpg
ADDED
|
store/image_821a5d43.jpg
ADDED
|
store/image_86670086.jpg
ADDED
|
store/image_8fb305d2.jpg
ADDED
|
store/image_9571455b.jpg
ADDED
|
store/image_ceb48629.jpg
ADDED
|
store/image_e3c6d95f.jpg
ADDED
|
store/image_f05387dd.jpg
ADDED
|