# NOTE(review): removed ~300 lines of diff/merge residue that preceded the
# imports ("452a352" / "50e95b3" markers, blank lines, and bare line numbers
# 1-150). They were not valid Python and prevented the module from importing.
import cv2
import numpy as np
from typing import List
import pandas as pd


def detect_and_scale(image_path, model) -> tuple:
    """
    Detect drawing type (plantegning, fasade, snitt or situasjonskart),
    keep only "plantegning" detections, and scale their bounding boxes
    back to the original image resolution.

    Args:
        image_path: Path to the uploaded image.
        model: Locally stored object-detection (YOLO-style) model; called
            as ``model(image_path)``.

    Returns:
        Tuple ``(detected_boxes, original_image)`` where ``detected_boxes``
        is a list of ``(x1, y1, x2, y2)`` integer pixel coordinates in the
        original image and ``original_image`` is the BGR image read from
        ``image_path``.  (The previous annotation ``List[tuple]`` was
        wrong: the function has always returned this 2-tuple.)

    Raises:
        FileNotFoundError: If the image cannot be read from ``image_path``.
    """
    original_image = cv2.imread(image_path)
    if original_image is None:
        # cv2.imread returns None silently on failure; fail loudly instead
        # of crashing later with an opaque AttributeError on .shape.
        raise FileNotFoundError(f"Could not read image: {image_path}")
    orig_h, orig_w = original_image.shape[:2]

    results = model(image_path)
    detected_boxes = []

    for result in results:
        boxes = result.boxes
        class_labels = result.names

        # The model may have run inference at a different resolution;
        # compute per-axis factors to map boxes back to the original image.
        yolo_h, yolo_w = result.orig_shape[:2]
        scale_x = orig_w / yolo_w
        scale_y = orig_h / yolo_h

        for box, cls in zip(boxes.xyxy, boxes.cls):
            label = class_labels[int(cls)]
            if label == "plantegning":
                x1, y1, x2, y2 = box.tolist()
                detected_boxes.append((
                    int(x1 * scale_x),
                    int(y1 * scale_y),
                    int(x2 * scale_x),
                    int(y2 * scale_y),
                ))

    return detected_boxes, original_image

def detect_and_plot_plantegning(model, image):
    """Run *model* on *image* restricted to class 1 and return the first
    prediction rendered with its detections drawn on the image."""
    predictions = model.predict(image, classes=1)
    first_result = predictions[0]
    return first_result.plot()


def obj_to_pandas(objdet_results):
    """
    Convert a sequence of detected bounding boxes into a DataFrame.

    Args:
        objdet_results: Iterable of bounding boxes, each ``(x1, y1, x2, y2)``.

    Returns:
        ``pd.DataFrame`` with columns ``mask_id`` (1-based running index)
        and ``bboxes`` (the box tuples).  Empty input yields an empty frame.
    """
    # enumerate(start=1) replaces the manual counter; the ids stay 1-based.
    rows = [
        {"mask_id": idx, "bboxes": box}
        for idx, box in enumerate(objdet_results, start=1)
    ]
    return pd.DataFrame(rows)


# ------------ AREAL MÅLEREN --------------------------

def detect_resize_walls(image_path, model):
    """
    Detect "wall" objects and scale their bounding boxes back to the
    original image resolution.

    Args:
        image_path: Path to the image file.
        model: YOLO-style object-detection model (``model.predict``).

    Returns:
        List of ``(x1, y1, x2, y2)`` integer pixel coordinates, in the
        original image's resolution, for every detection labelled "wall"
        (case-insensitive).

    Raises:
        FileNotFoundError: If the image cannot be read from ``image_path``.
    """
    original_image = cv2.imread(image_path)
    if original_image is None:
        # cv2.imread returns None silently on failure; fail loudly instead.
        raise FileNotFoundError(f"Could not read image: {image_path}")
    orig_h, orig_w = original_image.shape[:2]

    results = model.predict(image_path)
    detected_boxes = []

    for result in results:
        boxes = result.boxes
        class_labels = result.names

        # Map from the inference resolution back to the original image.
        yolo_h, yolo_w = result.orig_shape[:2]
        scale_x = orig_w / yolo_w
        scale_y = orig_h / yolo_h

        for box, cls in zip(boxes.xyxy, boxes.cls):
            label = class_labels[int(cls)]
            if label.lower() == "wall":
                x1, y1, x2, y2 = box.tolist()
                detected_boxes.append((
                    int(x1 * scale_x),
                    int(y1 * scale_y),
                    int(x2 * scale_x),
                    int(y2 * scale_y),
                ))
                # NOTE: a cv2.rectangle call used to draw each box onto
                # original_image here, but that image was never returned,
                # so the drawing was dead work and has been removed.

    return detected_boxes

def remove_interior_walls(masks, bboxes):
    """
    Drop bounding boxes that lie (almost) entirely inside any mask.

    Args:
        masks: Iterable of 2D arrays of shape (H, W); non-zero pixels
            mark the mask.
        bboxes: Iterable of ``(x1, y1, x2, y2)`` pixel boxes.

    Returns:
        List of boxes whose coverage by every mask is <= 80%.  Boxes whose
        crop is empty (degenerate or fully out of range) are always kept.
    """
    COVERAGE_THRESHOLD = 0.8  # >80% covered -> treated as interior wall
    filtered_bboxes = []
    for bbox in bboxes:
        x1, y1, x2, y2 = bbox
        inside_mask = False
        for mask in masks:
            mask_region = mask[y1:y2, x1:x2]
            # Guard against degenerate/out-of-range boxes: an empty crop
            # previously caused a ZeroDivisionError. Treat as 0 coverage.
            if mask_region.size == 0:
                continue
            mask_coverage = np.count_nonzero(mask_region) / mask_region.size

            if mask_coverage > COVERAGE_THRESHOLD:
                inside_mask = True
                break

        if not inside_mask:
            filtered_bboxes.append(bbox)

    return filtered_bboxes

def reference_object(image_path, model, object_type="wall"):
    """
    Find the horizontally-longest detection of *object_type* and draw it.

    Args:
        image_path: Path to the image file.
        model: YOLO-style detection model, run with ``conf=0.5``.
        object_type: Class label to match (compared case-insensitively).

    Returns:
        Tuple ``(img, longest_x, longest_y)``: the loaded image with the
        widest matching box drawn in green (when any match was found),
        the largest box width seen, and the largest box height seen
        (the height may come from a different box than the one drawn).
    """
    img = cv2.imread(image_path)
    predictions = model.predict(image_path, conf=0.5)

    widest_box = None
    max_width = 0
    max_height = 0

    for prediction in predictions:
        names = prediction.names
        det = prediction.boxes

        for coords, cls_id in zip(det.xyxy, det.cls):
            if names[int(cls_id)].lower() != object_type:
                continue
            x1, y1, x2, y2 = map(int, coords.tolist())
            width, height = x2 - x1, y2 - y1

            # Track the widest box (the one that gets drawn) and,
            # independently, the tallest extent seen.
            if width > max_width:
                max_width = width
                widest_box = (x1, y1, x2, y2)
            max_height = max(max_height, height)

    if widest_box:
        cv2.rectangle(
            img,
            (widest_box[0], widest_box[1]),
            (widest_box[2], widest_box[3]),
            (0, 255, 0),
            2,
        )

    return img, max_width, max_height