# MeasurementDUPLICATE / Code_2_7.py
# (repository header residue: "Marthee's picture / Update Code_2_7.py / f7df77e verified")
# -*- coding: utf-8 -*-
"""2.7 Code to be deployed 21.02.2025
Automatically generated by Colab.
Original file is located at
https://colab.research.google.com/drive/1RWSQn0GW_KXoHkJLcbYzLAGGyc0tiDWl
"""
"""## Imports"""
import numpy as np
import cv2
from matplotlib import pyplot as plt
import math
from PIL import Image , ImageDraw, ImageFont , ImageColor
import fitz
import ezdxf as ez
import sys
from ezdxf import units
# from google.colab.patches import cv2_imshow
from ezdxf.math import OCS, Matrix44, Vec3
import ezdxf
print(ezdxf.__version__)
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from shapely.geometry import Point, Polygon as ShapelyPolygon
from ezdxf.math import Vec2
import random
import pandas as pd
import google_sheet_Legend
# import tsadropboxretrieval
from ezdxf import bbox
from math import sin, cos, radians
# from ezdxf.tools import rgb
from ezdxf.colors import aci2rgb
# from ezdxf.math import rgb_from_color
from collections import Counter
import xml.etree.ElementTree as ET
from PyPDF2 import PdfReader, PdfWriter
from PyPDF2.generic import TextStringObject, NameObject, ArrayObject, FloatObject
from PyPDF2.generic import NameObject, TextStringObject, DictionaryObject, FloatObject, ArrayObject, NumberObject
from typing import NewType
from ctypes import sizeof
from io import BytesIO
def normalize_vertices(vertices):
    """Return the vertices in a canonical sorted order as a hashable tuple of tuples."""
    canonical = sorted(tuple(vertex) for vertex in vertices)
    return tuple(canonical)
def areas_are_similar(area1, area2, tolerance=0.2):
    """Return True when the two areas differ by no more than `tolerance`."""
    difference = area1 - area2
    return -tolerance <= difference <= tolerance
# -*- coding: utf-8 -*-
"""Version to be deployed of 3.2 Calculating area/perimeter
Automatically generated by Colab.
Original file is located at
https://colab.research.google.com/drive/1XPeCoTBgWSNBYZ3aMKBteP4YG3w4bORs
"""
"""## Notes"""
#new approach to get width and height of dxf plan
'''
This portion converts vertices read from the DXF into pixels in order to accurately locate shapes in the image and the PDF.
Ratio:
    MeasuredMetric * PixelValue / DxfMetric = MeasuredPixel
PixelValue: taken from the pixel-conversion code; the second number in the bracket represents the perimeter.
DxfMetric: the perimeter measured from Foxit.
Dividing PixelValue by DxfMetric gives a ratio; this is your DXF ratio.
'''
"""PDF to image"""
def pdftoimg(datadoc):
    """Render the first page of an in-memory PDF to a BGR OpenCV image.

    Parameters
    ----------
    datadoc : bytes
        Raw PDF file contents.

    Returns
    -------
    (img, pix) where img is a BGR numpy array of page 1 and pix is the
    PyMuPDF Pixmap it was rendered from (kept for its width/height).
    """
    doc = fitz.open('pdf', datadoc)
    try:
        page = doc[0]
        pix = page.get_pixmap()  # render page to an image
        pl = Image.frombytes('RGB', [pix.width, pix.height], pix.samples)
    finally:
        # Fix: the document handle was previously never closed (resource leak).
        # The Pixmap owns its own pixel buffer, so it stays valid after close.
        doc.close()
    img = np.array(pl)
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    print("IMAGE")
    # cv2_imshow(img)
    return img, pix
# Standard ISO paper sizes in inches.
# Mapping: size name -> (width, height) in portrait orientation.
ISO_SIZES_INCHES = {
    "A0": (33.11, 46.81),
    "A1": (23.39, 33.11),
    "A2": (16.54, 23.39),
    "A3": (11.69, 16.54),
    "A4": (8.27, 11.69),
    "A5": (5.83, 8.27),
    "A6": (4.13, 5.83),
    "A7": (2.91, 4.13),
    "A8": (2.05, 2.91),
    "A9": (1.46, 2.05),
    "A10": (1.02, 1.46)
}
def get_paper_size_in_inches(width, height):
    """Match a page size (inches) against the ISO table in either orientation.

    Returns the ISO size name, or "Unknown Size" when nothing matches
    within a 0.1-inch tolerance per dimension.
    """
    for name, (w, h) in ISO_SIZES_INCHES.items():
        portrait = abs(w - width) < 0.1 and abs(h - height) < 0.1
        landscape = abs(w - height) < 0.1 and abs(h - width) < 0.1
        if portrait or landscape:
            return name
    return "Unknown Size"
def analyze_pdf(datadoc):
    """Print every page's size of an in-memory PDF and return the last page's size.

    Parameters
    ----------
    datadoc : bytes
        Raw PDF file contents.

    Returns
    -------
    (width_inches, height_inches, paper_size) of the LAST page iterated.

    Raises
    ------
    ValueError
        If the PDF contains no pages. (Previously an empty document fell
        through the loop and crashed at the return with an UnboundLocalError.)
    """
    # Open the PDF file
    pdf_document = fitz.open('pdf', datadoc)
    if len(pdf_document) == 0:
        pdf_document.close()
        raise ValueError("PDF contains no pages")
    # Iterate through pages and print their sizes
    for page_number in range(len(pdf_document)):
        page = pdf_document[page_number]
        rect = page.rect
        width_points, height_points = rect.width, rect.height
        # Convert points to inches (1 point = 1/72 inch)
        width_inches, height_inches = width_points / 72, height_points / 72
        paper_size = get_paper_size_in_inches(width_inches, height_inches)
        print(f"Page {page_number + 1}: {width_inches:.2f} x {height_inches:.2f} inches ({paper_size})")
    pdf_document.close()
    return width_inches, height_inches, paper_size
def get_dxfSize(dxfpath):
    """Return a per-axis size estimate of the DXF modelspace content.

    NOTE(review): this returns extmin + extmax for each axis, not the extent
    (extmax - extmin). The two coincide only when the drawing's bounding box
    starts at the origin; otherwise the value is skewed. Confirm whether the
    addition is intentional before changing it - RetriveRatio's plausibility
    check depends on these values.
    """
    doc = ezdxf.readfile(dxfpath)
    msp = doc.modelspace()
    # Create a cache for bounding box calculations
    # Get the overall bounding box for all entities in the modelspace
    cache = bbox.Cache()
    overall_bbox = bbox.extents(msp, cache=cache)
    print("Overall Bounding Box:", overall_bbox)
    print(overall_bbox.extmin[0]+overall_bbox.extmax[0], overall_bbox.extmin[1]+overall_bbox.extmax[1])
    return overall_bbox.extmin[0]+overall_bbox.extmax[0], overall_bbox.extmin[1]+overall_bbox.extmax[1]
def switch_case(argument):
    """Return the scale ratio for an ISO paper-size name (1 when unknown).

    Each smaller paper size doubles the ratio (A0 = 1.27 ... A10 = 1300.48).
    """
    ratio_by_size = {
        "A0": 1.27,
        "A1": 2.54,
        "A2": 5.08,
        "A3": 10.16,
        "A4": 20.32,
        "A5": 40.64,
        "A6": 81.28,
        "A7": 162.56,
        "A8": 325.12,
        "A9": 650.24,
        "A10": 1300.48,
    }
    # Fall back to 1 (no scaling) for unrecognized sizes.
    ratio = ratio_by_size.get(argument, 1)
    print("Final Ratio=", ratio)
    return ratio
def RetriveRatio(datadoc, dxfpath):
    """Derive the final DXF-to-PDF scale ratio.

    Compares the larger dimension of the PDF page with the larger dimension
    of the DXF content; when the two are of comparable magnitude the ratio
    for the detected paper size is used, otherwise 1 (no scaling).
    """
    width, height, paper_size = analyze_pdf(datadoc)
    bigger = max(width, height)
    width_dxf, height_dxf = get_dxfSize(dxfpath)
    bigger_dxf = max(width_dxf, height_dxf)
    # Only scale when the DXF and PDF sizes are in the same ballpark.
    if 0.2 < bigger_dxf / bigger < 1.2:
        print("bigger_dxf/bigger", bigger / bigger_dxf)
        FinalRatio = switch_case(paper_size)
    else:
        FinalRatio = 1
    return FinalRatio
"""Flips image
DXF origin is at the bottom left while img origin is top left
"""
def flip(img):
    """Flip the image vertically (DXF bottom-left origin vs image top-left origin).

    Fix: the previous implementation rotated 180 degrees with warpAffine about
    (width/2, height/2) and then mirrored horizontally. That composition is a
    vertical flip, but the rotation matrix maps column x to width - x (instead
    of width - 1 - x), shifting the content by one pixel and losing a
    row/column to the border. cv2.flip performs the exact transform.
    """
    return cv2.flip(img, 0)  # 0 = flip around the horizontal axis (vertical flip)
def aci_to_rgb(aci):
    """Map an AutoCAD Color Index (ACI, 0-255) to an (R, G, B) tuple.

    Values are a hard-coded palette copied verbatim from the original code
    (appears to follow a common ACI palette - not verified against a spec).
    Any index outside the table falls back to white.
    """
    aci_rgb_map = {
        0: (0, 0, 0),
        1: (255, 0, 0),
        2: (255, 255, 0),
        3: (0, 255, 0),
        4: (0, 255, 255),
        5: (0, 0, 255),
        6: (255, 0, 255),
        7: (255, 255, 255),
        8: (65, 65, 65),
        9: (128, 128, 128),
        10: (255, 0, 0),
        11: (255, 170, 170),
        12: (189, 0, 0),
        13: (189, 126, 126),
        14: (129, 0, 0),
        15: (129, 86, 86),
        16: (104, 0, 0),
        17: (104, 69, 69),
        18: (79, 0, 0),
        19: (79, 53, 53),
        20: (255, 63, 0),
        21: (255, 191, 170),
        22: (189, 46, 0),
        23: (189, 141, 126),
        24: (129, 31, 0),
        25: (129, 96, 86),
        26: (104, 25, 0),
        27: (104, 78, 69),
        28: (79, 19, 0),
        29: (79, 59, 53),
        30: (255, 127, 0),
        31: (255, 212, 170),
        32: (189, 94, 0),
        33: (189, 157, 126),
        34: (129, 64, 0),
        35: (129, 107, 86),
        36: (104, 52, 0),
        37: (104, 86, 69),
        38: (79, 39, 0),
        39: (79, 66, 53),
        40: (255, 191, 0),
        41: (255, 234, 170),
        42: (189, 141, 0),
        43: (189, 173, 126),
        44: (129, 96, 0),
        45: (129, 118, 86),
        46: (104, 78, 0),
        47: (104, 95, 69),
        48: (79, 59, 0),
        49: (79, 73, 53),
        50: (255, 255, 0),
        51: (255, 255, 170),
        52: (189, 189, 0),
        53: (189, 189, 126),
        54: (129, 129, 0),
        55: (129, 129, 86),
        56: (104, 104, 0),
        57: (104, 104, 69),
        58: (79, 79, 0),
        59: (79, 79, 53),
        60: (191, 255, 0),
        61: (234, 255, 170),
        62: (141, 189, 0),
        63: (173, 189, 126),
        64: (96, 129, 0),
        65: (118, 129, 86),
        66: (78, 104, 0),
        67: (95, 104, 69),
        68: (59, 79, 0),
        69: (73, 79, 53),
        70: (127, 255, 0),
        71: (212, 255, 170),
        72: (94, 189, 0),
        73: (157, 189, 126),
        74: (64, 129, 0),
        75: (107, 129, 86),
        76: (52, 104, 0),
        77: (86, 104, 69),
        78: (39, 79, 0),
        79: (66, 79, 53),
        80: (63, 255, 0),
        81: (191, 255, 170),
        82: (46, 189, 0),
        83: (141, 189, 126),
        84: (31, 129, 0),
        85: (96, 129, 86),
        86: (25, 104, 0),
        87: (78, 104, 69),
        88: (19, 79, 0),
        89: (59, 79, 53),
        90: (0, 255, 0),
        91: (170, 255, 170),
        92: (0, 189, 0),
        93: (126, 189, 126),
        94: (0, 129, 0),
        95: (86, 129, 86),
        96: (0, 104, 0),
        97: (69, 104, 69),
        98: (0, 79, 0),
        99: (53, 79, 53),
        100: (0, 255, 63),
        101: (170, 255, 191),
        102: (0, 189, 46),
        103: (126, 189, 141),
        104: (0, 129, 31),
        105: (86, 129, 96),
        106: (0, 104, 25),
        107: (69, 104, 78),
        108: (0, 79, 19),
        109: (53, 79, 59),
        110: (0, 255, 127),
        111: (170, 255, 212),
        112: (0, 189, 94),
        113: (126, 189, 157),
        114: (0, 129, 64),
        115: (86, 129, 107),
        116: (0, 104, 52),
        117: (69, 104, 86),
        118: (0, 79, 39),
        119: (53, 79, 66),
        120: (0, 255, 191),
        121: (170, 255, 234),
        122: (0, 189, 141),
        123: (126, 189, 173),
        124: (0, 129, 96),
        125: (86, 129, 118),
        126: (0, 104, 78),
        127: (69, 104, 95),
        128: (0, 79, 59),
        129: (53, 79, 73),
        130: (0, 255, 255),
        131: (170, 255, 255),
        132: (0, 189, 189),
        133: (126, 189, 189),
        134: (0, 129, 129),
        135: (86, 129, 129),
        136: (0, 104, 104),
        137: (69, 104, 104),
        138: (0, 79, 79),
        139: (53, 79, 79),
        140: (0, 191, 255),
        141: (170, 234, 255),
        142: (0, 141, 189),
        143: (126, 173, 189),
        144: (0, 96, 129),
        145: (86, 118, 129),
        146: (0, 78, 104),
        147: (69, 95, 104),
        148: (0, 59, 79),
        149: (53, 73, 79),
        150: (0, 127, 255),
        151: (170, 212, 255),
        152: (0, 94, 189),
        153: (126, 157, 189),
        154: (0, 64, 129),
        155: (86, 107, 129),
        156: (0, 52, 104),
        157: (69, 86, 104),
        158: (0, 39, 79),
        159: (53, 66, 79),
        160: (0, 63, 255),
        161: (170, 191, 255),
        162: (0, 46, 189),
        163: (126, 141, 189),
        164: (0, 31, 129),
        165: (86, 96, 129),
        166: (0, 25, 104),
        167: (69, 78, 104),
        168: (0, 19, 79),
        169: (53, 59, 79),
        170: (0, 0, 255),
        171: (170, 170, 255),
        172: (0, 0, 189),
        173: (126, 126, 189),
        174: (0, 0, 129),
        175: (86, 86, 129),
        176: (0, 0, 104),
        177: (69, 69, 104),
        178: (0, 0, 79),
        179: (53, 53, 79),
        180: (63, 0, 255),
        181: (191, 170, 255),
        182: (46, 0, 189),
        183: (141, 126, 189),
        184: (31, 0, 129),
        185: (96, 86, 129),
        186: (25, 0, 104),
        187: (78, 69, 104),
        188: (19, 0, 79),
        189: (59, 53, 79),
        190: (127, 0, 255),
        191: (212, 170, 255),
        192: (94, 0, 189),
        193: (157, 126, 189),
        194: (64, 0, 129),
        195: (107, 86, 129),
        196: (52, 0, 104),
        197: (86, 69, 104),
        198: (39, 0, 79),
        199: (66, 53, 79),
        200: (191, 0, 255),
        201: (234, 170, 255),
        202: (141, 0, 189),
        203: (173, 126, 189),
        204: (96, 0, 129),
        205: (118, 86, 129),
        206: (78, 0, 104),
        207: (95, 69, 104),
        208: (59, 0, 79),
        209: (73, 53, 79),
        210: (255, 0, 255),
        211: (255, 170, 255),
        212: (189, 0, 189),
        213: (189, 126, 189),
        214: (129, 0, 129),
        215: (129, 86, 129),
        216: (104, 0, 104),
        217: (104, 69, 104),
        218: (79, 0, 79),
        219: (79, 53, 79),
        220: (255, 0, 191),
        221: (255, 170, 234),
        222: (189, 0, 141),
        223: (189, 126, 173),
        224: (129, 0, 96),
        225: (129, 86, 118),
        226: (104, 0, 78),
        227: (104, 69, 95),
        228: (79, 0, 59),
        229: (79, 53, 73),
        230: (255, 0, 127),
        231: (255, 170, 212),
        232: (189, 0, 94),
        233: (189, 126, 157),
        234: (129, 0, 64),
        235: (129, 86, 107),
        236: (104, 0, 52),
        237: (104, 69, 86),
        238: (79, 0, 39),
        239: (79, 53, 66),
        240: (255, 0, 63),
        241: (255, 170, 191),
        242: (189, 0, 46),
        243: (189, 126, 141),
        244: (129, 0, 31),
        245: (129, 86, 96),
        246: (104, 0, 25),
        247: (104, 69, 78),
        248: (79, 0, 19),
        249: (79, 53, 59),
        250: (51, 51, 51),
        251: (80, 80, 80),
        252: (105, 105, 105),
        253: (130, 130, 130),
        254: (190, 190, 190),
        255: (255, 255, 255)
    }
    # Default to white if index is invalid or not found
    return aci_rgb_map.get(aci, (255, 255, 255))
def int_to_rgb(color_int):
    """Split a 24-bit packed color integer into an (R, G, B) tuple."""
    red = (color_int >> 16) & 0xFF
    green = (color_int >> 8) & 0xFF
    blue = color_int & 0xFF
    return (red, green, blue)
def get_hatch_color(entity):
    """Resolve the display color of a DXF entity to an (R, G, B) tuple.

    Resolution order: explicit 24-bit true color, then a direct ACI
    (1-255), then the owning layer's color for ByLayer/ByBlock entities.
    Returns white (255, 255, 255) when nothing can be resolved.
    """
    if not entity:
        return (255, 255, 255)
    # Explicit true color takes precedence over the color index.
    if entity.dxf.hasattr('true_color'):
        return int_to_rgb(entity.dxf.true_color)
    color_index = entity.dxf.color
    # A direct ACI assignment.
    if 1 <= color_index <= 255:
        return aci_to_rgb(color_index)
    # Fix: in DXF, ACI 0 means ByBlock and ACI 256 means ByLayer (the
    # common default). The original code only checked 0 (mislabelled
    # "ByLayer"), so entities colored ByLayer (256) always fell through
    # to white. Resolve both through the owning layer's color.
    if color_index in (0, 256):
        layer_name = entity.dxf.layer
        layer = entity.doc.layers.get(layer_name)
        if layer:
            return aci_to_rgb(layer.dxf.color)
        return (255, 255, 255)
    # Unhandled color case: default to white.
    return (255, 255, 255)
def point_in_rectangle(point, rect_coords):
    """Return True when `point` lies inside the axis-aligned rectangle
    given as ((x1, y1), (x2, y2)), borders included."""
    px, py = point
    (left, bottom), (right, top) = rect_coords
    inside_x = left <= px <= right
    inside_y = bottom <= py <= top
    return inside_x and inside_y
from math import sqrt
def euclidean_distance(point1, point2):
    """Return the straight-line distance between two 2-D points."""
    dx = point2[0] - point1[0]
    dy = point2[1] - point1[1]
    return sqrt(dx ** 2 + dy ** 2)
def compute_hatch_centroid(hatch):
    """Average all boundary-path coordinates of a hatch.

    Collects the x/y of every polyline vertex and every edge start/end
    point across all paths. Returns (x, y) or None when no points exist.
    """
    xs = []
    ys = []
    for path in hatch.paths:
        if path.PATH_TYPE == "PolylinePath":
            for vertex in path.vertices:
                xs.append(vertex[0])
                ys.append(vertex[1])
        elif path.PATH_TYPE == "EdgePath":
            for edge in path.edges:
                # Not every edge type carries both endpoints.
                for attr in ("start", "end"):
                    if hasattr(edge, attr):
                        pt = getattr(edge, attr)
                        xs.append(pt[0])
                        ys.append(pt[1])
    if xs and ys:
        return (sum(xs) / len(xs), sum(ys) / len(ys))
    return None
"""### Hatched areas"""
def get_hatched_areas(datadoc,filename,FinalRatio,rotationangle,SearchArray):
    """Extract closed shapes from a DXF file and pair label texts with hatch colors.

    Parameters
    ----------
    datadoc : bytes
        Raw PDF data (unused in this function body; kept for caller parity).
    filename : str
        Path of the DXF file to read.
    FinalRatio : float
        Scale factor applied to every DXF coordinate (see RetriveRatio).
    rotationangle
        Unused in this function body.
    SearchArray : list
        Rows of [prefix, NBS-prefix, expected-length, ...] describing which
        TEXT/MTEXT labels to look for.

    Returns
    -------
    (sorted_data, text_with_positions): sorted_data is a list of
    [vertices, area, length-or-perimeter, rgb_color] sorted by area;
    text_with_positions is a list of [text, textNBS, (x, y), detected_color].

    NOTE(review): calls a global calculate_distance() that is not defined in
    this part of the file (presumably equivalent to euclidean_distance) -
    confirm it exists elsewhere in the module. Indentation below was
    reconstructed from a whitespace-stripped source; verify nesting against
    the original before relying on edge cases.
    """
    print("SearchArray = ",SearchArray)
    doc = ezdxf.readfile(filename)
    doc.header['$MEASUREMENT'] = 1
    msp = doc.modelspace()
    trial=0
    hatched_areas = []
    threshold=0.001
    TextFound = 0
    j=0
    # (normalized_vertices, area) pairs seen so far, for de-duplication.
    unique_shapes = []
    text_with_positions = []
    text_color_mapping = {}
    # Palette kept for reference; colors are currently read from the hatches themselves.
    color_palette = [
        (255, 0, 0), (0, 0, 255), (0, 255, 255), (0, 64, 0), (255, 204, 0),
        (255, 128, 64), (255, 0, 128), (255, 128, 192), (128, 128, 255),
        (128, 64, 0), (0, 255, 0), (0, 200, 0), (255, 128, 255), (128, 0, 255),
        (0, 128, 192), (128, 0, 128), (128, 0, 0), (0, 128, 255), (149, 1, 70),
        (255, 182, 128), (222, 48, 71), (240, 0, 112), (255, 0, 255),
        (192, 46, 65), (0, 0, 128), (0, 128, 64), (255, 255, 0), (128, 0, 80),
        (255, 255, 128), (90, 255, 140), (255, 200, 20), (91, 16, 51),
        (90, 105, 138), (114, 10, 138), (36, 82, 78), (225, 105, 190),
        (108, 150, 170), (11, 35, 75), (42, 176, 170), (255, 176, 170),
        (209, 151, 15), (81, 27, 85), (226, 106, 122), (67, 119, 149),
        (159, 179, 140), (159, 179, 30), (255, 85, 198), (255, 27, 85),
        (188, 158, 8), (140, 188, 120), (59, 61, 52), (65, 81, 21),
        (212, 255, 174), (15, 164, 90), (41, 217, 245), (213, 23, 182),
        (11, 85, 169), (78, 153, 239), (0, 66, 141), (64, 98, 232),
        (140, 112, 255), (57, 33, 154), (194, 117, 252), (116, 92, 135),
        (74, 43, 98), (188, 13, 123), (129, 58, 91), (255, 128, 100),
        (171, 122, 145), (255, 98, 98), (222, 48, 77)
    ]
    import re
    text_with_positions = []
    # SearchArray=[["","Wall Type","",""],["","","",""]]
    # print("SearchArray=",len(SearchArray))
    # print("SearchArray=",len(SearchArray[0]))
    # print("SearchArray=",SearchArray[0][0])
    if(SearchArray):
        for i in range(len(SearchArray)):
            # Case 1: prefix, NBS prefix and expected text length all provided.
            if (SearchArray[i][0] and SearchArray[i][1] and SearchArray[i][2]):
                for text_entity in doc.modelspace().query('TEXT MTEXT'):
                    text = text_entity.text.strip() if hasattr(text_entity, 'text') else ""
                    # if (text.startswith("P") and len(text) == 3) or (text.startswith("I") and len(text) == 3): # Filter for "Wall"
                    if(text.startswith(SearchArray[i][0]) and len(text)==int(SearchArray[i][2])):
                        position = text_entity.dxf.insert # Extract text position
                        x, y = position.x, position.y
                        # Look for an NBS label sharing a row or column with this text.
                        for text_entity in doc.modelspace().query('TEXT MTEXT'):
                            NBS = text_entity.text.strip() if hasattr(text_entity, 'text') else ""
                            if (NBS.startswith(SearchArray[i][1])):
                                positionNBS = text_entity.dxf.insert # Extract text position
                                xNBS, yNBS = positionNBS.x, positionNBS.y
                                if(x == xNBS or y == yNBS):
                                    textNBS=NBS
                                    break
                                else:
                                    textNBS = None
                        # NOTE(review): if no candidate starts with the NBS
                        # prefix, textNBS keeps its value from a previous
                        # iteration of the outer loop - verify intent.
                        nearest_hatch = None
                        min_distance = float('inf') # Initialize with a very large value
                        detected_color = (255, 255, 255) # Default to white
                        # Search for the nearest hatch
                        for hatch in doc.modelspace().query('HATCH'): # Query only hatches
                            if hatch.paths:
                                for path in hatch.paths:
                                    if path.type == 1: # PolylinePath
                                        vertices = [v[:2] for v in path.vertices]
                                        # Calculate the centroid of the hatch
                                        centroid_x = sum(v[0] for v in vertices) / len(vertices)
                                        centroid_y = sum(v[1] for v in vertices) / len(vertices)
                                        centroid = (centroid_x, centroid_y)
                                        # Calculate the distance between the text and the hatch centroid
                                        distance = calculate_distance((x, y), centroid)
                                        # Update the nearest hatch if a closer one is found
                                        if distance < min_distance:
                                            min_distance = distance
                                            nearest_hatch = hatch
                                            # Get the color of this hatch
                                            current_color = get_hatch_color(hatch)
                                            if current_color != (255, 255, 255): # Valid color found
                                                detected_color = current_color
                                        break # Stop checking further paths for this hatch
                        # Append the detected result only once
                        text_with_positions.append([text, textNBS, (x, y), detected_color])
                print("text_with_positions=",text_with_positions)
            # Case 2: prefix and expected text length provided, no NBS prefix.
            elif (SearchArray[i][0] and SearchArray[i][2]):
                for text_entity in doc.modelspace().query('TEXT MTEXT'):
                    text = text_entity.text.strip() if hasattr(text_entity, 'text') else ""
                    # if (text.startswith("P") and len(text) == 3) or (text.startswith("I") and len(text) == 3): # Filter for "Wall"
                    if(text.startswith(SearchArray[i][0]) and len(text)==int(SearchArray[i][2])):
                        position = text_entity.dxf.insert # Extract text position
                        x, y = position.x, position.y
                        textNBS = None
                        nearest_hatch = None
                        min_distance = float('inf') # Initialize with a very large value
                        detected_color = (255, 255, 255) # Default to white
                        # Search for the nearest hatch
                        for hatch in doc.modelspace().query('HATCH'): # Query only hatches
                            if hatch.paths:
                                for path in hatch.paths:
                                    if path.type == 1: # PolylinePath
                                        vertices = [v[:2] for v in path.vertices]
                                        # Calculate the centroid of the hatch
                                        centroid_x = sum(v[0] for v in vertices) / len(vertices)
                                        centroid_y = sum(v[1] for v in vertices) / len(vertices)
                                        centroid = (centroid_x, centroid_y)
                                        # Calculate the distance between the text and the hatch centroid
                                        distance = calculate_distance((x, y), centroid)
                                        # Update the nearest hatch if a closer one is found
                                        if distance < min_distance:
                                            min_distance = distance
                                            nearest_hatch = hatch
                                            # Get the color of this hatch
                                            current_color = get_hatch_color(hatch)
                                            if current_color != (255, 255, 255): # Valid color found
                                                detected_color = current_color
                                        break # Stop checking further paths for this hatch
                        # Append the detected result only once
                        text_with_positions.append([text, textNBS, (x, y), detected_color])
                print("text_with_positions=",text_with_positions)
            # Case 3: only a prefix provided.
            elif(SearchArray[i][0]):
                for text_entity in doc.modelspace().query('TEXT MTEXT'):
                    text = text_entity.text.strip() if hasattr(text_entity, 'text') else ""
                    # if (text.startswith("P") and len(text) == 3) or (text.startswith("I") and len(text) == 3): # Filter for "Wall"
                    if(text.startswith(SearchArray[i][0])):
                        position = text_entity.dxf.insert # Extract text position
                        x, y = position.x, position.y
                        textNBS = None
                        nearest_hatch = None
                        min_distance = float('inf') # Initialize with a very large value
                        detected_color = (255, 255, 255) # Default to white
                        # Search for the nearest hatch
                        for hatch in doc.modelspace().query('HATCH'): # Query only hatches
                            if hatch.paths:
                                for path in hatch.paths:
                                    if path.type == 1: # PolylinePath
                                        vertices = [v[:2] for v in path.vertices]
                                        # Calculate the centroid of the hatch
                                        centroid_x = sum(v[0] for v in vertices) / len(vertices)
                                        centroid_y = sum(v[1] for v in vertices) / len(vertices)
                                        centroid = (centroid_x, centroid_y)
                                        # Calculate the distance between the text and the hatch centroid
                                        distance = calculate_distance((x, y), centroid)
                                        # Update the nearest hatch if a closer one is found
                                        if distance < min_distance:
                                            min_distance = distance
                                            nearest_hatch = hatch
                                            # Get the color of this hatch
                                            current_color = get_hatch_color(hatch)
                                            if current_color != (255, 255, 255): # Valid color found
                                                detected_color = current_color
                                        break # Stop checking further paths for this hatch
                        # Append the detected result only once
                        text_with_positions.append([text, textNBS, (x, y), detected_color])
                print("text_with_positions=",text_with_positions)
    # Group collected labels by their text and keep one entry per label,
    # preferring an entry that carries a non-None NBS text.
    grouped = {}
    for entry in text_with_positions:
        key = entry[0]
        grouped.setdefault(key, []).append(entry)
    # Filter the groups: if any entry in a group has a non-None Text Nbs, keep only one of those
    filtered_results = []
    for key, entries in grouped.items():
        # Find the first entry with a valid textNBS (non-None)
        complete = next((entry for entry in entries if entry[1] is not None), None)
        if complete:
            filtered_results.append(complete)
        else:
            # If none are complete, you can choose to keep just one entry
            filtered_results.append(entries[0])
    text_with_positions=filtered_results
    # Walk every modelspace entity and collect closed shapes, scaled by
    # FinalRatio and de-duplicated via (normalized vertices, area).
    for entity in msp:
        if entity.dxftype() == 'HATCH':
            cntPoints=[]
            for path in entity.paths:
                # path_type = path.type
                # # Resolve the path type to its name
                # path_type_name = BoundaryPathType(path_type).name
                # print(f"Encountered path type: {path_type_name}")
                vertices = [] # Reset vertices for each path
                # print(str(path.type))
                if str(path.type) == 'BoundaryPathType.POLYLINE' or path.type == 1:
                    # if path.type == 2: # Polyline path
                    # Handle POLYLINE type HATCH
                    vertices = [(vertex[0] * FinalRatio, vertex[1] * FinalRatio) for vertex in path.vertices]
                    # print("Hatch Vertices = ",vertices)
                    if len(vertices) > 3:
                        poly = ShapelyPolygon(vertices)
                        minx, miny, maxx, maxy = poly.bounds
                        width = maxx - minx
                        height = maxy - miny
                        if (poly.area > 0 and (height > 0.2 or width > 0.2)):
                            # length = the larger bounding-box dimension.
                            length = height
                            if(width > length):
                                length = width
                            area1 = round(poly.area, 3)
                            perimeter = round(poly.length, 3)
                            # print("Vertices = ",vertices)
                            normalized_vertices = normalize_vertices(vertices)
                            rgb_color = get_hatch_color(entity)
                            # print("rgb_color = ",rgb_color)
                            # if(rgb_color == (255, 255, 255)):
                            # if(len(text_with_positions)>0):
                            # for text, position, color in text_with_positions:
                            # text_position = Point(position[0], position[1])
                            # if poly.contains(text_position):
                            # rgb_color = color
                            # break
                            duplicate_found = False
                            for existing_vertices, existing_area in unique_shapes:
                                if normalized_vertices == existing_vertices and areas_are_similar(area1, existing_area):
                                    duplicate_found = True
                                    break
                            if not duplicate_found:
                                # rgb_color = get_hatch_color(entity) # Assuming this function exists
                                unique_shapes.append((normalized_vertices, area1))
                                if length > 0.6:
                                    hatched_areas.append([vertices, area1, length, rgb_color])
                elif str(path.type) == 'BoundaryPathType.EDGE' or path.type == 2:
                    # elif path.type == 2: # Edge path
                    # Handle EDGE type HATCH
                    vert = []
                    for edge in path.edges:
                        x, y = edge.start
                        x1, y1 = edge.end
                        vert.append((x * FinalRatio, y * FinalRatio))
                        vert.append((x1 * FinalRatio, y1 * FinalRatio))
                    poly = ShapelyPolygon(vert)
                    minx, miny, maxx, maxy = poly.bounds
                    width = maxx - minx
                    height = maxy - miny
                    if (poly.area > 0 and (height > 0.2 or width > 0.2)):
                        length = height
                        if(width > length):
                            length = width
                        area1 = round(poly.area, 3)
                        perimeter = round(poly.length, 3)
                        normalized_vertices = normalize_vertices(vert)
                        rgb_color = get_hatch_color(entity)
                        # print("rgb_color = ",rgb_color)
                        # if(rgb_color == (255, 255, 255)):
                        # if(len(text_with_positions)>0):
                        # for text, position, color in text_with_positions:
                        # text_position = Point(position[0], position[1])
                        # if poly.contains(text_position):
                        # rgb_color = color
                        # break
                        duplicate_found = False
                        for existing_vertices, existing_area in unique_shapes:
                            if normalized_vertices == existing_vertices and areas_are_similar(area1, existing_area):
                                duplicate_found = True
                                break
                        if not duplicate_found:
                            # rgb_color = get_hatch_color(entity) # Assuming this function exists
                            unique_shapes.append((normalized_vertices, area1))
                            if length > 0.6:
                                hatched_areas.append([vert, area1, length, rgb_color])
                else:
                    print(f"Encountered path type: {path.type}")
        elif entity.dxftype() == 'SOLID':
            vertices = [entity.dxf.vtx0 * (FinalRatio), entity.dxf.vtx1* (FinalRatio), entity.dxf.vtx2* (FinalRatio), entity.dxf.vtx3* (FinalRatio)]
            poly = ShapelyPolygon(vertices)
            minx, miny, maxx, maxy = poly.bounds
            # Calculate the width and height of the bounding box
            width = maxx - minx
            height = maxy - miny
            if (poly.area > 0 and (height > 0 and width > 0)):
                area1 = round(poly.area, 3)
                perimeter = round(poly.length, 3)
                normalized_vertices = normalize_vertices(vertices)
                duplicate_found = False
                for existing_vertices, existing_area in unique_shapes:
                    if normalized_vertices == existing_vertices or areas_are_similar(area1, existing_area):
                        duplicate_found = True
                        break
                if not duplicate_found:
                    rgb_color = get_hatch_color(entity) # Assuming this function exists
                    unique_shapes.append((normalized_vertices, area1))
                    hatched_areas.append([vertices, area1, perimeter, rgb_color])
        elif entity.dxftype() == 'LWPOLYLINE':
            vertices = []
            lwpolyline = entity
            points = lwpolyline.get_points()
            flag = 0
            # Collect vertices and apply the FinalRatio
            for i in range(len(points)):
                vertices.append([points[i][0] * FinalRatio, points[i][1] * FinalRatio])
            # # Ensure there are more than 3 vertices
            if len(vertices) > 3:
                # Check if the polyline is closed
                if vertices[0][0] == vertices[-1][0] or vertices[0][1] == vertices[-1][1]:
                    poly = ShapelyPolygon(vertices)
                    minx, miny, maxx, maxy = poly.bounds
                    # Calculate width and height of the bounding box
                    width = maxx - minx
                    height = maxy - miny
                    # Check area and size constraints
                    if (poly.area > 0 and (height > 0 and width > 0)):
                        area1 = round(poly.area, 3)
                        perimeter = round(poly.length, 3)
                        normalized_vertices = normalize_vertices(vertices)
                        duplicate_found = False
                        for existing_vertices, existing_area in unique_shapes:
                            if normalized_vertices == existing_vertices or areas_are_similar(area1, existing_area):
                                duplicate_found = True
                                break
                        if not duplicate_found:
                            rgb_color = get_hatch_color(entity) # Assuming this function exists
                            unique_shapes.append((normalized_vertices, area1))
                            hatched_areas.append([vertices, area1, perimeter, rgb_color])
        elif entity.dxftype() == 'POLYLINE':
            flag=0
            vertices = [(v.dxf.location.x * (FinalRatio), v.dxf.location.y * (FinalRatio)) for v in entity.vertices]
            # print('Vertices:', vertices)
            if(len(vertices)>3):
                if(vertices[0][0] == vertices[len(vertices)-1][0] or vertices[0][1] == vertices[len(vertices)-1][1]):
                    poly=ShapelyPolygon(vertices)
                    minx, miny, maxx, maxy = poly.bounds
                    # Calculate the width and height of the bounding box
                    width = maxx - minx
                    height = maxy - miny
                    if (poly.area > 0 and (height > 0 and width > 0)):
                        area1 = round(poly.area,3)
                        perimeter = round (poly.length,3)
                        normalized_vertices = normalize_vertices(vertices)
                        duplicate_found = False
                        for existing_vertices, existing_area in unique_shapes:
                            if normalized_vertices == existing_vertices or areas_are_similar(area1, existing_area):
                                duplicate_found = True
                                break
                        if not duplicate_found:
                            rgb_color = get_hatch_color(entity) # Assuming this function exists
                            unique_shapes.append((normalized_vertices, area1))
                            hatched_areas.append([vertices, area1, perimeter, rgb_color])
        elif entity.dxftype() == 'SPLINE':
            spline_entity = entity
            vertices = []
            control_points = spline_entity.control_points
            if(len(control_points)>3):
                for i in range(len(control_points)):
                    vertices.append([control_points[i][0]* (FinalRatio),control_points[i][1]* (FinalRatio)])
                poly=ShapelyPolygon(vertices)
                minx, miny, maxx, maxy = poly.bounds
                # Calculate the width and height of the bounding box
                width = maxx - minx
                height = maxy - miny
                if (poly.area > 0 and (height > 0 and width > 0)):
                    area1 = round(poly.area,3)
                    perimeter = round (poly.length,3)
                    normalized_vertices = normalize_vertices(vertices)
                    duplicate_found = False
                    for existing_vertices, existing_area in unique_shapes:
                        if normalized_vertices == existing_vertices or areas_are_similar(area1, existing_area):
                            duplicate_found = True
                            break
                    if not duplicate_found:
                        rgb_color = get_hatch_color(entity) # Assuming this function exists
                        unique_shapes.append((normalized_vertices, area1))
                        hatched_areas.append([vertices, area1, perimeter, rgb_color])
    # Smallest shapes first.
    sorted_data = sorted(hatched_areas, key=lambda x: x[1])
    return sorted_data,text_with_positions
"""### Rotate polygon"""
def rotate_point(point, angle, pdfrotation, width, height, center_point=(0, 0)):
    """Rotate a point counter-clockwise by `angle` degrees around center_point,
    then translate it back into page coordinates.

    Rotated pages (pdfrotation != 0) are shifted by the page width on x;
    unrotated pages are shifted by the page height on y.
    """
    theta = radians(angle % 360)
    # Translate so the rotation center sits at the origin.
    dx = point[0] - center_point[0]
    dy = point[1] - center_point[1]
    rx = dx * cos(theta) - dy * sin(theta)
    ry = dx * sin(theta) + dy * cos(theta)
    # Undo the translation, offsetting into the page's coordinate frame.
    if pdfrotation != 0:
        return (rx + width + center_point[0], ry + center_point[1])
    return (rx + center_point[0], ry + height + center_point[1])
def rotate_polygon(polygon, angle, pdfrotation, width, height, center_point=(0, 0)):
    """Rotate every corner of `polygon` (a sequence of (x, y) pairs)
    counter-clockwise by `angle` degrees around center_point.

    Returns a new list of rotated corners; the input is left untouched.
    """
    return [
        rotate_point(corner, angle, pdfrotation, width, height, center_point)
        for corner in polygon
    ]
# Create a dataframe containing color, count (how many times this object appears in the plan),
# area of one of these shapes, total area, perimeter, total perimeter, length, total length.
# import pandas as pd
# SimilarAreaDictionary = pd.DataFrame(columns=['Guess','Color','Occurences','Area','Total Area','Perimeter','Total Perimeter','Length','Total Length','R','G','B'])
# Loop over hatched_areas, count the occurrences of each shape, and build a table with these fields.
def Create_DF(dxfpath,datadoc,hatched_areas):
    """Aggregate hatched shapes into a summary DataFrame.

    Shapes whose area AND perimeter match an existing row exactly (tol=0)
    are counted as additional occurrences of that row; otherwise a new row
    is appended.

    Parameters
    ----------
    dxfpath : str - DXF path, only used to derive the scale ratio.
    datadoc : bytes - raw PDF data, only used to derive the scale ratio.
    hatched_areas : list of [vertices, area, perimeter/length, color].

    Returns
    -------
    pandas.DataFrame with one row per distinct shape.

    NOTE(review): FinalRatio is computed but never used below - hatched_areas
    appears to arrive pre-scaled; confirm whether the call can be dropped.
    """
    FinalRatio= RetriveRatio(datadoc,dxfpath)
    # hatched_areas = get_hatched_areas(datadoc,dxfpath,FinalRatio)
    # hatched_areas=remove_duplicate_shapes(new_hatched_areas)
    # SimilarAreaDictionary= pd.DataFrame(columns=['Area', 'Total Area', 'Perimeter', 'Total Perimeter', 'Occurences', 'Color'])
    SimilarAreaDictionary= pd.DataFrame(columns=['Guess','Color','Occurences','Area','Total Area','Perimeter','Total Perimeter','Length','Total Length','Texts','Comments'])
    # colorRanges2=generate_color_array(30000)
    # colorRanges = [[255, 0, 0], [0, 0, 255], [0, 255, 255], [0, 64, 0], [255, 204, 0], [255, 128, 64], [255, 0, 128], [255, 128, 192], [128, 128, 255], [128, 64, 0],[0, 255, 0],[0, 200, 0],[255, 128, 255], [128, 0, 255], [0, 128, 192], [128, 0, 128],[128, 0, 0], [0, 128, 255], [149, 1, 70], [255, 182, 128], [222, 48, 71], [240, 0, 112], [255, 0, 255], [192, 46, 65], [0, 0, 128],[0, 128, 64],[255, 255, 0], [128, 0, 80], [255, 255, 128], [90, 255, 140],[255, 200, 20],[91, 16, 51], [90, 105, 138], [114, 10, 138], [36, 82, 78], [225, 105, 190], [108, 150, 170], [11, 35, 75], [42, 176, 170], [255, 176, 170], [209, 151, 15],[81, 27, 85], [226, 106, 122], [67, 119, 149], [159, 179, 140], [159, 179, 30],[255, 85, 198], [255, 27, 85], [188, 158, 8],[140, 188, 120], [59, 61, 52], [65, 81, 21], [212, 255, 174], [15, 164, 90],[41, 217, 245], [213, 23, 182], [11, 85, 169], [78, 153, 239], [0, 66, 141],[64, 98, 232], [140, 112, 255], [57, 33, 154], [194, 117, 252], [116, 92, 135], [74, 43, 98], [188, 13, 123], [129, 58, 91], [255, 128, 100], [171, 122, 145], [255, 98, 98], [222, 48, 77]]
    # colorUsed=[]
    TotalArea=0
    TotalPerimeter=0
    for shape in hatched_areas:
        area = shape[1] # area
        perimeter = shape[2] # perimeter
        # if(i < len(colorRanges)):
        # color = colorRanges[i]
        # colorUsed.append(color)
        # else:
        # color = colorRanges2[i]
        # colorUsed.append(color)
        TotalArea = area
        TotalPerimeter = perimeter
        # tol=0 means an exact area AND perimeter match is required.
        tol=0
        condition1 = (SimilarAreaDictionary['Area'] >= area - tol) & (SimilarAreaDictionary['Area'] <= area +tol)
        condition2 = (SimilarAreaDictionary['Perimeter'] >= perimeter -tol) & (SimilarAreaDictionary['Perimeter'] <= perimeter +tol)
        combined_condition = condition1 & condition2
        if any(combined_condition):
            # Existing shape: bump its count and recompute the totals.
            index = np.where(combined_condition)[0][0]
            SimilarAreaDictionary.at[index, 'Occurences'] += 1
            SimilarAreaDictionary.at[index, 'Total Area'] = SimilarAreaDictionary.at[index, 'Area'] * SimilarAreaDictionary.at[index, 'Occurences']
            SimilarAreaDictionary.at[index, 'Total Perimeter'] = SimilarAreaDictionary.at[index, 'Perimeter'] * SimilarAreaDictionary.at[index, 'Occurences']
        else:
            # New shape: totals start equal to the single-shape values.
            TotalArea=area
            TotalPerimeter=perimeter
            # print("Shape[3]",shape[3])
            new_data = {'Area': area, 'Total Area': TotalArea ,'Perimeter': perimeter, 'Total Perimeter': TotalPerimeter, 'Occurences': 1, 'Color':shape[3],'Comments':''} #add color here and read color to insert in
            SimilarAreaDictionary = pd.concat([SimilarAreaDictionary, pd.DataFrame([new_data])], ignore_index=True)
    # print(SimilarAreaDictionary)
    return SimilarAreaDictionary
"""### Draw on Image and PDF"""
# from sklearn.cluster import KMeans
def color_distance(color1, color2):
    """Return a coarse similarity score between two RGB colors.

    Returns 1 when every channel of ``color1`` is within 20 of the
    corresponding channel of ``color2`` (treated as "same" color),
    otherwise 100 ("different").  The two magic values are chosen so the
    result compares cleanly against unify_colors' threshold of 20.

    Fix vs. previous revision: removed the five debug ``print`` calls
    that spammed stdout on every comparison; the returned value is
    unchanged.
    """
    # All three per-channel absolute differences must be < 20 to match.
    if (abs(color1[0] - color2[0]) < 20 and
            abs(color1[1] - color2[1]) < 20 and
            abs(color1[2] - color2[2]) < 20):
        return 1
    return 100
# Unify colors within a distance threshold
def unify_colors(df, threshold=20):
    """Merge adjacent rows' colors when they are close enough.

    Walks the 'Color' column top to bottom; whenever a row's color is
    within ``threshold`` (per color_distance) of the next row's color,
    the next row is overwritten with the current row's color.  Returns
    the mutated DataFrame.
    """
    # Normalize list-valued colors to hashable tuples first.
    df['Color'] = df['Color'].apply(
        lambda value: tuple(value) if isinstance(value, list) else value
    )
    # Compare each row only with its immediate successor; the last row
    # has no successor, hence len(df) - 1 iterations.
    for row in range(len(df) - 1):
        this_color = df.at[row, 'Color']
        following = df.at[row + 1, 'Color']
        if color_distance(this_color, following) <= threshold:
            # Close enough: propagate the current color downward.
            df.at[row + 1, 'Color'] = this_color
    return df
def normalize_color(color):
    """Convert PDF color (range 0-1) to RGB (range 0-255)."""
    # Scale each channel, then clamp into the valid 0-255 byte range.
    scaled = (round(channel * 255) for channel in color)
    return tuple(max(0, min(255, value)) for value in scaled)
def adjustannotations(OutputPdfStage1, text_with_positions):
    """Rewrite annotation metadata in an in-memory PDF.

    Two passes over every annotation on every page:
      1. /Line annotations whose /Subj contains "Perimeter Measurement"
         get a /Measure dictionary (unit "m", factor 1), an /IT of
         /LineDimension, and their /Subj renamed to
         "Length Measurement".
      2. Any /Line or /PolyLine annotation carrying a color /C gets its
         /T (comment title) set from the entry in text_with_positions
         whose color equals the annotation's normalized color.

    Parameters:
        OutputPdfStage1: raw PDF data (bytes-like; it is fed straight
            into BytesIO despite the parameter name).
        text_with_positions: iterable of (text, NBS, _, color) tuples;
            `color` is compared against the normalized annotation color.

    Returns:
        bytes: the rewritten PDF. Nothing is written to disk — the
        "saved to" message below only echoes a nominal file name.
    """
    input_pdf_path = OutputPdfStage1  # NOTE(review): unused leftover from a file-path based version
    output_pdf_path = "Final-WallsAdjusted.pdf"  # only used in the log message at the end
    # Load the input PDF from memory.
    pdf_bytes_io = BytesIO(OutputPdfStage1)
    reader = PdfReader(pdf_bytes_io)
    writer = PdfWriter()
    # Copy all pages into the writer, then carry over the metadata.
    writer.append_pages_from_reader(reader)
    metadata = reader.metadata
    writer.add_metadata(metadata)
    for page_index, page in enumerate(writer.pages):
        if "/Annots" in page:
            annotations = page["/Annots"]
            for annot_index, annot in enumerate(annotations):
                obj = annot.get_object()
                if obj.get("/Subtype") == "/Line":
                    # Only retag the perimeter lines produced earlier in the pipeline.
                    if "/Subj" in obj and "Perimeter Measurement" in obj["/Subj"]:
                        obj.update({
                            NameObject("/Measure"): DictionaryObject({
                                NameObject("/Type"): NameObject("/Measure"),
                                NameObject("/L"): DictionaryObject({
                                    NameObject("/G"): FloatObject(1),
                                    # Unit label shown by PDF viewers for this measurement.
                                    NameObject("/U"): TextStringObject("m"),
                                }),
                            }),
                            NameObject("/IT"): NameObject("/LineDimension"),  # distinctive intent name
                            NameObject("/Subj"): TextStringObject("Length Measurement"),
                        })
                if obj.get("/Subtype") in ["/Line", "/PolyLine"] and "/C" in obj:
                    # Normalize the 0-1 PDF color to 0-255 and look it up.
                    annot_color = normalize_color(obj["/C"])
                    matched_entry = next(
                        ((text, NBS) for text, NBS, _, color in text_with_positions if annot_color == color),
                        (None, None)
                    )
                    matched_text, matched_nbs = matched_entry
                    combined_text = ""
                    if matched_text and matched_nbs:
                        combined_text = f"{matched_text} - {matched_nbs}"
                    elif matched_text:
                        combined_text = matched_text
                    elif matched_nbs:
                        combined_text = matched_nbs
                    # /T is the comment title; note this is written even when no
                    # color matched (empty string overwrites any existing title).
                    obj.update({
                        NameObject("/T"): TextStringObject(combined_text),
                    })
    output_pdf_io = BytesIO()
    writer.write(output_pdf_io)
    output_pdf_io.seek(0)
    print(f"Annotations updated and saved to {output_pdf_path}")
    return output_pdf_io.read()
def distance(rect1, rect2):
    """Calculate the Euclidean distance between two annotation centers.

    Each rect is a 4-sequence [x0, y0, x1, y1]; the center is the
    midpoint of its diagonal corners.
    """
    cx1 = (float(rect1[0]) + float(rect1[2])) / 2
    cy1 = (float(rect1[1]) + float(rect1[3])) / 2
    cx2 = (float(rect2[0]) + float(rect2[2])) / 2
    cy2 = (float(rect2[1]) + float(rect2[3])) / 2
    return math.sqrt((cx2 - cx1) ** 2 + (cy2 - cy1) ** 2)
def remove_duplicate_annotations(pdf_path, threshold):
    """Remove one of the duplicate annotations if they are close and have the same color.

    Two annotations are duplicates when their normalized colors are
    equal and their /Rect centers are closer than ``threshold``; the
    second of each such pair is dropped.

    Parameters:
        pdf_path: raw PDF data (bytes-like; despite the name it is fed
            straight into BytesIO, no file is read).
        threshold: maximum center distance (PDF units) for two
            same-colored annotations to count as duplicates.

    Returns:
        bytes: the filtered PDF; nothing is written to disk.
    """
    input_pdf_path = pdf_path  # NOTE(review): unused leftover from a file-path based version
    output_pdf_path = "Filtered-Walls.pdf"  # NOTE(review): also unused
    # Load the input PDF from memory.
    pdf_bytes_io = BytesIO(pdf_path)
    reader = PdfReader(pdf_bytes_io)
    writer = PdfWriter()
    # Carry over the document metadata.
    metadata = reader.metadata
    writer.add_metadata(metadata)
    for page_index in range(len(reader.pages)):
        page = reader.pages[page_index]
        if "/Annots" in page:
            annotations = page["/Annots"]
            annots_data = []
            to_delete = set()
            # Extract annotation positions and colors.
            for annot_index, annot_ref in enumerate(annotations):
                annot = annot_ref.get_object()
                if "/Rect" in annot and "/C" in annot:
                    rect = annot["/Rect"]
                    if isinstance(rect, ArrayObject):  # ensure rect is a plain list
                        rect = list(rect)
                    color = normalize_color(annot["/C"])
                    annots_data.append((annot_index, rect, color))
            # O(n^2) pairwise comparison; mark the later annotation of each
            # close same-colored pair for deletion.
            for i, (idx1, rect1, color1) in enumerate(annots_data):
                if idx1 in to_delete:
                    continue
                for j, (idx2, rect2, color2) in enumerate(annots_data[i+1:], start=i+1):
                    if idx2 in to_delete:
                        continue
                    if color1 == color2 and distance(rect1, rect2) < threshold:
                        to_delete.add(idx2)  # mark second annotation for deletion
            # Rebuild the annotation array without the marked duplicates.
            new_annotations = [annotations[i] for i in range(len(annotations)) if i not in to_delete]
            page[NameObject("/Annots")] = ArrayObject(new_annotations)
        writer.add_page(page)
    output_pdf_io = BytesIO()
    writer.write(output_pdf_io)
    output_pdf_io.seek(0)
    return output_pdf_io.read()
def calculate_distance(p1, p2):
    """Return the Euclidean distance between two 2-D points."""
    dx = p1[0] - p2[0]
    dy = p1[1] - p2[1]
    return math.sqrt(dx ** 2 + dy ** 2)
def mainFunctionDrawImgPdf(datadoc, dxfpath, dxfratio, SearchArray, Thickness, pdfpath=0, pdfname=0):
    """Draw detected hatched areas onto the plan image and PDF.

    For every hatched polygon this: draws its outline on the rasterized
    plan, samples a dominant color for white-coded shapes, records the
    shape's color in SimilarAreaDictionary, and adds a line/polyline
    perimeter annotation to the PDF page.  The rows are then grouped per
    color, the annotations retagged (adjustannotations) and
    de-duplicated (remove_duplicate_annotations), and a Google-Sheet
    legend is produced.

    Bug fixed vs. the previous revision: the patch-sampling loops reused
    the loop variables ``i``/``j``, clobbering the outer dataframe row
    counter ``i`` and corrupting subsequent
    ``SimilarAreaDictionary.at[i, 'Color']`` writes and the ``i += 1``
    advance; they now use ``px``/``py``.

    Returns:
        (doc2, latestimg, SimilarAreaDictionary, spreadsheetId,
         spreadsheet_url, namepathArr, list1, hatched_areas)
    """
    OutputPdfStage1 = 'BB Trial.pdf'
    FinalRatio = RetriveRatio(datadoc, dxfpath)
    img, pix2 = pdftoimg(datadoc)
    flipped_horizontal = flip(img)
    imgg = flipped_horizontal
    doc = fitz.open('pdf', datadoc)
    page2 = doc[0]
    rotationOld = page2.rotation
    derotationMatrix = page2.derotation_matrix
    pix = page2.get_pixmap()
    # Page size from the mediabox (abs() guards against negative origins).
    width = abs(page2.mediabox[2]) + abs(page2.mediabox[0])
    height = abs(page2.mediabox[3]) + abs(page2.mediabox[1])
    print('mediabox', width, height)
    if page2.rotation != 0:
        rotationangle = page2.rotation
        page2.set_rotation(0)  # work in unrotated coordinates
        ratio = pix.width / img.shape[0]
    else:
        ratio = pix.width / img.shape[1]
        rotationangle = 270  # assumed rotation for unrotated pages
    hatched_areas, text_with_positions = get_hatched_areas(datadoc, dxfpath, FinalRatio, rotationangle, SearchArray)
    NewColors = []
    SimilarAreaDictionary = Create_DF(dxfpath, datadoc, hatched_areas)
    i = 0            # row counter into SimilarAreaDictionary
    flagcolor = 0    # 1 = matched an existing row at `index`, 2 = use row `i`
    ColorCheck = []  # colors already assigned, so near-identical ones are reused
    for polygon in hatched_areas:
        cntPoints = []   # integer pixel vertices (for OpenCV)
        cntPoints1 = []  # float pixel vertices (for PDF annotations)
        shapeePerimeter = []
        shapeeArea = []
        blackImgShapes = np.zeros(imgg.shape[:2], dtype="uint8")
        blackImgShapes = cv2.cvtColor(blackImgShapes, cv2.COLOR_GRAY2BGR)
        # Convert each vertex from metric to pixel coordinates.
        for vertex in polygon[0]:
            x = (vertex[0]) * dxfratio
            y = (vertex[1]) * dxfratio
            if rotationangle == 0:
                if y < 0:
                    y = y * -1
            cntPoints.append([int(x), int(y)])
            cntPoints1.append([x, y])
        # Filled white mask of this polygon, used below for color sampling.
        cv2.drawContours(blackImgShapes, [np.array(cntPoints)], -1, ([255, 255, 255]), thickness=-1)
        x, y, w, h = cv2.boundingRect(np.array(cntPoints))
        # Build the de-rotated perimeter; the first vertex (p2) is kept and
        # re-appended after the loop to close the ring.
        firstpoint = 0
        for poi in np.array(cntPoints1):
            if firstpoint == 0:
                x2, y2 = poi
                p2 = fitz.Point(x2, y2)
                p2 = p2 * derotationMatrix
                shapeePerimeter.append([p2[0], p2[1]])
                firstpoint = 1
            else:
                x1, y1 = poi
                p1 = fitz.Point(x1, y1)
                p1 = p1 * derotationMatrix
                shapeePerimeter.append([p1[0], p1[1]])
        shapeePerimeter.append([p2[0], p2[1]])
        shapeePerimeter = np.flip(shapeePerimeter, 1)  # swap x/y columns
        shapeePerimeter = rotate_polygon(shapeePerimeter, rotationangle, rotationOld, width, height)
        # Same transform for the area polygon (every vertex, no first-point split).
        for poi in np.array(cntPoints1):
            x1, y1 = poi
            p1 = fitz.Point(x1, y1)
            p1 = p1 * derotationMatrix
            shapeeArea.append([p1[0], p1[1]])
        shapeeArea.append([p2[0], p2[1]])
        shapeeArea = np.flip(shapeeArea, 1)
        shapeeArea = rotate_polygon(shapeeArea, rotationangle, rotationOld, width, height)
        # Find the dictionary row with exactly this area and perimeter.
        tol = 0
        condition1 = (SimilarAreaDictionary['Area'] >= polygon[1] - tol) & (SimilarAreaDictionary['Area'] <= polygon[1] + tol)
        condition2 = (SimilarAreaDictionary['Perimeter'] >= polygon[2] - tol) & (SimilarAreaDictionary['Perimeter'] <= polygon[2] + tol)
        combined_condition = condition1 & condition2
        if any(combined_condition):
            flagcolor = 1
            index = np.where(combined_condition)[0][0]
            NewColors = SimilarAreaDictionary.at[index, 'Color']
        else:
            flagcolor = 2
            NewColors = SimilarAreaDictionary.at[i, 'Color']
        if (int(NewColors[0]) == 255 and int(NewColors[1]) == 255 and int(NewColors[2]) == 255):
            # White placeholder color: sample the dominant color from the
            # original image inside this shape instead.
            WhiteImgFinal = cv2.bitwise_and(blackImgShapes, imgg)
            flipped = flip(WhiteImgFinal)  # kept for parity with debug display
            imgslice = WhiteImgFinal[y:y + h, x:x + w]
            if (imgslice.shape[0] != 0 and imgslice.shape[1] != 0):
                flippedSlice = flip(imgslice)
                flippedSlice_pil = Image.fromarray(flippedSlice)
                # Sample colors patch by patch across the shape's bounding box.
                patch_size = 100
                patch_colors = []
                # FIX: loop variables px/py — the old code reused `i`/`j` here
                # and clobbered the outer dataframe row counter `i`.
                for px in range(0, flippedSlice_pil.width, patch_size):
                    for py in range(0, flippedSlice_pil.height, patch_size):
                        patch = flippedSlice_pil.crop((px, py, px + patch_size, py + patch_size))
                        patch_colors += patch.getcolors(patch_size * patch_size)
                # Dominant color across all patches, ignoring near-black and
                # near-white pixels (mask background / page background).
                max_count = 0
                dominant_color = None
                black_threshold = 30   # max RGB value to count as "black"
                white_threshold = 225  # min RGB value to count as "white"
                for count, color in patch_colors:
                    if not (all(c <= black_threshold for c in color) or all(c >= white_threshold for c in color)):
                        if count > max_count:
                            max_count = count
                            dominant_color = color
                if dominant_color is not None:
                    ColorCheck.append(dominant_color)
                    NewColors = None
                    # Reuse an already-seen color when it is within tolerance...
                    for color in ColorCheck:
                        if (abs(color[0] - dominant_color[0]) < 20 and
                                abs(color[1] - dominant_color[1]) < 20 and
                                abs(color[2] - dominant_color[2]) < 20):
                            NewColors = (color[2], color[1], color[0])  # RGB -> BGR
                            break
                    else:
                        # ...otherwise adopt the freshly sampled dominant color.
                        NewColors = (dominant_color[2], dominant_color[1], dominant_color[0])
                    if NewColors not in ColorCheck:
                        ColorCheck.append(NewColors)
                    # Write the sampled color back into the matching row.
                    if flagcolor == 1:
                        SimilarAreaDictionary.at[index, 'Color'] = NewColors
                    elif flagcolor == 2:
                        SimilarAreaDictionary.at[i, 'Color'] = NewColors
        # Outline the shape on the raster image (channels reversed for OpenCV BGR).
        cv2.drawContours(imgg, [np.array(cntPoints)], -1, ([NewColors[2], NewColors[1], NewColors[0]]), thickness=3)
        # Choose the longest of: first edge, closing edge, or first half of
        # the perimeter, and annotate that span on the PDF page.
        start_point1 = shapeePerimeter[0]
        end_point1 = shapeePerimeter[1]
        start_point2 = shapeePerimeter[0]
        end_point2 = shapeePerimeter[-2]
        distance1 = calculate_distance(start_point1, end_point1)
        distance2 = calculate_distance(start_point2, end_point2)
        half_index = len(shapeePerimeter) // 2
        half1 = shapeePerimeter[1:half_index]
        half2 = shapeePerimeter[half_index:-1]
        if len(half1) >= 2:
            half1_distance = sum(calculate_distance(half1[k], half1[k + 1]) for k in range(len(half1) - 1))
        else:
            half1_distance = 0
        if len(half2) >= 2:
            half2_distance = sum(calculate_distance(half2[k], half2[k + 1]) for k in range(len(half2) - 1))
        else:
            half2_distance = 0
        max_distance = max(distance1, distance2, half1_distance)
        if max_distance == distance1:
            annot12 = page2.add_line_annot(start_point1, end_point1)
        elif max_distance == distance2:
            annot12 = page2.add_line_annot(start_point2, end_point2)
        elif max_distance == half1_distance:
            annot12 = page2.add_polyline_annot(half1)
        annot12.set_border(width=0.8)
        annot12.set_colors(stroke=(int(NewColors[0]) / 255, int(NewColors[1]) / 255, int(NewColors[2]) / 255))
        annot12.set_info(subject='Perimeter Measurement', content=str(polygon[2]) + ' m')
        annot12.set_opacity(0.8)
        annot12.update()
        i += 1
    alpha = 0.8  # Transparency factor.
    page2.set_rotation(rotationOld)
    Correct_img = flip(imgg)
    image_new1 = cv2.addWeighted(Correct_img, alpha, img, 1 - alpha, 0)
    SimilarAreaDictionary = SimilarAreaDictionary.fillna(' ')
    # Drop rows that kept the white placeholder color, then renumber.
    white_color = (255, 255, 255)
    SimilarAreaDictionary = SimilarAreaDictionary[SimilarAreaDictionary['Color'] != white_color]
    SimilarAreaDictionary.reset_index(drop=True, inplace=True)
    # One legend row per color: sum the totals, keep the first of the rest.
    grouped_df = SimilarAreaDictionary.groupby('Color').agg({
        'Guess': 'first',
        'Occurences': 'sum',
        'Area': 'first',
        'Total Area': 'sum',
        'Perimeter': 'first',
        'Total Perimeter': 'sum',
        'Length': 'first',
        'Total Length': 'sum',
        'Texts': 'first',
        'Comments': 'first'
    }).reset_index()
    # Serialize, retag the annotations, then drop near-duplicates that sit
    # within one wall thickness of each other.
    modified_pdf_data = doc.tobytes()
    OutputPdfStage2 = adjustannotations(modified_pdf_data, text_with_positions)
    threshold = math.ceil(float(Thickness) * float(dxfratio))
    print(threshold)
    OutputPdfStage3 = remove_duplicate_annotations(OutputPdfStage2, threshold)
    latestimg, pix = pdftoimg(OutputPdfStage3)
    doc2 = fitz.open('pdf', OutputPdfStage3)
    gc, spreadsheet_service, spreadsheetId, spreadsheet_url, namepathArr = google_sheet_Legend.legendGoogleSheets(grouped_df, pdfname, pdfpath)
    # Collect every annotation's content/id/subject plus its 0-255 color.
    list1 = pd.DataFrame(columns=['content', 'id', 'subject', 'color'])
    for page in doc2:
        for annot in page.annots():
            annot_color = annot.colors
            if annot_color is not None:
                # annot.colors is a dict with 'stroke' and 'fill' entries.
                stroke_color = annot_color.get('stroke')
                fill_color = annot_color.get('fill')
                if fill_color:
                    v = 'fill'
                if stroke_color:
                    v = 'stroke'  # stroke wins when both are present
                # NOTE(review): `v` is unbound if neither color is set — verify
                # upstream always provides at least one.
                x, y, z = int(annot_color.get(v)[0] * 255), int(annot_color.get(v)[1] * 255), int(annot_color.get(v)[2] * 255)
                list1.loc[len(list1)] = [annot.info['content'], annot.info['id'], annot.info['subject'], [x, y, z]]
    print('LISTTT', list1)
    return doc2, latestimg, SimilarAreaDictionary, spreadsheetId, spreadsheet_url, namepathArr, list1, hatched_areas