# MeasurementTesting / pilecaps_adr.py
# Last update by Marthee: "Update pilecaps_adr.py" (commit 4c90a14, verified)
# -*- coding: utf-8 -*-
"""Copy of XOR- ROI from plan-PileCaps-ADR.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/16RHtRae7VU_fqHMAjOUL4ET5slEFo3pf
"""
import numpy as np
import cv2
from matplotlib import pyplot as plt
from math import sin, cos, radians
import pandas as pd
from PIL import Image , ImageChops
import numpy as np
from googleapiclient.discovery import build
from google.oauth2 import service_account
import pygsheets
import re
import fitz
import db
import ast
import Dropbox_TSA_API
import tsadropboxretrieval
from collections import Counter
from unidecode import unidecode
import google_sheet_Legend
def textLists(img, dataDoc):
    """Collect derotated positions of PDF words containing "GB", "RC" or "PC".

    Args:
        img: rendered plan image (numpy array); used only to compute the
            PDF-pixmap-to-image width ratio.
        dataDoc: raw PDF bytes, opened in-memory via fitz.

    Returns:
        (pc_coor, listall) where pc_coor is a list of (x, y) points and
        listall is a list of (x, y, word) tuples for the matching words.
    """
    allTexts = texts_from_pdf(dataDoc)
    doc = fitz.open('pdf', dataDoc)
    page = doc[0]
    if page.rotation != 0:
        page.set_rotation(0)  # normalize page rotation so coordinates are upright
    pix = page.get_pixmap()  # render page to an image
    ratio = pix.width / img.shape[1]  # pixmap width per image pixel
    listall = []
    pc_coor = []
    for tpl in allTexts:
        # tpl is a fitz "words" tuple: (x0, y0, x1, y1, word, ...)
        if "GB" in tpl[4] or "RC" in tpl[4] or "PC" in tpl[4]:
            # use the word's lower-right corner (x1, y1), scaled to image space
            p1 = fitz.Point((tpl[2] / ratio), (tpl[3] / ratio))
            if page.rotation == 0:
                # after set_rotation(0) the derotation matrix is identity; kept as written
                p1 = p1 * page.derotation_matrix
            pc_coor.append((p1[0], p1[1]))
            listall.append((p1[0], p1[1], tpl[4]))
    return pc_coor, listall
def textListsAlltexts(dataDoc, span_df):
    """Derotate every span point in span_df into page coordinates.

    Args:
        dataDoc: raw PDF bytes, opened in-memory via fitz (only the first
            page's rotation state is consulted).
        span_df: DataFrame from textDictionaryBlocks with at least the
            columns 'xmin', 'ymin' and 'text'.

    Returns:
        (pc_coor, listall) where pc_coor is a list of (x, y) points and
        listall is a list of (x, y, text) tuples, one per span row.

    Fix: the original also called texts_from_pdf(dataDoc) and discarded the
    result — a pure waste of a full text extraction pass; removed.
    """
    listall = []
    pc_coor = []
    doc = fitz.open('pdf', dataDoc)
    page = doc[0]
    for _, row in span_df.iterrows():
        p1 = fitz.Point(row['xmin'], row['ymin'])
        if page.rotation == 0:
            # derotation matrix is identity for unrotated pages; kept for parity
            p1 = p1 * page.derotation_matrix
        pc_coor.append((p1[0], p1[1]))
        listall.append((p1[0], p1[1], row['text']))
    return pc_coor, listall
# pc_coor,listall=textLists(img)
#Prepare preprocessing
def detectCircles(imgOriginal):
    """Overdraw detected medium-size circles in white so they vanish into the background.

    Args:
        imgOriginal: BGR plan image.

    Returns:
        A copy of the image with each detected circle outlined in thick white.
    """
    im = imgOriginal.copy()
    imgGry1 = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    kernel = np.ones((3, 3), np.uint8)
    # erode+dilate (morphological opening) to suppress thin noise before Hough
    er1 = cv2.erode(imgGry1, kernel, iterations=2)
    er1 = cv2.dilate(er1, kernel, iterations=2)
    # Apply Hough transform on the blurred image.
    # 50 = min distance between circle centers; param1 = upper threshold for
    # the internal Canny edge detector; param2 = accumulator threshold.
    detected_circles = cv2.HoughCircles(er1, cv2.HOUGH_GRADIENT, 1, 50, param1=700,
                                        param2=21, minRadius=20, maxRadius=50)  # 18 param2
    # Draw circles that are detected.
    if detected_circles is not None:
        # Convert the circle parameters x, y and r to integers.
        detected_circles = np.uint16(np.around(detected_circles))
        detected_circles = np.round(detected_circles[0, :]).astype("int")
        for (x, y, r) in detected_circles:
            # thick white outline erases the circle from later line/contour passes
            cv2.circle(im, (x, y), r, (255, 255, 255), 6)
    return im
def detectSmallCircles(img):
    """Fill tiny circles (TOC points) with solid white so they stop interfering with shapes.

    Args:
        img: BGR plan image.

    Returns:
        A copy of the image with each detected small circle painted solid white
        (radius padded by 3 px to fully cover it).

    Fix: removed the unused `kernel`/`er1` locals — the eroded image was
    computed but never used; HoughCircles already ran on the plain grayscale.
    """
    im = img.copy()
    imgGry1 = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    # 60 = min distance between circle centers; param1 = upper threshold for
    # the internal Canny edge detector; param2 = accumulator threshold.
    detected_circles = cv2.HoughCircles(imgGry1, cv2.HOUGH_GRADIENT, 1, 60, param1=550,
                                        param2=13, minRadius=1, maxRadius=15)  # 18 param2
    if detected_circles is not None:
        # Convert the circle parameters x, y and r to integers.
        detected_circles = np.uint16(np.around(detected_circles))
        detected_circles = np.round(detected_circles[0, :]).astype("int")
        for (x, y, r) in detected_circles:
            # thickness -1 fills the disc; r+3 pads so the whole dot is covered
            cv2.circle(im, (x, y), r + 3, (255, 255, 255), -1)
    return im
def DashedPreprocessing(imgOriginal, imgnoSmall):
    """Isolate dashed-line pixels and binarize the plan for dashed-line removal.

    Args:
        imgOriginal: original BGR plan image (only its dimensions are used).
        imgnoSmall: BGR image with circles already removed.

    Returns:
        (img4, imgBW, longer, shorter) where img4 is a blurred binary image
        containing mostly dashed-line fragments, imgBW is the thresholded BGR
        plan (gray contours removed), and longer/shorter are the larger and
        smaller of the image's height and width.

    Fixes: removed the unused locals `im_copy`, `im_copy1` and `kernel3`, and
    stopped shadowing the builtins `max`/`min` with locals (returned values
    are unchanged).
    """
    h, w = imgOriginal.shape[0:2]
    # remove the gray contours from the plan
    imgBW = cv2.threshold(imgnoSmall, 180, 255, cv2.THRESH_BINARY)[1]
    kernel1 = np.ones((3, 5), np.uint8)
    kernel2 = np.ones((9, 9), np.uint8)
    imgGray = cv2.cvtColor(imgBW, cv2.COLOR_BGR2GRAY)
    imgBW1 = cv2.threshold(imgGray, 200, 255, cv2.THRESH_BINARY_INV)[1]
    # erode kills short dash fragments; heavy dilation regrows solid strokes,
    # so AND-ing against the original keeps solid lines only
    img1 = cv2.erode(imgBW1, kernel1, iterations=1)
    img2 = cv2.dilate(img1, kernel2, iterations=3)
    img3 = cv2.bitwise_and(imgBW1, img2)
    img3 = cv2.bitwise_not(img3)
    # what survives masking by the inverse is the dashed-line residue
    img4 = cv2.bitwise_and(imgBW1, imgBW1, mask=img3)
    img4 = cv2.blur(img4, (7, 7))
    longer, shorter = (h, w) if h > w else (w, h)
    return img4, imgBW, longer, shorter
def removeDashedLines(img4, imgBW, max, min):
    """Paint long dashed lines green on imgBW and return the green channel.

    Args:
        img4: binary image of dashed-line residue (from DashedPreprocessing).
        imgBW: BGR image to draw over (modified in place).
        max, min: larger/smaller image dimension; their difference scales the
            minimum accepted line length. (Parameter names shadow builtins but
            are kept for interface compatibility.)

    Returns:
        The green channel of a copy of imgBW — drawn lines appear at 255 there.

    Fix: cv2.HoughLinesP returns None when no lines are found, which made the
    original loop raise TypeError; now guarded.
    """
    imgLines = cv2.HoughLinesP(img4, 1, np.pi / 310, 30,
                               minLineLength=(max - min) // 1.8,
                               maxLineGap=120)  # was w-h, gap=150 0.99
    if imgLines is not None:
        for line in imgLines:
            for x1, y1, x2, y2 in line:
                cv2.line(imgBW, (x1, y1), (x2, y2), (0, 255, 0), 3)
    im_copy = imgBW.copy()
    green = im_copy[:, :, 1]
    return green
def removeSmallDashes(imgOriginal, green, num=0):
    """Detect short dash segments in `green` and overdraw them on both images.

    Args:
        imgOriginal: BGR image; dashes are drawn on a copy of it.
        green: grayscale image (green channel from removeDashedLines).
        num: unused apart from commented-out experimental branch; kept for
            interface compatibility.

    Returns:
        (imgCopy, smalldashesOut) — green channels of the two annotated images.

    NOTE(review): if HoughLinesP finds no lines it returns None and the loop
    below raises TypeError — verify inputs always contain dashes.
    """
    smalldashes = green.copy()
    smalldashes = cv2.bitwise_not(smalldashes)  # dashes become white foreground
    kernel3 = np.ones((3, 3), np.uint8)
    # close gaps between dash fragments so Hough sees continuous segments
    img1 = cv2.dilate(smalldashes, kernel3, iterations=2)
    img2 = cv2.erode(img1, kernel3, iterations=2)
    smalldashes = cv2.medianBlur(img2, 7)
    smalldashes = cv2.medianBlur(smalldashes, 9)
    smalldashesOut = green.copy()
    smalldashesOut = cv2.cvtColor(smalldashesOut, cv2.COLOR_GRAY2BGR)
    imgLines = cv2.HoughLinesP(smalldashes, 1, np.pi / 180, 27,
                               minLineLength=70, maxLineGap=70)  # was w-h, gap=150
    imgCopy = imgOriginal.copy()
    for i in range(len(imgLines)):
        for x1, y1, x2, y2 in imgLines[i]:
            cv2.line(smalldashesOut, (x1, y1), (x2, y2), (0, 255, 0), 3)
            cv2.line(imgCopy, (x1, y1), (x2, y2), (0, 255, 0), 2)
    # keep only the green channel where lines were drawn
    imgCopy = imgCopy[:, :, 1]
    smalldashesOut = smalldashesOut[:, :, 1]
    return imgCopy, smalldashesOut
def euclidian_distance(point1, point2):
    """Return the Euclidean distance between two equal-length coordinate tuples."""
    squared_diffs = [(a - b) ** 2 for a, b in zip(point1, point2)]
    return sum(squared_diffs) ** 0.5
def removeDashedLinesSmall(img4, imgBW, max, min):
    """Overdraw mid-length dashed lines in green and return the green channel.

    Like removeDashedLines but with a tighter length window: only lines whose
    endpoint distance lies in [(max-min)//2.3, (max-min)//1.9) are drawn.

    Args:
        img4: binary dashed-line residue image.
        imgBW: grayscale image; converted to BGR and drawn on.
        max, min: larger/smaller image dimension (names shadow builtins;
            kept as written).

    Returns:
        Green channel of the annotated image.

    NOTE(review): HoughLinesP may return None when nothing matches, which
    would make the loop raise TypeError — verify upstream guarantees.
    """
    imgBW = cv2.cvtColor(imgBW, cv2.COLOR_GRAY2BGR)
    imgLines = cv2.HoughLinesP(img4, 1, np.pi / 100, 20,
                               minLineLength=(max - min) // 3.5, maxLineGap=70)  # 2.1
    for i in range(len(imgLines)):
        for x1, y1, x2, y2 in imgLines[i]:
            dist = euclidian_distance((x1, y1), (x2, y2))
            # only lines in the target length band are treated as dashes
            if dist >= (max - min) // 2.3 and dist < (max - min) // 1.9:  # 1.4
                cv2.line(imgBW, (x1, y1), (x2, y2), (0, 255, 0), 3)
    im_copy = imgBW.copy()
    green = im_copy[:, :, 1]
    return green
def ConnectBeamLines(smalldashesOut, maxLineGap=0):
    """Bridge gaps in beam lines by drawing black Hough lines over them.

    Args:
        smalldashesOut: grayscale image (lines are dark on light background).
        maxLineGap: 0 selects the default parameter set (gap 25, rho step 0.3);
            any other value selects the looser set (gap 40, rho step 0.2).

    Returns:
        BGR version of the input with connecting black lines drawn.

    NOTE(review): HoughLinesP may return None if no lines are found, which
    would crash the loop — verify inputs.
    """
    if maxLineGap == 0:
        maxLineGap = 25
        thresh = 20
        point = 0.3
    else:
        thresh = 20
        point = 0.2
        maxLineGap = 40
    # invert so line pixels become white foreground for Hough
    green1 = cv2.bitwise_not(smalldashesOut)
    smalldashesOut = cv2.cvtColor(smalldashesOut, cv2.COLOR_GRAY2BGR)
    imgLines = cv2.HoughLinesP(green1, point, np.pi / 180, thresh,
                               minLineLength=25, maxLineGap=maxLineGap)  # try 180
    for i in range(len(imgLines)):
        for x1, y1, x2, y2 in imgLines[i]:
            # draw in black to merge with the existing dark line work
            cv2.line(smalldashesOut, (x1, y1), (x2, y2), (0, 0, 0), 2)
    return smalldashesOut
def getImgDark(imgg):
    """Build a dark (inverted) BGR version of the plan combining two thresholds.

    Combines an Otsu threshold of a slightly blurred copy with the old-method
    preprocessing output, then inverts, so line work ends up dark on light.

    Args:
        imgg: BGR plan image.

    Returns:
        BGR image with the combined threshold inverted.
    """
    imgold = preprocessold(imgg, 0)
    # k=1 in ChangeBrightness leaves brightness unchanged; only the blur matters here
    blurG = cv2.GaussianBlur(ChangeBrightness(imgg, 1), (3, 3), 0)
    imgGry = cv2.cvtColor(blurG, cv2.COLOR_BGR2GRAY)
    ret3, thresh = cv2.threshold(imgGry, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    imgold = cv2.medianBlur(imgold, 3)
    # union of the two binarizations captures both faint and bold line work
    thresh = cv2.bitwise_or(thresh, imgold)
    imgDark = cv2.bitwise_not(thresh)
    imgDark = cv2.cvtColor(imgDark, cv2.COLOR_GRAY2BGR)
    return imgDark
#create img with solid lines
def allpreSteps(imgOriginal, num=0):
    """Run the full preprocessing pipeline: remove circles, dashed lines and small dashes.

    Args:
        imgOriginal: BGR plan image.
        num: 0 (default) returns a single cleaned grayscale image with beam
            lines re-connected; 1 additionally builds a dark-image variant and
            returns a pair used downstream for leader-line diffing.

    Returns:
        num != 1: cleaned grayscale image ("green2").
        num == 1: (green2, inverted old-method image with dashes removed).
    """
    noCircles = detectCircles(imgOriginal)
    imgold = preprocessold(imgOriginal, 0)
    if num != 1:
        imgnoSmall = detectSmallCircles(noCircles)
        img4, imgBW, max, min = DashedPreprocessing(imgOriginal, imgnoSmall)
        green = removeDashedLines(img4, imgBW, max, min)
        imgCopy, smalldashesOut = removeSmallDashes(imgOriginal, green)
        noSmallDashes = removeDashedLinesSmall(img4, smalldashesOut, max, min)
        green2 = ConnectBeamLines(noSmallDashes, 0)
        return green2
    else:
        # imgDark with no dashed lines or small dashes
        imgDark1 = getImgDark(noCircles)
        img4, imgBW, max, min = DashedPreprocessing(imgOriginal, noCircles)
        # do preprocessing on normal img, and draw on dark img
        imgDarkNoDashedLines = removeDashedLines(img4, imgDark1, max, min)
        imgBW0 = cv2.cvtColor(imgBW, cv2.COLOR_BGR2GRAY)
        imgDarkNoDashedLines = cv2.cvtColor(imgDarkNoDashedLines, cv2.COLOR_GRAY2BGR)
        imgDarkNoDashedLines, smalldashesOut0 = removeSmallDashes(imgDarkNoDashedLines, imgBW0)
        # imgOld no small dashes - for oring purposes
        imgoldG = cv2.cvtColor(imgold, cv2.COLOR_GRAY2BGR)
        imgoldnoDashes, _ = removeSmallDashes(cv2.bitwise_not(imgoldG), imgBW0, 1)
        # to connect leader lines after removing dashed lines and circles
        Nodashedlines = removeDashedLines(img4, imgBW, max, min)
        imgCopy, smalldashesOut = removeSmallDashes(imgOriginal, Nodashedlines)
        noSmallDashes = removeDashedLinesSmall(img4, smalldashesOut, max, min)
        # union of both cleaned variants, re-inverted to dark-on-light
        green2 = cv2.bitwise_or(cv2.bitwise_not(imgDarkNoDashedLines), cv2.bitwise_not(noSmallDashes))
        green2 = cv2.bitwise_not(green2)
        green2 = cv2.medianBlur(green2, 5)
        imgoldnoDashes = cv2.medianBlur(imgoldnoDashes, 5)
        return green2, cv2.bitwise_not(imgoldnoDashes)
def texts_from_pdf(input_pdf):
    """Extract the fitz "words" tuples from a PDF opened from bytes.

    Args:
        input_pdf: raw PDF bytes.

    Returns:
        The list of word tuples (x0, y0, x1, y1, word, ...) for the LAST page.

    NOTE(review): the loop reassigns text_instances every iteration, so only
    the final page's words are returned despite visiting all pages — looks
    unintended for multi-page PDFs; callers here appear to use page 0 only.
    apply_redactions() is called without adding redaction annotations, so it
    is presumably a leftover no-op — confirm before removing.
    """
    pdf_document = fitz.open('pdf', input_pdf)
    for page_num in range(pdf_document.page_count):
        page = pdf_document[page_num]
        text_instances = page.get_text("words")
        page.apply_redactions()
    return text_instances
def textDictionaryBlocks(img, dataDoc):
    """Collect every text span from the PDF into a DataFrame with scaled coordinates.

    Args:
        img: rendered plan image; only its width is used for the scale ratio.
        dataDoc: raw PDF bytes.

    Returns:
        DataFrame with columns xmin/ymin/xmax/ymax (bbox divided by the
        pixmap-to-image ratio), text (ASCII-transliterated), and
        FitzPointP0/FitzPointP1 (derotated (xmin, ymin) point).
    """
    doc = fitz.open('pdf', dataDoc)
    page = doc[0]
    if page.rotation != 0:
        page.set_rotation(0)  # normalize rotation before measuring
    pix = page.get_pixmap()  # render page to an image
    ratio = pix.width / img.shape[1]
    block_dict = {}
    page_num = 1
    for page in doc:  # Iterate all pages in the document
        file_dict = page.get_text('dict')  # Get the page dictionary
        block = file_dict['blocks']  # Get the block information
        block_dict[page_num] = block  # Store in block dictionary
        page_num += 1  # Increase the page value by 1
    # NOTE: this empty frame is never used — rows are gathered in `rows` and a
    # fresh DataFrame is built at the end.
    spans = pd.DataFrame(columns=['xmin', 'ymin', 'xmax', 'ymax', 'text', 'FitzPointP0', 'FitzPointP1'])
    rows = []
    for page_num, blocks in block_dict.items():
        for block in blocks:
            if block['type'] == 0:  # type 0 = text block (1 would be image)
                for line in block['lines']:
                    for span in line['spans']:
                        xmin, ymin, xmax, ymax = list(span['bbox'])
                        text = unidecode(span['text'])  # transliterate to plain ASCII
                        XminRatio = xmin / ratio
                        YminRatio = ymin / ratio
                        p1 = fitz.Point((XminRatio), (YminRatio))
                        if page.rotation == 0:
                            # identity after set_rotation(0); kept as written
                            p1 = p1 * page.derotation_matrix
                        if text.replace(" ", "") != "":  # skip whitespace-only spans
                            rows.append((XminRatio, YminRatio, xmax / ratio, ymax / ratio, text, p1[0], p1[1]))
    span_df = pd.DataFrame(rows, columns=['xmin', 'ymin', 'xmax', 'ymax', 'text', 'FitzPointP0', 'FitzPointP1'])
    return span_df
def nearestText(a, b, span_df):
    """Find all text spans within 250 px of point (a, b).

    Args:
        a, b: query point coordinates.
        span_df: DataFrame with FitzPointP0/FitzPointP1/text columns.

    Returns:
        (points, texts): the matching span points and their text strings;
        points is the string 'none' when nothing is within range.
    """
    allNearbyText = []
    shapeTexts = []
    for _, row in span_df.iterrows():
        span_point = (row['FitzPointP0'], row['FitzPointP1'])
        if euclidian_distance((a, b), span_point) < 250:
            allNearbyText.append(span_point)
            shapeTexts.append(str(row['text']))
    if not allNearbyText:
        allNearbyText = 'none'
    return allNearbyText, shapeTexts
def nearestTextPCCOOR(a, b, pc_coor):
    """Return the pile-cap coordinate closest to (a, b), or 'none' if too far.

    Args:
        a, b: query point coordinates.
        pc_coor: non-empty list of (x, y) candidate points.

    Returns:
        The nearest (x, y) tuple when within 400 px, otherwise the string 'none'.
    """
    target = (a, b)
    nearest_point = min(pc_coor, key=lambda pt: euclidian_distance(target, pt))
    if euclidian_distance(nearest_point, target) < 400:  # distance threshold
        return nearest_point
    return 'none'
def ChangeBrightness(img, k):
    """Apply a gamma-style brightness change: k > 1 darkens, k < 1 lightens.

    Args:
        img: uint8 image array.
        k: exponent applied to the normalized intensities.

    Returns:
        uint8 array of the same shape.
    """
    normalized = img / 255
    adjusted = 255 * normalized ** k
    return adjusted.astype('uint8')
def getOutlinesDotIN(img):
    """Draw the outlines of large shapes (with tiny circles removed) on a black canvas.

    Args:
        img: BGR plan image.

    Returns:
        uint8 single-channel image, black except for 2-px white outlines of
        external contours with area > 2000.
    """
    cc = detectSmallCircles(img)  # erase TOC dots first so they don't split contours
    cc = cv2.cvtColor(cc, cv2.COLOR_BGR2GRAY)
    kernel = np.ones((3, 3), np.uint8)
    er1 = cv2.dilate(cc, kernel, iterations=2)  # thinning
    # k=10 darkens aggressively so Otsu separates line work cleanly
    blurG = ChangeBrightness(er1, 10)
    ret3, thresh = cv2.threshold(blurG, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    threshCnt, threshHier2 = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    outlinesDotIN = np.zeros(img.shape[:2], dtype="uint8")
    for cnt in threshCnt:
        area1 = cv2.contourArea(cnt)
        if (area1 > 2000):  # ignore small noise contours
            cv2.drawContours(outlinesDotIN, [cnt], 0, (255, 255, 255), 2)
    return outlinesDotIN
def connectsmallDot(blackwithNoDot):
    """Bridge gaps left by removed dots by drawing white Hough lines in place.

    Args:
        blackwithNoDot: binary single-channel image; modified in place.

    Returns:
        The same array with connecting white lines drawn.

    Fix: cv2.HoughLinesP returns None when no line is found, which made the
    original loop raise TypeError; now guarded so the image is returned
    unchanged in that case.
    """
    imgLines = cv2.HoughLinesP(blackwithNoDot, 0.05, np.pi / 180, 8,
                               minLineLength=30, maxLineGap=30)  # was w-h, gap=150 #50
    if imgLines is not None:
        for line in imgLines:
            for x1, y1, x2, y2 in line:
                cv2.line(blackwithNoDot, (x1, y1), (x2, y2), (255, 255, 255), 2)
    return blackwithNoDot
def getDiff(img, green22, imgoldnodashes):
    """Compute the morphological difference isolating leader lines.

    Args:
        img: original plan image (unused in the current body; kept for interface).
        green22: cleaned image from allpreSteps(img, 1).
        imgoldnodashes: inverted old-method image from allpreSteps(img, 1).

    Returns:
        uint8 difference image where leader-line pixels survive.
    """
    imgoldnoDashes1 = cv2.medianBlur(imgoldnodashes, 7)
    kernel = np.ones((3, 3), np.uint8)
    # shrink then regrow each input so only structural differences remain
    green3 = cv2.erode(green22, kernel, iterations=6)
    green3 = cv2.dilate(green3, kernel, iterations=3)
    imgoldnoDashes1 = cv2.erode(imgoldnoDashes1, kernel, iterations=2)
    img1Eroded = cv2.dilate(imgoldnoDashes1, kernel, iterations=7)  # 5
    # per-pixel absolute difference via PIL
    diff = ImageChops.difference(Image.fromarray(img1Eroded), Image.fromarray(cv2.bitwise_not(green3)))
    diff = np.array(diff)
    # clean speckle, then thicken what remains so it merges with contours later
    diff = cv2.erode(diff, kernel, iterations=4)
    diff = cv2.dilate(diff, kernel, iterations=11)
    return diff
#OLD method (White shapes)
def preprocessold(img, number):
    """Old-method preprocessing: produce a black image with white shape pixels.

    Args:
        img: BGR plan image.
        number: 0 selects Otsu thresholding; anything else uses a fixed
            inverse threshold at 220.

    Returns:
        Median-blurred binary (inverse) image.
    """
    # k=8 darkens heavily so faint lines survive thresholding
    blurG = cv2.GaussianBlur(ChangeBrightness(img, 8), (3, 3), 0)
    imgGry = cv2.cvtColor(blurG, cv2.COLOR_BGR2GRAY)
    kernel = np.ones((3, 3), np.uint8)
    er1 = cv2.dilate(imgGry, kernel, iterations=2)  # thinning
    if number == 0:
        ret3, thresh = cv2.threshold(er1, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    else:
        ret3, thresh = cv2.threshold(er1, 220, 255, cv2.THRESH_BINARY_INV)  # 140 - 141
    thresh = cv2.medianBlur(thresh, 5)
    return thresh
#preprocessing for shapes with arrows (attach them to shape )
def getTextfromImg(grayimgtextdilated, img, dataDoc):
    """Associate each large contour in a binary image with nearby PDF text.

    Args:
        grayimgtextdilated: binary image whose external contours are candidate shapes.
        img: plan image (for coordinate scaling of the PDF text).
        dataDoc: raw PDF bytes.

    Returns:
        (ArrthreshCnt, allshapesExtremes_Text) — ArrthreshCnt is each kept
        contour as a plain list of [x, y] points; allshapesExtremes_Text is a
        list of [contour, [unique nearby texts]] pairs.
    """
    span_df = textDictionaryBlocks(img, dataDoc)
    threshCnt2, threshHier2 = cv2.findContours(grayimgtextdilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    allshapesExtremes_Text = []
    listallTexts = textListsAlltexts(dataDoc, span_df)[1]
    for cnt in threshCnt2:
        texts = []
        area1 = cv2.contourArea(cnt)
        if (area1 > 2000):  # ignore small noise contours
            x, y, width, height = cv2.boundingRect(cnt)
            perimeter = cv2.arcLength(cnt, True)
            # polygon approximation yields the contour's corner ("extreme") points
            approx = cv2.approxPolyDP(cnt, 0.01 * perimeter, True)
            for extremePoint in approx:
                allnearbyPoints, alltxts = nearestText(int(extremePoint[0][0]), int(extremePoint[0][1]), span_df)
                if (allnearbyPoints != 'none'):
                    for nearbypoint in allnearbyPoints:
                        # match the nearby point back to its text tuple by exact coords
                        for textTuple in listallTexts:
                            if nearbypoint[0] == textTuple[0] and nearbypoint[1] == textTuple[1]:
                                if textTuple[2] not in texts:  # deduplicate
                                    texts.append(textTuple[2])
            allshapesExtremes_Text.append([cnt, texts])
    # flatten each kept contour into plain [x, y] lists
    ArrthreshCnt = []
    for th in range(len(allshapesExtremes_Text)):
        eachcnt = []
        for point in allshapesExtremes_Text[th][0]:
            eachcnt.append(list(point[0]))
        ArrthreshCnt.append(eachcnt)
    return ArrthreshCnt, allshapesExtremes_Text
def mergingPreprocessing(img, number, green2, layeredflag, BlackmaskDetected1=0):
    """Threshold the image via preprocessold and extract its external contours.

    Args:
        img: BGR plan image.
        number: threshold selector forwarded to preprocessold.
        green2, layeredflag, BlackmaskDetected1: accepted for interface
            compatibility; not used by the current implementation.

    Returns:
        (binary image, external contours, binary image) — the first and
        third elements are the same array.
    """
    thresholded = preprocessold(img, number)
    contours, _hierarchy = cv2.findContours(thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    return thresholded, contours, thresholded
#anding of old method output with solid lines img
def preprocess(green22, imgoldnodashes, dataDoc, imgtransparent1, img, number, green2, layeredflag, BlackmaskDetected1=0):
    """Merge old-method thresholding with the leader-line diff, then classify shapes.

    Args:
        green22, imgoldnodashes: outputs of allpreSteps(img, 1).
        dataDoc: raw PDF bytes (for text association).
        imgtransparent1: overlay image that open shapes are drawn onto.
        img: plan image being processed (may be a color-masked region).
        number: threshold selector forwarded to preprocessold.
        green2, layeredflag, BlackmaskDetected1: forwarded to mergingPreprocessing.

    Returns:
        (finalcntsP, finalcntsA, perimeters, shapetxts, imgtransparent1,
        ArrthreshCnt, texts, iddk) — P/A contour tuples (dilated/eroded masks),
        open-shape perimeter records, per-shape label texts, the annotated
        overlay, contour point lists, contour/text pairs, and the merged
        binary image.
    """
    # first preprocessing (old method - black img with white shapes)
    kernel0 = np.ones((2, 2), np.uint8)
    img1, threshcontours, thresh2 = mergingPreprocessing(img, number, green2, layeredflag, BlackmaskDetected1)
    diff = getDiff(img, green22, imgoldnodashes)  # diff (img with tof leaders)
    iddk = cv2.bitwise_or(thresh2, diff)  # add it to preprocessing img of anding
    iddk = cv2.medianBlur(iddk, 5)
    iddk = cv2.dilate(iddk, kernel0, iterations=2)
    # get text relations between each contour and its text
    ArrthreshCnt, texts = getTextfromImg(iddk, img, dataDoc)
    outlinesDotIN = getOutlinesDotIN(img)
    pc_coor, listall = textLists(img, dataDoc)
    finalcntsP = []
    finalcntsA = []
    perimeters = []
    openClosedFlag = 0
    threshCnt, threshHier2 = cv2.findContours(img1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    outlines = np.zeros(img.shape[:2], dtype="uint8")
    # outline image of every sufficiently large shape, used to probe GB shapes later
    for cnt in threshCnt:
        area1 = cv2.contourArea(cnt)
        if (area1 > 2000):
            cv2.drawContours(outlines, [cnt], 0, (255, 255, 255), 2)
    perimeter = 0
    shapetxts = []
    for cnt in threshcontours:
        BlackmaskP = np.zeros(img.shape[:2], dtype="uint8")
        BlackmaskA = np.zeros(img.shape[:2], dtype="uint8")
        area1 = cv2.contourArea(cnt)
        if (area1 > 2000):
            x, y, width, height = cv2.boundingRect(cnt)
            # Get contours - Areas, Perimeters
            kernel = np.ones((2, 2), np.uint8)
            # perimeter variant: thicker stroke, dilated outward
            cv2.drawContours(BlackmaskP, [cnt], 0, (255, 255, 255), 4)
            BlackmaskP = cv2.dilate(BlackmaskP, kernel, iterations=2)
            contoursP, hier1 = cv2.findContours(BlackmaskP, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            finalcntsP.append(contoursP)
            # area variant: thinner stroke, eroded inward
            cv2.drawContours(BlackmaskA, [cnt], 0, (255, 255, 255), 3)
            BlackmaskA = cv2.erode(BlackmaskA, kernel, iterations=1)
            contoursA, hier1 = cv2.findContours(BlackmaskA, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            finalcntsA.append(contoursA)
            if pc_coor:
                # nearest labeled pile-cap point to the shape's bounding-box center
                textPoint = nearestTextPCCOOR(int(x + (width / 2)), int(y + (height / 2)), pc_coor)
                txt = ''
                if (textPoint != 'none'):
                    for textTuple in listall:
                        if textPoint[0] == textTuple[0] and textPoint[1] == textTuple[1]:
                            txt = textTuple[2]
                            if "GB" in txt or "RC" in txt or "PC" in txt:
                                shapetxts.append(txt)
                elif textPoint == 'none':
                    shapetxts.append('none')
                if 'GB' in shapetxts:
                    # ground-beam shapes: trace the outline inside this mask to
                    # decide whether the beam polyline is open or closed
                    xcBlk, ycBlk, width, height = cv2.boundingRect(contoursP[0])
                    xx = cv2.bitwise_and(outlines, outlines, mask=BlackmaskP)
                    xx = cv2.threshold(xx, 250, 255, cv2.THRESH_BINARY)[1]
                    cntsx, hierx = cv2.findContours(xx, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
                    if len(cntsx) > 0:
                        hierx = hierx[0]
                        # intersect with the dot-in outlines, then reconnect small gaps
                        xx = cv2.bitwise_and(outlinesDotIN, outlinesDotIN, mask=xx)
                        cntsx, hierx = cv2.findContours(xx, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
                        if len(cntsx) > 0:
                            xx = connectsmallDot(xx)
                            cntsx, hierx = cv2.findContours(xx, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
                            if len(cntsx) > 0:
                                hierx = hierx[0]
                                for comp in zip(cntsx, hierx):
                                    c = comp[0]
                                    h = comp[1]
                                    xc, yc, width, height = cv2.boundingRect(c)
                                    perimeter = cv2.arcLength(c, True)
                                    shape = []
                                    approx = cv2.approxPolyDP(c, 0.003 * perimeter, True)
                                    # no child and no parent in the hierarchy => open polyline
                                    if h[2] < 0 and h[3] < 0:
                                        perimeter1 = cv2.arcLength(approx, True)
                                        # open polyline: arcLength counts both directions, halve it
                                        perimeter = perimeter1 / 2
                                        openClosedFlag = 'open'
                                        imgtransparent1 = cv2.polylines(imgtransparent1, [approx], False, (0, 255, 0), thickness=4, lineType=8)
                                        perimeters.append([xc, yc, xcBlk, ycBlk, perimeter, openClosedFlag, approx])
                                    else:
                                        if h[2] > 0:  # has a child contour => closed shape
                                            openClosedFlag = 'closed'
    return tuple(finalcntsP), tuple(finalcntsA), perimeters, shapetxts, imgtransparent1, ArrthreshCnt, texts, iddk
"""# ROI (levels)
## Detect regions with specific color and mask them
"""
def hexRGB(color):
    """Convert a '#RRGGBB' hex string into a numpy array [R, G, B]."""
    stripped = color.lstrip('#')
    channels = tuple(int(stripped[pos:pos + 2], 16) for pos in (0, 2, 4))
    return np.array(channels)
def DetectColor(img, color=0):
    """Mask regions of the image matching a given color (hue tolerance +/-5).

    Args:
        img: BGR image.
        color: RGB triple (list/array); indexed as [R, G, B].

    Returns:
        (mask, detectedColors, color): binary mask, BGR image of the matched
        regions (morphologically closed and blurred), and the input color.

    Side effect: writes 'det.png' to the working directory (debug output).
    """
    imgCopy = img.copy()
    imgCopy = cv2.cvtColor(imgCopy, cv2.COLOR_BGR2HSV)
    tol = 5  # tolerance
    # convert the target RGB triple to HSV to build the hue window
    h, s, v = cv2.cvtColor(np.uint8([[[color[2], color[1], color[0]]]]), cv2.COLOR_BGR2HSV)[0][0]
    # NOTE(review): h - tol / h + tol can wrap around uint8 near the red hue
    # boundary (h close to 0 or 179) — verify colors used avoid that range.
    lower = np.array([h - tol, 100, 100], dtype='uint8')
    upper = np.array([h + tol, 255, 255], dtype='uint8')
    mask = cv2.inRange(imgCopy, lower, upper)
    detectedColors = cv2.bitwise_and(imgCopy, imgCopy, mask=mask)  # Bitwise-AND mask and original image
    kernel = np.ones((3, 3), np.uint8)
    # dilate more than erode: closes small holes and slightly grows the regions
    mask = cv2.dilate(mask, kernel, iterations=5)
    mask = cv2.erode(mask, kernel, iterations=4)
    detectedColors = cv2.dilate(detectedColors, kernel, iterations=5)
    detectedColors = cv2.erode(detectedColors, kernel, iterations=4)
    detectedColors = cv2.cvtColor(detectedColors, cv2.COLOR_HSV2BGR)
    detectedColors = cv2.medianBlur(detectedColors, 7)
    cv2.imwrite('det.png', detectedColors)
    return mask, detectedColors, color
def getinnerColor(BlackmaskDetected, img, detectedColors, finalColorArray, ratioarea, ratioperim, eachcolor):
    """Whiten inner color levels found inside an already-detected colored region.

    Args:
        BlackmaskDetected: running mask of detected regions; inner levels are
            blacked out of it (modified in place and returned).
        img: original BGR image.
        detectedColors: BGR image of the currently detected color region.
        finalColorArray: all level colors to search for inside the region.
        ratioarea, ratioperim, eachcolor: unused in the current body; kept
            for interface compatibility.

    Returns:
        (xored, invertedmask, BlackmaskDetected): region image with inner
        levels whitened, the image masked to the inner-level areas, and the
        updated detection mask.
    """
    countBlackMasks = 0
    xored = detectedColors
    invertedmask = detectedColors
    imgc = img.copy()
    imgNewCopy = img.copy()
    Blackmask = np.zeros(img.shape[:2], dtype="uint8")
    for eachcolor in finalColorArray:
        masked = DetectColor(detectedColors, eachcolor)[0]
        pil_image = Image.fromarray(masked)
        extrema = pil_image.convert("L").getextrema()
        if extrema != (0, 0):  # if image is not black --> has a colored mask within
            cc = detectedColors.copy()
            ColoredContour, Coloredhierarchy = cv2.findContours(masked, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            for cnt in ColoredContour:
                area1 = cv2.contourArea(cnt)
                if (area1 > 1000):
                    x, y, width, height = cv2.boundingRect(cnt)
                    # outline + fill in white on the working copies; black out
                    # the same region from the detection mask
                    cv2.drawContours(cc, [cnt], 0, (255, 255, 255), 3)
                    cv2.drawContours(Blackmask, [cnt], 0, (255, 255, 255), 3)
                    cv2.drawContours(cc, [cnt], 0, (255, 255, 255), -1)
                    cv2.drawContours(Blackmask, [cnt], 0, (255, 255, 255), -1)
                    cv2.drawContours(BlackmaskDetected, [cnt], 0, (0, 0, 0), -1)
            invertedmask = cv2.bitwise_and(imgc, imgc, mask=Blackmask)
            xored = cc
            detectedColors = xored  # continue searching inside the updated region
        else:
            # black mask, no other levels are found; count so the caller could
            # stop once every color has come up empty
            countBlackMasks += 1
    return xored, invertedmask, BlackmaskDetected
def allLevelsofColor(BlackmaskDetected, img, levelonly, invertedmask, color, finalColorArray):
    """Resolve nested levels that use the same color, whitening other colors' regions.

    Args:
        BlackmaskDetected: running detection mask; updated in place and returned.
        img: original BGR image.
        levelonly: image of the current level only.
        invertedmask: image masked to the inner-level areas.
        color: current level's color as an RGB triple.
        finalColorArray: all level colors.

    Returns:
        (firstLevel1, BlackmaskDetected): the level image with same-color inner
        levels merged in and other colors whitened out, plus the updated mask.

    NOTE: the print() calls below are leftover debug output.
    """
    firstLevel = levelonly
    firstLevel1 = levelonly
    print('in')
    Blackmask = np.zeros(img.shape[:2], dtype="uint8")
    masked, maskedColor, rgbcolor = DetectColor(invertedmask, color)
    # normalize both to plain lists so equality comparison works
    color = [color[0], color[1], color[2]]
    rgbcolor = [rgbcolor[0], rgbcolor[1], rgbcolor[2]]
    print(rgbcolor, color)
    pil_image = Image.fromarray(masked)
    extrema = pil_image.convert("L").getextrema()
    if extrema != (0, 0):  # if image is not black --> has a colored mask within
        if rgbcolor == color:  # found another inner level with the same color
            print('kkkkkkkk')
            ColoredContour, Coloredhierarchy = cv2.findContours(masked, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            Coloredhierarchy = Coloredhierarchy[0]
            for component in zip(ColoredContour, Coloredhierarchy):
                cnt = component[0]
                hier = component[1]
                area1 = cv2.contourArea(cnt)
                if (area1 > 1000):
                    if hier[3] > -1:  # only contours nested inside another one
                        cv2.drawContours(Blackmask, [cnt], 0, (255, 255, 255), -1)
                        # thick black border separates the nested region from its parent
                        cv2.drawContours(Blackmask, [cnt], 0, (0, 0, 0), 20)
                        cv2.drawContours(BlackmaskDetected, [cnt], 0, (255, 255, 255), -1)
            firstLevel = cv2.bitwise_and(invertedmask, invertedmask, mask=Blackmask)
            # remove black pixels and let them be all white:
            # get (i, j) positions of all RGB pixels that are black (i.e. [0, 0, 0])
            firstLevel[np.all(firstLevel == (0, 0, 0), axis=-1)] = (255, 255, 255)
            firstLevel1 = cv2.bitwise_and(levelonly, firstLevel)
            # whiten every OTHER color's regions out of this level
            for othercolor in finalColorArray:
                othercolor2 = [othercolor[0], othercolor[1], othercolor[2]]
                if othercolor2 != color:
                    masked0 = DetectColor(firstLevel, othercolor)[0]
                    pil_image0 = Image.fromarray(masked0)
                    extrema0 = pil_image0.convert("L").getextrema()
                    # NOTE(review): checks `extrema` (outer) rather than the
                    # freshly computed `extrema0` — looks like a typo; verify.
                    if extrema != (0, 0):
                        ColoredContour0, Coloredhierarchy0 = cv2.findContours(masked0, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
                        for cnt in ColoredContour0:
                            area1 = cv2.contourArea(cnt)
                            if (area1 > 1000):
                                cv2.drawContours(firstLevel1, [cnt], 0, (255, 255, 255), -1)
                                cv2.drawContours(firstLevel1, [cnt], 0, (255, 255, 255), 10)
                                cv2.drawContours(BlackmaskDetected, [cnt], 0, (0, 0, 0), -1)
    return firstLevel1, BlackmaskDetected
def getColoredContour(mask, img, finalColorArray, ratioarea, ratioperim, eachcolor):
    """Extract the first nested color level from a color mask and resolve its sub-levels.

    Args:
        mask: binary mask of the detected color (from DetectColor).
        img: original BGR image.
        finalColorArray: all level colors in the plan.
        ratioarea, ratioperim: forwarded to getinnerColor (currently unused there).
        eachcolor: the color being processed.

    Returns:
        (firstLevel1, invertedmask, BlackmaskDetected1) from the inner-color
        and same-color-level resolution passes.
    """
    ColoredContour, Coloredhierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    Coloredhierarchy = Coloredhierarchy[0]
    imgc = img.copy()
    detectedColors = np.zeros(img.shape[:2], dtype="uint8")
    Blackmask = np.zeros(img.shape[:2], dtype="uint8")
    for component in zip(ColoredContour, Coloredhierarchy):
        cnt = component[0]
        hier = component[1]
        area1 = cv2.contourArea(cnt)
        if (area1 > 3000):
            if hier[3] > -1:  # only nested (inner) contours
                x, y, width, height = cv2.boundingRect(cnt)
                cv2.drawContours(Blackmask, [cnt], 0, (255, 255, 255), -1)
                # black border separates the filled region from its parent
                cv2.drawContours(Blackmask, [cnt], 0, (0, 0, 0), 10)
        detectedColors = cv2.bitwise_and(imgc, imgc, mask=Blackmask)
        pil_image = Image.fromarray(detectedColors)
        extrema = pil_image.convert("L").getextrema()
        if extrema == (0, 0):  # image still all black --> nothing detected yet
            break
    # mask inner levels in white, then resolve same-color nested levels
    levelOnly, invertedmask, BlackmaskDetected = getinnerColor(Blackmask, img, detectedColors, finalColorArray, ratioarea, ratioperim, eachcolor)
    firstLevel1, BlackmaskDetected1 = allLevelsofColor(BlackmaskDetected, img, levelOnly, invertedmask, eachcolor, finalColorArray)
    return firstLevel1, invertedmask, BlackmaskDetected1
"""# contours"""
def findContoursFullImage(green22, imgoldnodashes, dataDoc, imgtransparent1, green2, img, number, finalColorArray, ratioarea, ratioperim, color=[0, 0, 0]):
    """Dispatch shape extraction for either the full image or one color level.

    Args:
        green22, imgoldnodashes: outputs of allpreSteps(img, 1).
        dataDoc: raw PDF bytes.
        imgtransparent1: overlay image passed through to preprocess.
        green2: cleaned solid-line image.
        img: BGR plan image.
        number: 0 = process the full image; otherwise process the color level.
        finalColorArray, ratioarea, ratioperim: level colors and unit ratios.
        color: RGB triple of the level to process (mutable default is never
            mutated here).

    Returns:
        number == 0: 8-tuple (contoursP, contoursA, perimeters, texts,
            overlay, contour lists, contour/text pairs, merged binary).
        otherwise: 10-tuple additionally carrying the color and the
            inverted/region mask.
    """
    if number == 0:
        contourssP, contourssA, perimeters, alltxts, imgtransparent1, arrthresh, allshapesExtremes_Text, iddk = preprocess(green22, imgoldnodashes, dataDoc, imgtransparent1, img, number, green2, 'nolayer')
        return contourssP, contourssA, perimeters, alltxts, imgtransparent1, arrthresh, allshapesExtremes_Text, iddk
    else:
        mask, detectedColors, rgbcolor = DetectColor(img, color)
        pil_image = Image.fromarray(mask)
        extrema = pil_image.convert("L").getextrema()
        if extrema != (0, 0):  # if image is not black --> has a colored mask within
            coloredregions, invertedmask, BlackmaskDetected1 = getColoredContour(mask, img, finalColorArray, ratioarea, ratioperim, color)
            contourssP, contourssA, perimeters, alltxts, imgtransparent1, arrthresh, allshapesExtremes_Text, iddk = preprocess(green22, imgoldnodashes, dataDoc, imgtransparent1, coloredregions, number, green2, 'layer', BlackmaskDetected1)
            return contourssP, contourssA, rgbcolor, invertedmask, perimeters, alltxts, imgtransparent1, arrthresh, allshapesExtremes_Text, iddk
        else:
            # color not present: fall back to the full, unlayered image
            contourssP, contourssA, rgbcolor, invertedmask, perimeters, alltxts, imgtransparent1, arrthresh, allshapesExtremes_Text, iddk = preprocess(green22, imgoldnodashes, dataDoc, imgtransparent1, img, number, green2, 'nolayer')
            return contourssP, contourssA, color, mask, perimeters, alltxts, imgtransparent1, arrthresh, allshapesExtremes_Text, iddk
#Straighten tilted shapes
def StraightenImage(contour, imgArea):
    """Return the minimum-area (rotated) rectangle's angle, width and height.

    Args:
        contour: OpenCV contour (numpy array of points).
        imgArea: unused; kept for interface compatibility with existing callers.

    Returns:
        (angleR, width, height) — the rotation angle from cv2.minAreaRect and
        the rectangle's side lengths as ints.

    Fixes: cv2.minAreaRect was called twice with identical arguments; the
    box/boxPoints/np.int0 computation was dead code (and np.int0 is removed
    in NumPy 2.0, a latent crash); width/height were assigned twice.
    """
    (center, (width, height), angleR) = cv2.minAreaRect(contour)
    return angleR, int(width), int(height)
#get all areas and perimeter present
def getAreasPerimeter(img, ratioarea, ratioperim, contourssP, contourssA):
    """Compute the scaled area and bounding width/height of each large contour.

    Args:
        img: plan image (passed through to StraightenImage).
        ratioarea: real-world area per pixel area (true value of area of any
            shape / pixel-area value of the same shape); 0 disables output.
        ratioperim: analogous perimeter ratio; 0 disables output.
        contourssP, contourssA: perimeter-/area-variant contour tuples from
            preprocess().

    Returns:
        List of [scaled_area, width, height] for contours with area > 2000.
    """
    appended = []
    for contour in range(len(contourssP)):
        area1 = cv2.contourArea(contourssP[contour][0])
        if (area1 > 2000):
            perimeter = cv2.arcLength(contourssP[contour][0], True)
            approx = cv2.approxPolyDP(contourssP[contour][0], 0.002 * perimeter, True)
            perimeter1 = cv2.arcLength(approx, True)
            # re-measure the area from a tight approximation of the A-variant contour
            approx = cv2.approxPolyDP(contourssA[contour][0], 0.0002 * perimeter, True)
            area1 = cv2.contourArea(approx)
            x, y, width, height = cv2.boundingRect(contourssP[contour][0])
            angleR, widthR, heightR = StraightenImage(contourssP[contour][0], img)
            # inclined at any non-axis-aligned angle: use the rotated rect's size
            if (angleR != 90.0 and angleR != -90.0 and angleR != 0.0 and angleR != -0.0):
                width = widthR
                height = heightR
            if (area1 > 2000):  # check perimeter too in this condition -- 2800
                if ratioarea != 0 and ratioperim != 0:
                    areaa = round(area1 * ratioarea, 3)
                    appended.append([areaa, width, height])
    return appended
#fill dictionary with areas and perimeters and occurences
def FillDictionary(SimilarAreaDictionary, img, ratioarea, ratioperim, contourssP, contourssA, rgbcolor=[0, 0, 0], color=[0, 0, 0]):
    """Accumulate each detected shape's area/size into the occurrence DataFrame.

    Shapes with similar area (+/-0.4 after rounding) and bounding size
    (+/-15 px, width/height interchangeable) are merged into one row whose
    'Occurences' count is incremented; otherwise a new row is appended.

    Args:
        SimilarAreaDictionary: DataFrame with columns including Rounded, Width,
            Height, Occurences, Color, R, G, B (mutated in place and returned).
        img, ratioarea, ratioperim, contourssP, contourssA: forwarded to
            getAreasPerimeter.
        rgbcolor: detected color triple stored in the 'Color' column.
        color: [0,0,0] means a full (unlayered) image; anything else means a
            color level, which adds per-color matching.

    Returns:
        (SimilarAreaDictionary, colorsUsed, areas_Perimeters).

    NOTE(review): colorRanges is the same 38-color list concatenated twice;
    rows beyond its length would raise IndexError — verify the dictionary
    never grows past 76 rows. The inner `for i in myindex` reuses the outer
    loop variable `i`; harmless today because `i` is reassigned by the outer
    loop, but fragile.
    """
    # fills dictionary with key areas and number of occurrences
    areas_Perimeters = sorted(getAreasPerimeter(img, ratioarea, ratioperim, contourssP, contourssA))
    indices = []
    colorRanges=[[255,0,0],[0,0,255],[0,255,255],[0,64,0],[255,204,0],[255,128,64],[255,0,128],[255,128,192],[128,128,255],[128,64,0],[0,255,0],[179,106,179],[115,52,179],[0,128,192],[128,0,128],[128,0,0],[0,128,255],[255,182,128],[255,0,255],[0,0,128],[0,128,64],[255,255,0],[128,0,64],[203,203,106],[128,255,166],[255,128,0],[255,98,98],[90,105,138],[114,10,72],[36,82,78],[225,105,29],[108,62,40],[11,35,75],[42,176,203],[255,153,153],[129,74,138],[99,123,137],[159,179,30],[255,0,0],[0,0,255],[0,255,255],[0,64,0],[255,204,0],[255,128,64],[255,0,128],[255,128,192],[128,128,255],[128,64,0],[0,255,0],[179,106,179],[115,52,179],[0,128,192],[128,0,128],[128,0,0],[0,128,255],[255,182,128],[255,0,255],[0,0,128],[0,128,64],[255,255,0],[128,0,64],[203,203,106],[128,255,166],[255,128,0],[255,98,98],[90,105,138],[114,10,72],[36,82,78],[225,105,29],[108,62,40],[11,35,75],[42,176,203],[255,153,153],[129,74,138],[99,123,137],[159,179,30]]
    colorsUsed = []
    for i in range(len(areas_Perimeters)):
        item1 = areas_Perimeters[i][0]
        width1 = areas_Perimeters[i][1]
        height1 = areas_Perimeters[i][2]
        # matching tolerances: +/-15 px on size, +/-0.4 on the rounded area
        widthMin = width1 - 15
        widthMax = width1 + 15
        heightMin = height1 - 15
        heightMax = height1 + 15
        areaPerimeterMin = round(item1, 1) - 0.4
        areaPerimeterMax = round(item1, 1) + 0.4
        if color != [0, 0, 0]:  # colored images
            mydata = [[], [rgbcolor[0], rgbcolor[1], rgbcolor[2]], round(item1, 1), width1, height1, 1, 0, 0, 0, 0, 0, 0, '', 0, 0, 0, '']
        else:
            mydata = [[], ' ', round(item1, 1), width1, height1, 1, 0, 0, 0, 0, 0, 0, '', 0, 0, 0, '']
        # candidate rows whose rounded area falls in the tolerance window
        myindex = SimilarAreaDictionary.index[((SimilarAreaDictionary['Rounded'] >= areaPerimeterMin) & (SimilarAreaDictionary['Rounded'] <= areaPerimeterMax))].tolist()
        if color != [0, 0, 0]:  # leveled image: also match on color
            checkifColorExists = 0  # whether a matching row (area and color) was found
            for i in myindex:
                if SimilarAreaDictionary['Color'].loc[i] == [rgbcolor[0], rgbcolor[1], rgbcolor[2]] and (SimilarAreaDictionary['Rounded'].loc[i] >= areaPerimeterMin and SimilarAreaDictionary['Rounded'].loc[i] <= areaPerimeterMax):
                    # width/height may be swapped (rotated shapes), so test both pairings
                    if (SimilarAreaDictionary['Width'].loc[i] <= widthMax and SimilarAreaDictionary['Width'].loc[i] >= widthMin) and (SimilarAreaDictionary['Height'].loc[i] <= heightMax and SimilarAreaDictionary['Height'].loc[i] >= heightMin) or (SimilarAreaDictionary['Width'].loc[i] <= heightMax and SimilarAreaDictionary['Width'].loc[i] >= heightMin) and (SimilarAreaDictionary['Height'].loc[i] <= widthMax and SimilarAreaDictionary['Height'].loc[i] >= widthMin):
                        checkifColorExists = 1  # found and incremented
                        SimilarAreaDictionary['Occurences'].loc[i] += 1
            if checkifColorExists == 0:  # couldn't find the color, doesn't exist so add it
                SimilarAreaDictionary.loc[len(SimilarAreaDictionary)] = mydata
        else:  # full image: same matching without the color test
            checkifColorExists = 0
            for i in myindex:
                if (SimilarAreaDictionary['Rounded'].loc[i] <= areaPerimeterMax and SimilarAreaDictionary['Rounded'].loc[i] >= areaPerimeterMin):
                    if (SimilarAreaDictionary['Width'].loc[i] <= widthMax and SimilarAreaDictionary['Width'].loc[i] >= widthMin) and (SimilarAreaDictionary['Height'].loc[i] <= heightMax and SimilarAreaDictionary['Height'].loc[i] >= heightMin) or (SimilarAreaDictionary['Width'].loc[i] <= heightMax and SimilarAreaDictionary['Width'].loc[i] >= heightMin) and (SimilarAreaDictionary['Height'].loc[i] <= widthMax and SimilarAreaDictionary['Height'].loc[i] >= widthMin):
                        checkifColorExists = 1  # found and incremented
                        SimilarAreaDictionary['Occurences'].loc[i] += 1
            if checkifColorExists == 0:  # row doesn't exist yet, add it
                SimilarAreaDictionary.loc[len(SimilarAreaDictionary)] = mydata
    # assign each row a distinct display color from the palette
    for i in range(len(SimilarAreaDictionary)):
        SimilarAreaDictionary.loc[i, "R"] = colorRanges[i][0]
        SimilarAreaDictionary.loc[i, "G"] = colorRanges[i][1]
        SimilarAreaDictionary.loc[i, "B"] = colorRanges[i][2]
        colorsUsed.append(colorRanges[i])
    return SimilarAreaDictionary, colorsUsed, areas_Perimeters
#detect and draw and measure
def drawAllContours(dataDoc, img, number, finalColorArray, ratioarea, ratioperim, path, pdfpath):
    """Detect closed shapes in a rendered plan image, measure them, and annotate the PDF.

    For every detected contour large enough to be a real shape, this:
      * computes area / perimeter / length (scaled to metres when ``ratioarea`` /
        ``ratioperim`` are non-zero, otherwise left in pixel units),
      * matches the shape against the similar-shape table built by ``FillDictionary``
        (by colour + rounded area + width/height tolerance of +/-15 px),
      * draws filled contours on a copy of the image and adds polygon/polyline
        annotations (area, perimeter, length) to the PDF page,
      * uploads the annotated PDF to Dropbox and builds the legend Google sheet.

    Parameters
    ----------
    dataDoc : bytes
        Raw PDF bytes; opened with ``fitz.open("pdf", ...)``.
    img : numpy.ndarray
        BGR render of the plan page (same page as ``dataDoc`` page 0).
    number : int
        Mode selector: 220 = per-colour pass over ``finalColorArray``;
        anything else = single full-image pass.
    finalColorArray : sequence
        Colours to isolate in the 220 mode (also forwarded to the helpers).
    ratioarea, ratioperim : float
        px->m^2 and px->m conversion factors; 0 means "leave in pixels".
    path, pdfpath : str
        Output file name and Dropbox sub-path for the measured plan.

    Returns
    -------
    tuple
        (imgPerimeter1, image_new1, SimilarAreaDictionary, colorsUsed,
         spreadsheet_url, spreadsheetId, list1, pdflink, areas_Perimeters,
         namepathArr)

    NOTE(review): ``colorsUsed`` / ``areas_Perimeters`` are only bound inside the
    detection passes — an empty ``finalColorArray`` in 220 mode would raise
    NameError at the return; confirm callers always pass at least one colour.
    """
    green2 = allpreSteps(img)
    green22, imgoldnodashes = allpreSteps(img, num=1)
    doc = fitz.open("pdf", dataDoc)
    page = doc[0]
    rotationOld = page.rotation
    pix = page.get_pixmap()
    if page.rotation != 0:
        # Work in unrotated page space; image width maps to shape[0] because the
        # rendered raster is transposed relative to the rotated page.
        page.set_rotation(0)
        ratio = pix.width / img.shape[0]
    else:
        ratio = pix.width / img.shape[1]
    areasinImage = []
    imgArea1 = img.copy()        # filled-contour overlay
    imgPerimeter1 = img.copy()   # text-labelled output image
    imgtransparent1 = img.copy() # background blended into the final overlay
    if number == 220:
        # Per-colour mode: iterate the palette, accumulating matches into one table.
        SimilarAreaDictionary = pd.DataFrame(columns=['Guess', 'Color', 'Rounded', 'Width', 'Height', 'Occurences', 'Area', 'Total Area', 'Perimeter', 'Total Perimeter', 'Length', 'Total Length', 'Texts', 'R', 'G', 'B', 'Comments'])
        maskDone = img.copy()
        for eachcolor in finalColorArray:
            contourssP, contourssA, rgbcolor, invertedmask, perimeters, alltxts, imgtransparent1, ArrthreshCnt, allshapesExtremes_Text, green22Gry = findContoursFullImage(green22, imgoldnodashes, dataDoc, imgtransparent1, green2, maskDone, number, finalColorArray, ratioarea, ratioperim, eachcolor)
            SimilarAreaDictionary, colorsUsed, areas_Perimeters = FillDictionary(SimilarAreaDictionary, maskDone, ratioarea, ratioperim, contourssP, contourssA, rgbcolor, eachcolor)
            perimTotal = 0
            for contour in range(len(contourssP)):
                shape = []
                area1 = cv2.contourArea(contourssA[contour][0])
                if (area1 > 3500):  # ignore speckle; threshold in px^2
                    perimeter = cv2.arcLength(contourssP[contour][0], True)
                    shape = []
                    angleR, widthR, heightR = StraightenImage(contourssP[contour][0], imgArea1)
                    x, y, width, height = cv2.boundingRect(contourssP[contour][0])
                    # Isolate the shape's neighbourhood to find the label contour nearest it.
                    Blackmask = np.zeros(img.shape[:2], dtype="uint8")
                    Blackmask = cv2.rectangle(Blackmask, (int(x - 10), int(y - 10)), (int(x + width + 10), int(y + height + 10)), (255, 255, 255), -1)
                    Blackmask = cv2.bitwise_and(green22Gry, green22Gry, mask=Blackmask)
                    BlackmaskCnt, _ = cv2.findContours(Blackmask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
                    areas = [cv2.contourArea(c) for c in BlackmaskCnt]
                    blackcnt = ()  # FIX: was unbound (or stale) when no contour survived the mask
                    if len(areas) > 0:
                        max_index = np.argmax(areas)
                        blackcnt = BlackmaskCnt[max_index]
                        blackcnt = tuple(blackcnt)
                    # Pick up the text associated with the threshold contour this shape belongs to.
                    texts = ''
                    for th in range(len(ArrthreshCnt)):
                        for e in blackcnt:
                            if list(e[0]) in ArrthreshCnt[th]:
                                texts = allshapesExtremes_Text[th][1]
                                break
                        if len(texts) > 0:
                            break
                    # Tight polygon for area, looser one for perimeter smoothing.
                    approxA = cv2.approxPolyDP(contourssA[contour][0], 0.0002 * perimeter, True)
                    area1 = cv2.contourArea(approxA)
                    approx = cv2.approxPolyDP(contourssP[contour][0], 0.002 * perimeter, True)
                    perimeter1 = cv2.arcLength(approx, True)
                    for point in approxA:
                        x1, y1 = point[0]
                        p1 = fitz.Point(x1 * ratio, y1 * ratio)
                        p1 = p1 * page.derotation_matrix
                        shape.append([p1[0], p1[1]])
                    if (angleR != 90.0 and angleR != -90.0 and angleR != 0.0 and angleR != -0.0):
                        # Inclined shape: use the rotated-rect dimensions instead of the AABB.
                        width = widthR
                        height = heightR
                    if width > height:
                        lengthShape = width
                    else:
                        lengthShape = height
                    widthMin = width - 15
                    widthMax = width + 15
                    heightMin = height - 15
                    heightMax = height + 15
                    if ratioarea != 0 and ratioperim != 0:
                        areaa = round(area1 * ratioarea, 3)  # px^2 -> m^2
                        perimeterr = round(perimeter1 * ratioperim, 3)
                        lengthShape = round(lengthShape * ratioperim, 3)
                    else:
                        areaa = area1
                        perimeterr = perimeter1
                    areaPerimeterMin = round(areaa, 1) - 0.4
                    areaPerimeterMax = round(areaa, 1) + 0.4
                    masked = SimilarAreaDictionary.loc[SimilarAreaDictionary.index[((SimilarAreaDictionary['Rounded'] >= areaPerimeterMin) & (SimilarAreaDictionary['Rounded'] <= areaPerimeterMax))]]
                    passed = 0
                    for i, row in masked.iterrows():
                        if passed == 0:
                            # Match on colour + rounded area, then width/height within tolerance
                            # (either orientation).
                            if SimilarAreaDictionary['Color'].loc[i] == [rgbcolor[0], rgbcolor[1], rgbcolor[2]] and (SimilarAreaDictionary['Rounded'].loc[i] <= areaPerimeterMax and SimilarAreaDictionary['Rounded'].loc[i] >= areaPerimeterMin):
                                if (SimilarAreaDictionary['Width'].loc[i] <= widthMax and SimilarAreaDictionary['Width'].loc[i] >= widthMin) and (SimilarAreaDictionary['Height'].loc[i] <= heightMax and SimilarAreaDictionary['Height'].loc[i] >= heightMin) or (SimilarAreaDictionary['Width'].loc[i] <= heightMax and SimilarAreaDictionary['Width'].loc[i] >= heightMin) and (SimilarAreaDictionary['Height'].loc[i] <= widthMax and SimilarAreaDictionary['Height'].loc[i] >= widthMin):
                                    if len(alltxts) > 0:
                                        SimilarAreaDictionary['Guess'].loc[i].append(str(alltxts[contour]))
                                    for t in texts:
                                        if "GB" not in t or "RC" not in t or "PC" not in t:
                                            if t not in SimilarAreaDictionary['Texts'].loc[i]:
                                                SimilarAreaDictionary['Texts'].loc[i] += ' ' + t
                                    SimilarAreaDictionary['Total Area'].loc[i] += areaa
                                    SimilarAreaDictionary['Area'].loc[i] = areaa
                                    pFlagDF = 0
                                    color = (int(SimilarAreaDictionary['R'].loc[i]) / 255, int(SimilarAreaDictionary['G'].loc[i]) / 255, int(SimilarAreaDictionary['B'].loc[i]) / 255)
                                    # Prefer the pre-computed "open" perimeter path when one matches
                                    # this shape's bounding-box origin.
                                    for p in perimeters:
                                        if p[2] == x and p[3] == y and p[5] == 'open':
                                            shapee = []
                                            SimilarAreaDictionary['Total Perimeter'].loc[i] += round((p[4] - 1) * ratioperim, 3)
                                            SimilarAreaDictionary['Perimeter'].loc[i] = round((p[4] - 1) * ratioperim, 3)
                                            for poi in p[6]:
                                                x1, y1 = poi[0]
                                                p1 = fitz.Point(x1 * ratio, y1 * ratio)
                                                p1 = p1 * page.derotation_matrix
                                                shapee.append([p1[0], p1[1]])
                                            annot11 = page.add_polyline_annot(points=shapee)
                                            annot11.set_border(width=0.2, dashes=[3])
                                            annot11.set_colors(stroke=color, fill=None)
                                            if len(alltxts) > 0:
                                                annot11.set_info(content='Perimeter=' + str(round((p[4] - 1) * ratioperim, 3)) + ' m', subject='ADR Team', title=str(alltxts[contour]))
                                            annot11.update()
                                            pFlagDF = 1
                                    if pFlagDF == 0:
                                        # No matching open path: annotate the approximated outline itself.
                                        annot1 = page.add_polyline_annot(points=shape)
                                        annot1.set_border(width=0.2, dashes=[3])
                                        annot1.set_colors(stroke=color, fill=None)
                                        if len(alltxts) > 0:
                                            annot1.set_info(content='Perimeter=' + str(perimeterr) + ' m', subject='ADR Team', title=str(alltxts[contour]))
                                        annot1.update()  # FIX: update() was missing, so border/colors never took effect
                                        SimilarAreaDictionary['Total Perimeter'].loc[i] += perimeterr
                                        SimilarAreaDictionary['Perimeter'].loc[i] = perimeterr
                                    SimilarAreaDictionary['Total Length'].loc[i] += lengthShape
                                    SimilarAreaDictionary['Length'].loc[i] = lengthShape
                                    passed = 1
                                    cv2.drawContours(imgArea1, [contourssP[contour][0]], 0, (int(SimilarAreaDictionary['B'].loc[i]), int(SimilarAreaDictionary['G'].loc[i]), int(SimilarAreaDictionary['R'].loc[i])), -1)
                                    # Filled, semi-transparent polygon carrying the area value.
                                    annot = page.add_polygon_annot(points=shape)
                                    annot.set_border(width=0.2)
                                    annot.set_colors(stroke=color, fill=color)
                                    annot.set_opacity(0.5)
                                    if len(alltxts) > 0:
                                        annot.set_info(content='Area=' + str(areaa) + " m²", subject='ADR Team', title=str(alltxts[contour]))
                                    annot.update()
                                    # Dashed polyline carrying the length value.
                                    annot11 = page.add_polyline_annot(points=shape)
                                    annot11.set_border(width=0.2, dashes=[3])
                                    annot11.set_colors(stroke=color, fill=None)  # FIX: was annot1 (possibly unbound)
                                    if len(alltxts) > 0:  # FIX: unguarded alltxts[contour] raised IndexError when empty
                                        annot11.set_info(content=' Length=' + str(lengthShape) + ' m', subject='ADR Team', title=str(alltxts[contour]))
                                    annot11.update()
                                    cv2.putText(imgPerimeter1, 'Perimeter: ' + str(perimeterr) + ' m', (x + 70, y - 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (50, 50, 255), 2)
                                    cv2.putText(imgPerimeter1, 'Area: ' + str(areaa) + ' m2', (x + 50, y - 40), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (50, 50, 255), 2)
    else:
        # Full-image mode: one pass, no colour matching.
        SimilarAreaDictionary = pd.DataFrame(columns=['Guess', 'Color', 'Rounded', 'Width', 'Height', 'Occurences', 'Area', 'Total Area', 'Perimeter', 'Total Perimeter', 'Length', 'Total Length', 'Texts', 'R', 'G', 'B', 'Comments'])
        contourssP, contourssA, perimeters, alltxts, imgtransparent1, ArrthreshCnt, allshapesExtremes_Text, green22Gry = findContoursFullImage(green22, imgoldnodashes, dataDoc, imgtransparent1, green2, img, number, finalColorArray, ratioarea, ratioperim)
        SimilarAreaDictionary, colorsUsed, areas_Perimeters = FillDictionary(SimilarAreaDictionary, img, ratioarea, ratioperim, contourssP, contourssA)
        for contour in range(len(contourssP)):
            area1 = cv2.contourArea(contourssA[contour][0])
            if (area1 > 4000):
                perimeter = cv2.arcLength(contourssP[contour][0], True)
                shape = []
                angleR, widthR, heightR = StraightenImage(contourssP[contour][0], imgArea1)
                x, y, width, height = cv2.boundingRect(contourssP[contour][0])
                Blackmask = np.zeros(img.shape[:2], dtype="uint8")
                Blackmask = cv2.rectangle(Blackmask, (int(x - 10), int(y - 10)), (int(x + width + 10), int(y + height + 10)), (255, 255, 255), -1)
                Blackmask = cv2.bitwise_and(green22Gry, green22Gry, mask=Blackmask)
                BlackmaskCnt, _ = cv2.findContours(Blackmask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
                areas = [cv2.contourArea(c) for c in BlackmaskCnt]
                blackcnt = ()  # FIX: was unbound (or stale) when no contour survived the mask
                if len(areas) > 0:
                    max_index = np.argmax(areas)
                    blackcnt = BlackmaskCnt[max_index]
                    blackcnt = tuple(blackcnt)
                texts = ''
                for th in range(len(ArrthreshCnt)):
                    for e in blackcnt:
                        if list(e[0]) in ArrthreshCnt[th]:
                            texts = allshapesExtremes_Text[th][1]
                            break
                    if len(texts) > 0:
                        break
                approxA = cv2.approxPolyDP(contourssA[contour][0], 0.0002 * perimeter, True)
                area1 = cv2.contourArea(approxA)
                approx = cv2.approxPolyDP(contourssP[contour][0], 0.002 * perimeter, True)
                perimeter1 = cv2.arcLength(approx, True)
                for point in approxA:
                    x1, y1 = point[0]
                    p1 = fitz.Point(x1 * ratio, y1 * ratio)
                    p1 = p1 * page.derotation_matrix
                    shape.append([p1[0], p1[1]])
                if (angleR != 90.0 and angleR != -90.0 and angleR != 0.0 and angleR != -0.0):
                    width = widthR
                    height = heightR
                if width > height:
                    lengthShape = width
                else:  # FIX: two independent ifs left lengthShape unbound when width == height
                    lengthShape = height
                widthMin = width - 15
                widthMax = width + 15
                heightMin = height - 15
                heightMax = height + 15
                if ratioarea != 0 and ratioperim != 0:
                    areaa = round(area1 * ratioarea, 3)
                    perimeterr = round(perimeter1 * ratioperim, 3)
                    lengthShape = round(lengthShape * ratioperim, 3)
                else:
                    areaa = area1  # FIX: missing assignment caused NameError when ratios are 0
                    perimeterr = perimeter1
                areaPerimeterMin = round(areaa, 1) - 0.4
                areaPerimeterMax = round(areaa, 1) + 0.4
                masked = SimilarAreaDictionary.loc[SimilarAreaDictionary.index[((SimilarAreaDictionary['Rounded'] >= areaPerimeterMin) & (SimilarAreaDictionary['Rounded'] <= areaPerimeterMax))]]
                passed = 0
                for i, row in masked.iterrows():
                    if passed == 0:
                        if (SimilarAreaDictionary['Rounded'].loc[i] <= areaPerimeterMax and SimilarAreaDictionary['Rounded'].loc[i] >= areaPerimeterMin):
                            if (SimilarAreaDictionary['Width'].loc[i] <= widthMax and SimilarAreaDictionary['Width'].loc[i] >= widthMin) and (SimilarAreaDictionary['Height'].loc[i] <= heightMax and SimilarAreaDictionary['Height'].loc[i] >= heightMin) or (SimilarAreaDictionary['Width'].loc[i] <= heightMax and SimilarAreaDictionary['Width'].loc[i] >= heightMin) and (SimilarAreaDictionary['Height'].loc[i] <= widthMax and SimilarAreaDictionary['Height'].loc[i] >= widthMin):
                                if len(alltxts) > 0:
                                    SimilarAreaDictionary['Guess'].loc[i].append(str(alltxts[contour]))
                                for t in texts:
                                    if "GB" not in t or "RC" not in t or "PC" not in t:
                                        if t not in SimilarAreaDictionary['Texts'].loc[i]:
                                            SimilarAreaDictionary['Texts'].loc[i] += ' ' + t
                                SimilarAreaDictionary['Total Area'].loc[i] += areaa
                                SimilarAreaDictionary['Area'].loc[i] = areaa
                                pFlagDF = 0
                                color = (int(SimilarAreaDictionary['R'].loc[i]) / 255, int(SimilarAreaDictionary['G'].loc[i]) / 255, int(SimilarAreaDictionary['B'].loc[i]) / 255)
                                for p in perimeters:
                                    if p[2] == x and p[3] == y and p[5] == 'open':
                                        shapee = []
                                        SimilarAreaDictionary['Total Perimeter'].loc[i] += round((p[4] - 1) * ratioperim, 3)
                                        SimilarAreaDictionary['Perimeter'].loc[i] = round((p[4] - 1) * ratioperim, 3)
                                        for poi in p[6]:
                                            x1, y1 = poi[0]
                                            p1 = fitz.Point(x1 * ratio, y1 * ratio)
                                            p1 = p1 * page.derotation_matrix
                                            shapee.append([p1[0], p1[1]])
                                        annot11 = page.add_polyline_annot(points=shapee)
                                        annot11.set_border(width=0.2, dashes=[3])
                                        annot11.set_colors(stroke=color, fill=None)  # FIX: was annot1 (possibly unbound)
                                        if len(alltxts) > 0:
                                            annot11.set_info(content='Perimeter=' + str(round((p[4] - 1) * ratioperim, 3)) + ' m', subject='ADR Team', title=str(alltxts[contour]))
                                        annot11.update()
                                        pFlagDF = 1
                                if pFlagDF == 0:
                                    annot1 = page.add_polyline_annot(points=shape)
                                    annot1.set_border(width=0.2, dashes=[3])
                                    annot1.set_colors(stroke=color, fill=None)
                                    if len(alltxts) > 0:
                                        annot1.set_info(content='Perimeter=' + str(perimeterr) + ' m', subject='ADR Team', title=str(alltxts[contour]))
                                    annot1.update()  # FIX: update() was missing, so border/colors never took effect
                                    SimilarAreaDictionary['Total Perimeter'].loc[i] += perimeterr
                                    SimilarAreaDictionary['Perimeter'].loc[i] = perimeterr
                                SimilarAreaDictionary['Total Length'].loc[i] += lengthShape
                                SimilarAreaDictionary['Length'].loc[i] = lengthShape
                                passed = 1
                                cv2.drawContours(imgArea1, [contourssP[contour][0]], 0, (int(SimilarAreaDictionary['B'].loc[i]), int(SimilarAreaDictionary['G'].loc[i]), int(SimilarAreaDictionary['R'].loc[i])), -1)
                                annot = page.add_polygon_annot(points=shape)
                                annot.set_border(width=0.2)
                                annot.set_colors(stroke=color, fill=color)
                                annot.set_opacity(0.5)
                                if len(alltxts) > 0:
                                    annot.set_info(content='Area=' + str(areaa) + " m²", subject='ADR Team', title=str(alltxts[contour]))
                                annot.update()
                                annot11 = page.add_polyline_annot(points=shape)  # FIX: was shapee (unbound when no perimeter matched)
                                annot11.set_border(width=0.2, dashes=[3])
                                annot11.set_colors(stroke=color, fill=None)  # FIX: was annot1 (possibly unbound)
                                if len(alltxts) > 0:  # FIX: unguarded alltxts[contour] raised IndexError when empty
                                    annot11.set_info(content=' Length=' + str(lengthShape) + ' m', subject='ADR Team', title=str(alltxts[contour]))
                                annot11.update()
    # Collapse each row's accumulated guesses to the single most common label.
    for i, row in SimilarAreaDictionary.iterrows():
        c = Counter(SimilarAreaDictionary['Guess'].loc[i])
        if len(c) > 0:
            value, count = c.most_common()[0]
            SimilarAreaDictionary['Guess'].loc[i] = value
        else:
            SimilarAreaDictionary['Guess'].loc[i] = 'none'
    alpha = 0.4  # transparency factor for the filled-contour overlay
    image_new1 = cv2.addWeighted(imgArea1, alpha, imgtransparent1, 1 - alpha, 0)
    # Restore the page's original orientation (and rotate the raster to match).
    if rotationOld == 90:
        image_new1 = cv2.rotate(image_new1, cv2.ROTATE_90_CLOCKWISE)
    if rotationOld == 180:
        image_new1 = cv2.rotate(image_new1, cv2.ROTATE_180)
    if rotationOld == 270:
        image_new1 = cv2.rotate(image_new1, cv2.ROTATE_90_COUNTERCLOCKWISE)
    page.set_rotation(rotationOld)
    # Upload the annotated PDF, then re-download it and harvest its annotations.
    dbPath = '/TSA JOBS/ADR Test' + pdfpath + 'Measured Plan/'
    pdflink = tsadropboxretrieval.uploadanyFile(doc=doc, path=dbPath, pdfname=path)
    dbxTeam = tsadropboxretrieval.ADR_Access_DropboxTeam('user')
    md, res = dbxTeam.files_download(path=dbPath + path)
    data = res.content
    doc = fitz.open("pdf", data)
    list1 = pd.DataFrame(columns=['content', 'id', 'subject'])
    for page in doc:
        for annot in page.annots():
            list1.loc[len(list1)] = annot.info
    gc, spreadsheet_service, spreadsheetId, spreadsheet_url, namepathArr = google_sheet_Legend.legendGoogleSheets(SimilarAreaDictionary, path, pdfpath)
    return imgPerimeter1, image_new1, SimilarAreaDictionary, colorsUsed, spreadsheet_url, spreadsheetId, list1, pdflink, areas_Perimeters, namepathArr
######################################################
def deletemarkups(list1, dbPath, path):
    """Return the markup annotations that were deleted from a PDF on Dropbox.

    Parameters
    ----------
    list1 : str
        String repr of the original markups (a dict/list of annotation-info
        records as produced by ``annot.info``); parsed with
        ``ast.literal_eval`` — safe, literals-only, unlike ``eval``.
    dbPath, path : str
        Dropbox folder and file name of the current (possibly edited) PDF.

    Returns
    -------
    pandas.DataFrame
        Rows present in exactly one of {original, current} annotation sets,
        i.e. the deleted markups, restricted to ['content', 'id', 'subject']
        and excluding 'Scale' annotations. Empty DataFrame when nothing
        was deleted.
    """
    # SECURITY FIX: ast.literal_eval instead of eval — list1 comes from outside
    # this module and must not be able to execute arbitrary code.
    list1 = pd.DataFrame(ast.literal_eval(list1))
    # Download the current version of the PDF and collect its annotations.
    dbxTeam = tsadropboxretrieval.ADR_Access_DropboxTeam('user')
    md, res = dbxTeam.files_download(path=dbPath + path)
    doc = fitz.open("pdf", res.content)
    list2 = pd.DataFrame(columns=['content', 'id', 'subject'])
    for page in doc:
        for annot in page.annots():
            list2.loc[len(list2)] = annot.info
    # Symmetric difference of the two annotation sets = deleted markups.
    deletedrows = pd.concat([list1, list2]).drop_duplicates(keep=False)
    print(deletedrows, len(deletedrows))
    if len(deletedrows) != 0:
        deletedrows = deletedrows[['content', 'id', 'subject']]
        # 'Scale' annotations are system markups, never user deletions.
        deletedrows = deletedrows.drop(deletedrows.index[deletedrows['content'].str.startswith('Scale')])
    return deletedrows
#######################################################