Spaces:
Sleeping
Sleeping
Create pilecaps_adr.py
Browse files- pilecaps_adr.py +1159 -0
pilecaps_adr.py
ADDED
|
@@ -0,0 +1,1159 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
"""Copy of XOR- ROI from plan-PileCaps-ADR.ipynb
|
| 3 |
+
|
| 4 |
+
Automatically generated by Colaboratory.
|
| 5 |
+
|
| 6 |
+
Original file is located at
|
| 7 |
+
https://colab.research.google.com/drive/16RHtRae7VU_fqHMAjOUL4ET5slEFo3pf
|
| 8 |
+
"""
|
| 9 |
+
import numpy as np
|
| 10 |
+
import cv2
|
| 11 |
+
from matplotlib import pyplot as plt
|
| 12 |
+
from math import sin, cos, radians
|
| 13 |
+
import pandas as pd
|
| 14 |
+
from PIL import Image , ImageChops
|
| 15 |
+
import numpy as np
|
| 16 |
+
from googleapiclient.discovery import build
|
| 17 |
+
from google.oauth2 import service_account
|
| 18 |
+
import pygsheets
|
| 19 |
+
import re
|
| 20 |
+
import fitz
|
| 21 |
+
import db
|
| 22 |
+
import ast
|
| 23 |
+
import Dropbox_TSA_API
|
| 24 |
+
import tsadropboxretrieval
|
| 25 |
+
from collections import Counter
|
| 26 |
+
from unidecode import unidecode
|
| 27 |
+
import google_sheet_Legend
|
| 28 |
+
|
| 29 |
+
def textLists(img,dataDoc):
    """Collect positions of pile-cap label words ("GB", "RC" or "PC") from the PDF.

    img: rendered page image; only its width is used, to scale PDF coordinates.
    dataDoc: raw PDF bytes.
    Returns (pc_coor, listall) where pc_coor is a list of (x, y) tuples and
    listall a list of (x, y, word) triples in image coordinates.
    """
    allTexts = texts_from_pdf(dataDoc)
    doc = fitz.open('pdf',dataDoc)
    page=doc[0]
    if page.rotation!=0:
        page.set_rotation(0)
    pix = page.get_pixmap() # render page to an image
    # Scale factor between the PDF pixmap width and the working image width.
    ratio = pix.width/ img.shape[1]
    listall=[]
    pc_coor = []
    for tpl in allTexts:
        # tpl is a fitz "words" tuple: (x0, y0, x1, y1, word, block, line, word_no).
        if "GB" in tpl[4] or "RC" in tpl[4] or "PC" in tpl[4]:
            # Use the word's bottom-right corner, scaled to image space.
            p1 = fitz.Point((tpl[2]/ratio),(tpl[3]/ratio))
            # NOTE(review): rotation was forced to 0 above, so this branch always
            # runs and derotation_matrix is the identity — confirm intent.
            if page.rotation==0:
                p1=p1*page.derotation_matrix
            pc_coor.append((p1[0],p1[1]))
            listall.append((p1[0],p1[1],tpl[4]))
    return pc_coor, listall
|
| 47 |
+
|
| 48 |
+
def textListsAlltexts(dataDoc,span_df):
    """Derotate every span position from span_df into image coordinates.

    dataDoc: raw PDF bytes.
    span_df: DataFrame from textDictionaryBlocks with 'xmin'/'ymin'/'text' columns.
    Returns (pc_coor, listall): (x, y) tuples and (x, y, text) triples.
    """
    listall=[]
    pc_coor = []
    # NOTE(review): allTexts is never used below; texts_from_pdf is kept only
    # for any side effects it may have on the freshly opened doc — confirm.
    allTexts = texts_from_pdf(dataDoc)
    doc = fitz.open('pdf',dataDoc)
    page=doc[0]
    for i, row in span_df.iterrows():
        p1 = fitz.Point((span_df['xmin'].loc[i]),(span_df['ymin'].loc[i]))
        # derotation_matrix is the identity when rotation == 0.
        if page.rotation==0:
            p1=p1*page.derotation_matrix
        pc_coor.append((p1[0],p1[1]))
        listall.append((p1[0],p1[1],span_df['text'].loc[i]))
    return pc_coor, listall
|
| 61 |
+
# pc_coor,listall=textLists(img)
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
#Prepare preprocessing
|
| 65 |
+
def detectCircles(imgOriginal ):
    """White-out large circles (pile symbols) so they do not interfere with shapes.

    Returns a copy of imgOriginal with each detected circle outlined in white.
    """
    canvas = imgOriginal.copy()
    gray = cv2.cvtColor(canvas, cv2.COLOR_BGR2GRAY)
    struct_el = np.ones((3, 3), np.uint8)
    # Open (erode then dilate) to suppress thin noise before Hough.
    cleaned = cv2.erode(gray, struct_el, iterations=2)
    cleaned = cv2.dilate(cleaned, struct_el, iterations=2)
    # Hough: min centre distance 50 px, Canny upper threshold 700,
    # accumulator threshold 21, radii 20-50 px.
    circles = cv2.HoughCircles(cleaned, cv2.HOUGH_GRADIENT, 1, 50, param1=700,
                               param2=21, minRadius=20, maxRadius=50)
    if circles is not None:
        # Round the (x, y, r) parameters to integers before drawing.
        circles = np.uint16(np.around(circles))
        circles = np.round(circles[0, :]).astype("int")
        for cx, cy, radius in circles:
            cv2.circle(canvas, (cx, cy), radius, (255, 255, 255), 6)
    return canvas
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
def detectSmallCircles(img ):
    """Fill tiny circles (TOC points) with solid white so they stop interfering
    with shape detection.

    Returns a copy of img with each detected small circle painted over
    (radius + 3, filled).
    """
    im = img.copy()
    imgGry1 = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    # NOTE: the original also computed an eroded copy of the grayscale image
    # but never used it; that dead work has been removed.
    # Hough: min centre distance 60 px, Canny upper threshold 550,
    # accumulator threshold 13, radii 1-15 px.
    detected_circles = cv2.HoughCircles(imgGry1, cv2.HOUGH_GRADIENT, 1, 60, param1=550,
                                        param2=13, minRadius=1, maxRadius=15)
    if detected_circles is not None:
        # Round circle parameters (x, y, r) to integers before drawing.
        detected_circles = np.uint16(np.around(detected_circles))
        detected_circles = np.round(detected_circles[0, :]).astype("int")
        for (x, y, r) in detected_circles:
            # Slightly oversized filled disc to fully cover the dot.
            cv2.circle(im, (x, y), r + 3, (255, 255, 255), -1)
    return im
|
| 108 |
+
|
| 109 |
+
def DashedPreprocessing(imgOriginal,imgnoSmall):
    """Isolate dashed-line fragments from the plan.

    imgOriginal: the source plan image (only its shape is used).
    imgnoSmall: plan with circles removed.
    Returns (img4, imgBW, dim_max, dim_min):
      img4   - blurred mask of candidate dash fragments,
      imgBW  - binarized plan (gray contours removed),
      dim_max/dim_min - larger/smaller of the image height and width.
    """
    h, w = imgOriginal.shape[0:2]
    # Remove the gray contours from the plan via a hard binary threshold.
    imgBW = cv2.threshold(imgnoSmall, 180, 255, cv2.THRESH_BINARY)[1]

    kernel1 = np.ones((3, 5), np.uint8)
    kernel2 = np.ones((9, 9), np.uint8)
    imgGray = cv2.cvtColor(imgBW, cv2.COLOR_BGR2GRAY)
    imgBW1 = cv2.threshold(imgGray, 200, 255, cv2.THRESH_BINARY_INV)[1]

    # Erode away short dash strokes, then dilate heavily: surviving blobs mark
    # areas of solid linework. ANDing back keeps only dash-like residue.
    img1 = cv2.erode(imgBW1, kernel1, iterations=1)
    img2 = cv2.dilate(img1, kernel2, iterations=3)
    img3 = cv2.bitwise_and(imgBW1, img2)
    img3 = cv2.bitwise_not(img3)
    img4 = cv2.bitwise_and(imgBW1, imgBW1, mask=img3)
    img4 = cv2.blur(img4, (7, 7))
    # Avoid shadowing the builtins max/min (the original bound them as locals).
    dim_max, dim_min = max(h, w), min(h, w)
    return img4, imgBW, dim_max, dim_min
|
| 135 |
+
|
| 136 |
+
def removeDashedLines(img4, imgBW ,max,min):
    """Detect long (dashed) lines in img4 and paint them green onto imgBW.

    img4: dash-fragment mask; imgBW: BGR plan image drawn onto in place.
    max/min: page dimensions (names kept for interface compatibility).
    Returns the green channel of the painted image.
    """
    # minLineLength scales with the page size so only page-spanning dashes match.
    imgLines = cv2.HoughLinesP(img4, 1, np.pi/310, 30,
                               minLineLength=(max-min)//1.8, maxLineGap=120)
    # BUGFIX: HoughLinesP returns None when nothing is found; the original
    # crashed with a TypeError on len(None).
    if imgLines is not None:
        for line in imgLines:
            for x1, y1, x2, y2 in line:
                cv2.line(imgBW, (x1, y1), (x2, y2), (0, 255, 0), 3)
    green = imgBW.copy()[:, :, 1]
    return green
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
def removeSmallDashes(imgOriginal,green,num=0):
    """Detect short dash segments in `green` and paint them out in green.

    imgOriginal: BGR image to overlay the detected segments onto.
    green: single-channel image containing the remaining linework.
    num: kept for interface compatibility (unused; a small-circle pass
         keyed on it is disabled).
    Returns (imgCopy, smalldashesOut): green channels of the two overlays.
    """
    # Invert so dashes become white foreground for Hough.
    smalldashes = cv2.bitwise_not(green.copy())

    kernel3 = np.ones((3, 3), np.uint8)
    # Close small gaps, then median-blur twice to merge dash fragments.
    img1 = cv2.dilate(smalldashes, kernel3, iterations=2)
    img2 = cv2.erode(img1, kernel3, iterations=2)
    smalldashes = cv2.medianBlur(img2, 7)
    smalldashes = cv2.medianBlur(smalldashes, 9)

    smalldashesOut = green.copy()
    smalldashesOut = cv2.cvtColor(smalldashesOut, cv2.COLOR_GRAY2BGR)
    imgLines = cv2.HoughLinesP(smalldashes, 1, np.pi/180, 27,
                               minLineLength=70, maxLineGap=70)

    imgCopy = imgOriginal.copy()
    # BUGFIX: HoughLinesP returns None when no segment is found; the original
    # crashed with a TypeError on len(None).
    if imgLines is not None:
        for line in imgLines:
            for x1, y1, x2, y2 in line:
                cv2.line(smalldashesOut, (x1, y1), (x2, y2), (0, 255, 0), 3)
                cv2.line(imgCopy, (x1, y1), (x2, y2), (0, 255, 0), 2)
    imgCopy = imgCopy[:, :, 1]
    smalldashesOut = smalldashesOut[:, :, 1]
    return imgCopy, smalldashesOut
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
def euclidian_distance(point1, point2):
    """Return the Euclidean distance between two points.

    Iterates over the length of point1 (point2 must be at least as long).
    """
    squared = 0
    for axis in range(len(point1)):
        delta = point1[axis] - point2[axis]
        squared += delta * delta
    return squared ** 0.5
|
| 179 |
+
|
| 180 |
+
def removeDashedLinesSmall(img4, imgBW ,max,min):
    """Paint medium-length dashed lines green onto a BGR copy of imgBW.

    Only segments whose length falls in [ (max-min)//2.3, (max-min)//1.9 )
    are painted. max/min: page dimensions (names kept for compatibility).
    Returns the green channel of the painted image.
    """
    imgBW = cv2.cvtColor(imgBW, cv2.COLOR_GRAY2BGR)
    imgLines = cv2.HoughLinesP(img4, 1, np.pi/100, 20,
                               minLineLength=(max-min)//3.5, maxLineGap=70)
    # BUGFIX: HoughLinesP returns None when nothing is found; the original
    # crashed with a TypeError on len(None).
    if imgLines is not None:
        for line in imgLines:
            for x1, y1, x2, y2 in line:
                dist = euclidian_distance((x1, y1), (x2, y2))
                # Keep only the medium-length band of segments.
                if dist >= (max-min)//2.3 and dist < (max-min)//1.9:
                    cv2.line(imgBW, (x1, y1), (x2, y2), (0, 255, 0), 3)
    green = imgBW.copy()[:, :, 1]
    return green
|
| 191 |
+
|
| 192 |
+
def ConnectBeamLines(smalldashesOut, maxLineGap=0):
    """Bridge gaps in beam lines by drawing black Hough segments over them.

    smalldashesOut: single-channel image of the remaining linework.
    maxLineGap: 0 selects the default parameter set (gap 25, rho 0.3);
                any other value selects gap 40, rho 0.2.
    Returns a BGR image with connecting segments drawn in black.
    """
    if maxLineGap == 0:
        maxLineGap = 25
        thresh = 20
        point = 0.3
    else:
        thresh = 20
        point = 0.2
        maxLineGap = 40

    # Invert so lines become white foreground for Hough.
    green1 = cv2.bitwise_not(smalldashesOut)
    smalldashesOut = cv2.cvtColor(smalldashesOut, cv2.COLOR_GRAY2BGR)
    imgLines = cv2.HoughLinesP(green1, point, np.pi/180, thresh,
                               minLineLength=25, maxLineGap=maxLineGap)
    # BUGFIX: HoughLinesP returns None when nothing is found; the original
    # crashed with a TypeError on len(None).
    if imgLines is not None:
        for line in imgLines:
            for x1, y1, x2, y2 in line:
                cv2.line(smalldashesOut, (x1, y1), (x2, y2), (0, 0, 0), 2)
    return smalldashesOut
|
| 209 |
+
|
| 210 |
+
def getImgDark(imgg):
    """Build a dark-line binary image: OR of an Otsu threshold with the legacy
    preprocessing mask, inverted, returned as BGR.
    """
    legacy = preprocessold(imgg, 0)
    blurred = cv2.GaussianBlur(ChangeBrightness(imgg, 1), (3, 3), 0)
    gray = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY)
    _, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    legacy = cv2.medianBlur(legacy, 3)
    combined = cv2.bitwise_or(binary, legacy)
    inverted = cv2.bitwise_not(combined)
    return cv2.cvtColor(inverted, cv2.COLOR_GRAY2BGR)
|
| 220 |
+
|
| 221 |
+
#create img with solid lines
|
| 222 |
+
def allpreSteps(imgOriginal,num=0):
    """Run the full preprocessing pipeline to obtain a solid-lines image.

    num != 1: simple path — returns a single-channel image with dashed lines
              removed and beam lines reconnected.
    num == 1: dark-image path — returns a pair (green2, inverted legacy mask).
    NOTE(review): the two paths return different shapes (1 vs 2 values);
    callers must match on num.
    """
    noCircles=detectCircles(imgOriginal)
    imgold=preprocessold(imgOriginal,0)
    if num!=1:
        imgnoSmall=detectSmallCircles(noCircles )
        img4,imgBW,max,min=DashedPreprocessing(imgOriginal,imgnoSmall)
        green=removeDashedLines(img4,imgBW,max,min)
        imgCopy,smalldashesOut=removeSmallDashes(imgOriginal,green)
        noSmallDashes=removeDashedLinesSmall(img4, smalldashesOut ,max,min)
        green2=ConnectBeamLines(noSmallDashes,0)

        return green2
    else:
        # imgDark with no dashed lines or small dashes.
        imgDark1=getImgDark(noCircles)

        img4,imgBW,max,min=DashedPreprocessing(imgOriginal,noCircles)
        # Do preprocessing on the normal image, draw removals on the dark image.
        imgDarkNoDashedLines=removeDashedLines(img4,imgDark1,max,min)
        imgBW0 = cv2.cvtColor(imgBW, cv2.COLOR_BGR2GRAY)
        imgDarkNoDashedLines = cv2.cvtColor(imgDarkNoDashedLines, cv2.COLOR_GRAY2BGR)
        imgDarkNoDashedLines,smalldashesOut0=removeSmallDashes(imgDarkNoDashedLines,imgBW0)

        # Legacy mask with small dashes removed — for ORing purposes.
        imgoldG = cv2.cvtColor(imgold, cv2.COLOR_GRAY2BGR)
        imgoldnoDashes,_=removeSmallDashes(cv2.bitwise_not(imgoldG),imgBW0,1)

        # Reconnect leader lines after removing dashed lines and circles.
        Nodashedlines=removeDashedLines(img4,imgBW,max,min)
        imgCopy,smalldashesOut=removeSmallDashes(imgOriginal,Nodashedlines)

        noSmallDashes=removeDashedLinesSmall(img4, smalldashesOut ,max,min)

        # Merge dark-image and normal-image results, then denoise.
        green2=cv2.bitwise_or(cv2.bitwise_not(imgDarkNoDashedLines),cv2.bitwise_not(noSmallDashes) )
        green2=cv2.bitwise_not(green2)
        green2=cv2.medianBlur(green2,5)
        imgoldnoDashes=cv2.medianBlur(imgoldnoDashes,5)

        return green2 , cv2.bitwise_not(imgoldnoDashes)
|
| 265 |
+
|
| 266 |
+
|
| 267 |
+
def texts_from_pdf(input_pdf):
    """Return the word list of the FIRST page of the PDF.

    input_pdf: raw PDF bytes.
    Returns fitz "words" tuples: (x0, y0, x1, y1, word, block, line, word_no).
    NOTE(review): the `return` sits inside the page loop, so despite iterating
    page_count only page 0 is ever processed — confirm this is intended.
    """
    pdf_document = fitz.open('pdf',input_pdf)
    for page_num in range(pdf_document.page_count):
        page = pdf_document[page_num]
        text_instances = page.get_text("words")
        # NOTE(review): no redaction annotations are added before this call,
        # so it appears to be a no-op here — confirm.
        page.apply_redactions()
        return text_instances
|
| 274 |
+
|
| 275 |
+
|
| 276 |
+
def textDictionaryBlocks(img,dataDoc):
    """Extract every text span from the PDF into a DataFrame, scaled to image
    coordinates.

    img: rendered page image; only its width is used for the scale factor.
    dataDoc: raw PDF bytes.
    Returns a DataFrame with columns
    ['xmin','ymin','xmax','ymax','text','FitzPointP0','FitzPointP1'].
    """
    doc = fitz.open('pdf',dataDoc)
    page=doc[0]
    if page.rotation!=0:
        page.set_rotation(0)
    pix = page.get_pixmap() # render page to an image
    # Scale factor between the PDF pixmap width and the working image width.
    # NOTE(review): derived from page 0 but applied to every page — confirm.
    ratio = pix.width/ img.shape[1]

    # Collect the text blocks of every page.
    block_dict = {}
    page_num = 1
    for page in doc:
        file_dict = page.get_text('dict')
        block_dict[page_num] = file_dict['blocks']
        page_num += 1

    # (The original built an empty placeholder DataFrame here and never used
    # it; that dead code has been removed.)
    rows = []
    for page_num, blocks in block_dict.items():
        for block in blocks:
            if block['type'] == 0:  # type 0 = text block
                for line in block['lines']:
                    for span in line['spans']:
                        xmin, ymin, xmax, ymax = list(span['bbox'])
                        # Normalize accented characters to ASCII.
                        text = unidecode(span['text'])
                        XminRatio=xmin/ratio
                        YminRatio=ymin/ratio
                        p1=fitz.Point((XminRatio),(YminRatio))
                        # `page` is the last page from the loop above; the
                        # derotation matrix is identity when rotation == 0.
                        if page.rotation==0:
                            p1=p1*page.derotation_matrix
                        # Skip whitespace-only spans.
                        if text.replace(" ","") != "":
                            rows.append((XminRatio,YminRatio, xmax /ratio, ymax/ratio, text,p1[0],p1[1]))
    span_df = pd.DataFrame(rows, columns=['xmin','ymin','xmax','ymax', 'text','FitzPointP0','FitzPointP1'])
    return span_df
|
| 309 |
+
|
| 310 |
+
def nearestText(a,b,span_df):
    """Collect every span within 250 px of point (a, b).

    Returns (points, texts): the matching (x, y) tuples and their text values.
    When nothing is nearby, points is the string 'none' (legacy sentinel
    checked by callers).
    """
    nearby_points = []
    nearby_texts = []
    for idx, _ in span_df.iterrows():
        px = span_df['FitzPointP0'].loc[idx]
        py = span_df['FitzPointP1'].loc[idx]
        if euclidian_distance((a, b), (px, py)) < 250:
            nearby_points.append((px, py))
            nearby_texts.append(str(span_df['text'].loc[idx]))
    if not nearby_points:
        nearby_points = 'none'
    return nearby_points, nearby_texts
|
| 321 |
+
|
| 322 |
+
def nearestTextPCCOOR(a,b , pc_coor):
    """Return the pc_coor point nearest to (a, b), or 'none' when the nearest
    one is 400 px or farther away (legacy sentinel checked by callers).
    """
    closest = min(pc_coor, key=lambda pt: euclidian_distance((a, b), pt))
    if euclidian_distance(closest, (a, b)) < 400:  # distance threshold
        return closest
    return 'none'
|
| 329 |
+
|
| 330 |
+
def ChangeBrightness(img,k):
    """Gamma-style brightness adjustment: k > 1 darkens, k < 1 lightens.

    img: uint8 array (any shape). Returns a uint8 array of the same shape.
    """
    normalized = img / 255
    scaled = 255 * normalized ** k
    return scaled.astype('uint8')
|
| 334 |
+
|
| 335 |
+
def getOutlinesDotIN(img):
    """Draw the outlines of large dark regions (area > 2000) onto a black mask,
    after filling small circles so dots do not break the contours.
    """
    cc=detectSmallCircles(img)
    cc = cv2.cvtColor(cc, cv2.COLOR_BGR2GRAY)
    kernel=np.ones((3,3),np.uint8)
    er1=cv2.dilate(cc,kernel, iterations=2) #thinning
    # Strongly darken (gamma 10) so Otsu separates linework cleanly.
    blurG = ChangeBrightness(er1,10)
    ret3, thresh = cv2.threshold(blurG, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    threshCnt, threshHier2 = cv2.findContours(thresh, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
    outlinesDotIN = np.zeros(img.shape[:2], dtype="uint8")
    for cnt in threshCnt:
        area1 = cv2.contourArea(cnt)
        # Keep only sizeable shapes; small specks are ignored.
        if (area1 > 2000 ):
            cv2.drawContours(outlinesDotIN,[cnt],0,(255,255,255),2)
    return outlinesDotIN
|
| 349 |
+
|
| 350 |
+
def connectsmallDot(blackwithNoDot):
    """Bridge small gaps left by removed dots by drawing white Hough segments
    over them, in place. Returns the same (modified) image.
    """
    imgLines = cv2.HoughLinesP(blackwithNoDot, 0.05, np.pi/180, 8,
                               minLineLength=30, maxLineGap=30)
    # BUGFIX: HoughLinesP returns None when nothing is found; the original
    # crashed with a TypeError on len(None).
    if imgLines is not None:
        for line in imgLines:
            for x1, y1, x2, y2 in line:
                cv2.line(blackwithNoDot, (x1, y1), (x2, y2), (255, 255, 255), 2)
    return blackwithNoDot
|
| 356 |
+
|
| 357 |
+
def getDiff(img,green22,imgoldnodashes):
    """Difference between the legacy mask and the solid-line image, yielding the
    leader lines removed by the pipeline.

    img is unused (kept for interface compatibility).
    """
    struct_el = np.ones((3, 3), np.uint8)
    legacy = cv2.medianBlur(imgoldnodashes, 7)
    # Thicken the solid-line image so small offsets don't register as diffs.
    solid = cv2.erode(green22, struct_el, iterations=6)
    solid = cv2.dilate(solid, struct_el, iterations=3)
    legacy = cv2.erode(legacy, struct_el, iterations=2)
    legacy = cv2.dilate(legacy, struct_el, iterations=7)
    diff = ImageChops.difference(Image.fromarray(legacy),
                                 Image.fromarray(cv2.bitwise_not(solid)))
    diff = np.array(diff)
    diff = cv2.erode(diff, struct_el, iterations=4)
    return cv2.dilate(diff, struct_el, iterations=11)
|
| 370 |
+
|
| 371 |
+
|
| 372 |
+
#OLD method (White shapes)
|
| 373 |
+
def preprocessold(img,number):
    """Legacy preprocessing: darken, blur, dilate, then inverse-threshold.

    number == 0 uses Otsu's automatic threshold; any other value uses a fixed
    cutoff of 220. Returns a single-channel inverted binary image.
    """
    darkened = ChangeBrightness(img, 8)
    smoothed = cv2.GaussianBlur(darkened, (3, 3), 0)
    gray = cv2.cvtColor(smoothed, cv2.COLOR_BGR2GRAY)
    struct_el = np.ones((3, 3), np.uint8)
    thickened = cv2.dilate(gray, struct_el, iterations=2)  # thins dark strokes
    if number == 0:
        _, binary = cv2.threshold(thickened, 0, 255,
                                  cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    else:
        _, binary = cv2.threshold(thickened, 220, 255, cv2.THRESH_BINARY_INV)
    return cv2.medianBlur(binary, 5)
|
| 386 |
+
|
| 387 |
+
#preprocessing for shapes with arrows (attach them to shape )
|
| 388 |
+
def getTextfromImg(grayimgtextdilated, img,dataDoc):
    """Associate each large contour in grayimgtextdilated with the PDF texts
    near its approximated corner points.

    Returns (ArrthreshCnt, allshapesExtremes_Text):
      ArrthreshCnt - each contour as a plain list of [x, y] points,
      allshapesExtremes_Text - [contour, [texts]] pairs.
    """
    span_df=textDictionaryBlocks(img,dataDoc)
    threshCnt2, threshHier2 = cv2.findContours(grayimgtextdilated, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
    allshapesExtremes_Text=[]
    listallTexts=textListsAlltexts(dataDoc,span_df)[1]
    for cnt in threshCnt2:
        texts=[]
        area1 = cv2.contourArea(cnt)
        # Only sizeable shapes are considered.
        if (area1 >2000 ):
            x, y , width, height = cv2.boundingRect(cnt)
            perimeter=cv2.arcLength(cnt,True)
            # Simplify the contour to its corner ("extreme") points.
            approx = cv2.approxPolyDP(cnt, 0.01* perimeter, True)
            for extremePoint in approx:
                allnearbyPoints,alltxts=nearestText(int(extremePoint[0][0]),int(extremePoint[0][1]),span_df)
                # 'none' is the legacy no-match sentinel from nearestText.
                if(allnearbyPoints!='none'):
                    for nearbypoint in allnearbyPoints:
                        # Match nearby points back to their text by exact coords.
                        for textTuple in listallTexts:
                            if nearbypoint[0]==textTuple[0] and nearbypoint[1]==textTuple[1]:
                                if textTuple[2] not in texts:
                                    texts.append(textTuple[2])
            allshapesExtremes_Text.append([cnt,texts])
    # Flatten each contour to plain [x, y] lists for downstream use.
    ArrthreshCnt=[]
    for th in range(len(allshapesExtremes_Text)):
        eachcnt=[]
        for point in allshapesExtremes_Text[th][0]:
            eachcnt.append(list(point[0]))
        ArrthreshCnt.append(eachcnt)
    return ArrthreshCnt , allshapesExtremes_Text
|
| 417 |
+
|
| 418 |
+
|
| 419 |
+
def mergingPreprocessing(img,number,green2,layeredflag,BlackmaskDetected1=0):
    """Legacy-threshold the image and find its external contours.

    NOTE(review): green2, layeredflag and BlackmaskDetected1 are currently
    unused — the AND-with-solid-lines path is disabled below. img1 is
    returned twice to keep the 3-tuple shape callers expect.
    """
    img1=preprocessold(img,number)
    # (disabled) AND with the solid-line image to drop outer lines, plus an
    # optional layer mask:
    # green2Gray = cv2.cvtColor(green2, cv2.COLOR_BGR2GRAY)
    # anding=cv2.bitwise_and(green2Gray,img1)
    # ret3, thresh2 = cv2.threshold(anding, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # if layeredflag.startswith('layer'):
    #     thresh2=cv2.bitwise_and(thresh2,thresh2,mask=BlackmaskDetected1)
    threshcontours, threshHier = cv2.findContours(img1, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)

    return img1 , threshcontours , img1
|
| 431 |
+
|
| 432 |
+
#anding of old method output with solid lines img
|
| 433 |
+
def preprocess(green22,imgoldnodashes,dataDoc,imgtransparent1,img,number,green2,layeredflag,BlackmaskDetected1=0):
    """Main shape-extraction pass: find pile-cap contours, measure them, link
    them to nearby PDF labels, and draw open shapes on the overlay.

    Returns (finalcntsP, finalcntsA, perimeters, shapetxts, imgtransparent1,
    ArrthreshCnt, texts, iddk).
    NOTE(review): reconstructed from a mangled source listing — the nesting of
    the 'GB' branch below should be verified against the original notebook.
    """
    kernel0=np.ones((2,2),np.uint8)
    # First preprocessing (old method - black image with white shapes).
    img1,threshcontours,thresh2=mergingPreprocessing(img,number,green2,layeredflag,BlackmaskDetected1)
    diff =getDiff(img,green22,imgoldnodashes) # diff (img with tof leaders)
    iddk=cv2.bitwise_or(thresh2,diff) # add it to the preprocessed image
    iddk=cv2.medianBlur(iddk,5)
    iddk=cv2.dilate(iddk,kernel0, iterations=2)
    # Relations between each contour and its nearby text.
    ArrthreshCnt , texts=getTextfromImg(iddk,img,dataDoc)
    outlinesDotIN=getOutlinesDotIN(img)
    pc_coor,listall=textLists(img,dataDoc)
    finalcntsP=[]
    finalcntsA=[]
    perimeters=[]
    openClosedFlag=0
    threshCnt, threshHier2 = cv2.findContours(img1, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
    # Outline mask of every large shape, used later when measuring 'GB' shapes.
    outlines = np.zeros(img.shape[:2], dtype="uint8")
    for cnt in threshCnt:
        area1 = cv2.contourArea(cnt)
        if (area1 > 2000 ):
            cv2.drawContours(outlines,[cnt],0,(255,255,255),2)
    perimeter=0
    shapetxts=[]
    for cnt in threshcontours:
        BlackmaskP = np.zeros(img.shape[:2], dtype="uint8")
        BlackmaskA=np.zeros(img.shape[:2], dtype="uint8")
        area1 = cv2.contourArea(cnt)
        if (area1 > 2000 ):
            x, y , width, height = cv2.boundingRect(cnt)
            # Get contours - areas and perimeters.
            kernel=np.ones((2,2),np.uint8)

            # Dilated mask -> perimeter contour set.
            cv2.drawContours(BlackmaskP,[cnt],0,(255,255,255), 4)
            BlackmaskP=cv2.dilate(BlackmaskP,kernel, iterations=2)
            contoursP, hier1 = cv2.findContours(BlackmaskP, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
            finalcntsP.append(contoursP)

            # Eroded mask -> area contour set.
            cv2.drawContours(BlackmaskA,[cnt],0,(255,255,255), 3)
            BlackmaskA=cv2.erode(BlackmaskA,kernel, iterations=1)
            contoursA, hier1 = cv2.findContours(BlackmaskA, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
            finalcntsA.append(contoursA)
            # Label the shape with the nearest pile-cap text, if any.
            if pc_coor:
                textPoint=nearestTextPCCOOR(int(x+(width/2)), int(y+(height/2)) , pc_coor)
                txt=''
                if(textPoint!='none'):
                    for textTuple in listall:
                        if textPoint[0]==textTuple[0] and textPoint[1]==textTuple[1]:
                            txt=textTuple[2]
                    if "GB" in txt or "RC" in txt or "PC" in txt:
                        shapetxts.append(txt)
                elif textPoint=='none':
                    shapetxts.append('none')

            # Grade-beam handling: measure open/closed sub-contours inside the
            # shape mask and record their perimeters.
            if 'GB' in shapetxts:
                xcBlk, ycBlk , width, height = cv2.boundingRect(contoursP[0])
                xx=cv2.bitwise_and(outlines,outlines,mask=BlackmaskP)
                xx = cv2.threshold(xx, 250, 255, cv2.THRESH_BINARY)[1]
                cntsx,hierx= cv2.findContours(xx, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
                if len(cntsx)>0:
                    hierx=hierx[0]
                    xx=cv2.bitwise_and(outlinesDotIN,outlinesDotIN,mask=xx)
                    cntsx,hierx= cv2.findContours(xx, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
                    if len(cntsx)>0:
                        xx=connectsmallDot(xx)
                        cntsx,hierx= cv2.findContours(xx, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
                        if len(cntsx)>0:
                            hierx=hierx[0]
                            for comp in zip(cntsx,hierx):
                                c=comp[0]
                                h=comp[1]
                                xc, yc , width, height = cv2.boundingRect(c)
                                perimeter=cv2.arcLength(c,True)
                                shape=[]
                                approx = cv2.approxPolyDP(c, 0.003* perimeter, True)
                                # h = [next, prev, first_child, parent]:
                                # no child and no parent => an open polyline.
                                if h[2]<0 and h[3] <0:
                                    perimeter1 = cv2.arcLength(approx, True)
                                    # Closed arc length double-counts an open
                                    # polyline, hence the halving.
                                    perimeter=perimeter1/2
                                    openClosedFlag='open'
                                    imgtransparent1=cv2.polylines(imgtransparent1, [approx], False, (0,255,0), thickness=4,lineType=8)
                                    perimeters.append([xc, yc ,xcBlk, ycBlk ,perimeter ,openClosedFlag , approx])
                                else:
                                    if h[2] >0:
                                        openClosedFlag='closed'

    return tuple(finalcntsP),tuple(finalcntsA), perimeters , shapetxts , imgtransparent1 ,ArrthreshCnt , texts, iddk
|
| 520 |
+
|
| 521 |
+
|
| 522 |
+
"""# ROI (levels)
|
| 523 |
+
## Detect regions with specific color and mask them
|
| 524 |
+
"""
|
| 525 |
+
def hexRGB(color):
    """Convert a '#RRGGBB' hex string to an np.array([R, G, B]) of ints."""
    hex_digits = color.lstrip('#')
    channels = tuple(int(hex_digits[pos:pos + 2], 16) for pos in (0, 2, 4))
    return np.array(channels)
|
| 530 |
+
|
| 531 |
+
def DetectColor(img,color=0):
    """Isolate regions of `img` (BGR) whose hue is close to `color`.

    Args:
        img: BGR image (numpy array).
        color: sequence of 3 channel values, indexed as color[0..2].
            NOTE(review): the default `0` would raise TypeError at color[2];
            all visible callers pass a 3-element color — confirm the default
            is never used.

    Returns:
        mask: uint8 binary mask of the matched hue range (dilated/eroded).
        detectedColors: BGR image of only the matched pixels, median-blurred.
        color: the input color, passed through unchanged.
    """

    imgCopy=img.copy()
    imgCopy=cv2.cvtColor(imgCopy,cv2.COLOR_BGR2HSV)
    tol=5 #tolerance around the target hue
    # color=hexRGB(color)
    # Convert the single target color to HSV to get its hue component.
    # color is indexed [2],[1],[0] so the 1x1 pixel is built in BGR order.
    h,s,v = cv2.cvtColor(np.uint8([[[color[2],color[1],color[0]]]]),cv2.COLOR_BGR2HSV)[0][0]

    # Hue window of +/- tol; saturation/value floors of 100 exclude washed-out pixels.
    # NOTE(review): h is uint8, so h - tol can wrap around for hues < tol (reds) — confirm intended.
    lower =np.array( [h- tol, 100, 100 ], dtype='uint8')
    upper = np.array( [h + tol, 255, 255],dtype='uint8')

    mask = cv2.inRange(imgCopy, lower , upper)

    detectedColors = cv2.bitwise_and(imgCopy,imgCopy, mask= mask) # Bitwise-AND mask and original image

    # Morphological close-like cleanup: dilate 5x then erode 4x (net growth of 1 step)
    # to bridge small gaps in the detected regions.
    kernel=np.ones((3,3),np.uint8)
    mask=cv2.dilate(mask,kernel, iterations=5)
    mask=cv2.erode(mask,kernel, iterations=4)

    detectedColors=cv2.dilate(detectedColors,kernel, iterations=5)
    detectedColors=cv2.erode(detectedColors,kernel, iterations=4)

    detectedColors=cv2.cvtColor(detectedColors,cv2.COLOR_HSV2BGR)
    detectedColors=cv2.medianBlur(detectedColors,7)
    # Side effect: writes a debug snapshot to the working directory on every call.
    cv2.imwrite('det.png',detectedColors)
    return mask, detectedColors, color
|
| 557 |
+
|
| 558 |
+
def getinnerColor(BlackmaskDetected,img,detectedColors,finalColorArray,ratioarea,ratioperim,eachcolor):
    """Whiten every nested colored region found inside `detectedColors`.

    Iterates over all known colors; each sufficiently large contour of a color
    is filled white on a working copy, so that only the outermost level
    remains distinguishable.

    Args:
        BlackmaskDetected: mask mutated in place (inner regions blacked out).
        img: original BGR image.
        detectedColors: image containing the currently detected color regions.
        finalColorArray: list of candidate colors to strip out.
        ratioarea, ratioperim: scale factors (NOTE(review): unused here).
        eachcolor: NOTE(review): immediately shadowed by the loop variable
            below and therefore never used — confirm intentional.

    Returns:
        (xored, invertedmask, BlackmaskDetected): whitened detection image,
        original pixels restricted to the accumulated mask, and the mutated
        BlackmaskDetected.
    """

    # Counts colors that produced an all-black (empty) mask; informational only.
    countBlackMasks=0
    xored=detectedColors

    invertedmask=detectedColors

    imgc=img.copy()
    imgNewCopy=img.copy()
    Blackmask = np.zeros(img.shape[:2], dtype="uint8")
    for eachcolor in finalColorArray:
        # Only the binary mask (index 0) of DetectColor's result is needed.
        masked=DetectColor(detectedColors,eachcolor)[0]
        pil_image=Image.fromarray(masked)
        extrema = pil_image.convert("L").getextrema()
        if extrema != (0, 0): # if image is not black --> has a colored mask within
            cc=detectedColors.copy()
            ColoredContour, Coloredhierarchy = cv2.findContours(masked, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            for cnt in ColoredContour:
                area1 = cv2.contourArea(cnt)
                if (area1 > 1000 ):  # ignore small speckle contours

                    x, y , width, height = cv2.boundingRect(cnt)
                    # Draw the contour outline (thickness 3) then fill it (-1)
                    # on both the working copy and the accumulated Blackmask.
                    cv2.drawContours(cc,[cnt],0,(255,255,255), 3)
                    cv2.drawContours(Blackmask,[cnt] ,0, (255,255,255), 3)
                    cv2.drawContours(cc,[cnt],0,(255,255,255), -1) # (x-5,y-5 ), (x+width, y+height),
                    cv2.drawContours(Blackmask,[cnt] ,0, (255,255,255), -1) #,(x,y ), (x+width, y+height)
                    # Black out the same region on the caller's mask (in-place mutation).
                    cv2.drawContours(BlackmaskDetected,[cnt] ,0, (0,0,0), -1) #,(x,y ), (x+width, y+height)
                    invertedmask = cv2.bitwise_and(imgc,imgc, mask= Blackmask)
                    xored=cc
            # Feed the whitened image into the next color iteration.
            detectedColors=xored
        else: #black mask , no other levels are found # to check law count == number of colors in array yb2a no more levels and break
            countBlackMasks+=1
    return xored,invertedmask , BlackmaskDetected
|
| 591 |
+
|
| 592 |
+
def allLevelsofColor(BlackmaskDetected,img,levelonly, invertedmask,color,finalColorArray):
    """Extract the current color's level image, removing nested same-color and
    other-color regions.

    If the target `color` is found again inside `invertedmask` (a nested level
    of the same color), the nested region is isolated through a mask; then any
    other color found inside the first-level image is painted white so that
    only this color's level survives.

    Args:
        BlackmaskDetected: mask mutated in place and returned.
        img: original BGR image (only its shape is used here).
        levelonly: detection image for the current level.
        invertedmask: original pixels restricted to the detected regions.
        color: target color (3-sequence).
        finalColorArray: all candidate colors.

    Returns:
        (firstLevel1, BlackmaskDetected)
    """

    # cc=levelonly.copy()
    firstLevel=levelonly
    firstLevel1=levelonly
    print('in')
    Blackmask = np.zeros(img.shape[:2], dtype="uint8")

    masked,maskedColor,rgbcolor=DetectColor(invertedmask,color)
    # Normalize both colors to plain 3-element lists for comparison.
    color=[color[0],color[1],color[2]]
    rgbcolor=[rgbcolor[0],rgbcolor[1],rgbcolor[2]]
    print(rgbcolor,color)
    pil_image=Image.fromarray(masked)
    extrema = pil_image.convert("L").getextrema()
    if extrema != (0, 0): # if image is not black --> has a colored mask within

        if rgbcolor==color: #found another level inside with the same color
            print('kkkkkkkk')
            ColoredContour, Coloredhierarchy = cv2.findContours(masked, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            Coloredhierarchy=Coloredhierarchy[0]
            for component in zip(ColoredContour,Coloredhierarchy):
                cnt=component[0]
                hier=component[1]
                area1 = cv2.contourArea(cnt)
                if (area1 > 1000 ):
                    # hier[3] > -1: contour has a parent, i.e. it is nested.
                    if hier[3]> -1:
                        cv2.drawContours(Blackmask,[cnt],0,(255,255,255), -1)
                        # Thick black border separates the nested level from its parent.
                        cv2.drawContours(Blackmask,[cnt],0,(0,0,0), 20)
                        cv2.drawContours(BlackmaskDetected,[cnt],0,(255,255,255), -1)

            firstLevel=cv2.bitwise_and(invertedmask,invertedmask,mask=Blackmask)
            ####remove black pixels and let them be all white
            # get (i, j) positions of all RGB pixels that are black (i.e. [0, 0, 0])
            firstLevel[np.all(firstLevel == (0,0,0), axis=-1)] = (255, 255, 255)
            firstLevel1=cv2.bitwise_and(levelonly,firstLevel)

        # Paint any OTHER color found inside this level white, so only the
        # target color's geometry remains.
        for othercolor in finalColorArray:
            # othercolor2=hexRGB(othercolor)
            othercolor2=[othercolor[0],othercolor[1],othercolor[2]]

            if othercolor2!=color:

                masked0=DetectColor(firstLevel,othercolor)[0]
                pil_image0=Image.fromarray(masked0)
                extrema0 = pil_image0.convert("L").getextrema()
                # BUGFIX: the original tested the stale outer `extrema` here
                # instead of `extrema0`, so empty masks were still processed.
                if extrema0 != (0, 0): # if image is not black --> has a colored mask within
                    ColoredContour0, Coloredhierarchy0 = cv2.findContours(masked0, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
                    for cnt in ColoredContour0:
                        area1 = cv2.contourArea(cnt)
                        if (area1 > 1000 ):
                            cv2.drawContours(firstLevel1,[cnt],0,(255,255,255), -1)
                            cv2.drawContours(firstLevel1,[cnt],0,(255,255,255), 10)
                            cv2.drawContours(BlackmaskDetected,[cnt],0,(0,0,0), -1)
                            # cv2.drawContours(Blackmask,[cnt],0,(255,255,255), -1)
                            # cv2.drawContours(Blackmask,[cnt],0,(255,255,255), 10)
    # cv2_imshow(firstLevel1)
    # cv2_imshow(Blackmask)
    return firstLevel1, BlackmaskDetected
|
| 650 |
+
|
| 651 |
+
def getColoredContour(mask,img,finalColorArray,ratioarea,ratioperim,eachcolor):
    """Build a level image for `eachcolor` from its binary `mask`.

    Collects nested (child) contours of the mask into a filled Blackmask,
    then delegates to getinnerColor / allLevelsofColor to strip nested and
    foreign-color regions.

    Returns:
        (firstLevel1, invertedmask, BlackmaskDetected1)
    """
    ColoredContour, Coloredhierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    Coloredhierarchy=Coloredhierarchy[0]

    imgc= img.copy()

    detectedColors=np.zeros(img.shape[:2], dtype="uint8")
    Blackmask = np.zeros(img.shape[:2], dtype="uint8")

    for component in zip( ColoredContour, Coloredhierarchy):
        cnt=component[0]
        hier=component[1]
        area1 = cv2.contourArea(cnt)
        if (area1 > 3000 ):
            # cv2.drawContours(imgNewCopy, [cnt], 0,(255,255,255), 20) #(x+20,y+20 ), (x+width-20, y+height-20),
            # hier[3] > -1: only contours that have a parent (nested regions).
            if hier[3] >-1:

                x, y , width, height = cv2.boundingRect(cnt)
                cv2.drawContours(Blackmask, [cnt], 0,(255,255,255), -1) #(x+20,y+20 ), (x+width-20, y+height-20),
                cv2.drawContours(Blackmask, [cnt], 0,(0,0,0), 10) #(x+20,y+20 ), (x+width-20, y+height-20),

        # NOTE(review): re-evaluated on every contour iteration; the `break`
        # below bails out as soon as the accumulated mask selects nothing.
        detectedColors = cv2.bitwise_and(imgc,imgc, mask= Blackmask)
        pil_image=Image.fromarray(detectedColors)
        extrema = pil_image.convert("L").getextrema()
        if extrema == (0, 0) :#and extremaB==(0,0): # if image is not black --> has a colored mask within
            break

    # Whiten inner levels, then isolate this color's level across all levels.
    levelOnly,invertedmask,BlackmaskDetected=getinnerColor(Blackmask,img,detectedColors,finalColorArray,ratioarea,ratioperim,eachcolor) #mask inner levels with white
    firstLevel1, BlackmaskDetected1= allLevelsofColor(BlackmaskDetected,img,levelOnly, invertedmask,eachcolor,finalColorArray)
    return firstLevel1,invertedmask, BlackmaskDetected1
|
| 681 |
+
|
| 682 |
+
"""# contours"""
|
| 683 |
+
|
| 684 |
+
def findContoursFullImage(green22,imgoldnodashes,dataDoc,imgtransparent1,green2,img,number,finalColorArray,ratioarea,ratioperim,color=[0,0,0]):
    """Dispatch contour extraction for either the full image or one color layer.

    number == 0: run `preprocess` on the whole image (8-tuple returned).
    otherwise: detect `color` in `img`; if present, process only that layer
    (10-tuple returned, including the color and its inverted mask); if the
    color is absent, fall back to full-image preprocessing but still return
    the 10-tuple shape the caller expects.

    NOTE: `color=[0,0,0]` is a mutable default but is never mutated here.
    """
    if number == 0:
        contourssP,contourssA,perimeters,alltxts,imgtransparent1,arrthresh,allshapesExtremes_Text,iddk=preprocess(green22,imgoldnodashes,dataDoc,imgtransparent1,img,number,green2, 'nolayer')
        return contourssP,contourssA, perimeters ,alltxts , imgtransparent1,arrthresh,allshapesExtremes_Text , iddk
    else:
        mask, detectedColors, rgbcolor =DetectColor(img,color)
        pil_image=Image.fromarray(mask)
        extrema = pil_image.convert("L").getextrema()
        if extrema != (0, 0): # if image is not black --> has a colored mask within
            coloredregions,invertedmask,BlackmaskDetected1=getColoredContour(mask,img,finalColorArray,ratioarea,ratioperim,color)

            contourssP,contourssA,perimeters,alltxts,imgtransparent1,arrthresh,allshapesExtremes_Text,iddk=preprocess(green22,imgoldnodashes,dataDoc,imgtransparent1,coloredregions,number,green2,'layer',BlackmaskDetected1)
            return contourssP,contourssA ,rgbcolor ,invertedmask , perimeters , alltxts , imgtransparent1 ,arrthresh,allshapesExtremes_Text , iddk
        else:
            # BUGFIX: preprocess() returns 8 values, but the original unpacked
            # them into 10 names, raising ValueError whenever the requested
            # color was absent from the image. Unpack 8, then return the
            # 10-tuple (color + mask prepended into the expected positions).
            contourssP,contourssA , perimeters , alltxts , imgtransparent1 ,arrthresh,allshapesExtremes_Text , iddk=preprocess(green22,imgoldnodashes,dataDoc,imgtransparent1,img,number,green2,'nolayer')
            return contourssP,contourssA,color ,mask , perimeters , alltxts,imgtransparent1,arrthresh,allshapesExtremes_Text , iddk
|
| 700 |
+
|
| 701 |
+
#Straighten tilted shapes
|
| 702 |
+
def StraightenImage(contour,imgArea):
|
| 703 |
+
rect = cv2.minAreaRect(contour)
|
| 704 |
+
(center, (width, height), angleR) = cv2.minAreaRect(contour)
|
| 705 |
+
box = cv2.boxPoints(rect)
|
| 706 |
+
box = np.int0(box)
|
| 707 |
+
# get width and height of the detected rectangle
|
| 708 |
+
width = int(rect[1][0])
|
| 709 |
+
height = int(rect[1][1])
|
| 710 |
+
return angleR,width,height
|
| 711 |
+
|
| 712 |
+
#get all areas and perimeter present
|
| 713 |
+
def getAreasPerimeter(img,ratioarea,ratioperim,contourssP,contourssA):
|
| 714 |
+
appended=[]
|
| 715 |
+
for contour in range(len(contourssP)):
|
| 716 |
+
area1 = cv2.contourArea(contourssP[contour][0])
|
| 717 |
+
if (area1 >2000 ):
|
| 718 |
+
perimeter= cv2.arcLength(contourssP[contour][0],True)
|
| 719 |
+
approx = cv2.approxPolyDP(contourssP[contour][0], 0.002* perimeter, True)
|
| 720 |
+
perimeter1 = cv2.arcLength(approx, True)
|
| 721 |
+
approx = cv2.approxPolyDP(contourssA[contour][0], 0.0002* perimeter, True)
|
| 722 |
+
area1 = cv2.contourArea(approx)
|
| 723 |
+
x, y , width, height = cv2.boundingRect(contourssP[contour][0])
|
| 724 |
+
angleR,widthR ,heightR= StraightenImage(contourssP[contour][0],img)
|
| 725 |
+
|
| 726 |
+
if (angleR != 90.0 and angleR != -90.0 and angleR != 0.0 and angleR != -0.0 ): #inclined b ay degree
|
| 727 |
+
width=widthR
|
| 728 |
+
height=heightR
|
| 729 |
+
if (area1 > 2000 ): #check perimeter kman fl condition -- 2800
|
| 730 |
+
if ratioarea!=0 and ratioperim!=0:
|
| 731 |
+
areaa=round(area1* ratioarea , 2) # true value of area of any shape/ area px value of same shape
|
| 732 |
+
appended.append([areaa,width,height])
|
| 733 |
+
return appended
|
| 734 |
+
#fill dictionary with areas and perimeters and occurences
|
| 735 |
+
def FillDictionary(SimilarAreaDictionary,img,ratioarea,ratioperim,contourssP,contourssA,rgbcolor=[0,0,0],color=[0,0,0]):
|
| 736 |
+
#fills dictionary with key areas and number of occurences
|
| 737 |
+
areas_Perimeters=sorted(getAreasPerimeter(img,ratioarea,ratioperim,contourssP,contourssA))
|
| 738 |
+
|
| 739 |
+
indices=[]
|
| 740 |
+
colorRanges=[[255,0,0],[0,0,255],[0,255,255],[0,64,0],[255,204,0],[255,128,64],[255,0,128],[255,128,192],[128,128,255],[128,64,0],[0,255,0],[179,106,179],[115,52,179],[0,128,192],[128,0,128],[128,0,0],[0,128,255],[255,182,128],[255,0,255],[0,0,128],[0,128,64],[255,255,0],[128,0,64],[203,203,106],[128,255,166],[255,128,0],[255,98,98],[90,105,138],[114,10,72],[36,82,78],[225,105,29],[108,62,40],[11,35,75],[42,176,203],[255,153,153],[129,74,138],[99,123,137],[159,179,30],[255,0,0],[0,0,255],[0,255,255],[0,64,0],[255,204,0],[255,128,64],[255,0,128],[255,128,192],[128,128,255],[128,64,0],[0,255,0],[179,106,179],[115,52,179],[0,128,192],[128,0,128],[128,0,0],[0,128,255],[255,182,128],[255,0,255],[0,0,128],[0,128,64],[255,255,0],[128,0,64],[203,203,106],[128,255,166],[255,128,0],[255,98,98],[90,105,138],[114,10,72],[36,82,78],[225,105,29],[108,62,40],[11,35,75],[42,176,203],[255,153,153],[129,74,138],[99,123,137],[159,179,30]]
|
| 741 |
+
colorsUsed=[]
|
| 742 |
+
for i in range(len(areas_Perimeters)):
|
| 743 |
+
|
| 744 |
+
# colorRGB=hexRGB(color)
|
| 745 |
+
item1 = areas_Perimeters[i][0]
|
| 746 |
+
width1 = areas_Perimeters[i][1]
|
| 747 |
+
height1 = areas_Perimeters[i][2]
|
| 748 |
+
widthMin= width1-15
|
| 749 |
+
widthMax= width1+15
|
| 750 |
+
heightMin=height1-15
|
| 751 |
+
heightMax= height1+15
|
| 752 |
+
areaPerimeterMin= round(item1,1) - 0.4
|
| 753 |
+
areaPerimeterMax= round(item1,1) + 0.4
|
| 754 |
+
# print (areaMin, areaMax)
|
| 755 |
+
if color != [0,0,0]: #colored images
|
| 756 |
+
mydata=[[],[rgbcolor[0],rgbcolor[1],rgbcolor[2] ],round(item1,1),width1,height1,1, 0,0,0,0,0,0,'',0,0,0]
|
| 757 |
+
else:
|
| 758 |
+
mydata=[[],' ', round(item1,1),width1,height1,1, 0,0,0,0,0,0,'',0,0,0]
|
| 759 |
+
myindex= SimilarAreaDictionary.index[((SimilarAreaDictionary['Rounded'] >=areaPerimeterMin) &(SimilarAreaDictionary['Rounded']<=areaPerimeterMax) )].tolist()
|
| 760 |
+
if color!= [0,0,0]: #leveled image
|
| 761 |
+
checkifColorExists=0 # to check whether this row was found or not( area and color )
|
| 762 |
+
for i in myindex: # loop on indices that were found --> rows containing this area to check its color and add occ.
|
| 763 |
+
if SimilarAreaDictionary['Color'].loc[i]==[rgbcolor[0],rgbcolor[1],rgbcolor[2]] and ( SimilarAreaDictionary['Rounded'].loc[i] >= areaPerimeterMin and SimilarAreaDictionary['Rounded'].loc[i] <= areaPerimeterMax) :
|
| 764 |
+
if (SimilarAreaDictionary['Width'].loc[i] <=widthMax and SimilarAreaDictionary['Width'].loc[i] >= widthMin) and (SimilarAreaDictionary['Height'].loc[i] <= heightMax and SimilarAreaDictionary['Height'].loc[i] >= heightMin ) or (SimilarAreaDictionary['Width'].loc[i] <=heightMax and SimilarAreaDictionary['Width'].loc[i] >= heightMin) and (SimilarAreaDictionary['Height'].loc[i] <= widthMax and SimilarAreaDictionary['Height'].loc[i] >= widthMin ) :
|
| 765 |
+
checkifColorExists=1 #found and incremented
|
| 766 |
+
SimilarAreaDictionary['Occurences'].loc[i]+=1
|
| 767 |
+
if checkifColorExists==0: #couldnt find the color , doesnt exist so add it
|
| 768 |
+
SimilarAreaDictionary.loc[len(SimilarAreaDictionary)] =mydata
|
| 769 |
+
else: #full image
|
| 770 |
+
#same code bs mgher color
|
| 771 |
+
checkifColorExists=0
|
| 772 |
+
for i in myindex:
|
| 773 |
+
if ( SimilarAreaDictionary['Rounded'].loc[i] <= areaPerimeterMax and SimilarAreaDictionary['Rounded'].loc[i] >= areaPerimeterMin) :
|
| 774 |
+
if (SimilarAreaDictionary['Width'].loc[i] <=widthMax and SimilarAreaDictionary['Width'].loc[i] >= widthMin) and (SimilarAreaDictionary['Height'].loc[i] <= heightMax and SimilarAreaDictionary['Height'].loc[i] >= heightMin ) or (SimilarAreaDictionary['Width'].loc[i] <=heightMax and SimilarAreaDictionary['Width'].loc[i] >= heightMin) and (SimilarAreaDictionary['Height'].loc[i] <= widthMax and SimilarAreaDictionary['Height'].loc[i] >= widthMin ) :
|
| 775 |
+
checkifColorExists=1 #found and incremented
|
| 776 |
+
SimilarAreaDictionary['Occurences'].loc[i]+=1
|
| 777 |
+
if checkifColorExists==0: #couldnt find the color , doesnt exist so add it
|
| 778 |
+
SimilarAreaDictionary.loc[len(SimilarAreaDictionary)] =mydata
|
| 779 |
+
# s= SimilarAreaDictionary
|
| 780 |
+
for i in range(len(SimilarAreaDictionary)):
|
| 781 |
+
SimilarAreaDictionary.loc[i, "R"] = colorRanges[i][0]
|
| 782 |
+
SimilarAreaDictionary.loc[i, "G"] = colorRanges[i][1]
|
| 783 |
+
SimilarAreaDictionary.loc[i, "B"] = colorRanges[i][2]
|
| 784 |
+
colorsUsed.append(colorRanges[i])
|
| 785 |
+
|
| 786 |
+
return SimilarAreaDictionary, colorsUsed , areas_Perimeters
|
| 787 |
+
#detect and draw and measure
|
| 788 |
+
def drawAllContours(dataDoc,img,number,finalColorArray,ratioarea,ratioperim , path,pdfpath):
    """Detect shapes, annotate the source PDF with area/perimeter/length, and
    publish the measured plan.

    Two modes:
      * number == 220: iterate over finalColorArray and process one color
        layer at a time.
      * otherwise: process the full image once.
    In both modes each large contour is matched against SimilarAreaDictionary
    rows (area +/-0.4, dimensions +/-15 px), PDF polygon/polyline annotations
    are added, and measurement text is drawn on image copies. The annotated
    PDF is uploaded to Dropbox, re-downloaded to enumerate its annotations,
    and a Google-Sheets legend is generated.

    Returns:
        (imgPerimeter1, image_new1, SimilarAreaDictionary, colorsUsed,
         spreadsheet_url, spreadsheetId, list1, pdflink, areas_Perimeters,
         namepathArr)
    """
    green2=allpreSteps(img)
    green22,imgoldnodashes=allpreSteps(img,num=1)
    doc = fitz.open("pdf",dataDoc)
    page = doc[0]
    rotationOld=page.rotation
    pix=page.get_pixmap()
    # Pixel-to-PDF scale; rotated pages map pdf width onto the image height.
    if page.rotation!=0:
        page.set_rotation(0)
        ratio = pix.width/ img.shape[0]
    else:
        ratio = pix.width/ img.shape[1]

    areasinImage=[]
    imgArea1= img.copy()
    imgPerimeter1=img.copy()
    imgtransparent1=img.copy()

    if number ==220:
        SimilarAreaDictionary= pd.DataFrame(columns=['Guess','Color','Rounded','Width','Height','Occurences','Area','Total Area','Perimeter','Total Perimeter','Length','Total Length','Texts','R','G','B'])
        # firstcolor=finalColorArray[0]
        # counter=0
        maskDone=img.copy()
        for eachcolor in finalColorArray:
            contourssP,contourssA,rgbcolor,invertedmask , perimeters , alltxts,imgtransparent1 , ArrthreshCnt ,allshapesExtremes_Text, green22Gry=findContoursFullImage(green22,imgoldnodashes,dataDoc,imgtransparent1,green2,maskDone,number,finalColorArray,ratioarea,ratioperim,eachcolor)
            SimilarAreaDictionary, colorsUsed , areas_Perimeters= FillDictionary(SimilarAreaDictionary,maskDone,ratioarea,ratioperim,contourssP,contourssA,rgbcolor,eachcolor)
            perimTotal=0
            for contour in range(len(contourssP)):
                shape=[]
                area1 = cv2.contourArea(contourssA[contour][0])
                if (area1 > 3500 ): #check perimeter too in the condition -- 2800
                    perimeter=cv2.arcLength(contourssP[contour][0],True)
                    shape=[]
                    angleR,widthR ,heightR= StraightenImage(contourssP[contour][0],imgArea1)
                    x, y , width, height = cv2.boundingRect(contourssP[contour][0])

                    # Find the enclosing outline near this shape and look up
                    # the OCR texts attached to it.
                    Blackmask = np.zeros(img.shape[:2], dtype="uint8")
                    Blackmask = cv2.rectangle(Blackmask, (int(x-10),int(y-10)), (int(x+width+10),int(y+height+10)), (255, 255, 255), -1)
                    Blackmask=cv2.bitwise_and(green22Gry,green22Gry,mask=Blackmask)
                    BlackmaskCnt,_= cv2.findContours(Blackmask, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
                    areas = [cv2.contourArea(c) for c in BlackmaskCnt]
                    if len(areas)>0:
                        max_index = np.argmax(areas)
                        blackcnt=BlackmaskCnt[max_index]
                        blackcnt=tuple(blackcnt)
                        texts=''
                        for th in range(len(ArrthreshCnt)):
                            for e in blackcnt:
                                if list(e[0]) in ArrthreshCnt[th]:
                                    texts=allshapesExtremes_Text[th][1]
                                    break
                            if len(texts)>0:
                                break

                    # Refined area/perimeter measurements via polygon approximation.
                    approxA = cv2.approxPolyDP(contourssA[contour][0], 0.0002* perimeter, True)
                    area1 = cv2.contourArea(approxA)
                    approx = cv2.approxPolyDP(contourssP[contour][0], 0.002 * perimeter, True) #0.0009
                    perimeter1 = cv2.arcLength(approx, True)
                    # Map image-space points into (derotated) PDF coordinates.
                    for point in approxA:
                        x1, y1 = point[0]
                        p1 = fitz.Point(x1*ratio,y1*ratio)
                        p1=p1*page.derotation_matrix
                        shape.append([p1[0],p1[1]])

                    if (angleR != 90.0 and angleR != -90.0 and angleR != 0.0 and angleR != -0.0 ): #inclined at any angle
                        width=widthR
                        height=heightR
                    if width>height:
                        lengthShape = width
                    else:
                        lengthShape = height
                    widthMin= width-15
                    widthMax= width+15
                    heightMin=height-15
                    heightMax= height+15
                    if ratioarea !=0 and ratioperim!=0:
                        areaa=round(area1* ratioarea, 3) # true value of area of any shape/ area px value of same shape
                        perimeterr=round(perimeter1* ratioperim,3)
                        lengthShape=round(lengthShape* ratioperim,3)
                    else:
                        areaa=area1
                        perimeterr=perimeter1

                    areaPerimeterMin= round(areaa,1) - 0.4
                    areaPerimeterMax= round(areaa,1) + 0.4
                    masked=SimilarAreaDictionary.loc[SimilarAreaDictionary.index[((SimilarAreaDictionary['Rounded'] >=areaPerimeterMin) &(SimilarAreaDictionary['Rounded']<=areaPerimeterMax) )]]
                    passed=0
                    # Attach this shape to the first matching dictionary row.
                    for i, row in masked.iterrows():
                        if passed ==0:
                            if SimilarAreaDictionary['Color'].loc[i] == [rgbcolor[0],rgbcolor[1],rgbcolor[2]] and ( SimilarAreaDictionary['Rounded'].loc[i] <= areaPerimeterMax and SimilarAreaDictionary['Rounded'].loc[i] >= areaPerimeterMin) :
                                if (SimilarAreaDictionary['Width'].loc[i] <=widthMax and SimilarAreaDictionary['Width'].loc[i] >= widthMin) and (SimilarAreaDictionary['Height'].loc[i] <= heightMax and SimilarAreaDictionary['Height'].loc[i] >= heightMin ) or (SimilarAreaDictionary['Width'].loc[i] <=heightMax and SimilarAreaDictionary['Width'].loc[i] >= heightMin) and (SimilarAreaDictionary['Height'].loc[i] <= widthMax and SimilarAreaDictionary['Height'].loc[i] >= widthMin ) :
                                    if len(alltxts)>0:
                                        SimilarAreaDictionary['Guess'].loc[i].append(str(alltxts[contour]))
                                    for t in texts:
                                        if "GB" not in t or "RC" not in t or "PC" not in t:
                                            if t not in SimilarAreaDictionary['Texts'].loc[i]:
                                                SimilarAreaDictionary['Texts'].loc[i]+=' '+t
                                    SimilarAreaDictionary['Total Area'].loc[i]+=areaa
                                    SimilarAreaDictionary['Area'].loc[i]=areaa

                                    pFlagDF=0
                                    # fitz expects stroke/fill as floats in [0, 1].
                                    color= (int(SimilarAreaDictionary['R'].loc[i])/255 , int(SimilarAreaDictionary['G'].loc[i])/255 , int(SimilarAreaDictionary['B'].loc[i])/255 )


                                    # Open shapes matched by position get a dashed polyline annotation.
                                    for p in perimeters:
                                        if p[2]==x and p[3]==y and p[5]=='open':
                                            # if areaa >=5.15 and areaa<=5.25:
                                            shapee=[]
                                            SimilarAreaDictionary['Total Perimeter'].loc[i]+=round((p[4]-1)*ratioperim,3)
                                            SimilarAreaDictionary['Perimeter'].loc[i]=round((p[4]-1)*ratioperim,3)
                                            for poi in p[6]:
                                                x1, y1 = poi[0]
                                                p1 = fitz.Point(x1*ratio,y1*ratio)
                                                p1=p1*page.derotation_matrix
                                                shapee.append([p1[0],p1[1]])

                                            annot11 = page.add_polyline_annot( points=shapee) # 'Polygon'
                                            annot11.set_border(width=0.2, dashes=[3])
                                            # NOTE(review): this targets annot1 (which may be unbound on the
                                            # first pass) — looks like it was meant to be annot11. Confirm.
                                            annot1.set_colors(stroke=color ,fill=None)
                                            if len(alltxts)>0:
                                                annot11.set_info(content='Perimeter='+str(round((p[4]-1)*ratioperim,3))+' m',subject='ADR Team',title=str(alltxts[contour]))
                                            # annot.set_line_ends(fitz.PDF_ANNOT_LE_DIAMOND, fitz.PDF_ANNOT_LE_CIRCLE)
                                            annot11.update()
                                            pFlagDF=1
                                    if pFlagDF==0:
                                        annot1 = page.add_polyline_annot( points=shape) # 'Polygon'
                                        annot1.set_border(width=0.2 ,dashes=[3])
                                        annot1.set_colors(stroke=color , fill=None)
                                        if len(alltxts)>0:
                                            annot1.set_info(content='Perimeter='+str(perimeterr)+' m',subject='ADR Team',title=str(alltxts[contour]))
                                        SimilarAreaDictionary['Total Perimeter'].loc[i]+=perimeterr
                                        SimilarAreaDictionary['Perimeter'].loc[i]=perimeterr
                                    SimilarAreaDictionary['Total Length'].loc[i]+=lengthShape
                                    SimilarAreaDictionary['Length'].loc[i]=lengthShape

                                    passed=1
                                    cv2.drawContours(imgArea1, [contourssP[contour][0]], 0, ( int(SimilarAreaDictionary['B'].loc[i]), int(SimilarAreaDictionary['G'].loc[i]), int(SimilarAreaDictionary['R'].loc[i])), -1)
                                    # Filled, semi-transparent polygon annotation for the shape itself.
                                    annot = page.add_polygon_annot( points=shape) # 'Polygon'
                                    annot.set_border(width=0.2)
                                    annot.set_colors(stroke=color, fill= color )
                                    annot.set_opacity(0.5)
                                    if len(alltxts)>0:
                                        annot.set_info(content='Area='+str(areaa)+' m2' +'\n \n Length='+str(lengthShape)+' m',subject='ADR Team',title=str(alltxts[contour]))
                                    # annot.set_line_ends(fitz.PDF_ANNOT_LE_DIAMOND, fitz.PDF_ANNOT_LE_CIRCLE)
                                    annot.update()

                    # areasinImage.append(areaa)
                    cv2.putText(imgPerimeter1,'Perimeter: '+str(perimeterr)+ ' m', (x+70,y-30) ,cv2.FONT_HERSHEY_SIMPLEX, 0.6, (50, 50, 255), 2)
                    cv2.putText(imgPerimeter1,'Area: '+str(areaa)+' m2', (x+50,y-40) ,cv2.FONT_HERSHEY_SIMPLEX, 0.6, (50, 50, 255), 2)


        # for i,row in SimilarAreaDictionary.iterrows():
        #     # print(row)
        #     if row[5] not in areasinImage: # column of area
        #         SimilarAreaDictionary = SimilarAreaDictionary.drop(SimilarAreaDictionary.loc[SimilarAreaDictionary.index==i].index)

        # print(SimilarAreaDictionary)
        # display(totaldf)
        #########################
    else:
        # Full-image mode: single pass, same matching/annotation flow as above
        # but without the per-color loop or the color comparison.
        SimilarAreaDictionary= pd.DataFrame(columns=['Guess','Color','Rounded','Width','Height','Occurences','Area','Total Area','Perimeter','Total Perimeter','Length','Total Length','Texts','R','G','B'])
        contourssP,contourssA , perimeters , alltxts , imgtransparent1 , ArrthreshCnt ,allshapesExtremes_Text, green22Gry=findContoursFullImage(green22,imgoldnodashes,dataDoc,imgtransparent1,green2,img,number,finalColorArray,ratioarea,ratioperim)
        SimilarAreaDictionary,colorsUsed , areas_Perimeters= FillDictionary(SimilarAreaDictionary,img,ratioarea,ratioperim,contourssP,contourssA)

        for contour in range(len(contourssP)):
            area1 = cv2.contourArea(contourssA[contour][0])
            if (area1 >4000 ):
                perimeter=cv2.arcLength(contourssP[contour][0],True)


                shape=[]
                angleR,widthR ,heightR= StraightenImage(contourssP[contour][0],imgArea1)
                x, y , width, height = cv2.boundingRect(contourssP[contour][0])

                Blackmask = np.zeros(img.shape[:2], dtype="uint8")
                Blackmask = cv2.rectangle(Blackmask, (int(x-10),int(y-10)), (int(x+width+10),int(y+height+10)), (255, 255, 255), -1)
                Blackmask=cv2.bitwise_and(green22Gry,green22Gry,mask=Blackmask)
                BlackmaskCnt,_= cv2.findContours(Blackmask, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
                areas = [cv2.contourArea(c) for c in BlackmaskCnt]
                if len(areas)>0:
                    max_index = np.argmax(areas)
                    blackcnt=BlackmaskCnt[max_index]
                    blackcnt=tuple(blackcnt)
                    texts=''
                    # textsMid=''
                    for th in range(len(ArrthreshCnt)):
                        for e in blackcnt:
                            if list(e[0]) in ArrthreshCnt[th]:
                                texts=allshapesExtremes_Text[th][1]
                                # textsMid=allshapesExtremes_Text[th][2]
                                break
                        if len(texts)>0:
                            break
                # print(texts)
                approxA = cv2.approxPolyDP(contourssA[contour][0], 0.0002* perimeter, True)
                area1 = cv2.contourArea(approxA)
                approx = cv2.approxPolyDP(contourssP[contour][0], 0.002* perimeter, True) #0.0009
                perimeter1 = cv2.arcLength(approx, True)
                for point in approxA:
                    x1, y1 = point[0]
                    p1 = fitz.Point(x1*ratio,y1*ratio)
                    p1=p1*page.derotation_matrix
                    shape.append([p1[0],p1[1]])
                if (angleR != 90.0 and angleR != -90.0 and angleR != 0.0 and angleR != -0.0 ): #inclined at any angle
                    width=widthR
                    height=heightR
                # NOTE(review): when width == height neither branch assigns
                # lengthShape, which would raise NameError below — confirm.
                if width>height:
                    lengthShape = width
                if height>width:
                    lengthShape = height

                widthMin= width-15
                widthMax= width+15
                heightMin=height-15
                heightMax= height+15

                if ratioarea !=0 and ratioperim!=0:
                    areaa= round(area1* ratioarea,3) # true value of area of any shape/ area px value of same shape
                    perimeterr=round(perimeter1* ratioperim,3)
                    lengthShape=round(lengthShape* ratioperim,3)
                else:
                    # NOTE(review): unlike the colored branch, areaa is not set
                    # here, so zero ratios would raise NameError below — confirm.
                    perimeterr=perimeter1
                areaPerimeterMin= round(areaa,1) - 0.4
                areaPerimeterMax= round(areaa,1) + 0.4
                masked=SimilarAreaDictionary.loc[SimilarAreaDictionary.index[((SimilarAreaDictionary['Rounded'] >=areaPerimeterMin) & (SimilarAreaDictionary['Rounded']<=areaPerimeterMax) )]]
                passed=0
                for i, row in masked.iterrows():
                    if passed ==0:
                        if ( SimilarAreaDictionary['Rounded'].loc[i] <= areaPerimeterMax and SimilarAreaDictionary['Rounded'].loc[i] >= areaPerimeterMin) :
                            if (SimilarAreaDictionary['Width'].loc[i] <=widthMax and SimilarAreaDictionary['Width'].loc[i] >= widthMin) and (SimilarAreaDictionary['Height'].loc[i] <= heightMax and SimilarAreaDictionary['Height'].loc[i] >= heightMin ) or (SimilarAreaDictionary['Width'].loc[i] <=heightMax and SimilarAreaDictionary['Width'].loc[i] >= heightMin) and (SimilarAreaDictionary['Height'].loc[i] <= widthMax and SimilarAreaDictionary['Height'].loc[i] >= widthMin ) :
                                if len(alltxts)>0:
                                    SimilarAreaDictionary['Guess'].loc[i].append(str(alltxts[contour]))
                                for t in texts:
                                    if "GB" not in t or "RC" not in t or "PC" not in t:
                                        if t not in SimilarAreaDictionary['Texts'].loc[i]:
                                            SimilarAreaDictionary['Texts'].loc[i]+=' '+t
                                SimilarAreaDictionary['Total Area'].loc[i]+=areaa
                                SimilarAreaDictionary['Area'].loc[i]=areaa

                                pFlagDF=0
                                color= (int(SimilarAreaDictionary['R'].loc[i])/255 , int(SimilarAreaDictionary['G'].loc[i])/255 , int(SimilarAreaDictionary['B'].loc[i])/255 )


                                for p in perimeters:
                                    if p[2]==x and p[3]==y and p[5]=='open':
                                        # if areaa >=5.15 and areaa<=5.25:
                                        shapee=[]
                                        SimilarAreaDictionary['Total Perimeter'].loc[i]+=round((p[4]-1)*ratioperim,3)
                                        SimilarAreaDictionary['Perimeter'].loc[i]=round((p[4]-1)*ratioperim,3)
                                        for poi in p[6]:
                                            x1, y1 = poi[0]
                                            p1 = fitz.Point(x1*ratio,y1*ratio)
                                            p1=p1*page.derotation_matrix
                                            shapee.append([p1[0],p1[1]])

                                        annot11 = page.add_polyline_annot( points=shapee) # 'Polygon'
                                        annot11.set_border(width=0.2, dashes=[3])
                                        # NOTE(review): same suspected annot1/annot11 typo as the colored branch.
                                        annot1.set_colors(stroke=color ,fill=None)
                                        if len(alltxts)>0:
                                            annot11.set_info(content='Perimeter='+str(round((p[4]-1)*ratioperim,3))+' m',subject='ADR Team',title=str(alltxts[contour]))
                                        # annot.set_line_ends(fitz.PDF_ANNOT_LE_DIAMOND, fitz.PDF_ANNOT_LE_CIRCLE)
                                        annot11.update()
                                        pFlagDF=1
                                if pFlagDF==0:
                                    annot1 = page.add_polyline_annot( points=shape) # 'Polygon'
                                    annot1.set_border(width=0.2 ,dashes=[3])
                                    annot1.set_colors(stroke=color ,fill=None)
                                    if len(alltxts)>0:
                                        annot1.set_info(content='Perimeter='+str(perimeterr)+' m',subject='ADR Team',title=str(alltxts[contour]))
                                    SimilarAreaDictionary['Total Perimeter'].loc[i]+=perimeterr
                                    SimilarAreaDictionary['Perimeter'].loc[i]=perimeterr
                                SimilarAreaDictionary['Total Length'].loc[i]+=lengthShape
                                SimilarAreaDictionary['Length'].loc[i]=lengthShape

                                passed=1
                                cv2.drawContours(imgArea1, [contourssP[contour][0]], 0, ( int(SimilarAreaDictionary['B'].loc[i]), int(SimilarAreaDictionary['G'].loc[i]), int(SimilarAreaDictionary['R'].loc[i])), -1)
                                annot = page.add_polygon_annot( points=shape) # 'Polygon'
                                annot.set_border(width=0.2)
                                annot.set_colors(stroke=color, fill= color )
                                annot.set_opacity(0.5)
                                if len(alltxts)>0:
                                    annot.set_info(content='Area='+str(areaa)+' m2' +'\n \n Length='+str(lengthShape)+' m',subject='ADR Team',title=str(alltxts[contour]))
                                # annot.set_line_ends(fitz.PDF_ANNOT_LE_DIAMOND, fitz.PDF_ANNOT_LE_CIRCLE)
                                annot.update()

                cv2.putText(imgtransparent1,'Area: '+str(areaa) +' '+str(alltxts[contour])+' m2', (x+50,y-10) ,cv2.FONT_HERSHEY_SIMPLEX, 0.6, (50, 50, 255), 2)
                pFlag=0

                for p in perimeters:
                    if p[2]==x and p[3]==y and p[5]=='open':
                        # if areaa >=5.15 and areaa<=5.25:
                        # NOTE(review): perimTotal is initialized only in the
                        # number==220 branch — possible NameError here. Confirm.
                        perimTotal+=round((p[4]-1)*ratioperim,3)
                        cv2.putText(imgtransparent1,'Perimeter: '+str(round((p[4])*ratioperim,3))+ ' m', (p[0]+50,p[1]-40) ,cv2.FONT_HERSHEY_SIMPLEX, 0.6, (50, 50, 255), 2)
                        pFlag=1
                if pFlag==0:
                    cv2.putText(imgtransparent1,'Perimeter: '+str(perimeterr)+' m', (x+50,y-40) ,cv2.FONT_HERSHEY_SIMPLEX, 0.6, (50, 50, 255), 2)
        # Collapse each row's Guess list to its most common entry.
        for i, row in SimilarAreaDictionary.iterrows():
            c = Counter( SimilarAreaDictionary['Guess'].loc[i])
            if len(c) >0:
                value, count = c.most_common()[0]
                SimilarAreaDictionary['Guess'].loc[i]= value
            else:
                SimilarAreaDictionary['Guess'].loc[i]= 'none'


    # cv2.circle (imgtransparent1, (img.shape[0],img.shape[0]1), 5, 255, 5)
    # Blend the filled-contour overlay onto the annotated transparent image.
    alpha = 0.4 # Transparency factor.
    image_new1 = cv2.addWeighted(imgArea1, alpha, imgtransparent1, 1 - alpha, 0)

    # Restore the page's original rotation on both the image and the PDF.
    if rotationOld==90:
        image_new1 = cv2.rotate(image_new1, cv2.ROTATE_90_CLOCKWISE)
    if rotationOld==180:
        image_new1 = cv2.rotate(image_new1, cv2.ROTATE_180)
    if rotationOld==270:
        image_new1 = cv2.rotate(image_new1, cv2.ROTATE_90_COUNTERCLOCKWISE)
    page.set_rotation(rotationOld)
    # Upload the annotated PDF, then re-download it to enumerate its annotations.
    dbPath='/TSA JOBS/ADR Test'+pdfpath+'Measured Plan/'
    pdflink= tsadropboxretrieval.uploadanyFile(doc=doc,path=dbPath,pdfname=path) #doc=doc,pdfname=path,pdfpath=pdfpath+'Measured Plan/'
    dbxTeam=tsadropboxretrieval.ADR_Access_DropboxTeam('user')
    md, res =dbxTeam.files_download(path= dbPath+path)
    data = res.content
    doc=fitz.open("pdf", data)
    # list1=pd.DataFrame(columns=['content', 'creationDate', 'id', 'modDate', 'name', 'subject', 'title'])
    list1=pd.DataFrame(columns=['content', 'id', 'subject'])
    for page in doc:
        for annot in page.annots():
            list1.loc[len(list1)] =annot.info

    gc,spreadsheet_service,spreadsheetId, spreadsheet_url , namepathArr=google_sheet_Legend.legendGoogleSheets(SimilarAreaDictionary , path,pdfpath)
    return imgPerimeter1,image_new1,SimilarAreaDictionary, colorsUsed , spreadsheet_url , spreadsheetId , list1 , pdflink , areas_Perimeters , namepathArr
|
| 1119 |
+
|
| 1120 |
+
######################################################
|
| 1121 |
+
|
| 1122 |
+
def deletemarkups(list1, dbPath , path):
    '''Compute which markups were deleted from the PDF stored on Dropbox.

    list1 : original markup pdf annotations, serialized as a dict-of-lists string
    list2 : annotations currently present in the downloaded pdf
    deletedrows : deleted markups - difference between both DataFrames
    (rows whose content starts with 'Scale' are excluded from the result)
    '''

    # SECURITY(review): eval() on externally supplied text executes arbitrary
    # code — consider ast.literal_eval if list1 is always a plain literal.
    myDict1=eval(list1)
    list1=pd.DataFrame(myDict1)

    dbxTeam=tsadropboxretrieval.ADR_Access_DropboxTeam('user')
    # print('pathhhhh',dbPath+path)
    md, res =dbxTeam.files_download(path= dbPath+path)
    data = res.content
    doc=fitz.open("pdf", data)
    list2=pd.DataFrame(columns=['content', 'id', 'subject'])
    # list2=pd.DataFrame(columns=['content', 'creationDate', 'id', 'modDate', 'name', 'subject', 'title'])
    for page in doc:
        for annot in page.annots():
            list2.loc[len(list2)] =annot.info
    # print(list1)
    # Rows present in exactly one of the two frames = deleted annotations.
    deletedrows=pd.concat([list1,list2]).drop_duplicates(keep=False)

    print(deletedrows,len(deletedrows))
    if len(deletedrows)!=0:
        deletedrows=deletedrows[['content', 'id', 'subject']]
        # Drop scale-calibration markups — they are not user measurements.
        deletedrows = deletedrows.drop(deletedrows.index[deletedrows['content'].str.startswith('Scale')] )#, inplace=True)
    return deletedrows
|
| 1157 |
+
|
| 1158 |
+
#######################################################
|
| 1159 |
+
|