Marthee committed on
Commit
c2a2cf9
·
verified ·
1 Parent(s): 3b804ce

Update InitialMarkups.py

Browse files
Files changed (1) hide show
  1. InitialMarkups.py +432 -0
InitialMarkups.py CHANGED
@@ -1855,4 +1855,436 @@ def extract_section_under_headerRawan(pdf_path,headingjson,pagenum=0,incominghea
1855
 
1856
 
1857
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1858
 
 
1855
 
1856
 
1857
 
1858
+
1859
+ ########################################################################################################################################################
1860
+ ########################################################################################################################################################
1861
+ def extract_section_under_header_FullDoc_WithoutNotBilled(pdf_path):
1862
+ Alltexttobebilled=''
1863
+ top_margin = 70
1864
+ bottom_margin = 50
1865
+ headertoContinue1 = False
1866
+ headertoContinue2=False
1867
+
1868
+ parsed_url = urlparse(pdf_path)
1869
+ filename = os.path.basename(parsed_url.path)
1870
+ filename = unquote(filename) # decode URL-encoded characters
1871
+
1872
+ # Optimized URL handling
1873
+ if pdf_path and ('http' in pdf_path or 'dropbox' in pdf_path):
1874
+ pdf_path = pdf_path.replace('dl=0', 'dl=1')
1875
+
1876
+ # Cache frequently used values
1877
+ response = requests.get(pdf_path)
1878
+ pdf_content = BytesIO(response.content)
1879
+ if not pdf_content:
1880
+ raise ValueError("No valid PDF content found.")
1881
+
1882
+ doc = fitz.open(stream=pdf_content, filetype="pdf")
1883
+ docHighlights = fitz.open(stream=pdf_content, filetype="pdf")
1884
+ most_common_font_size, most_common_color, most_common_font = get_regular_font_size_and_color(doc)
1885
+
1886
+ # Precompute regex patterns
1887
+ dot_pattern = re.compile(r'\.{3,}')
1888
+ url_pattern = re.compile(r'https?://\S+|www\.\S+')
1889
+
1890
+ def get_toc_page_numbers(doc, max_pages_to_check=15):
1891
+ toc_pages = []
1892
+ for page_num in range(min(len(doc), max_pages_to_check)):
1893
+ page = doc.load_page(page_num)
1894
+ blocks = page.get_text("dict")["blocks"]
1895
+
1896
+ dot_line_count = 0
1897
+ for block in blocks:
1898
+ for line in block.get("lines", []):
1899
+ line_text = get_spaced_text_from_spans(line["spans"]).strip()
1900
+ if dot_pattern.search(line_text):
1901
+ dot_line_count += 1
1902
+
1903
+ if dot_line_count >= 3:
1904
+ toc_pages.append(page_num)
1905
+
1906
+ return list(range(0, toc_pages[-1] +1)) if toc_pages else toc_pages
1907
+
1908
+ toc_pages = get_toc_page_numbers(doc)
1909
+
1910
+ headers, top_3_font_sizes, smallest_font_size, headersSpans = extract_headers(
1911
+ doc, toc_pages, most_common_font_size, most_common_color, most_common_font, top_margin, bottom_margin
1912
+ )
1913
+
1914
+ hierarchy = build_header_hierarchy(doc, toc_pages, most_common_font_size, most_common_color, most_common_font)
1915
+ listofHeaderstoMarkup = get_leaf_headers_with_paths(hierarchy)
1916
+
1917
+ # Precompute all children headers once
1918
+ allchildrenheaders = [normalize_text(item['text']) for item, p in listofHeaderstoMarkup]
1919
+ allchildrenheaders_set = set(allchildrenheaders) # For faster lookups
1920
+
1921
+ df = pd.DataFrame(columns=["NBSLink","Subject","Page","Author","Creation Date","Layer",'Code', 'head above 1', "head above 2"])
1922
+ dictionaryNBS={}
1923
+ data_list_JSON = []
1924
+
1925
+ if len(top_3_font_sizes)==3:
1926
+ mainHeaderFontSize, subHeaderFontSize, subsubheaderFontSize = top_3_font_sizes
1927
+ elif len(top_3_font_sizes)==2:
1928
+ mainHeaderFontSize= top_3_font_sizes[0]
1929
+ subHeaderFontSize= top_3_font_sizes[1]
1930
+ subsubheaderFontSize= top_3_font_sizes[1]
1931
+
1932
+
1933
+
1934
+ # Preload all pages to avoid repeated loading
1935
+ # pages = [doc.load_page(page_num) for page_num in range(len(doc)) if page_num not in toc_pages]
1936
+
1937
+ for heading_to_searchDict, paths in listofHeaderstoMarkup:
1938
+
1939
+ heading_to_search = heading_to_searchDict['text']
1940
+ heading_to_searchPageNum = heading_to_searchDict['page']
1941
+
1942
+ # Initialize variables
1943
+ headertoContinue1 = False
1944
+ headertoContinue2 = False
1945
+ matched_header_line = None
1946
+ done = False
1947
+ collecting = False
1948
+ collected_lines = []
1949
+ page_highlights = {}
1950
+ current_bbox = {}
1951
+ last_y1s = {}
1952
+ mainHeader = ''
1953
+ subHeader = ''
1954
+ matched_header_line_norm = heading_to_search
1955
+ break_collecting = False
1956
+ heading_norm = normalize_text(heading_to_search)
1957
+ paths_norm = [normalize_text(p) for p in paths[0]] if paths and paths[0] else []
1958
+
1959
+ for page_num in range(0,len(doc)):
1960
+ if page_num in toc_pages:
1961
+ continue
1962
+ if break_collecting:
1963
+ break
1964
+ page=doc[page_num]
1965
+ page_height = page.rect.height
1966
+ blocks = page.get_text("dict")["blocks"]
1967
+
1968
+ for block in blocks:
1969
+ if break_collecting:
1970
+ break
1971
+
1972
+ lines = block.get("lines", [])
1973
+ i = 0
1974
+ while i < len(lines):
1975
+ if break_collecting:
1976
+ break
1977
+
1978
+ spans = lines[i].get("spans", [])
1979
+ if not spans:
1980
+ i += 1
1981
+ continue
1982
+
1983
+ y0 = spans[0]["bbox"][1]
1984
+ y1 = spans[0]["bbox"][3]
1985
+ if y0 < top_margin or y1 > (page_height - bottom_margin):
1986
+ i += 1
1987
+ continue
1988
+
1989
+ line_text = get_spaced_text_from_spans(spans).lower()
1990
+ line_text_norm = normalize_text(line_text)
1991
+
1992
+ # Combine with next line if available
1993
+ if i + 1 < len(lines):
1994
+ next_spans = lines[i + 1].get("spans", [])
1995
+ next_line_text = get_spaced_text_from_spans(next_spans).lower()
1996
+ combined_line_norm = normalize_text(line_text + " " + next_line_text)
1997
+ else:
1998
+ combined_line_norm = line_text_norm
1999
+
2000
+ # Check if we should continue processing
2001
+ if combined_line_norm and combined_line_norm in paths[0]:
2002
+
2003
+ headertoContinue1 = combined_line_norm
2004
+ if combined_line_norm and combined_line_norm in paths[-2]:
2005
+ headertoContinue2 = combined_line_norm
2006
+ if 'installation' in paths[-2].lower() or 'execution' in paths[-2].lower() or 'miscellaneous items' in paths[-2].lower() :
2007
+ stringtowrite='Not to be billed'
2008
+ else:
2009
+ stringtowrite='To be billed'
2010
+ # Optimized header matching
2011
+ if stringtowrite=='To be billed':
2012
+ Alltexttobebilled+= combined_line_norm #################################################
2013
+ existsfull = (
2014
+ ( combined_line_norm in allchildrenheaders_set or
2015
+ combined_line_norm in allchildrenheaders ) and heading_to_search in combined_line_norm
2016
+ )
2017
+
2018
+ # New word-based matching
2019
+ current_line_words = set(combined_line_norm.split())
2020
+ heading_words = set(heading_norm.split())
2021
+ all_words_match = current_line_words.issubset(heading_words) and len(current_line_words) > 0
2022
+
2023
+ substring_match = (
2024
+ heading_norm in combined_line_norm or
2025
+ combined_line_norm in heading_norm or
2026
+ all_words_match # Include the new word-based matching
2027
+ )
2028
+ # substring_match = (
2029
+ # heading_norm in combined_line_norm or
2030
+ # combined_line_norm in heading_norm
2031
+ # )
2032
+
2033
+ if (substring_match and existsfull and not collecting and
2034
+ len(combined_line_norm) > 0 ):#and (headertoContinue1 or headertoContinue2) ):
2035
+
2036
+ # Check header conditions more efficiently
2037
+ header_spans = [
2038
+ span for span in spans
2039
+ if (is_header(span, most_common_font_size, most_common_color, most_common_font)
2040
+ # and span['size'] >= subsubheaderFontSize
2041
+ and span['size'] < mainHeaderFontSize)
2042
+ ]
2043
+ if header_spans and stringtowrite.startswith('To'):
2044
+ collecting = True
2045
+ matched_header_font_size = max(span["size"] for span in header_spans)
2046
+ Alltexttobebilled+= ' '+ combined_line_norm
2047
+ collected_lines.append(line_text)
2048
+ valid_spans = [span for span in spans if span.get("bbox")]
2049
+
2050
+ if valid_spans:
2051
+ x0s = [span["bbox"][0] for span in valid_spans]
2052
+ x1s = [span["bbox"][2] for span in valid_spans]
2053
+ y0s = [span["bbox"][1] for span in valid_spans]
2054
+ y1s = [span["bbox"][3] for span in valid_spans]
2055
+
2056
+ header_bbox = [min(x0s), min(y0s), max(x1s), max(y1s)]
2057
+
2058
+ if page_num in current_bbox:
2059
+ cb = current_bbox[page_num]
2060
+ current_bbox[page_num] = [
2061
+ min(cb[0], header_bbox[0]),
2062
+ min(cb[1], header_bbox[1]),
2063
+ max(cb[2], header_bbox[2]),
2064
+ max(cb[3], header_bbox[3])
2065
+ ]
2066
+ else:
2067
+ current_bbox[page_num] = header_bbox
2068
+ last_y1s[page_num] = header_bbox[3]
2069
+ x0, y0, x1, y1 = header_bbox
2070
+
2071
+ zoom = 200
2072
+ left = int(x0)
2073
+ top = int(y0)
2074
+ zoom_str = f"{zoom},{left},{top}"
2075
+ pageNumberFound = page_num + 1
2076
+
2077
+ # Build the query parameters
2078
+ params = {
2079
+ 'pdfLink': pdf_path, # Your PDF link
2080
+ 'keyword': heading_to_search, # Your keyword (could be a string or list)
2081
+ }
2082
+
2083
+ # URL encode each parameter
2084
+ encoded_params = {key: urllib.parse.quote(value, safe='') for key, value in params.items()}
2085
+
2086
+ # Construct the final encoded link
2087
+ encoded_link = '&'.join([f"{key}={value}" for key, value in encoded_params.items()])
2088
+
2089
+ # Correctly construct the final URL with page and zoom
2090
+ final_url = f"{baselink}{encoded_link}#page={str(pageNumberFound)}&zoom={zoom_str}"
2091
+
2092
+ # Get current date and time
2093
+ now = datetime.now()
2094
+
2095
+ # Format the output
2096
+ formatted_time = now.strftime("%d/%m/%Y %I:%M:%S %p")
2097
+ # Optionally, add the URL to a DataFrame
2098
+
2099
+
2100
+ data_entry = {
2101
+ "NBSLink": final_url,
2102
+ "Subject": heading_to_search,
2103
+ "Page": str(pageNumberFound),
2104
+ "Author": "ADR",
2105
+ "Creation Date": formatted_time,
2106
+ "Layer": "Initial",
2107
+ "Code": stringtowrite,
2108
+ "head above 1": paths[-2],
2109
+ "head above 2": paths[0],
2110
+ "MC Connnection": 'Go to ' + paths[0].strip().split()[0] +'/'+ heading_to_search.strip().split()[0] + ' in '+ filename
2111
+ }
2112
+ data_list_JSON.append(data_entry)
2113
+
2114
+ # Convert list to JSON
2115
+ json_output = json.dumps(data_list_JSON, indent=4)
2116
+
2117
+ i += 2
2118
+ continue
2119
+ else:
2120
+ if (substring_match and not collecting and
2121
+ len(combined_line_norm) > 0): # and (headertoContinue1 or headertoContinue2) ):
2122
+
2123
+ # Calculate word match percentage
2124
+ word_match_percent = words_match_ratio(heading_norm, combined_line_norm) * 100
2125
+
2126
+ # Check if at least 70% of header words exist in this line
2127
+ meets_word_threshold = word_match_percent >= 100
2128
+
2129
+ # Check header conditions (including word threshold)
2130
+ header_spans = [
2131
+ span for span in spans
2132
+ if (is_header(span, most_common_font_size, most_common_color, most_common_font)
2133
+ # and span['size'] >= subsubheaderFontSize
2134
+ and span['size'] < mainHeaderFontSize)
2135
+ ]
2136
+
2137
+ if header_spans and (meets_word_threshold or same_start_word(heading_to_search, combined_line_norm) ) and stringtowrite.startswith('To'):
2138
+ collecting = True
2139
+ matched_header_font_size = max(span["size"] for span in header_spans)
2140
+ Alltexttobebilled+= ' '+ combined_line_norm
2141
+ collected_lines.append(line_text)
2142
+ valid_spans = [span for span in spans if span.get("bbox")]
2143
+
2144
+ if valid_spans:
2145
+ x0s = [span["bbox"][0] for span in valid_spans]
2146
+ x1s = [span["bbox"][2] for span in valid_spans]
2147
+ y0s = [span["bbox"][1] for span in valid_spans]
2148
+ y1s = [span["bbox"][3] for span in valid_spans]
2149
+
2150
+ header_bbox = [min(x0s), min(y0s), max(x1s), max(y1s)]
2151
+
2152
+ if page_num in current_bbox:
2153
+ cb = current_bbox[page_num]
2154
+ current_bbox[page_num] = [
2155
+ min(cb[0], header_bbox[0]),
2156
+ min(cb[1], header_bbox[1]),
2157
+ max(cb[2], header_bbox[2]),
2158
+ max(cb[3], header_bbox[3])
2159
+ ]
2160
+ else:
2161
+ current_bbox[page_num] = header_bbox
2162
+
2163
+ last_y1s[page_num] = header_bbox[3]
2164
+ x0, y0, x1, y1 = header_bbox
2165
+ zoom = 200
2166
+ left = int(x0)
2167
+ top = int(y0)
2168
+ zoom_str = f"{zoom},{left},{top}"
2169
+ pageNumberFound = page_num + 1
2170
+
2171
+ # Build the query parameters
2172
+ params = {
2173
+ 'pdfLink': pdf_path, # Your PDF link
2174
+ 'keyword': heading_to_search, # Your keyword (could be a string or list)
2175
+ }
2176
+
2177
+ # URL encode each parameter
2178
+ encoded_params = {key: urllib.parse.quote(value, safe='') for key, value in params.items()}
2179
+
2180
+ # Construct the final encoded link
2181
+ encoded_link = '&'.join([f"{key}={value}" for key, value in encoded_params.items()])
2182
+
2183
+ # Correctly construct the final URL with page and zoom
2184
+ final_url = f"{baselink}{encoded_link}#page={str(pageNumberFound)}&zoom={zoom_str}"
2185
+
2186
+ # Get current date and time
2187
+ now = datetime.now()
2188
+
2189
+ # Format the output
2190
+ formatted_time = now.strftime("%d/%m/%Y %I:%M:%S %p")
2191
+ # Optionally, add the URL to a DataFrame
2192
+
2193
+
2194
+ data_entry = {
2195
+ "NBSLink": final_url,
2196
+ "Subject": heading_to_search,
2197
+ "Page": str(pageNumberFound),
2198
+ "Author": "ADR",
2199
+ "Creation Date": formatted_time,
2200
+ "Layer": "Initial",
2201
+ "Code": stringtowrite,
2202
+ "head above 1": paths[-2],
2203
+ "head above 2": paths[0],
2204
+ "MC Connnection": 'Go to ' + paths[0].strip().split()[0] +'/'+ heading_to_search.strip().split()[0] + ' in '+ filename
2205
+ }
2206
+ data_list_JSON.append(data_entry)
2207
+
2208
+ # Convert list to JSON
2209
+ json_output = json.dumps(data_list_JSON, indent=4)
2210
+
2211
+
2212
+ i += 2
2213
+ continue
2214
+ if collecting:
2215
+ norm_line = normalize_text(line_text)
2216
+
2217
+ # Optimized URL check
2218
+ if url_pattern.match(norm_line):
2219
+ line_is_header = False
2220
+ else:
2221
+ line_is_header = any(is_header(span, most_common_font_size, most_common_color, most_common_font) for span in spans)
2222
+
2223
+ if line_is_header:
2224
+ header_font_size = max(span["size"] for span in spans)
2225
+ is_probably_real_header = (
2226
+ header_font_size >= matched_header_font_size and
2227
+ is_header(spans[0], most_common_font_size, most_common_color, most_common_font) and
2228
+ len(line_text.strip()) > 2
2229
+ )
2230
+
2231
+ if (norm_line != matched_header_line_norm and
2232
+ norm_line != heading_norm and
2233
+ is_probably_real_header):
2234
+ if line_text not in heading_norm:
2235
+ collecting = False
2236
+ done = True
2237
+ headertoContinue1 = False
2238
+ headertoContinue2=False
2239
+ for page_num, bbox in current_bbox.items():
2240
+ bbox[3] = last_y1s.get(page_num, bbox[3])
2241
+ page_highlights[page_num] = bbox
2242
+ highlight_boxes(docHighlights, page_highlights,stringtowrite)
2243
+
2244
+ break_collecting = True
2245
+ break
2246
+
2247
+ if break_collecting:
2248
+ break
2249
+
2250
+ collected_lines.append(line_text)
2251
+ valid_spans = [span for span in spans if span.get("bbox")]
2252
+ if valid_spans:
2253
+ x0s = [span["bbox"][0] for span in valid_spans]
2254
+ x1s = [span["bbox"][2] for span in valid_spans]
2255
+ y0s = [span["bbox"][1] for span in valid_spans]
2256
+ y1s = [span["bbox"][3] for span in valid_spans]
2257
+
2258
+ line_bbox = [min(x0s), min(y0s), max(x1s), max(y1s)]
2259
+
2260
+ if page_num in current_bbox:
2261
+ cb = current_bbox[page_num]
2262
+ current_bbox[page_num] = [
2263
+ min(cb[0], line_bbox[0]),
2264
+ min(cb[1], line_bbox[1]),
2265
+ max(cb[2], line_bbox[2]),
2266
+ max(cb[3], line_bbox[3])
2267
+ ]
2268
+ else:
2269
+ current_bbox[page_num] = line_bbox
2270
+
2271
+ last_y1s[page_num] = line_bbox[3]
2272
+ i += 1
2273
+
2274
+ if not done:
2275
+ for page_num, bbox in current_bbox.items():
2276
+ bbox[3] = last_y1s.get(page_num, bbox[3])
2277
+ page_highlights[page_num] = bbox
2278
+ if 'installation' in paths[-2].lower() or 'execution' in paths[-2].lower() or 'miscellaneous items' in paths[-2].lower() :
2279
+ stringtowrite='Not to be billed'
2280
+ else:
2281
+ stringtowrite='To be billed'
2282
+ highlight_boxes(docHighlights, page_highlights,stringtowrite)
2283
+
2284
+ # docHighlights.save("highlighted_output.pdf", garbage=4, deflate=True)
2285
+ print('Alltexttobebilled')
2286
+ pdf_bytes = BytesIO()
2287
+ docHighlights.save(pdf_bytes)
2288
+ return pdf_bytes.getvalue(), docHighlights , json_output , Alltexttobebilled
2289
+
2290