Marthee committed on
Commit
d48c776
·
verified ·
1 Parent(s): bce0410

Update InitialMarkups.py

Browse files
Files changed (1) hide show
  1. InitialMarkups.py +482 -0
InitialMarkups.py CHANGED
@@ -1921,3 +1921,485 @@ def extract_section_under_header_tobebilled2(pdf_path):
1921
 
1922
 
1923
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1921
 
1922
 
1923
 
1924
+
1925
+ def extract_section_under_header_tobebilled2marthe(multiplePDF_Paths):
1926
+ # keywordstoSkip=["installation", "execution", "miscellaneous items", "workmanship", "testing", "labeling"]
1927
+
1928
+ keywords = {'installation', 'execution', 'miscellaneous items', 'workmanship', 'testing', 'labeling'}
1929
+ top_margin = 70
1930
+ bottom_margin = 50
1931
+ arrayofPDFS=multiplePDF_Paths.split(',')
1932
+ print(multiplePDF_Paths)
1933
+ print(arrayofPDFS)
1934
+ df = pd.DataFrame(columns=["PDF Name","NBSLink","Subject","Page","Author","Creation Date","Layer",'Code', 'head above 1', "head above 2","BodyText"])
1935
+ for pdf_path in arrayofPDFS:
1936
+
1937
+ headertoContinue1 = False
1938
+ headertoContinue2=False
1939
+ Alltexttobebilled=''
1940
+
1941
+
1942
+
1943
+ parsed_url = urlparse(pdf_path)
1944
+ filename = os.path.basename(parsed_url.path)
1945
+ filename = unquote(filename) # decode URL-encoded characters
1946
+
1947
+
1948
+ # Optimized URL handling
1949
+ if pdf_path and ('http' in pdf_path or 'dropbox' in pdf_path):
1950
+ pdf_path = pdf_path.replace('dl=0', 'dl=1')
1951
+
1952
+ # Cache frequently used values
1953
+ response = requests.get(pdf_path)
1954
+ pdf_content = BytesIO(response.content)
1955
+ if not pdf_content:
1956
+ raise ValueError("No valid PDF content found.")
1957
+
1958
+ doc = fitz.open(stream=pdf_content, filetype="pdf")
1959
+ docHighlights = fitz.open(stream=pdf_content, filetype="pdf")
1960
+ most_common_font_size, most_common_color, most_common_font = get_regular_font_size_and_color(doc)
1961
+
1962
+ # Precompute regex patterns
1963
+ dot_pattern = re.compile(r'\.{3,}')
1964
+ url_pattern = re.compile(r'https?://\S+|www\.\S+')
1965
+
1966
+ def get_toc_page_numbers(doc, max_pages_to_check=15):
1967
+ toc_pages = []
1968
+ for page_num in range(min(len(doc), max_pages_to_check)):
1969
+ page = doc.load_page(page_num)
1970
+ blocks = page.get_text("dict")["blocks"]
1971
+
1972
+ dot_line_count = 0
1973
+ for block in blocks:
1974
+ for line in block.get("lines", []):
1975
+ line_text = get_spaced_text_from_spans(line["spans"]).strip()
1976
+ if dot_pattern.search(line_text):
1977
+ dot_line_count += 1
1978
+
1979
+ if dot_line_count >= 3:
1980
+ toc_pages.append(page_num)
1981
+
1982
+ return list(range(0, toc_pages[-1] +1)) if toc_pages else toc_pages
1983
+
1984
+ toc_pages = get_toc_page_numbers(doc)
1985
+
1986
+ headers, top_3_font_sizes, smallest_font_size, headersSpans = extract_headers(
1987
+ doc, toc_pages, most_common_font_size, most_common_color, most_common_font, top_margin, bottom_margin
1988
+ )
1989
+
1990
+ hierarchy = build_header_hierarchy(doc, toc_pages, most_common_font_size, most_common_color, most_common_font)
1991
+ listofHeaderstoMarkup = get_leaf_headers_with_paths(hierarchy)
1992
+
1993
+ # Precompute all children headers once
1994
+ allchildrenheaders = [normalize_text(item['text']) for item, p in listofHeaderstoMarkup]
1995
+ allchildrenheaders_set = set(allchildrenheaders) # For faster lookups
1996
+
1997
+
1998
+ dictionaryNBS={}
1999
+ data_list_JSON = []
2000
+ currentgroupname=''
2001
+ if len(top_3_font_sizes)==3:
2002
+ mainHeaderFontSize, subHeaderFontSize, subsubheaderFontSize = top_3_font_sizes
2003
+ elif len(top_3_font_sizes)==2:
2004
+ mainHeaderFontSize= top_3_font_sizes[0]
2005
+ subHeaderFontSize= top_3_font_sizes[1]
2006
+ subsubheaderFontSize= top_3_font_sizes[1]
2007
+
2008
+
2009
+
2010
+ # Preload all pages to avoid repeated loading
2011
+ # pages = [doc.load_page(page_num) for page_num in range(len(doc)) if page_num not in toc_pages]
2012
+
2013
+ for heading_to_searchDict, paths in listofHeaderstoMarkup:
2014
+ heading_to_search = heading_to_searchDict['text']
2015
+ heading_to_searchPageNum = heading_to_searchDict['page']
2016
+
2017
+ # Initialize variables
2018
+ headertoContinue1 = False
2019
+ headertoContinue2 = False
2020
+ matched_header_line = None
2021
+ done = False
2022
+ collecting = False
2023
+ collected_lines = []
2024
+ page_highlights = {}
2025
+ current_bbox = {}
2026
+ last_y1s = {}
2027
+ mainHeader = ''
2028
+ subHeader = ''
2029
+ matched_header_line_norm = heading_to_search
2030
+ break_collecting = False
2031
+ heading_norm = normalize_text(heading_to_search)
2032
+ paths_norm = [normalize_text(p) for p in paths[0]] if paths and paths[0] else []
2033
+ for page_num in range(heading_to_searchPageNum,len(doc)):
2034
+ # print(heading_to_search)
2035
+ if paths[0].strip().lower() != currentgroupname.strip().lower():
2036
+ Alltexttobebilled+= paths[0] +'\n'
2037
+ currentgroupname=paths[0]
2038
+ # print(paths[0])
2039
+
2040
+
2041
+ if page_num in toc_pages:
2042
+ continue
2043
+ if break_collecting:
2044
+ break
2045
+ page=doc[page_num]
2046
+ page_height = page.rect.height
2047
+ blocks = page.get_text("dict")["blocks"]
2048
+
2049
+ for block in blocks:
2050
+ if break_collecting:
2051
+ break
2052
+
2053
+ lines = block.get("lines", [])
2054
+ i = 0
2055
+ while i < len(lines):
2056
+ if break_collecting:
2057
+ break
2058
+
2059
+ spans = lines[i].get("spans", [])
2060
+ if not spans:
2061
+ i += 1
2062
+ continue
2063
+
2064
+ y0 = spans[0]["bbox"][1]
2065
+ y1 = spans[0]["bbox"][3]
2066
+ if y0 < top_margin or y1 > (page_height - bottom_margin):
2067
+ i += 1
2068
+ continue
2069
+
2070
+ line_text = get_spaced_text_from_spans(spans).lower()
2071
+ line_text_norm = normalize_text(line_text)
2072
+
2073
+ # Combine with next line if available
2074
+ if i + 1 < len(lines):
2075
+ next_spans = lines[i + 1].get("spans", [])
2076
+ next_line_text = get_spaced_text_from_spans(next_spans).lower()
2077
+ combined_line_norm = normalize_text(line_text + " " + next_line_text)
2078
+ else:
2079
+ combined_line_norm = line_text_norm
2080
+
2081
+ # Check if we should continue processing
2082
+ if combined_line_norm and combined_line_norm in paths[0]:
2083
+
2084
+ headertoContinue1 = combined_line_norm
2085
+ if combined_line_norm and combined_line_norm in paths[-2]:
2086
+
2087
+ headertoContinue2 = combined_line_norm
2088
+ # if 'installation' in paths[-2].lower() or 'execution' in paths[-2].lower() or 'miscellaneous items' in paths[-2].lower() :
2089
+ # last_path = paths[-2].lower()
2090
+ flagstring=False
2091
+ last_word = paths[-2].lower()
2092
+
2093
+ # if any(word in paths[-2].lower() for word in keywordstoSkip):
2094
+ # if 'installation' in paths[-2].lower() or 'execution' in paths[-2].lower() or 'miscellaneous items' in paths[-2].lower() or 'workmanship' in paths[-2].lower() or 'testing' in paths[-2].lower() or 'labeling' in paths[-2].lower():
2095
+ # if any(keyword in last_word for keyword in keywords):
2096
+ # Precompile regex for faster repeated checks
2097
+ pattern = re.compile(r'installation|execution|miscellaneous items|workmanship|testing|labeling', re.IGNORECASE)
2098
+
2099
+ if pattern.search(last_word):
2100
+ stringtowrite = 'Not to be billed'
2101
+ flagstring = False
2102
+ else:
2103
+ stringtowrite = 'To be billed'
2104
+ flagstring = True
2105
+
2106
+ if flagstring:
2107
+ # Alltexttobebilled+= combined_line_norm #################################################
2108
+ if matched_header_line_norm in combined_line_norm:
2109
+ Alltexttobebilled+='\n'
2110
+ Alltexttobebilled+= ' '+combined_line_norm
2111
+ # Optimized header matching
2112
+ existsfull = (
2113
+ ( combined_line_norm in allchildrenheaders_set or
2114
+ combined_line_norm in allchildrenheaders ) and heading_to_search in combined_line_norm
2115
+ )
2116
+
2117
+ # New word-based matching
2118
+ current_line_words = set(combined_line_norm.split())
2119
+ heading_words = set(heading_norm.split())
2120
+ all_words_match = current_line_words.issubset(heading_words) and len(current_line_words) > 0
2121
+
2122
+ substring_match = (
2123
+ heading_norm in combined_line_norm or
2124
+ combined_line_norm in heading_norm or
2125
+ all_words_match # Include the new word-based matching
2126
+ )
2127
+ # substring_match = (
2128
+ # heading_norm in combined_line_norm or
2129
+ # combined_line_norm in heading_norm
2130
+ # )
2131
+
2132
+ if (substring_match and existsfull and not collecting and
2133
+ len(combined_line_norm) > 0 ):#and (headertoContinue1 or headertoContinue2) ):
2134
+
2135
+ # Check header conditions more efficiently
2136
+ header_spans = [
2137
+ span for span in spans
2138
+ if (is_header(span, most_common_font_size, most_common_color, most_common_font)
2139
+ # and span['size'] >= subsubheaderFontSize
2140
+ and span['size'] < mainHeaderFontSize)
2141
+ ]
2142
+ if header_spans and flagstring:
2143
+ collecting = True
2144
+ # if stringtowrite=='To be billed':
2145
+ # Alltexttobebilled+='\n'
2146
+ matched_header_font_size = max(span["size"] for span in header_spans)
2147
+
2148
+ # collected_lines.append(line_text)
2149
+ valid_spans = [span for span in spans if span.get("bbox")]
2150
+
2151
+ if valid_spans:
2152
+ x0s = [span["bbox"][0] for span in valid_spans]
2153
+ x1s = [span["bbox"][2] for span in valid_spans]
2154
+ y0s = [span["bbox"][1] for span in valid_spans]
2155
+ y1s = [span["bbox"][3] for span in valid_spans]
2156
+
2157
+ header_bbox = [min(x0s), min(y0s), max(x1s), max(y1s)]
2158
+
2159
+ if page_num in current_bbox:
2160
+ cb = current_bbox[page_num]
2161
+ current_bbox[page_num] = [
2162
+ min(cb[0], header_bbox[0]),
2163
+ min(cb[1], header_bbox[1]),
2164
+ max(cb[2], header_bbox[2]),
2165
+ max(cb[3], header_bbox[3])
2166
+ ]
2167
+ else:
2168
+ current_bbox[page_num] = header_bbox
2169
+ last_y1s[page_num] = header_bbox[3]
2170
+ x0, y0, x1, y1 = header_bbox
2171
+
2172
+ zoom = 200
2173
+ left = int(x0)
2174
+ top = int(y0)
2175
+ zoom_str = f"{zoom},{left},{top}"
2176
+ pageNumberFound = page_num + 1
2177
+
2178
+ # Build the query parameters
2179
+ params = {
2180
+ 'pdfLink': pdf_path, # Your PDF link
2181
+ 'keyword': heading_to_search, # Your keyword (could be a string or list)
2182
+ }
2183
+
2184
+ # URL encode each parameter
2185
+ encoded_params = {key: urllib.parse.quote(value, safe='') for key, value in params.items()}
2186
+
2187
+ # Construct the final encoded link
2188
+ encoded_link = '&'.join([f"{key}={value}" for key, value in encoded_params.items()])
2189
+
2190
+ # Correctly construct the final URL with page and zoom
2191
+ final_url = f"{baselink}{encoded_link}#page={str(pageNumberFound)}&zoom={zoom_str}"
2192
+
2193
+ # Get current date and time
2194
+ now = datetime.now()
2195
+
2196
+ # Format the output
2197
+ formatted_time = now.strftime("%d/%m/%Y %I:%M:%S %p")
2198
+ # Optionally, add the URL to a DataFrame
2199
+
2200
+
2201
+ data_entry = {
2202
+ "PDF Name":filename,
2203
+ "NBSLink": final_url,
2204
+ "Subject": heading_to_search,
2205
+ "Page": str(pageNumberFound),
2206
+ "Author": "ADR",
2207
+ "Creation Date": formatted_time,
2208
+ "Layer": "Initial",
2209
+ "Code": stringtowrite,
2210
+ "head above 1": paths[-2],
2211
+ "head above 2": paths[0],
2212
+ "BodyText":collected_lines,
2213
+ "MC Connnection": 'Go to ' + paths[0].strip().split()[0] +'/'+ heading_to_search.strip().split()[0] + ' in '+ filename
2214
+ }
2215
+ data_list_JSON.append(data_entry)
2216
+
2217
+ # Convert list to JSON
2218
+ json_output = json.dumps(data_list_JSON, indent=4)
2219
+
2220
+ i += 2
2221
+ continue
2222
+ else:
2223
+ if (substring_match and not collecting and
2224
+ len(combined_line_norm) > 0): # and (headertoContinue1 or headertoContinue2) ):
2225
+
2226
+ # Calculate word match percentage
2227
+ word_match_percent = words_match_ratio(heading_norm, combined_line_norm) * 100
2228
+
2229
+ # Check if at least 70% of header words exist in this line
2230
+ meets_word_threshold = word_match_percent >= 100
2231
+
2232
+ # Check header conditions (including word threshold)
2233
+ header_spans = [
2234
+ span for span in spans
2235
+ if (is_header(span, most_common_font_size, most_common_color, most_common_font)
2236
+ # and span['size'] >= subsubheaderFontSize
2237
+ and span['size'] < mainHeaderFontSize)
2238
+ ]
2239
+
2240
+ if header_spans and (meets_word_threshold or same_start_word(heading_to_search, combined_line_norm) ) and flagstring:
2241
+ collecting = True
2242
+ if flagstring:
2243
+ Alltexttobebilled+='\n'
2244
+ # if stringtowrite=='To be billed':
2245
+ # Alltexttobebilled+= ' '+ combined_line_norm
2246
+ matched_header_font_size = max(span["size"] for span in header_spans)
2247
+
2248
+ collected_lines.append(line_text)
2249
+ valid_spans = [span for span in spans if span.get("bbox")]
2250
+
2251
+ if valid_spans:
2252
+ x0s = [span["bbox"][0] for span in valid_spans]
2253
+ x1s = [span["bbox"][2] for span in valid_spans]
2254
+ y0s = [span["bbox"][1] for span in valid_spans]
2255
+ y1s = [span["bbox"][3] for span in valid_spans]
2256
+
2257
+ header_bbox = [min(x0s), min(y0s), max(x1s), max(y1s)]
2258
+
2259
+ if page_num in current_bbox:
2260
+ cb = current_bbox[page_num]
2261
+ current_bbox[page_num] = [
2262
+ min(cb[0], header_bbox[0]),
2263
+ min(cb[1], header_bbox[1]),
2264
+ max(cb[2], header_bbox[2]),
2265
+ max(cb[3], header_bbox[3])
2266
+ ]
2267
+ else:
2268
+ current_bbox[page_num] = header_bbox
2269
+
2270
+ last_y1s[page_num] = header_bbox[3]
2271
+ x0, y0, x1, y1 = header_bbox
2272
+ zoom = 200
2273
+ left = int(x0)
2274
+ top = int(y0)
2275
+ zoom_str = f"{zoom},{left},{top}"
2276
+ pageNumberFound = page_num + 1
2277
+
2278
+ # Build the query parameters
2279
+ params = {
2280
+ 'pdfLink': pdf_path, # Your PDF link
2281
+ 'keyword': heading_to_search, # Your keyword (could be a string or list)
2282
+ }
2283
+
2284
+ # URL encode each parameter
2285
+ encoded_params = {key: urllib.parse.quote(value, safe='') for key, value in params.items()}
2286
+
2287
+ # Construct the final encoded link
2288
+ encoded_link = '&'.join([f"{key}={value}" for key, value in encoded_params.items()])
2289
+
2290
+ # Correctly construct the final URL with page and zoom
2291
+ final_url = f"{baselink}{encoded_link}#page={str(pageNumberFound)}&zoom={zoom_str}"
2292
+
2293
+ # Get current date and time
2294
+ now = datetime.now()
2295
+
2296
+ # Format the output
2297
+ formatted_time = now.strftime("%d/%m/%Y %I:%M:%S %p")
2298
+ # Optionally, add the URL to a DataFrame
2299
+
2300
+
2301
+ data_entry = {
2302
+ "PDF Name": filename,
2303
+ "NBSLink": final_url,
2304
+ "Subject": heading_to_search,
2305
+ "Page": str(pageNumberFound),
2306
+ "Author": "ADR",
2307
+ "Creation Date": formatted_time,
2308
+ "Layer": "Initial",
2309
+ "Code": stringtowrite,
2310
+ "head above 1": paths[-2],
2311
+ "head above 2": paths[0],
2312
+ "BodyText":collected_lines,
2313
+ "MC Connnection": 'Go to ' + paths[0].strip().split()[0] +'/'+ heading_to_search.strip().split()[0] + ' in '+ filename
2314
+ }
2315
+ data_list_JSON.append(data_entry)
2316
+
2317
+ # Convert list to JSON
2318
+ json_output = json.dumps(data_list_JSON, indent=4)
2319
+
2320
+
2321
+ i += 2
2322
+ continue
2323
+ if collecting:
2324
+ norm_line = normalize_text(line_text)
2325
+
2326
+ # Optimized URL check
2327
+ if url_pattern.match(norm_line):
2328
+ line_is_header = False
2329
+ else:
2330
+ line_is_header = any(is_header(span, most_common_font_size, most_common_color, most_common_font) for span in spans)
2331
+
2332
+ if line_is_header:
2333
+ header_font_size = max(span["size"] for span in spans)
2334
+ is_probably_real_header = (
2335
+ header_font_size >= matched_header_font_size and
2336
+ is_header(spans[0], most_common_font_size, most_common_color, most_common_font) and
2337
+ len(line_text.strip()) > 2
2338
+ )
2339
+
2340
+ if (norm_line != matched_header_line_norm and
2341
+ norm_line != heading_norm and
2342
+ is_probably_real_header):
2343
+ if line_text not in heading_norm:
2344
+ collecting = False
2345
+ done = True
2346
+ headertoContinue1 = False
2347
+ headertoContinue2=False
2348
+ for page_num, bbox in current_bbox.items():
2349
+ bbox[3] = last_y1s.get(page_num, bbox[3])
2350
+ page_highlights[page_num] = bbox
2351
+ highlight_boxes(docHighlights, page_highlights,stringtowrite)
2352
+
2353
+ break_collecting = True
2354
+ break
2355
+
2356
+ if break_collecting:
2357
+ break
2358
+
2359
+ collected_lines.append(line_text)
2360
+ valid_spans = [span for span in spans if span.get("bbox")]
2361
+ if valid_spans:
2362
+ x0s = [span["bbox"][0] for span in valid_spans]
2363
+ x1s = [span["bbox"][2] for span in valid_spans]
2364
+ y0s = [span["bbox"][1] for span in valid_spans]
2365
+ y1s = [span["bbox"][3] for span in valid_spans]
2366
+
2367
+ line_bbox = [min(x0s), min(y0s), max(x1s), max(y1s)]
2368
+
2369
+ if page_num in current_bbox:
2370
+ cb = current_bbox[page_num]
2371
+ current_bbox[page_num] = [
2372
+ min(cb[0], line_bbox[0]),
2373
+ min(cb[1], line_bbox[1]),
2374
+ max(cb[2], line_bbox[2]),
2375
+ max(cb[3], line_bbox[3])
2376
+ ]
2377
+ else:
2378
+ current_bbox[page_num] = line_bbox
2379
+
2380
+ last_y1s[page_num] = line_bbox[3]
2381
+ i += 1
2382
+
2383
+ if not done:
2384
+ for page_num, bbox in current_bbox.items():
2385
+ bbox[3] = last_y1s.get(page_num, bbox[3])
2386
+ page_highlights[page_num] = bbox
2387
+ if flagstring:
2388
+ # if 'installation' in paths[-2].lower() or 'execution' in paths[-2].lower() or 'miscellaneous items' in paths[-2].lower() :
2389
+ stringtowrite='To be billed'
2390
+ else:
2391
+ stringtowrite='Not to be billed'
2392
+ highlight_boxes(docHighlights, page_highlights,stringtowrite)
2393
+
2394
+ # docHighlights.save("highlighted_output.pdf", garbage=4, deflate=True)
2395
+
2396
+ pdf_bytes = BytesIO()
2397
+ docHighlights.save(pdf_bytes)
2398
+
2399
+ return pdf_bytes.getvalue(), docHighlights , json_output, Alltexttobebilled , filename
2400
+
2401
+
2402
+
2403
+
2404
+
2405
+