Marthee committed on
Commit
80b83ba
·
verified ·
1 Parent(s): 696e934

Update InitialMarkups.py

Browse files
Files changed (1) hide show
  1. InitialMarkups.py +438 -0
InitialMarkups.py CHANGED
@@ -1476,4 +1476,442 @@ def extract_section_under_header_tobebilledOnly(pdf_path):
1476
 
1477
 
1478
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1479
 
 
1476
 
1477
 
1478
 
1479
def extract_section_under_header_tobebilled2(pdf_path):
    """Highlight and classify the section under every leaf header of a PDF.

    Downloads the PDF at *pdf_path*, detects its table-of-contents pages and
    header hierarchy, then for each leaf header collects the body lines
    beneath it (until the next header styled at least as large as the one
    that opened the section), highlights the collected region in a copy of
    the document and records a markup entry with a deep link.

    Sections whose parent header (``paths[-2]``) contains 'installation',
    'execution' or 'miscellaneous items' are classified 'Not to be billed';
    everything else is 'To be billed'.

    Args:
        pdf_path: HTTP(S) URL of the PDF.  Dropbox share links (``dl=0``)
            are rewritten to direct-download links (``dl=1``).

    Returns:
        tuple: ``(pdf_bytes, docHighlights, json_output, Alltexttobebilled)``
        — the highlighted PDF as bytes, the open fitz highlight document,
        a JSON string of the markup entries, and the concatenated billable
        group/header text.

    Raises:
        ValueError: if the HTTP response body is empty.
    """
    top_margin = 70        # ignore text above this y (page-header zone)
    bottom_margin = 50     # ignore text below page_height - this (footer zone)
    Alltexttobebilled = ''

    # File name shown in the "MC Connnection" field of each entry.
    parsed_url = urlparse(pdf_path)
    filename = unquote(os.path.basename(parsed_url.path))  # decode %xx escapes

    # Dropbox share links must be switched to direct-download form.
    if pdf_path and ('http' in pdf_path or 'dropbox' in pdf_path):
        pdf_path = pdf_path.replace('dl=0', 'dl=1')

    response = requests.get(pdf_path)
    # BUGFIX: the original tested `not BytesIO(...)`, which is always False
    # (a BytesIO object is truthy even when empty) — test the payload itself.
    if not response.content:
        raise ValueError("No valid PDF content found.")
    pdf_content = BytesIO(response.content)

    doc = fitz.open(stream=pdf_content, filetype="pdf")
    # Second handle of the same bytes: this one receives the highlights.
    docHighlights = fitz.open(stream=pdf_content, filetype="pdf")
    most_common_font_size, most_common_color, most_common_font = get_regular_font_size_and_color(doc)

    # Patterns compiled once, used in the per-line loops below.
    dot_pattern = re.compile(r'\.{3,}')                  # TOC leader dots
    url_pattern = re.compile(r'https?://\S+|www\.\S+')   # lines that are URLs

    _NOT_BILLED_MARKERS = ('installation', 'execution', 'miscellaneous items')

    def _classify(parent_header):
        # A section is billable unless its parent header names an excluded
        # category.  Same test the original repeated inline in three places.
        lowered = parent_header.lower()
        if any(marker in lowered for marker in _NOT_BILLED_MARKERS):
            return 'Not to be billed'
        return 'To be billed'

    def _union_bbox(bbox_map, page_num, bbox):
        # Grow (or create) the accumulated bounding box for *page_num*.
        if page_num in bbox_map:
            cb = bbox_map[page_num]
            bbox_map[page_num] = [
                min(cb[0], bbox[0]),
                min(cb[1], bbox[1]),
                max(cb[2], bbox[2]),
                max(cb[3], bbox[3]),
            ]
        else:
            bbox_map[page_num] = bbox

    def _spans_bbox(spans):
        # Union bounding box of every span that carries a bbox, or None.
        valid = [s for s in spans if s.get("bbox")]
        if not valid:
            return None
        return [
            min(s["bbox"][0] for s in valid),
            min(s["bbox"][1] for s in valid),
            max(s["bbox"][2] for s in valid),
            max(s["bbox"][3] for s in valid),
        ]

    def _build_data_entry(heading_to_search, paths, page_number_found, bbox, stringtowrite):
        # One markup record: an encoded deep link into the viewer plus the
        # classification metadata.  Was duplicated verbatim in both match
        # branches of the original.
        zoom_str = f"200,{int(bbox[0])},{int(bbox[1])}"
        params = {
            'pdfLink': pdf_path,            # the (possibly rewritten) PDF link
            'keyword': heading_to_search,   # the header being marked up
        }
        encoded_params = {key: urllib.parse.quote(value, safe='') for key, value in params.items()}
        encoded_link = '&'.join(f"{key}={value}" for key, value in encoded_params.items())
        final_url = f"{baselink}{encoded_link}#page={str(page_number_found)}&zoom={zoom_str}"
        formatted_time = datetime.now().strftime("%d/%m/%Y %I:%M:%S %p")
        return {
            "NBSLink": final_url,
            "Subject": heading_to_search,
            "Page": str(page_number_found),
            "Author": "ADR",
            "Creation Date": formatted_time,
            "Layer": "Initial",
            "Code": stringtowrite,
            "head above 1": paths[-2],
            "head above 2": paths[0],
            # NOTE: the 'Connnection' spelling is kept as-is — downstream
            # consumers key on this exact string.
            "MC Connnection": 'Go to ' + paths[0].strip().split()[0] + '/'
                              + heading_to_search.strip().split()[0] + ' in ' + filename,
        }

    def get_toc_page_numbers(doc, max_pages_to_check=15):
        # A page containing >= 3 leader-dot lines is assumed to belong to the
        # table of contents; everything up to the last such page is TOC.
        toc_pages = []
        for page_num in range(min(len(doc), max_pages_to_check)):
            page = doc.load_page(page_num)
            blocks = page.get_text("dict")["blocks"]

            dot_line_count = 0
            for block in blocks:
                for line in block.get("lines", []):
                    line_text = get_spaced_text_from_spans(line["spans"]).strip()
                    if dot_pattern.search(line_text):
                        dot_line_count += 1

            if dot_line_count >= 3:
                toc_pages.append(page_num)

        return list(range(0, toc_pages[-1] + 1)) if toc_pages else toc_pages

    toc_pages = get_toc_page_numbers(doc)

    headers, top_3_font_sizes, smallest_font_size, headersSpans = extract_headers(
        doc, toc_pages, most_common_font_size, most_common_color, most_common_font,
        top_margin, bottom_margin
    )

    hierarchy = build_header_hierarchy(doc, toc_pages, most_common_font_size,
                                       most_common_color, most_common_font)
    listofHeaderstoMarkup = get_leaf_headers_with_paths(hierarchy)

    # Precompute all leaf-header texts once; the set gives O(1) membership.
    allchildrenheaders_set = {normalize_text(item['text']) for item, p in listofHeaderstoMarkup}

    data_list_JSON = []
    # BUGFIX: defined up front so the final return never raises NameError
    # when no header ever matches.
    json_output = json.dumps(data_list_JSON, indent=4)
    currentgroupname = ''

    # Header-size thresholds.  BUGFIX: the original left mainHeaderFontSize
    # unbound when fewer than two distinct sizes were detected.
    if len(top_3_font_sizes) == 3:
        mainHeaderFontSize, subHeaderFontSize, subsubheaderFontSize = top_3_font_sizes
    elif len(top_3_font_sizes) == 2:
        mainHeaderFontSize = top_3_font_sizes[0]
        subHeaderFontSize = subsubheaderFontSize = top_3_font_sizes[1]
    elif len(top_3_font_sizes) == 1:
        mainHeaderFontSize = subHeaderFontSize = subsubheaderFontSize = top_3_font_sizes[0]
    else:
        # No header sizes at all: fall back to the body size so the span
        # filters below stay well-defined (they will simply match nothing).
        mainHeaderFontSize = subHeaderFontSize = subsubheaderFontSize = most_common_font_size

    for heading_to_searchDict, paths in listofHeaderstoMarkup:
        heading_to_search = heading_to_searchDict['text']
        heading_to_searchPageNum = heading_to_searchDict['page']

        # Per-header state.
        done = False                 # True once the section end was found
        collecting = False           # True while inside the target section
        break_collecting = False     # signals every nested loop to stop
        collected_lines = []
        page_highlights = {}         # page -> final highlight bbox
        current_bbox = {}            # page -> running union bbox
        last_y1s = {}                # page -> bottom y of last collected line
        # NOTE: deliberately the raw header text, not normalized, as in the
        # original — the end-of-section comparison below relies on it.
        matched_header_line_norm = heading_to_search
        matched_header_font_size = most_common_font_size  # safe default until a header matches
        heading_norm = normalize_text(heading_to_search)
        # Classification is a pure function of paths[-2]; hoisted out of the
        # per-line loop (the original recomputed it for every line).
        stringtowrite = _classify(paths[-2])

        for page_num in range(heading_to_searchPageNum, len(doc)):
            print(heading_to_search)
            # Record each top-level group name once in the billable text.
            if paths[0].strip().lower() != currentgroupname.strip().lower():
                Alltexttobebilled += ' ' + paths[0]
                currentgroupname = paths[0]
                print(paths[0])
            if page_num in toc_pages:
                continue
            if break_collecting:
                break

            page = doc[page_num]
            page_height = page.rect.height
            blocks = page.get_text("dict")["blocks"]

            for block in blocks:
                if break_collecting:
                    break

                lines = block.get("lines", [])
                i = 0
                while i < len(lines):
                    if break_collecting:
                        break

                    spans = lines[i].get("spans", [])
                    if not spans:
                        i += 1
                        continue

                    # Skip lines inside the header/footer margins.
                    y0 = spans[0]["bbox"][1]
                    y1 = spans[0]["bbox"][3]
                    if y0 < top_margin or y1 > (page_height - bottom_margin):
                        i += 1
                        continue

                    line_text = get_spaced_text_from_spans(spans).lower()
                    line_text_norm = normalize_text(line_text)

                    # Headers may wrap: also try this line joined with the next.
                    if i + 1 < len(lines):
                        next_spans = lines[i + 1].get("spans", [])
                        next_line_text = get_spaced_text_from_spans(next_spans).lower()
                        combined_line_norm = normalize_text(line_text + " " + next_line_text)
                    else:
                        combined_line_norm = line_text_norm

                    # Non-billable text is accumulated too (original behavior).
                    if stringtowrite != 'To be billed':
                        Alltexttobebilled += combined_line_norm

                    # Strong match: the combined line is itself a known leaf
                    # header and contains the target header text.
                    existsfull = (
                        combined_line_norm in allchildrenheaders_set
                        and heading_to_search in combined_line_norm
                    )

                    # Fuzzy match: containment either way, or every word of
                    # the line appears among the heading's words.
                    current_line_words = set(combined_line_norm.split())
                    heading_words = set(heading_norm.split())
                    all_words_match = bool(current_line_words) and current_line_words.issubset(heading_words)

                    substring_match = (
                        heading_norm in combined_line_norm
                        or combined_line_norm in heading_norm
                        or all_words_match
                    )

                    if substring_match and not collecting and combined_line_norm:
                        # Only spans styled as headers *below* the main-header
                        # size may open a section.
                        header_spans = [
                            span for span in spans
                            if is_header(span, most_common_font_size, most_common_color, most_common_font)
                            and span['size'] < mainHeaderFontSize
                        ]
                        # Accept on the strong match outright, or on the weak
                        # match when 100% of the heading's words are present or
                        # the first words coincide (both original criteria).
                        accept = bool(header_spans) and (
                            existsfull
                            or words_match_ratio(heading_norm, combined_line_norm) * 100 >= 100
                            or same_start_word(heading_to_search, combined_line_norm)
                        )
                        if accept:
                            collecting = True
                            Alltexttobebilled += ' ' + combined_line_norm
                            matched_header_font_size = max(span["size"] for span in header_spans)
                            collected_lines.append(line_text)

                            header_bbox = _spans_bbox(spans)
                            if header_bbox:
                                _union_bbox(current_bbox, page_num, header_bbox)
                                last_y1s[page_num] = header_bbox[3]
                                data_list_JSON.append(_build_data_entry(
                                    heading_to_search, paths, page_num + 1,
                                    header_bbox, stringtowrite))
                                json_output = json.dumps(data_list_JSON, indent=4)

                            # Skip the wrapped second line of the header too.
                            i += 2
                            continue

                    if collecting:
                        norm_line = normalize_text(line_text)

                        # URL lines are never treated as section-ending headers.
                        if url_pattern.match(norm_line):
                            line_is_header = False
                        else:
                            line_is_header = any(
                                is_header(span, most_common_font_size, most_common_color, most_common_font)
                                for span in spans
                            )

                        if line_is_header:
                            header_font_size = max(span["size"] for span in spans)
                            # A new section starts at a header styled at least
                            # as large as the one that opened this section.
                            is_probably_real_header = (
                                header_font_size >= matched_header_font_size
                                and is_header(spans[0], most_common_font_size, most_common_color, most_common_font)
                                and len(line_text.strip()) > 2
                            )

                            if (norm_line != matched_header_line_norm
                                    and norm_line != heading_norm
                                    and is_probably_real_header
                                    and line_text not in heading_norm):
                                # Section ended: freeze each page's box at the
                                # last collected line and highlight.
                                collecting = False
                                done = True
                                # BUGFIX: the original reused `page_num` as the
                                # iteration variable here, clobbering the outer
                                # page loop's index.
                                for pg, bbox in current_bbox.items():
                                    bbox[3] = last_y1s.get(pg, bbox[3])
                                    page_highlights[pg] = bbox
                                highlight_boxes(docHighlights, page_highlights, stringtowrite)
                                break_collecting = True
                                break

                        collected_lines.append(line_text)
                        line_bbox = _spans_bbox(spans)
                        if line_bbox:
                            _union_bbox(current_bbox, page_num, line_bbox)
                            last_y1s[page_num] = line_bbox[3]

                    i += 1

        if not done:
            # Reached the end of the document while still collecting (or the
            # header was never matched): highlight whatever was gathered.
            for pg, bbox in current_bbox.items():
                bbox[3] = last_y1s.get(pg, bbox[3])
                page_highlights[pg] = bbox
            highlight_boxes(docHighlights, page_highlights, stringtowrite)

    pdf_bytes = BytesIO()
    docHighlights.save(pdf_bytes)
    return pdf_bytes.getvalue(), docHighlights, json_output, Alltexttobebilled
1917