Marthee committed on
Commit
d938595
·
verified ·
1 Parent(s): 341b3ef

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +666 -12
app.py CHANGED
@@ -802,7 +802,38 @@ def openPDF(pdf_path):
802
  # return out
803
 
804
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
805
  def identify_headers_with_openrouterNEWW(pdf_path, model,LLM_prompt, pages_to_check=None, top_margin=0, bottom_margin=0):
 
806
  """Ask an LLM (OpenRouter) to identify headers in the document.
807
  Returns a list of dicts: {text, page, suggested_level, confidence}.
808
  The function sends plain page-line strings to the LLM (including page numbers)
@@ -824,41 +855,150 @@ def identify_headers_with_openrouterNEWW(pdf_path, model,LLM_prompt, pages_to_ch
824
  lines_for_prompt = []
825
  # pgestoRun=20
826
  # logger.info(f"TOC pages to skip: {toc_pages}")
 
827
  logger.info(f"Total pages in document: {len(doc)}")
828
 
829
  # Collect text lines from pages (skip TOC pages)
830
  total_lines = 0
831
- for pno in range(len(doc)):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
832
  # if pages_to_check and pno not in pages_to_check:
833
  # continue
834
  # if pno in toc_pages:
835
  # logger.debug(f"Skipping TOC page {pno}")
836
  # continue
837
 
838
- page = doc.load_page(pno)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
839
  page_height = page.rect.height
840
  lines_on_page = 0
841
  text_dict = page.get_text("dict")
842
  lines = []
843
- # y_tolerance = 0.2 # tweak if needed (1–3 usually works)
 
844
  for block in text_dict["blocks"]:
845
  if block["type"] != 0:
846
  continue
847
  for line in block["lines"]:
848
  for span in line["spans"]:
849
  text = span["text"].strip()
850
- if not text:
851
  continue
852
- if text:
853
- # prefix with page for easier mapping back
854
- lines_for_prompt.append(f"PAGE {pno+1}: {text}")
855
- lines_on_page += 1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
856
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
857
  if lines_on_page > 0:
858
  logger.debug(f"Page {pno}: collected {lines_on_page} lines")
859
  total_lines += lines_on_page
860
-
861
  logger.info(f"Total lines collected for LLM: {total_lines}")
 
862
 
863
  if not lines_for_prompt:
864
  logger.warning("No lines collected for prompt")
@@ -1599,7 +1739,521 @@ def extract_section_under_header_tobebilledMultiplePDFS(multiplePDF_Paths,model,
1599
 
1600
 
1601
  return jsons,identified_headers
1602
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1603
  def build_subject_body_map(jsons):
1604
  subject_body = {}
1605
 
@@ -1615,8 +2269,8 @@ def build_subject_body_map(jsons):
1615
 
1616
  def identify_headers_and_save_excel(pdf_path, model,LLM_prompt):
1617
  try:
1618
- result = identify_headers_with_openrouterNEWW(pdf_path, model,LLM_prompt)
1619
- jsons = extract_section_under_header_tobebilledMultiplePDFS(pdf_path, model,result)
1620
  if not result:
1621
  df = pd.DataFrame([{
1622
  "text": None,
 
802
  # return out
803
 
804
 
805
+
806
def process_document_in_chunks(
    lengthofDoc,
    pdf_path,
    LLM_prompt,
    model,
    chunk_size=15,
):
    """Run LLM header identification over a document in page chunks.

    Splits the page range ``[0, lengthofDoc)`` into consecutive chunks of
    ``chunk_size`` pages and calls ``identify_headers_with_openrouterNEWW``
    once per chunk, concatenating the per-chunk results. Chunking keeps each
    LLM request small enough to fit the model's context window.

    Args:
        lengthofDoc: Total number of pages in the document.
        pdf_path: Path/URL of the PDF, forwarded to the identifier.
        LLM_prompt: Prompt text forwarded to the identifier.
        model: OpenRouter model name forwarded to the identifier.
        chunk_size: Pages per LLM call; must be a positive integer.

    Returns:
        list: All header dicts found across every chunk (may be empty).

    Raises:
        ValueError: If ``chunk_size`` is not positive.
    """
    if chunk_size <= 0:
        # A zero step would crash range(); a negative one would silently
        # produce no chunks. Fail loudly instead.
        raise ValueError("chunk_size must be a positive integer")

    total_pages = lengthofDoc
    all_results = []

    for start in range(0, total_pages, chunk_size):
        # Clamp the chunk end so neither the log message nor the callee
        # ever sees an out-of-range page index.
        end = min(start + chunk_size, total_pages)

        logger.info(f"Processing pages {start + 1} → {end}")

        result = identify_headers_with_openrouterNEWW(
            pdf_path=pdf_path,
            model=model,
            LLM_prompt=LLM_prompt,
            pages_to_check=(start, end),
        )

        if result:
            all_results.extend(result)

    return all_results
833
+
834
+
835
  def identify_headers_with_openrouterNEWW(pdf_path, model,LLM_prompt, pages_to_check=None, top_margin=0, bottom_margin=0):
836
+
837
  """Ask an LLM (OpenRouter) to identify headers in the document.
838
  Returns a list of dicts: {text, page, suggested_level, confidence}.
839
  The function sends plain page-line strings to the LLM (including page numbers)
 
855
  lines_for_prompt = []
856
  # pgestoRun=20
857
  # logger.info(f"TOC pages to skip: {toc_pages}")
858
+ # logger.info(f"Total pages in document: {len(doc)}")
859
  logger.info(f"Total pages in document: {len(doc)}")
860
 
861
  # Collect text lines from pages (skip TOC pages)
862
  total_lines = 0
863
+
864
+ ArrayofTextWithFormat = []
865
+ total_pages = len(doc)
866
+
867
+ if pages_to_check is None:
868
+ start_page = 0
869
+ end_page = min(15, total_pages)
870
+ else:
871
+ start_page, end_page = pages_to_check
872
+ end_page = min(end_page, total_pages) # 🔑 CRITICAL LINE
873
+
874
+ for pno in range(start_page, end_page):
875
+ page = doc.load_page(pno)
876
+ # # Collect text lines from pages (skip TOC pages)
877
+ # total_lines = 0
878
+ # for pno in range(len(doc)):
879
  # if pages_to_check and pno not in pages_to_check:
880
  # continue
881
  # if pno in toc_pages:
882
  # logger.debug(f"Skipping TOC page {pno}")
883
  # continue
884
 
885
+ # page = doc.load_page(pno)
886
+ # page_height = page.rect.height
887
+ # lines_on_page = 0
888
+ # text_dict = page.get_text("dict")
889
+ # lines = []
890
+ # # y_tolerance = 0.2 # tweak if needed (1–3 usually works)
891
+ # for block in text_dict["blocks"]:
892
+ # if block["type"] != 0:
893
+ # continue
894
+ # for line in block["lines"]:
895
+ # for span in line["spans"]:
896
+ # text = span["text"].strip()
897
+ # if not text:
898
+ # continue
899
+ # if text:
900
+ # # prefix with page for easier mapping back
901
+ # lines_for_prompt.append(f"PAGE {pno+1}: {text}")
902
+ # lines_on_page += 1
903
+
904
+ # if lines_on_page > 0:
905
+ # logger.debug(f"Page {pno}: collected {lines_on_page} lines")
906
+ # total_lines += lines_on_page
907
+
908
+ # logger.info(f"Total lines collected for LLM: {total_lines}")
909
  page_height = page.rect.height
910
  lines_on_page = 0
911
  text_dict = page.get_text("dict")
912
  lines = []
913
+ y_tolerance = 0.5 # tweak if needed (1–3 usually works)
914
+
915
  for block in text_dict["blocks"]:
916
  if block["type"] != 0:
917
  continue
918
  for line in block["lines"]:
919
  for span in line["spans"]:
920
  text = span["text"].strip()
921
+ if not text: # Skip empty text
922
  continue
923
+
924
+ # Extract all formatting attributes
925
+ font = span.get('font')
926
+ size = span.get('size')
927
+ color = span.get('color')
928
+ flags = span.get('flags', 0)
929
+ bbox = span.get("bbox", (0, 0, 0, 0))
930
+ x0, y0, x1, y1 = bbox
931
+
932
+ # Create text format dictionary
933
+ text_format = {
934
+ 'Font': font,
935
+ 'Size': size,
936
+ 'Flags': flags,
937
+ 'Color': color,
938
+ 'Text': text,
939
+ 'BBox': bbox,
940
+ 'Page': pno + 1
941
+ }
942
+
943
+ # Add to ArrayofTextWithFormat
944
+ ArrayofTextWithFormat.append(text_format)
945
+
946
+ # For line grouping (keeping your existing logic)
947
+ matched = False
948
+ for l in lines:
949
+ if abs(l["y"] - y0) <= y_tolerance:
950
+ l["spans"].append((x0, text, font, size, color, flags))
951
+ matched = True
952
+ break
953
+ if not matched:
954
+ lines.append({
955
+ "y": y0,
956
+ "spans": [(x0, text, font, size, color, flags)]
957
+ })
958
+
959
+ lines.sort(key=lambda l: l["y"])
960
 
961
+ # Join text inside each line with formatting info
962
+ final_lines = []
963
+ for l in lines:
964
+ l["spans"].sort(key=lambda s: s[0]) # left → right
965
+
966
+ # Collect all text and formatting for this line
967
+ line_text = " ".join(text for _, text, _, _, _, _ in l["spans"])
968
+
969
+ # Get dominant formatting for the line (based on first span)
970
+ if l["spans"]:
971
+ _, _, font, size, color, flags = l["spans"][0]
972
+
973
+ # Store line with its formatting
974
+ line_with_format = {
975
+ 'text': line_text,
976
+ 'font': font,
977
+ 'size': size,
978
+ 'color': color,
979
+ 'flags': flags,
980
+ 'page': pno + 1,
981
+ 'y_position': l["y"]
982
+ }
983
+ final_lines.append(line_with_format)
984
+
985
+ # Result
986
+ for line_data in final_lines:
987
+ line_text = line_data['text']
988
+ print(line_text)
989
+
990
+ if line_text:
991
+ # Create a formatted string with text properties
992
+ format_info = f"Font: {line_data['font']}, Size: {line_data['size']}, Color: {line_data['color']}"
993
+ lines_for_prompt.append(f"PAGE {pno+1}: {line_text} [{format_info}]")
994
+ lines_on_page += 1
995
+
996
  if lines_on_page > 0:
997
  logger.debug(f"Page {pno}: collected {lines_on_page} lines")
998
  total_lines += lines_on_page
999
+
1000
  logger.info(f"Total lines collected for LLM: {total_lines}")
1001
+
1002
 
1003
  if not lines_for_prompt:
1004
  logger.warning("No lines collected for prompt")
 
1739
 
1740
 
1741
  return jsons,identified_headers
1742
+
1743
+
1744
+
1745
+
1746
def testFunction(pdf_path, model,LLM_prompt):
    """Download a PDF, LLM-identify its headers, then collect and highlight
    the body text under each leaf header.

    For every leaf header returned by the LLM pipeline, this scans forward
    from the header's page, matches the header line in the raw PDF text,
    collects the following lines until the next header-looking line,
    highlights the collected region on a second copy of the document, and
    records one entry per unique subject in ``data_list_JSON``.

    Args:
        pdf_path: URL (http / Dropbox share link) of the PDF to process.
        model: OpenRouter model name used for header identification.
        LLM_prompt: Prompt forwarded to the header-identification call.

    Returns:
        tuple: ``(jsons, identified_headers)`` — ``jsons`` with
        ``data_list_JSON`` appended, and the raw LLM header list.

    NOTE(review): several names read here are not defined in this function —
    ``jsons``, ``top_margin``, ``bottom_margin``, ``baselink`` — plus many
    helpers (``normalize_text``, ``is_header``, ``highlight_boxes`` …).
    They are presumably module-level; confirm before refactoring, since an
    undefined ``jsons`` would make the final append raise NameError.
    """
    # Accumulators for all matched text (the "not billed" branch is
    # currently disabled, so alltextWithoutNotbilled stays empty).
    Alltexttobebilled=''
    alltextWithoutNotbilled=''

    headertoContinue1 = False
    headertoContinue2=False

    # Derive a human-readable filename from the URL path.
    parsed_url = urlparse(pdf_path)
    filename = os.path.basename(parsed_url.path)
    filename = unquote(filename)  # decode URL-encoded characters

    # Optimized URL handling: force Dropbox share links to direct download.
    if pdf_path and ('http' in pdf_path or 'dropbox' in pdf_path):
        pdf_path = pdf_path.replace('dl=0', 'dl=1')

    # Fetch the PDF once and reuse the bytes for both document handles.
    # NOTE(review): BytesIO(...) is always truthy, so this guard can never
    # fire; validating response.status_code / response.content would be the
    # real check — confirm intent.
    response = requests.get(pdf_path)
    pdf_content = BytesIO(response.content)
    if not pdf_content:
        raise ValueError("No valid PDF content found.")

    # Two handles: `doc` for reading text, `docHighlights` for drawing
    # the markup rectangles.
    doc = fitz.open(stream=pdf_content, filetype="pdf")
    docHighlights = fitz.open(stream=pdf_content, filetype="pdf")
    parsed_url = urlparse(pdf_path)
    filename = os.path.basename(parsed_url.path)
    filename = unquote(filename)  # decode URL-encoded characters

    # Baseline ("regular text") font metrics, used to spot header-looking
    # lines that differ from body text.
    most_common_font_size, most_common_color, most_common_font = get_regular_font_size_and_color(doc)

    # Precompiled regexes (NOTE(review): dot_pattern is currently unused).
    dot_pattern = re.compile(r'\.{3,}')
    url_pattern = re.compile(r'https?://\S+|www\.\S+')
    highlighted=[]
    processed_subjects = set() # Initialize at the top of testFunction
    toc_pages = get_toc_page_numbers(doc)
    identified_headers=process_document_in_chunks(len(doc), pdf_path,LLM_prompt, model)
    # NOTE(review): a commented-out call here previously embedded a
    # hard-coded OpenRouter API key in plain text; it has been removed from
    # this view — rotate that key and never commit secrets.

    print(identified_headers)
    # Flat list of header texts outside the TOC; consumed by is_header().
    allheaders_LLM=[]
    for h in identified_headers:
        if int(h["page"]) in toc_pages:
            continue
        if h['text']:
            allheaders_LLM.append(h['text'])

    # Locate headers on the page, drop TOC hits, and build the hierarchy of
    # leaf headers (each with its ancestor path).
    headers_json=headers_with_location(doc,identified_headers)
    headers=filter_headers_outside_toc(headers_json,toc_pages)
    hierarchy=build_hierarchy_from_llm(headers)
    listofHeaderstoMarkup = get_leaf_headers_with_paths(hierarchy)
    allchildrenheaders = [normalize_text(item['text']) for item, p in listofHeaderstoMarkup]
    allchildrenheaders_set = set(allchildrenheaders) # For faster lookups
    # NOTE(review): `df` and `dictionaryNBS` are never used below — dead?
    df = pd.DataFrame(columns=["NBSLink","Subject","Page","Author","Creation Date","Layer",'Code', 'head above 1', "head above 2",'BodyText'])
    dictionaryNBS={}
    data_list_JSON = []
    for heading_to_searchDict,pathss in listofHeaderstoMarkup:
        heading_to_search = heading_to_searchDict['text']
        heading_to_searchPageNum = heading_to_searchDict['page']
        paths=heading_to_searchDict['path']
        xloc=heading_to_searchDict['x']
        yloc=heading_to_searchDict['y']

        # Per-heading state: reset matching/collection flags and bboxes.
        headertoContinue1 = False
        headertoContinue2 = False
        matched_header_line = None
        done = False
        collecting = False
        collected_lines = []
        page_highlights = {}
        current_bbox = {}
        last_y1s = {}
        mainHeader = ''
        subHeader = ''
        # NOTE(review): this holds the *raw* heading but is compared
        # against normalize_text() output below — confirm intended.
        matched_header_line_norm = heading_to_search
        break_collecting = False
        heading_norm = normalize_text(heading_to_search)
        paths_norm = [normalize_text(p) for p in paths[0]] if paths and paths[0] else []

        # Scan forward from the heading's page until the section ends.
        for page_num in range(heading_to_searchPageNum,len(doc)):
            if page_num in toc_pages:
                continue
            if break_collecting:
                break
            page=doc[page_num]
            page_height = page.rect.height
            blocks = page.get_text("dict")["blocks"]

            for block in blocks:
                if break_collecting:
                    break

                lines = block.get("lines", [])
                i = 0
                while i < len(lines):
                    if break_collecting:
                        break

                    spans = lines[i].get("spans", [])
                    if not spans:
                        i += 1
                        continue

                    # Geometry of the line's first span.
                    x0 = spans[0]["bbox"][0] # left
                    x1 = spans[0]["bbox"][2] # right
                    y0 = spans[0]["bbox"][1] # top
                    y1 = spans[0]["bbox"][3] # bottom

                    # Skip lines inside the page header/footer margins.
                    if y0 < top_margin or y1 > (page_height - bottom_margin):
                        i += 1
                        continue

                    line_text = get_spaced_text_from_spans(spans).lower()
                    line_text_norm = normalize_text(line_text)

                    # Combine with next line so a heading wrapped over two
                    # physical lines can still match.
                    if i + 1 < len(lines):
                        next_spans = lines[i + 1].get("spans", [])
                        next_line_text = get_spaced_text_from_spans(next_spans).lower()
                        combined_line_norm = normalize_text(line_text + " " + next_line_text)
                    else:
                        combined_line_norm = line_text_norm

                    # Track which ancestor headers we have passed.
                    if combined_line_norm and combined_line_norm in paths[0]:
                        headertoContinue1 = combined_line_norm
                    if combined_line_norm and combined_line_norm in paths[-2]:
                        headertoContinue2 = combined_line_norm

                    # Keyword-based "Not to be billed" classification is
                    # disabled; everything is billed for now.
                    stringtowrite='To be billed'
                    if stringtowrite!='To be billed':
                        alltextWithoutNotbilled+= combined_line_norm
                    # Optimized header matching: line must be a known leaf
                    # header AND contain the heading we are searching for.
                    existsfull = (
                        ( combined_line_norm in allchildrenheaders_set or
                        combined_line_norm in allchildrenheaders ) and heading_to_search in combined_line_norm
                    )
                    # Word-based matching: every word of the line appears in
                    # the heading (tolerates partial/wrapped headings).
                    current_line_words = set(combined_line_norm.split())
                    heading_words = set(heading_norm.split())
                    all_words_match = current_line_words.issubset(heading_words) and len(current_line_words) > 0

                    substring_match = (
                        heading_norm in combined_line_norm or
                        combined_line_norm in heading_norm or
                        all_words_match # Include the new word-based matching
                    )

                    # --- Strong match: exact known header on this line. ---
                    if ( substring_match and existsfull and not collecting and
                        len(combined_line_norm) > 0 ):

                        if stringtowrite.startswith('To'):
                            collecting = True
                            Alltexttobebilled+= ' '+ combined_line_norm

                        valid_spans = [span for span in spans if span.get("bbox")]

                        if valid_spans:
                            # Union bbox of all spans on the header line.
                            x0s = [span["bbox"][0] for span in valid_spans]
                            x1s = [span["bbox"][2] for span in valid_spans]
                            y0s = [span["bbox"][1] for span in valid_spans]
                            y1s = [span["bbox"][3] for span in valid_spans]

                            header_bbox = [min(x0s), min(y0s), max(x1s), max(y1s)]

                            # Grow this page's running highlight bbox.
                            if page_num in current_bbox:
                                cb = current_bbox[page_num]
                                current_bbox[page_num] = [
                                    min(cb[0], header_bbox[0]),
                                    min(cb[1], header_bbox[1]),
                                    max(cb[2], header_bbox[2]),
                                    max(cb[3], header_bbox[3])
                                ]
                            else:
                                current_bbox[page_num] = header_bbox
                            last_y1s[page_num] = header_bbox[3]
                            x0, y0, x1, y1 = header_bbox

                            # Deep-link target: zoom plus top-left corner.
                            zoom = 200
                            left = int(x0)
                            top = int(y0)
                            zoom_str = f"{zoom},{left},{top}"
                            pageNumberFound = page_num + 1

                            # Build the query parameters
                            params = {
                                'pdfLink': pdf_path, # Your PDF link
                                'keyword': heading_to_search, # Your keyword (could be a string or list)
                            }

                            # URL encode each parameter
                            encoded_params = {key: urllib.parse.quote(value, safe='') for key, value in params.items()}

                            # Construct the final encoded link
                            encoded_link = '&'.join([f"{key}={value}" for key, value in encoded_params.items()])

                            # Correctly construct the final URL with page and zoom
                            # NOTE(review): final_url is never used afterwards.
                            final_url = f"{baselink}{encoded_link}#page={str(pageNumberFound)}&zoom={zoom_str}"

                            # Get current date and time
                            now = datetime.now()

                            # Format the output
                            formatted_time = now.strftime("%d/%m/%Y %I:%M:%S %p")

                            # Create the data entry only if the subject is unique
                            if heading_to_search not in processed_subjects:
                                data_entry = {
                                    "NBSLink": zoom_str,
                                    "Subject": heading_to_search,
                                    "Page": str(pageNumberFound),
                                    "Author": "ADR",
                                    "Creation Date": formatted_time,
                                    "Layer": "Initial",
                                    "Code": stringtowrite,
                                    "BodyText": collected_lines,
                                    "MC Connnection": 'Go to ' + paths[0].strip().split()[0] + '/' + heading_to_search.strip().split()[0] + ' in ' + filename
                                }

                                # Dynamically add hierarchy paths
                                # NOTE(review): this loop variable clobbers
                                # the while-loop line index `i`, corrupting
                                # line iteration after a match — confirm/fix.
                                for i, path_text in enumerate(paths[:-1]):
                                    data_entry[f"head above {i+1}"] = path_text

                                # Append to the list and mark this subject as processed
                                data_list_JSON.append(data_entry)
                                processed_subjects.add(heading_to_search)
                            else:
                                print(f"Skipping duplicate data entry for Subject: {heading_to_search}")

                            # Convert list to JSON
                            # NOTE(review): json_output is never used.
                            json_output = json.dumps(data_list_JSON, indent=4)

                        i += 1
                        continue
                    else:
                        # --- Weaker match: fuzzy/word-ratio comparison. ---
                        if (substring_match and not collecting and
                            len(combined_line_norm) > 0):

                            # Calculate word match percentage
                            word_match_percent = words_match_ratio(heading_norm, combined_line_norm) * 100

                            # Threshold is effectively "all words present".
                            meets_word_threshold = word_match_percent >= 100

                            if (meets_word_threshold or same_start_word(heading_to_search, combined_line_norm) ) and stringtowrite.startswith('To'):
                                collecting = True
                                Alltexttobebilled+= ' '+ combined_line_norm

                                collected_lines.append(line_text)
                                valid_spans = [span for span in spans if span.get("bbox")]

                                if valid_spans:
                                    x0s = [span["bbox"][0] for span in valid_spans]
                                    x1s = [span["bbox"][2] for span in valid_spans]
                                    y0s = [span["bbox"][1] for span in valid_spans]
                                    y1s = [span["bbox"][3] for span in valid_spans]

                                    header_bbox = [min(x0s), min(y0s), max(x1s), max(y1s)]

                                    if page_num in current_bbox:
                                        cb = current_bbox[page_num]
                                        current_bbox[page_num] = [
                                            min(cb[0], header_bbox[0]),
                                            min(cb[1], header_bbox[1]),
                                            max(cb[2], header_bbox[2]),
                                            max(cb[3], header_bbox[3])
                                        ]
                                    else:
                                        current_bbox[page_num] = header_bbox

                                    last_y1s[page_num] = header_bbox[3]
                                    x0, y0, x1, y1 = header_bbox
                                    zoom = 200
                                    left = int(x0)
                                    top = int(y0)
                                    zoom_str = f"{zoom},{left},{top}"
                                    pageNumberFound = page_num + 1

                                    # Build the query parameters
                                    params = {
                                        'pdfLink': pdf_path, # Your PDF link
                                        'keyword': heading_to_search, # Your keyword (could be a string or list)
                                    }

                                    # URL encode each parameter
                                    encoded_params = {key: urllib.parse.quote(value, safe='') for key, value in params.items()}

                                    # Construct the final encoded link
                                    encoded_link = '&'.join([f"{key}={value}" for key, value in encoded_params.items()])

                                    # Correctly construct the final URL with page and zoom
                                    final_url = f"{baselink}{encoded_link}#page={str(pageNumberFound)}&zoom={zoom_str}"

                                    # Get current date and time
                                    now = datetime.now()

                                    # Format the output
                                    formatted_time = now.strftime("%d/%m/%Y %I:%M:%S %p")

                                    # Create the data entry only if the subject is unique
                                    if heading_to_search not in processed_subjects:
                                        data_entry = {
                                            "NBSLink": zoom_str,
                                            "Subject": heading_to_search,
                                            "Page": str(pageNumberFound),
                                            "Author": "ADR",
                                            "Creation Date": formatted_time,
                                            "Layer": "Initial",
                                            "Code": stringtowrite,
                                            "BodyText": collected_lines,
                                            "MC Connnection": 'Go to ' + paths[0].strip().split()[0] + '/' + heading_to_search.strip().split()[0] + ' in ' + filename
                                        }

                                        # Dynamically add hierarchy paths
                                        # NOTE(review): clobbers line index
                                        # `i` — same issue as above.
                                        for i, path_text in enumerate(paths[:-1]):
                                            data_entry[f"head above {i+1}"] = path_text

                                        # Append to the list and mark this subject as processed
                                        data_list_JSON.append(data_entry)
                                        processed_subjects.add(heading_to_search)
                                    else:
                                        print(f"Skipping duplicate data entry for Subject: {heading_to_search}")
                                    # Convert list to JSON
                                    json_output = json.dumps(data_list_JSON, indent=4)

                                # Skip the wrapped second line of the header.
                                i += 2
                                continue
                    # --- Collection mode: gather body lines until the next
                    # header-looking line ends the section. ---
                    if collecting:
                        norm_line = normalize_text(line_text)
                        # Local helpers (NOTE(review): redefined on every
                        # iteration; is_similar is currently unused).
                        def normalize(text):
                            if isinstance(text, list):
                                text = " ".join(text)
                            return " ".join(text.lower().split())

                        def is_similar(a, b, threshold=0.75):
                            return SequenceMatcher(None, a, b).ratio() >= threshold
                        # URLs are never treated as headers.
                        if url_pattern.match(norm_line):
                            line_is_header = False
                        else:
                            line_is_header = any(is_header(span, most_common_font_size, most_common_color, most_common_font,allheaders_LLM) for span in spans)

                        if line_is_header:
                            header_font_size = max(span["size"] for span in spans)
                            # Sanity filter: ignore 1–2 character "headers".
                            is_probably_real_header = (
                                len(line_text.strip()) > 2
                            )

                            if (norm_line != matched_header_line_norm and
                                norm_line != heading_norm and
                                is_probably_real_header):
                                if line_text not in heading_norm:
                                    # A new header starts: stop collecting
                                    # and flush highlights for this section.
                                    collecting = False
                                    done = True
                                    headertoContinue1 = False
                                    headertoContinue2=False
                                    # NOTE(review): this loop shadows the
                                    # outer `page_num` — confirm intended.
                                    for page_num, bbox in current_bbox.items():
                                        bbox[3] = last_y1s.get(page_num, bbox[3])
                                        page_highlights[page_num] = bbox
                                        can_highlight=False
                                        if [page_num,bbox] not in highlighted:
                                            highlighted.append([page_num,bbox])
                                            can_highlight=True
                                        if can_highlight:
                                            highlight_boxes(docHighlights, page_highlights,stringtowrite)

                                    break_collecting = True

                                    break

                        if break_collecting:
                            break

                        # Body line: keep text and grow the highlight bbox.
                        collected_lines.append(line_text)

                        valid_spans = [span for span in spans if span.get("bbox")]
                        if valid_spans:
                            x0s = [span["bbox"][0] for span in valid_spans]
                            x1s = [span["bbox"][2] for span in valid_spans]
                            y0s = [span["bbox"][1] for span in valid_spans]
                            y1s = [span["bbox"][3] for span in valid_spans]

                            line_bbox = [min(x0s), min(y0s), max(x1s), max(y1s)]

                            if page_num in current_bbox:
                                cb = current_bbox[page_num]
                                current_bbox[page_num] = [
                                    min(cb[0], line_bbox[0]),
                                    min(cb[1], line_bbox[1]),
                                    max(cb[2], line_bbox[2]),
                                    max(cb[3], line_bbox[3])
                                ]
                            else:
                                current_bbox[page_num] = line_bbox

                            last_y1s[page_num] = line_bbox[3]
                    i += 1

        # Section ran to end-of-document without hitting another header:
        # flush whatever was collected.
        if not done:
            for page_num, bbox in current_bbox.items():
                bbox[3] = last_y1s.get(page_num, bbox[3])
                page_highlights[page_num] = bbox
            stringtowrite='To be billed'

            highlight_boxes(docHighlights, page_highlights,stringtowrite)

    print("Current working directory:", os.getcwd())
    # Backfill the last entry's body if it was created before any body
    # lines were collected.
    if data_list_JSON and not data_list_JSON[-1]["BodyText"] and collected_lines:
        data_list_JSON[-1]["BodyText"] = collected_lines[1:] if len(collected_lines) > 0 else []
    # Final cleanup of the JSON data before returning
    for entry in data_list_JSON:
        # Check if BodyText exists and has content
        if isinstance(entry.get("BodyText"), list) and len(entry["BodyText"]) > 0:
            # Check if the first line of the body is essentially the same as the Subject
            first_line = normalize_text(entry["BodyText"][0])
            subject = normalize_text(entry["Subject"])

            # If they match or the subject is inside the first line, remove it
            if subject in first_line or first_line in subject:
                entry["BodyText"] = entry["BodyText"][1:]
    # NOTE(review): `jsons` is not initialized in this function — presumably
    # a module-level list; confirm, otherwise this raises NameError.
    jsons.append(data_list_JSON)
    logger.info(f"Markups done! Uploading to dropbox")
    logger.info(f"Uploaded and Readyy!")


    return jsons,identified_headers
2254
+
2255
+
2256
+
2257
  def build_subject_body_map(jsons):
2258
  subject_body = {}
2259
 
 
2269
 
2270
  def identify_headers_and_save_excel(pdf_path, model,LLM_prompt):
2271
  try:
2272
+ # result = identify_headers_with_openrouterNEWW(pdf_path, model,LLM_prompt)
2273
+ jsons,result = testFunction(pdf_path, model,LLM_prompt)
2274
  if not result:
2275
  df = pd.DataFrame([{
2276
  "text": None,