Marthee committed
Commit b458a1d · verified · 1 Parent(s): c614262

Upload 2 files

InitialMarkupsLLM2.py ADDED
The diff for this file is too large to render. See raw diff
 
InitialMarkupsLLM_huggingFace.py ADDED
@@ -0,0 +1,1913 @@
# -*- coding: utf-8 -*-
"""Copy of FindSpecsTrial(Retrieving+boundingBoxes)-InitialMarkups(ALL)_CleanedUp.ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/12XfVkmKmN3oVjHhLVE0_GgkftgArFEK2
"""
baselink = 'https://adr.trevorsadd.co.uk/api/view-pdf?'
newlink = 'https://adr.trevorsadd.co.uk/api/view-highlight?'
tobebilledonlyLink = 'https://adr.trevorsadd.co.uk/api/view-pdf-tobebilled?'

import ast
import copy
import difflib
import json
import logging
import math
import os
import random
import re
import time
import unicodedata
import urllib.parse
from collections import defaultdict, Counter
from datetime import datetime, timezone
from difflib import SequenceMatcher
from io import BytesIO
from urllib.parse import urlparse, unquote

import fitz  # PyMuPDF
import pandas as pd
import requests
from fuzzywuzzy import fuzz

# import tsadropboxretrieval

# Set up logging to see everything
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.StreamHandler(),                    # print to console
        logging.FileHandler('debug.log', mode='w')  # save to file
    ]
)

logger = logging.getLogger(__name__)

# Page regions (in points) treated as running header/footer and skipped everywhere
top_margin = 50
bottom_margin = 75

def changepdflinks(json_data, pdf_path):
    print('ll , ', json_data, pdf_path)
    # base_viewer_link = "https://findconsole-initialmarkups.hf.space/view-pdf?"

    updated_json = []
    for entry in json_data:
        # Extract the fields needed to build the viewer URL
        zoom_str = entry.get("NBSLink", "")
        page_str = entry.get("Page", "")

        # Encode the pdf link safely for URL usage
        encoded_pdf_link = urllib.parse.quote(pdf_path, safe='')

        # Construct the final link
        final_url = f"{baselink}pdfLink={encoded_pdf_link}#page={str(page_str)}&zoom={zoom_str}"

        # Replace the old NBSLink value (a "zoom,left,top" string) with the full URL
        entry["NBSLink"] = final_url

        updated_json.append(entry)

    return updated_json

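# Usage sketch (not part of the original upload; the entry values below are
# hypothetical). changepdflinks expects entries whose "NBSLink" holds a
# "zoom,left,top" string and rewrites it into a full viewer URL:
def _demo_changepdflinks():
    entries = [{"NBSLink": "200,56,144", "Page": 3}]
    # -> [{'NBSLink': 'https://adr.trevorsadd.co.uk/api/view-pdf?pdfLink=...#page=3&zoom=200,56,144', 'Page': 3}]
    return changepdflinks(entries, "https://example.com/spec.pdf")
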
def normalize_text(text):
    if not text:
        return ""

    text = unicodedata.normalize("NFKC", text)
    text = text.replace("\u00a0", " ")     # non-breaking space
    text = re.sub(r"-\s*\n\s*", "", text)  # de-hyphenation across line breaks
    text = re.sub(r"\s+", " ", text)       # collapse whitespace
    return text.strip().lower()

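# A simpler normalize_text is redefined further down and shadows this NFKC
# version, so we bind it under a stable alias here. The self-check below is a
# sketch added for illustration (not in the original file); it shows the
# intended effect on a hyphenated line break and a non-breaking space:
_nfkc_normalize_text = normalize_text

def _demo_normalize_text():
    assert _nfkc_normalize_text("water-\n proofing\u00a0MEMBRANE") == "waterproofing membrane"
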
def getLocation_of_header(doc, headerText, expected_page=None):
    locations = []

    # pages = (
    #     [(expected_page, doc.load_page(expected_page))]
    #     if expected_page is not None
    #     else enumerate(doc)
    # )
    expectedpageNorm = expected_page

    page = doc[expectedpageNorm]
    # for page_number, page in pages:
    page_height = page.rect.height
    rects = page.search_for(headerText)

    for r in rects:
        y = r.y0

        # Skip headers in the top or bottom margin
        if y <= top_margin:
            continue
        if y >= page_height - bottom_margin:
            continue

        locations.append({
            "headerText": headerText,
            "page": expectedpageNorm,
            "x": r.x0,
            "y": y
        })
    return locations

def filter_headers_outside_toc(headers, toc_pages):
    toc_pages_set = set(toc_pages)

    filtered = []
    for h in headers:
        page = h[2]
        y = h[3]

        # Skip invalid / fallback headers
        if page is None or y is None:
            continue

        # Skip headers inside TOC pages
        if page in toc_pages_set:
            continue

        filtered.append(h)

    return filtered

def headers_with_location(doc, llm_headers):
    """
    Converts LLM headers into:
        [text, font_size, page, y, x, suggested_level]
    Headers whose text cannot be located on the expected page are skipped.
    """
    headersJson = []

    for h in llm_headers:
        text = h["text"]
        llm_page = h["page"]

        # Attempt to locate the header on the page
        locations = getLocation_of_header(doc, text, llm_page)

        if locations:
            for loc in locations:
                page = doc.load_page(loc["page"])
                fontsize = None

                for block in page.get_text("dict")["blocks"]:
                    if block.get("type") != 0:
                        continue
                    for line in block.get("lines", []):
                        line_text = "".join(span["text"] for span in line["spans"]).strip()
                        if normalize(line_text) == normalize(text):
                            fontsize = line["spans"][0]["size"]
                            break
                    if fontsize:
                        break
                entry = [
                    text,
                    fontsize,
                    loc["page"],
                    loc["y"],
                    loc["x"],  # x position carried along with y
                    h["suggested_level"],
                ]
                if entry not in headersJson:
                    headersJson.append(entry)
    return headersJson

def build_hierarchy_from_llm(headers):
    nodes = []

    # -------------------------
    # 1. Build nodes safely
    # -------------------------
    for h in headers:
        # Each entry is [text, size, page, y, x, level]
        if len(h) < 6:
            continue

        text, size, page, y, x, level = h

        if level is None:
            continue

        try:
            level = int(level)
        except Exception:
            continue

        node = {
            "text": text,
            "page": page if page is not None else -1,
            "y": y if y is not None else -1,
            "x": x if x is not None else -1,
            "size": size,
            "bold": False,
            "color": None,
            "font": None,
            "children": [],
            "is_numbered": is_numbered(text),
            "original_size": size,
            "norm_text": normalize(text),
            "level": level,
        }

        nodes.append(node)

    if not nodes:
        return []

    # -------------------------
    # 2. Sort top-to-bottom
    # -------------------------
    nodes.sort(key=lambda x: (x["page"], x["y"]))

    # -------------------------
    # 3. Normalize levels (smallest level -> 0)
    # -------------------------
    min_level = min(n["level"] for n in nodes)

    for n in nodes:
        n["level"] -= min_level

    # -------------------------
    # 4. Build hierarchy
    # -------------------------
    root = []
    stack = []
    added_level0 = set()

    for header in nodes:
        lvl = header["level"]

        if lvl < 0:
            continue

        # De-duplicate true top-level headers
        if lvl == 0:
            key = (header["norm_text"], header["page"])
            if key in added_level0:
                continue
            added_level0.add(key)

        while stack and stack[-1]["level"] >= lvl:
            stack.pop()

        parent = stack[-1] if stack else None

        if parent:
            header["path"] = parent["path"] + [header["norm_text"]]
            parent["children"].append(header)
        else:
            header["path"] = [header["norm_text"]]
            root.append(header)

        stack.append(header)

    # -------------------------
    # 5. Enforce nesting sanity
    # -------------------------
    def enforce_nesting(node_list, parent_level=-1):
        for node in node_list:
            if node["level"] <= parent_level:
                node["level"] = parent_level + 1
            enforce_nesting(node["children"], node["level"])

    enforce_nesting(root)

    # -------------------------
    # 6. Optional cleanup (only if real level-0s exist):
    #    drop childless level-0 nodes
    # -------------------------
    if any(h["level"] == 0 for h in root):
        root = [
            h for h in root
            if not (h["level"] == 0 and not h["children"])
        ]

    # -------------------------
    # 7. Final pass
    # -------------------------
    header_tree = enforce_level_hierarchy(root)

    return header_tree

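# Minimal sketch of how the stack-based pass nests headers (the rows below are
# made up; format is [text, size, page, y, x, level]). Not in the original file.
def _demo_build_hierarchy():
    rows = [
        ["part 1 general", 12, 0, 100, 50, 1],
        ["1.1 summary", 10, 0, 140, 60, 2],
        ["1.2 references", 10, 0, 200, 60, 2],
    ]
    tree = build_hierarchy_from_llm(rows)
    # After level normalisation "part 1 general" becomes level 0 and the two
    # numbered headers become its children, each carrying a "path" such as
    # ['part 1 general', '1.1 summary'].
    return tree
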
def get_toc_page_numbers(doc, max_pages_to_check=15):
    toc_pages = []

    # 1. Dot-leader pattern (looking for ".....")
    dot_pattern = re.compile(r"\.{2,}")

    # 2. Title pattern (looking for specific headers).
    #    ^ and $ ensure the line is JUST that word;
    #    re.IGNORECASE matches "CONTENTS", "Contents", "Index", etc.
    title_pattern = re.compile(r"^\s*(table of contents|contents|index)\s*$", re.IGNORECASE)

    for page_num in range(min(len(doc), max_pages_to_check)):
        page = doc.load_page(page_num)
        blocks = page.get_text("dict")["blocks"]

        dot_line_count = 0
        has_toc_title = False

        for block in blocks:
            for line in block.get("lines", []):
                # Extract text from spans (mimicking get_spaced_text_from_spans)
                line_text = " ".join([span["text"] for span in line["spans"]]).strip()

                # Check A: does the line have dot leaders?
                if dot_pattern.search(line_text):
                    dot_line_count += 1

                # Check B: is this line a TOC title? If a page has a title
                # like "Contents", we mark it immediately.
                if title_pattern.match(line_text):
                    has_toc_title = True

        # A page counts as a TOC page if it has a title OR dot leaders.
        # 'dot_line_count >= 1' keeps it sensitive to single-item lists.
        if has_toc_title or dot_line_count >= 1:
            toc_pages.append(page_num)

    # If TOC pages were found, return every page up to and including the
    # FIRST one (e.g. first TOC page 2 -> [0, 1, 2]): this covers the cover
    # page, inside cover, and the TOC itself.
    if toc_pages:
        last_toc_page = toc_pages[0]
        return list(range(0, last_toc_page + 1))

    return []  # nothing found

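# Usage sketch (hypothetical file name, added for illustration): the returned
# list is fed to the extraction passes so front matter is never scanned.
def _demo_toc_pages():
    doc = fitz.open("spec.pdf")        # any local PDF
    print(get_toc_page_numbers(doc))   # e.g. [0, 1, 2]
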
def get_regular_font_size_and_color(doc):
    font_sizes = []
    colors = []
    fonts = []

    # Loop through all pages (the outer loop variable is named `block` so it
    # does not shadow the span variable of the innermost loop)
    for page_num in range(len(doc)):
        page = doc.load_page(page_num)
        for block in page.get_text("dict")["blocks"]:
            if "lines" in block:
                for line in block["lines"]:
                    for span in line["spans"]:
                        font_sizes.append(span['size'])
                        colors.append(span['color'])
                        fonts.append(span['font'])

    # Get the most common font size, color, and font
    most_common_font_size = Counter(font_sizes).most_common(1)[0][0] if font_sizes else None
    most_common_color = Counter(colors).most_common(1)[0][0] if colors else None
    most_common_font = Counter(fonts).most_common(1)[0][0] if fonts else None

    return most_common_font_size, most_common_color, most_common_font

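# Usage sketch (hypothetical file name, not in the original upload): the modal
# size/colour/font over all spans is taken as the "body text" style, and
# anything larger is treated as a header candidate in is_header below.
def _demo_regular_style():
    doc = fitz.open("spec.pdf")  # any local PDF
    size, color, font = get_regular_font_size_and_color(doc)
    print(size, color, font)     # e.g. 9.0 0 'Helvetica'
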
# NOTE: this simpler definition shadows the NFKC-based normalize_text above;
# being defined later, it is the one actually used at runtime.
def normalize_text(text):
    if text is None:
        return ""
    return re.sub(r'\s+', ' ', text.strip().lower())

def get_spaced_text_from_spans(spans):
    return normalize_text(" ".join(span["text"].strip() for span in spans))

def is_header(span, most_common_font_size, most_common_color, most_common_font, allheadersLLM=()):
    # allheadersLLM defaults to an empty tuple because extract_headers below
    # calls this function without that argument.
    fontname = span.get("font", "").lower()
    # is_italic = "italic" in fontname or "oblique" in fontname
    isheader = False
    is_bold = "bold" in fontname or span.get("bold", False)
    # print('TEXT to CHECK', span['text'])
    if span['text'] in allheadersLLM:  # TODO: normalize both sides before comparing
        isheader = True
    return (
        span["size"] > most_common_font_size or
        # span["font"].lower() != most_common_font.lower() or
        (isheader and span["size"] > most_common_font_size)
    )

def add_span_to_nearest_group(span_y, grouped_dict, pageNum=None, threshold=0.5):
    for (p, y) in grouped_dict:
        if pageNum is not None and p != pageNum:
            continue
        if abs(y - span_y) <= threshold:
            return (p, y)
    return (pageNum, span_y)

def extract_headers(doc, toc_pages, most_common_font_size, most_common_color, most_common_font, top_margin, bottom_margin):

    grouped_headers = defaultdict(list)
    spans = []
    line_merge_threshold = 1.5  # max vertical distance between lines treated as part of the same header

    for pageNum in range(len(doc)):
        if pageNum in toc_pages:
            continue
        page = doc.load_page(pageNum)
        page_height = page.rect.height
        text_instances = page.get_text("dict")

        # First pass: collect all potential header spans
        potential_header_spans = []
        for block in text_instances['blocks']:
            if block['type'] != 0:
                continue

            for line in block['lines']:
                for span in line['spans']:
                    span_y0 = span['bbox'][1]
                    span_y1 = span['bbox'][3]

                    if span_y0 < top_margin or span_y1 > (page_height - bottom_margin):
                        continue

                    span_text = normalize_text(span.get('text', ''))
                    if not span_text:
                        continue
                    if span_text.startswith('http://www') or span_text.startswith('www'):
                        continue
                    if any((
                        'page' in span_text,
                        not re.search(r'[a-z0-9]', span_text),
                        'end of section' in span_text,
                        re.search(r'page\s+\d+\s+of\s+\d+', span_text),
                        re.search(r'\b(?:\d{1,2}[/-])?\d{1,2}[/-]\d{2,4}\b', span_text),
                        # re.search(r'\b(?:jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec)', span_text),
                        'specification:' in span_text
                    )):
                        continue

                    cleaned_text = re.sub(r'[.\-]{4,}.*$', '', span_text).strip()
                    cleaned_text = normalize_text(cleaned_text)

                    if is_header(span, most_common_font_size, most_common_color, most_common_font):
                        potential_header_spans.append({
                            'text': cleaned_text,
                            'size': span['size'],
                            'pageNum': pageNum,
                            'y0': span_y0,
                            'y1': span_y1,
                            'x0': span['bbox'][0],
                            'x1': span['bbox'][2],
                            'span': span
                        })

        # Sort spans by vertical position (top to bottom)
        potential_header_spans.sort(key=lambda s: (s['pageNum'], s['y0']))

        # Second pass: group spans that are vertically close and likely part of the same header
        i = 0
        while i < len(potential_header_spans):
            current = potential_header_spans[i]
            header_text = current['text']
            header_size = current['size']
            header_page = current['pageNum']
            min_y = current['y0']
            max_y = current['y1']
            spans_group = [current['span']]

            # Look ahead for adjacent lines that might belong to the same header
            j = i + 1
            while j < len(potential_header_spans):
                next_span = potential_header_spans[j]
                # Same page, vertically close, and similar styling
                if (next_span['pageNum'] == header_page and
                        next_span['y0'] - max_y < line_merge_threshold and
                        abs(next_span['size'] - header_size) < 0.5):
                    header_text += " " + next_span['text']
                    max_y = next_span['y1']
                    spans_group.append(next_span['span'])
                    j += 1
                else:
                    break

            # Add the merged header
            grouped_headers[(header_page, min_y)].append({
                "text": header_text.strip(),
                "size": header_size,
                "pageNum": header_page,
                "spans": spans_group
            })
            spans.extend(spans_group)
            i = j  # skip the spans we've already processed

    # Prepare final headers list
    headers = []
    for (pageNum, y), header_groups in sorted(grouped_headers.items()):
        for group in header_groups:
            headers.append([
                group['text'],
                group['size'],
                group['pageNum'],
                y
            ])

    font_sizes = [size for _, size, _, _ in headers]
    font_size_counts = Counter(font_sizes)

    # Keep font sizes that appear at least once
    valid_font_sizes = [size for size, count in font_size_counts.items() if count >= 1]

    # Sort in descending order
    valid_font_sizes_sorted = sorted(valid_font_sizes, reverse=True)

    # If only 2 sizes, repeat the second one
    if len(valid_font_sizes_sorted) == 2:
        top_3_font_sizes = [valid_font_sizes_sorted[0], valid_font_sizes_sorted[1], valid_font_sizes_sorted[1]]
    else:
        top_3_font_sizes = valid_font_sizes_sorted[:3]

    # Get the smallest font size among valid ones
    smallest_font_size = min(valid_font_sizes) if valid_font_sizes else None

    return headers, top_3_font_sizes, smallest_font_size, spans

def process_document_in_chunks(
    doc,
    model,
    LLM_prompt,
    chunk_size=13,
):
    total_pages = len(doc)
    all_results = []

    for start in range(0, total_pages, chunk_size):
        end = start + chunk_size

        logger.info(f"Processing pages {start + 1} → {min(end, total_pages)}")

        result = identify_headers_with_openrouterNEWW(
            doc=doc,
            model=model,
            LLM_prompt=LLM_prompt,
            pages_to_check=(start, end)
        )
        # e.g. pages 1-13 in the first call, 14-26 in the second, and so on;
        # headers found in each window are accumulated below.

        if result:
            all_results.extend(result)

    return all_results

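# The 13-page window keeps each request comfortably inside the model's context:
# every window's lines go out in a separate OpenRouter call and the per-window
# results are concatenated. A sketch of the call (the model name here is an
# assumption, not something this file fixes):
#
#   doc = fitz.open("spec.pdf")
#   headers = process_document_in_chunks(doc, "openai/gpt-4o", LLM_prompt="...")
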
def identify_hierarchy_levels_openrouter(allheadersLLM, model, LLMpromptHierarchy, top_margin=0, bottom_margin=0):
    """Ask an LLM (OpenRouter) to assign hierarchy levels to already-extracted headers.
    Returns a list of dicts: {text, page, suggested_level, confidence}.
    The function sends plain header strings to the LLM and asks for a JSON
    array of header lines with suggested levels.
    """
    logger.info("=" * 80)
    logger.info("STARTING IDENTIFY_HIERARCHY_LEVELS_OPENROUTER")
    logger.info(f"Model: {model}")

    # The key must come from the environment; never hard-code secrets.
    api_key = os.getenv("OPENROUTER_API_KEY")

    model = str(model)
    total_lines = len(allheadersLLM)

    lines_for_prompt = []
    # keep a list of pages in the same order as lines_for_prompt
    pages_for_prompt = []

    for item in allheadersLLM:
        # each item is [text, page]
        t = item[0]
        page = item[1]
        if t is not None:
            lines_for_prompt.append(t)
            pages_for_prompt.append(page)

    logger.info(f"Total lines collected for LLM: {total_lines}")

    if not lines_for_prompt:
        logger.warning("No lines collected for prompt")
        return []

    # Log sample of lines
    logger.info("Sample lines (first 10):")
    for i, line in enumerate(lines_for_prompt[:10]):
        logger.info(f"  {i}: {line}")

    prompt = LLMpromptHierarchy + "\n\nLines:\n" + "\n".join(lines_for_prompt)

    logger.debug(f"Full prompt length: {len(prompt)} characters")
    print("=" * 80)
    print("FULL LLM PROMPT:")
    print(prompt)
    print("=" * 80)

    # Also log to file
    try:
        with open("full_prompt.txt", "w", encoding="utf-8") as f:
            f.write(prompt)
        logger.info("Full prompt saved to full_prompt.txt")
    except Exception as e:
        logger.error(f"Could not save prompt to file: {e}")

    if not api_key:
        # No API key: return empty so the caller can fall back to heuristics
        logger.error("No API key provided")
        return []

    url = "https://openrouter.ai/api/v1/chat/completions"

    # Unix timestamp (seconds since epoch)
    unix_timestamp = int(time.time())

    # Current datetime in ISO format (UTC)
    current_time = datetime.now(timezone.utc).isoformat()

    # Build headers following the OpenRouter example
    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json",
        "HTTP-Referer": os.getenv("OPENROUTER_REFERER", ""),
        "X-Title": os.getenv("OPENROUTER_X_TITLE", ""),
        # "X-Request-Timestamp": str(unix_timestamp),
        # "X-Request-Datetime": current_time,
    }

    # Log request details (without exposing the API key)
    logger.info(f"Making request to OpenRouter with model: {model}")
    logger.debug(f"Headers (API key masked): { {k: '***' if k == 'Authorization' else v for k, v in headers.items()} }")

    # Wrap the prompt as the 'content' array expected by OpenRouter
    body = {
        "model": model,
        "messages": [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": prompt}
                ]
            }
        ]
    }

    # Debug: log request body (truncated) and write the raw response for inspection
    try:
        logger.debug(f"Request body (without prompt text): { {k: v if k != 'messages' else '[...prompt...]' for k, v in body.items()} }")

        resp = requests.post(
            url=url,
            headers=headers,
            data=json.dumps(body)
        )

        logger.info(f"HTTP Response Status: {resp.status_code}")
        resp.raise_for_status()

        resp_text = resp.text
        print("=" * 80)
        print("FULL LLM RESPONSE:")
        print(resp_text)
        print("=" * 80)

        logger.info(f"LLM raw response length: {len(resp_text)}")

        # Save raw response for offline inspection
        try:
            with open("llm_debug.json", "w", encoding="utf-8") as fh:
                fh.write(resp_text)
            logger.info("Raw response saved to llm_debug.json")
        except Exception as e:
            logger.error(f"Warning: could not write llm_debug.json: {e}")

        rj = resp.json()
        logger.info(f"LLM parsed response type: {type(rj)}")
        if isinstance(rj, dict):
            logger.debug(f"Response keys: {list(rj.keys())}")

    except requests.exceptions.RequestException as e:
        logger.error(f"HTTP request failed: {repr(e)}")
        return []
    except Exception as e:
        logger.error(f"LLM call failed: {repr(e)}")
        return []

    # Extract the textual reply robustly
    text_reply = None
    if isinstance(rj, dict):
        choices = rj.get('choices') or []
        logger.debug(f"Number of choices in response: {len(choices)}")

        if choices:
            for i, c in enumerate(choices):
                logger.debug(f"Choice {i}: {c}")

            c0 = choices[0]
            msg = c0.get('message') or c0.get('delta') or {}
            content = msg.get('content')

            if isinstance(content, list):
                logger.debug(f"Content is a list with {len(content)} items")
                for idx, c in enumerate(content):
                    if c.get('type') == 'text' and c.get('text'):
                        text_reply = c.get('text')
                        logger.debug(f"Found text reply in content[{idx}], length: {len(text_reply)}")
                        break
            elif isinstance(content, str):
                text_reply = content
                logger.debug(f"Content is string, length: {len(text_reply)}")
            elif isinstance(msg, dict) and msg.get('content') and isinstance(msg.get('content'), dict):
                text_reply = msg.get('content').get('text')
                logger.debug("Found text in nested content dict")

    # Fallback extraction
    if not text_reply:
        logger.debug("Trying fallback extraction from choices")
        for c in rj.get('choices', []):
            if isinstance(c.get('text'), str):
                text_reply = c.get('text')
                logger.debug(f"Found text reply in choice.text, length: {len(text_reply)}")
                break

    if not text_reply:
        logger.error("Could not extract text reply from response")
        print("=" * 80)
        print("FAILED TO EXTRACT TEXT REPLY. FULL RESPONSE STRUCTURE:")
        print(json.dumps(rj, indent=2))
        print("=" * 80)
        return []

    print("=" * 80)
    print("EXTRACTED TEXT REPLY:")
    print(text_reply)
    print("=" * 80)

    logger.info(f"Extracted text reply length: {len(text_reply)}")
    logger.debug(f"First 500 chars of reply: {text_reply[:500]}...")

    s = text_reply.strip()
    start = s.find('[')
    end = s.rfind(']')
    js = s[start:end+1] if start != -1 and end != -1 else s

    logger.debug(f"Looking for JSON array: start={start}, end={end}")
    logger.debug(f"Extracted JSON string (first 500 chars): {js[:500]}...")

    try:
        parsed = json.loads(js)
        logger.info(f"Successfully parsed JSON, got {len(parsed)} items")
    except json.JSONDecodeError as e:
        logger.error(f"Failed to parse JSON: {e}")
        logger.error(f"JSON string that failed to parse: {js[:1000]}")
        print('text reply:', text_reply)
        # Try to find any JSON-like structure
        try:
            json_pattern = r'\[\s*\{.*?\}\s*\]'
            matches = re.findall(json_pattern, text_reply, re.DOTALL)
            if matches:
                logger.info(f"Found {len(matches)} potential JSON arrays via regex")
                for i, match in enumerate(matches):
                    try:
                        parsed = json.loads(match)
                        logger.info(f"Successfully parsed regex match {i} with {len(parsed)} items")
                        break
                    except json.JSONDecodeError as e2:
                        logger.debug(f"Regex match {i} also failed: {e2}")
                        continue
                else:
                    logger.error("All regex matches failed to parse")
                    return []
            else:
                logger.error("No JSON-like pattern found via regex")
                return []
        except Exception as e2:
            logger.error(f"Regex extraction also failed: {e2}")
            return []

    # Log parsed results
    logger.info(f"Parsed {len(parsed)} header items:")
    for i, obj in enumerate(parsed[:10]):  # log first 10 items
        logger.info(f"  Item {i}: {obj}")

    out = []
    for i, obj in enumerate(parsed):
        t = obj.get('text')
        level = obj.get('suggested_level')
        conf = float(obj.get('confidence') or 0)

        # Assign the page number from the same index in the input order
        page = pages_for_prompt[i] if i < len(pages_for_prompt) else None

        out.append({
            'text': t,
            'page': page,
            'suggested_level': int(level),
            'confidence': conf
        })
    logger.info(f"Returning {len(out)} valid header entries")

    return out

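# The reply-parsing strategy above (slice from the first '[' to the last ']',
# then fall back to a regex scan) is repeated verbatim by the next function.
# A condensed sketch of the same idea as a standalone helper (illustrative,
# not called by this file):
def _extract_json_array(text_reply):
    s = text_reply.strip()
    start, end = s.find('['), s.rfind(']')
    candidate = s[start:end + 1] if start != -1 and end != -1 else s
    try:
        return json.loads(candidate)
    except json.JSONDecodeError:
        # fall back to scanning for any bracketed array of objects
        for match in re.findall(r'\[\s*\{.*?\}\s*\]', text_reply, re.DOTALL):
            try:
                return json.loads(match)
            except json.JSONDecodeError:
                continue
    return []
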
def identify_headers_with_openrouterNEWW(doc, model, LLM_prompt, pages_to_check, top_margin=0, bottom_margin=0):
    """Ask an LLM (OpenRouter) to identify headers in the document.
    Returns a list of dicts: {text, page, suggested_level, confidence, size}.
    The function sends plain page-line strings to the LLM (including page numbers)
    and asks for a JSON array containing only header lines with suggested levels.
    """
    logger.info("=" * 80)
    logger.info("STARTING IDENTIFY_HEADERS_WITH_OPENROUTERNEWW")
    logger.info(f"Model: {model}")

    # The key must come from the environment; never hard-code secrets.
    api_key = os.getenv("OPENROUTER_API_KEY")

    model = str(model)
    # toc_pages = get_toc_page_numbers(doc)
    lines_for_prompt = []
    logger.info(f"Total pages in document: {len(doc)}")

    # Collect text lines from the requested page window
    total_lines = 0

    ArrayofTextWithFormat = []
    total_pages = len(doc)

    if pages_to_check is None:
        start_page = 0
        end_page = min(15, total_pages)
    else:
        start_page, end_page = pages_to_check
        end_page = min(end_page, total_pages)  # clamp to the document length

    for pno in range(start_page, end_page):
        page = doc.load_page(pno)
        page_height = page.rect.height
        lines_on_page = 0
        text_dict = page.get_text("dict")
        lines = []
        y_tolerance = 0.5  # tweak if needed (1-3 usually works)

        for block in text_dict["blocks"]:
            if block["type"] != 0:
                continue
            for line in block["lines"]:
                for span in line["spans"]:
                    text = span["text"].strip()
                    if not text:  # skip empty text
                        continue

                    # Extract all formatting attributes
                    font = span.get('font')
                    size = span.get('size')
                    color = span.get('color')
                    flags = span.get('flags', 0)
                    bbox = span.get("bbox", (0, 0, 0, 0))
                    x0, y0, x1, y1 = bbox

                    # Create text format dictionary
                    text_format = {
                        'Font': font,
                        'Size': size,
                        'Flags': flags,
                        'Color': color,
                        'Text': text,
                        'BBox': bbox,
                        'Page': pno + 1
                    }

                    # Add to ArrayofTextWithFormat
                    ArrayofTextWithFormat.append(text_format)

                    # Group spans into visual lines by y position
                    matched = False
                    for l in lines:
                        if abs(l["y"] - y0) <= y_tolerance:
                            l["spans"].append((x0, text, font, size, color, flags))
                            matched = True
                            break
                    if not matched:
                        lines.append({
                            "y": y0,
                            "spans": [(x0, text, font, size, color, flags)]
                        })

        lines.sort(key=lambda l: l["y"])

        # Join the text inside each line with formatting info
        final_lines = []
        for l in lines:
            l["spans"].sort(key=lambda s: s[0])  # left → right

            # Collect all text for this line
            line_text = " ".join(text for _, text, _, _, _, _ in l["spans"])

            # Get the dominant formatting for the line (based on the first span)
            if l["spans"]:
                _, _, font, size, color, flags = l["spans"][0]

                # Store the line with its formatting
                line_with_format = {
                    'text': line_text,
                    'font': font,
                    'size': size,
                    'color': color,
                    'flags': flags,
                    'page': pno + 1,
                    'y_position': l["y"]
                }
                final_lines.append(line_with_format)

        # Result
        for line_data in final_lines:
            line_text = line_data['text']
            print(line_text)

            if line_text:
                # format_info = f"Font: {line_data['font']}, Size: {line_data['size']}, Color: {line_data['color']}"
                lines_for_prompt.append(f"PAGE {pno+1}: {line_text}")
                lines_on_page += 1

        if lines_on_page > 0:
            logger.debug(f"Page {pno}: collected {lines_on_page} lines")
            total_lines += lines_on_page

    logger.info(f"Total lines collected for LLM: {total_lines}")

    # ArrayofTextWithFormat now holds one dictionary per text span with full formatting
    print(f"\nTotal text spans with formatting: {len(ArrayofTextWithFormat)}")
    print("\nSample of formatted text entries:")
    for i, entry in enumerate(ArrayofTextWithFormat[:3]):  # show first 3 entries
        print(f"Entry {i+1}: {entry}")

    if not lines_for_prompt:
        logger.warning("No lines collected for prompt")
        return []

    # Log sample of lines
    logger.info("Sample lines (first 10):")
    for i, line in enumerate(lines_for_prompt[:10]):
        logger.info(f"  {i}: {line}")

    prompt = LLM_prompt + "\n\nLines:\n" + "\n".join(lines_for_prompt)

    logger.debug(f"Full prompt length: {len(prompt)} characters")
    print("=" * 80)
    print("FULL LLM PROMPT:")
    print(prompt)
    print("=" * 80)

    # Also log to file
    try:
        with open("full_prompt.txt", "w", encoding="utf-8") as f:
            f.write(prompt)
        logger.info("Full prompt saved to full_prompt.txt")
    except Exception as e:
        logger.error(f"Could not save prompt to file: {e}")

    if not api_key:
        # No API key: return empty so the caller can fall back to heuristics
        logger.error("No API key provided")
        return []

    url = "https://openrouter.ai/api/v1/chat/completions"

    # Unix timestamp (seconds since epoch)
    unix_timestamp = int(time.time())

    # Current datetime in ISO format (UTC)
    current_time = datetime.now(timezone.utc).isoformat()

    # Build headers following the OpenRouter example
    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json",
        "HTTP-Referer": os.getenv("OPENROUTER_REFERER", ""),
        "X-Title": os.getenv("OPENROUTER_X_TITLE", ""),
        # "X-Request-Timestamp": str(unix_timestamp),
        # "X-Request-Datetime": current_time,
    }

    # Log request details (without exposing the API key)
    logger.info(f"Making request to OpenRouter with model: {model}")
    logger.debug(f"Headers (API key masked): { {k: '***' if k == 'Authorization' else v for k, v in headers.items()} }")

    # Wrap the prompt as the 'content' array expected by OpenRouter
    body = {
        "model": model,
        "messages": [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": prompt}
                ]
            }
        ]
    }

    # Debug: log request body (truncated) and write the raw response for inspection
    try:
        logger.debug(f"Request body (without prompt text): { {k: v if k != 'messages' else '[...prompt...]' for k, v in body.items()} }")

        resp = requests.post(
            url=url,
            headers=headers,
            data=json.dumps(body)
        )

        logger.info(f"HTTP Response Status: {resp.status_code}")
        resp.raise_for_status()

        resp_text = resp.text
        print("=" * 80)
        print("FULL LLM RESPONSE:")
        print(resp_text)
        print("=" * 80)

        logger.info(f"LLM raw response length: {len(resp_text)}")

        # Save raw response for offline inspection
        try:
            with open("llm_debug.json", "w", encoding="utf-8") as fh:
                fh.write(resp_text)
            logger.info("Raw response saved to llm_debug.json")
        except Exception as e:
            logger.error(f"Warning: could not write llm_debug.json: {e}")

        rj = resp.json()
        logger.info(f"LLM parsed response type: {type(rj)}")
        if isinstance(rj, dict):
            logger.debug(f"Response keys: {list(rj.keys())}")

    except requests.exceptions.RequestException as e:
        logger.error(f"HTTP request failed: {repr(e)}")
        return []
    except Exception as e:
        logger.error(f"LLM call failed: {repr(e)}")
        return []

    # Extract the textual reply robustly
    text_reply = None
    if isinstance(rj, dict):
        choices = rj.get('choices') or []
        logger.debug(f"Number of choices in response: {len(choices)}")

        if choices:
            for i, c in enumerate(choices):
                logger.debug(f"Choice {i}: {c}")

            c0 = choices[0]
            msg = c0.get('message') or c0.get('delta') or {}
            content = msg.get('content')

            if isinstance(content, list):
                logger.debug(f"Content is a list with {len(content)} items")
                for idx, c in enumerate(content):
                    if c.get('type') == 'text' and c.get('text'):
                        text_reply = c.get('text')
                        logger.debug(f"Found text reply in content[{idx}], length: {len(text_reply)}")
                        break
            elif isinstance(content, str):
                text_reply = content
                logger.debug(f"Content is string, length: {len(text_reply)}")
            elif isinstance(msg, dict) and msg.get('content') and isinstance(msg.get('content'), dict):
                text_reply = msg.get('content').get('text')
                logger.debug("Found text in nested content dict")

    # Fallback extraction
    if not text_reply:
        logger.debug("Trying fallback extraction from choices")
        for c in rj.get('choices', []):
            if isinstance(c.get('text'), str):
                text_reply = c.get('text')
                logger.debug(f"Found text reply in choice.text, length: {len(text_reply)}")
                break

    if not text_reply:
        logger.error("Could not extract text reply from response")
        print("=" * 80)
        print("FAILED TO EXTRACT TEXT REPLY. FULL RESPONSE STRUCTURE:")
        print(json.dumps(rj, indent=2))
        print("=" * 80)
        return []

    print("=" * 80)
    print("EXTRACTED TEXT REPLY:")
    print(text_reply)
    print("=" * 80)

    logger.info(f"Extracted text reply length: {len(text_reply)}")
    logger.debug(f"First 500 chars of reply: {text_reply[:500]}...")

    s = text_reply.strip()
    start = s.find('[')
    end = s.rfind(']')
    js = s[start:end+1] if start != -1 and end != -1 else s

    logger.debug(f"Looking for JSON array: start={start}, end={end}")
    logger.debug(f"Extracted JSON string (first 500 chars): {js[:500]}...")

    try:
        parsed = json.loads(js)
        logger.info(f"Successfully parsed JSON, got {len(parsed)} items")
    except json.JSONDecodeError as e:
        logger.error(f"Failed to parse JSON: {e}")
        logger.error(f"JSON string that failed to parse: {js[:1000]}")
        # Try to find any JSON-like structure
        try:
            json_pattern = r'\[\s*\{.*?\}\s*\]'
            matches = re.findall(json_pattern, text_reply, re.DOTALL)
            if matches:
                logger.info(f"Found {len(matches)} potential JSON arrays via regex")
                for i, match in enumerate(matches):
                    try:
                        parsed = json.loads(match)
                        logger.info(f"Successfully parsed regex match {i} with {len(parsed)} items")
                        break
                    except json.JSONDecodeError as e2:
                        logger.debug(f"Regex match {i} also failed: {e2}")
                        continue
                else:
                    logger.error("All regex matches failed to parse")
                    return []
            else:
                logger.error("No JSON-like pattern found via regex")
                return []
        except Exception as e2:
            logger.error(f"Regex extraction also failed: {e2}")
            return []

    # Log parsed results
    logger.info(f"Parsed {len(parsed)} header items:")
    for i, obj in enumerate(parsed[:10]):  # log first 10 items
        logger.info(f"  Item {i}: {obj}")

    # Normalize parsed entries and return
    out = []
    for obj in parsed:
        t = obj.get('text')
        page = int(obj.get('page')) if obj.get('page') else None
        level = obj.get('suggested_level')
        conf = float(obj.get('confidence') or 0)
        size = obj.get('size')
        if t and page is not None:
            out.append({'text': t, 'page': page - 1, 'suggested_level': level, 'confidence': conf, 'size': size})

    logger.info(f"Returning {len(out)} valid header entries")

    return out

def mapPages_header_hierarchy(headers, hierarchy):

    mapped_hierarchy = []
    header_idx = 0  # pointer into headers

    for h_item in hierarchy:
        h_text = h_item.get("text")
        h_level = h_item.get("suggested_level")
        h_conf = float(h_item.get("confidence", 0))

        page = None
        combined_text = ""
        start_idx = header_idx

        # Try to match the hierarchy text by concatenating headers
        while header_idx < len(headers) and len(combined_text) < len(h_text):
            header = headers[header_idx]
            header_text = header.get("text") if isinstance(header, dict) else str(header)
            header_page = header.get("page") if isinstance(header, dict) else None

            if combined_text:
                combined_text += " "  # add a space between concatenated headers
            combined_text += header_text

            if page is None:
                page = header_page  # take the page of the first matching header

            header_idx += 1

        # Optional: check whether the merged headers only partially match the hierarchy
        if h_text not in combined_text:
            # fallback: use the first unconsumed header's page, or None
            if start_idx < len(headers):
                page = headers[start_idx].get("page") if isinstance(headers[start_idx], dict) else None

        mapped_hierarchy.append({
            "text": h_text,
            "page": page,
            "suggested_level": int(h_level),
            "confidence": h_conf
        })
    return mapped_hierarchy

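# Alignment note plus a tiny self-check sketch (hypothetical data, not in the
# original file): each hierarchy entry consumes headers in order until the
# concatenated text is at least as long as the entry's text, inheriting the
# first consumed header's page.
def _demo_map_pages():
    headers = [{'text': 'part 1', 'page': 4}, {'text': 'general', 'page': 4}]
    hierarchy = [{'text': 'part 1 general', 'suggested_level': 1, 'confidence': 0.9}]
    assert mapPages_header_hierarchy(headers, hierarchy) == [
        {'text': 'part 1 general', 'page': 4, 'suggested_level': 1, 'confidence': 0.9}
    ]
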
def is_numbered(text):
    return bool(re.match(r'^\d', text.strip()))

def is_similar(a, b, threshold=0.85):
    return difflib.SequenceMatcher(None, a, b).ratio() > threshold

def normalize(text):
    text = text.lower()
    text = re.sub(r'\.{2,}', '', text)  # remove dot leaders
    text = re.sub(r'\s+', ' ', text)    # collapse multiple spaces into one
    return text.strip()

def clean_toc_entry(toc_text):
    """Remove page numbers and formatting from TOC entries."""
    # Remove everything after the last run of dots/whitespace followed by digits
    return re.sub(r'[\.\s]+\d+.*$', '', toc_text).strip('. ')

def enforce_level_hierarchy(headers):
    """
    Ensure level-2 headers only exist under level-1 headers,
    and clean up any orphaned headers.
    """
    def process_node_list(node_list, parent_level=-1):
        i = 0
        while i < len(node_list):
            node = node_list[i]

            # Remove level-2 headers that don't have a level-1 parent
            if node['level'] == 2 and parent_level != 1:
                node_list.pop(i)
                continue

            # Recursively process children
            process_node_list(node['children'], node['level'])
            i += 1

    process_node_list(headers)
    return headers

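# Sketch of the orphan rule on a made-up node list (not in the original file):
# a level-2 node at the top level (parent_level -1) is dropped, while one
# nested under a level-1 parent survives.
def _demo_enforce_level_hierarchy():
    orphan = {'level': 2, 'children': []}
    parent = {'level': 1, 'children': [{'level': 2, 'children': []}]}
    tree = enforce_level_hierarchy([orphan, parent])
    assert tree == [parent]
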
def highlight_boxes(doc, highlights, stringtowrite, fixed_width=500):  # set your desired width here
    for page_num, bbox in highlights.items():
        page = doc.load_page(page_num)
        page_width = page.rect.width

        # Get the original rect for the vertical coordinates
        orig_rect = fitz.Rect(bbox)
        rect_height = orig_rect.height
        if rect_height > 30:
            if orig_rect.width > 10:
                # Center horizontally using the fixed width
                center_x = page_width / 2
                new_x0 = center_x - fixed_width / 2
                new_x1 = center_x + fixed_width / 2
                new_rect = fitz.Rect(new_x0, orig_rect.y0, new_x1, orig_rect.y1)

                # Add the highlight rectangle
                annot = page.add_rect_annot(new_rect)
                if stringtowrite.startswith('Not'):
                    annot.set_colors(stroke=(0.5, 0.5, 0.5), fill=(0.5, 0.5, 0.5))
                else:
                    annot.set_colors(stroke=(1, 1, 0), fill=(1, 1, 0))

                annot.set_opacity(0.3)
                annot.update()

                # Add a right-aligned freetext annotation inside the fixed-width box
                text = '[' + stringtowrite + ']'
                annot1 = page.add_freetext_annot(
                    new_rect,
                    text,
                    fontsize=15,
                    fontname='helv',
                    text_color=(1, 0, 0),
                    rotate=page.rotation,
                    align=2  # right alignment
                )
                annot1.update()

def get_leaf_headers_with_paths(listtoloop, path=None, output=None):
    if path is None:
        path = []
    if output is None:
        output = []
    for header in listtoloop:
        current_path = path + [header['text']]
        if not header['children']:
            if header['level'] != 0 and header['level'] != 1:
                output.append((header, current_path))
        else:
            get_leaf_headers_with_paths(header['children'], current_path, output)
    return output

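# Only leaves below level 1 are returned, paired with their full path. A tiny
# self-check sketch on made-up nodes (not in the original file):
def _demo_leaf_headers():
    leaf = {'text': '1.1 summary', 'level': 2, 'children': []}
    tree = [{'text': 'part 1 general', 'level': 0, 'children': [leaf]}]
    assert get_leaf_headers_with_paths(tree) == [(leaf, ['part 1 general', '1.1 summary'])]
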
# Helpers used for fuzzy header matching
def words_match_ratio(text1, text2):
    words1 = set(text1.split())
    words2 = set(text2.split())
    if not words1 or not words2:
        return 0.0
    common_words = words1 & words2
    return len(common_words) / len(words1)

def same_start_word(s1, s2):
    # Split both strings into words
    words1 = s1.strip().split()
    words2 = s2.strip().split()

    # Check that both have at least one word, then compare the first ones
    if words1 and words2:
        return words1[0].lower() == words2[0].lower()
    return False

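# Note the asymmetry: the ratio is taken over text1's words, so the order of
# arguments matters. A small self-check sketch (not in the original file):
def _demo_words_match_ratio():
    assert words_match_ratio("roof membrane", "roof membrane installation") == 1.0
    assert abs(words_match_ratio("roof membrane installation", "roof membrane") - 2 / 3) < 1e-9
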
1374
+ def testFunction(pdf_path,model,LLM_prompt,LLMpromptHierarchy):
1375
+ Alltexttobebilled=''
1376
+ alltextWithoutNotbilled=''
1377
+ # keywordstoSkip=["installation", "execution", "miscellaneous items", "workmanship", "testing", "labeling"]
1378
+
1379
+ headertoContinue1 = False
1380
+ headertoContinue2=False
1381
+
1382
+ parsed_url = urlparse(pdf_path)
1383
+ filename = os.path.basename(parsed_url.path)
1384
+ filename = unquote(filename) # decode URL-encoded characters
1385
+
1386
+ # Optimized URL handling
1387
+ if pdf_path and ('http' in pdf_path or 'dropbox' in pdf_path):
1388
+ pdf_path = pdf_path.replace('dl=0', 'dl=1')
1389
+
1390
+ # Cache frequently used values
1391
+ response = requests.get(pdf_path)
1392
+ pdf_content = BytesIO(response.content)
1393
+ if not pdf_content:
1394
+ raise ValueError("No valid PDF content found.")
1395
+
1396
+ doc = fitz.open(stream=pdf_content, filetype="pdf")
1397
+ docHighlights = fitz.open(stream=pdf_content, filetype="pdf")
1398
+ parsed_url = urlparse(pdf_path)
1399
+ filename = os.path.basename(parsed_url.path)
1400
+ filename = unquote(filename) # decode URL-encoded characters
1401
+
1402
+ #### Get regular tex font size, style , color
1403
+ most_common_font_size, most_common_color, most_common_font = get_regular_font_size_and_color(doc)
1404
+
1405
+ # Precompute regex patterns
1406
+ dot_pattern = re.compile(r'\.{3,}')
1407
+ url_pattern = re.compile(r'https?://\S+|www\.\S+')
1408
+ highlighted=[]
1409
+ processed_subjects = set() # Initialize at the top of testFunction
1410
+ toc_pages = get_toc_page_numbers(doc)
1411
+ headers=process_document_in_chunks(doc,model,LLM_prompt)
1412
+
1413
+ # identified_headers = identify_headers_with_openrouterNEWW(doc, api_key='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')# ['text', fontsize, page number,y]
1414
+
1415
+ with open("identified_headers.json", "w", encoding="utf-8") as f:
1416
+ json.dump(headers, f, indent=4)
1417
+ # with open("identified_headers.json", "r", encoding="utf-8") as f:
1418
+ # headers = json.load(f)
1419
+ # print(identified_headers)
1420
+ allheaders_LLM=[]
1421
+ for h in headers:
1422
+ # if int(h["page"]) in toc_pages:
1423
+ # continue
1424
+ if h['text']:
1425
+ allheaders_LLM.append([h['text'],h["page"]])
1426
+ hierarchy=identify_hierarchy_levels_openrouter(allheaders_LLM,model,LLMpromptHierarchy)
1427
+ with open("identified_hierarchy.json", "w", encoding="utf-8") as f:
1428
+ json.dump(hierarchy, f, indent=4)
1429
+ # with open("identified_hierarchy.json", "r", encoding="utf-8") as f:
1430
+ # hierarchy = json.load(f)
1431
+
1432
+ identified_headers=mapPages_header_hierarchy(headers,hierarchy)
1433
+ print('identified_headers',identified_headers)
1434
+ headers_json=headers_with_location(doc,identified_headers)
1435
+ headers=filter_headers_outside_toc(headers_json,toc_pages)
1436
+ hierarchy=build_hierarchy_from_llm(headers)
1437
+ # identify_headers_and_save_excel(hierarchy)
1438
+ listofHeaderstoMarkup = get_leaf_headers_with_paths(hierarchy)
1439
+ allchildrenheaders = [normalize_text(item['text']) for item, p in listofHeaderstoMarkup]
1440
+ allchildrenheaders_set = set(allchildrenheaders) # For faster lookups
1441
+ # print('allchildrenheaders_set',allchildrenheaders_set)
1442
+ df = pd.DataFrame(columns=["NBSLink","Subject","Page","Author","Creation Date","Layer",'Code', 'head above 1', "head above 2",'BodyText'])
1443
+ dictionaryNBS={}
1444
+ data_list_JSON = []
1445
+ for heading_to_searchDict,pathss in listofHeaderstoMarkup:
1446
+ heading_to_search = heading_to_searchDict['text']
1447
+ heading_to_searchPageNum = heading_to_searchDict['page']
1448
+ paths=heading_to_searchDict['path']
1449
+ # xloc=heading_to_searchDict['x']
1450
+ yloc=heading_to_searchDict['y']
1451
+ # Initialize variables
1452
+ headertoContinue1 = False
1453
+ headertoContinue2 = False
1454
+ matched_header_line = None
1455
+ done = False
1456
+ collecting = False
1457
+ collected_lines = []
1458
+ page_highlights = {}
1459
+ current_bbox = {}
1460
+ last_y1s = {}
1461
+ mainHeader = ''
1462
+ subHeader = ''
1463
+ matched_header_line_norm = heading_to_search
1464
+ break_collecting = False
1465
+ heading_norm = normalize_text(heading_to_search)
1466
+ paths_norm = [normalize_text(p) for p in paths[0]] if paths and paths[0] else []
1467
+
1468
+ for page_num in range(heading_to_searchPageNum,len(doc)):
1469
+ if page_num in toc_pages:
1470
+ continue
1471
+ if break_collecting:
1472
+ break
1473
+ page=doc[page_num]
1474
+ page_height = page.rect.height
1475
+ blocks = page.get_text("dict")["blocks"]
1476
+
1477
+ for block in blocks:
1478
+ if break_collecting:
1479
+ break
1480
+
1481
+ lines = block.get("lines", [])
1482
+ i = 0
1483
+ while i < len(lines):
1484
+ if break_collecting:
1485
+ break
1486
+
1487
+ spans = lines[i].get("spans", [])
1488
+ if not spans:
1489
+ i += 1
1490
+ continue
1491
+
1492
+ # y0 = spans[0]["bbox"][1]
1493
+ # y1 = spans[0]["bbox"][3]
1494
+ x0 = spans[0]["bbox"][0] # left
1495
+ x1 = spans[0]["bbox"][2] # right
1496
+ y0 = spans[0]["bbox"][1] # top
1497
+ y1 = spans[0]["bbox"][3] # bottom
1498
+
1499
+ if y0 < top_margin or y1 > (page_height - bottom_margin):
1500
+ i += 1
1501
+ continue
1502
+
1503
+ line_text = get_spaced_text_from_spans(spans).lower()
1504
+ line_text_norm = normalize_text(line_text)
1505
+
1506
+ # Combine with next line if available
1507
+ if i + 1 < len(lines):
1508
+ next_spans = lines[i + 1].get("spans", [])
1509
+ next_line_text = get_spaced_text_from_spans(next_spans).lower()
1510
+ combined_line_norm = normalize_text(line_text + " " + next_line_text)
1511
+ else:
1512
+ combined_line_norm = line_text_norm
1513
+
1514
+ # Check if we should continue processing
1515
+ if combined_line_norm and combined_line_norm in paths[0]:
1516
+
1517
+ headertoContinue1 = combined_line_norm
1518
+ if combined_line_norm and combined_line_norm in paths[-2]:
1519
+
1520
+ headertoContinue2 = combined_line_norm
1521
+ # print('paths',paths)
1522
+
1523
+ # if 'installation' in paths[-2].lower() or 'execution' in paths[-2].lower() or 'miscellaneous items' in paths[-2].lower() :
1524
+ # if any(word in paths[-2].lower() for word in keywordstoSkip):
1525
+ # stringtowrite='Not to be billed'
1526
+ # else:
1527
+ stringtowrite='To be billed'
1528
+ if stringtowrite!='To be billed':
1529
+ alltextWithoutNotbilled+= combined_line_norm #################################################
1530
+ # Optimized header matching
1531
+ existsfull = (
1532
+ ( combined_line_norm in allchildrenheaders_set or
1533
+ combined_line_norm in allchildrenheaders ) and heading_to_search in combined_line_norm
1534
+ )
1535
+ # existsfull=False
1536
+ # if xloc==x0 and yloc ==y0:
1537
+ # existsfull=True
1538
+ # New word-based matching
1539
+ current_line_words = set(combined_line_norm.split())
1540
+ heading_words = set(heading_norm.split())
1541
+ all_words_match = current_line_words.issubset(heading_words) and len(current_line_words) > 0
1542
+
1543
+ substring_match = (
1544
+ heading_norm in combined_line_norm or
1545
+ combined_line_norm in heading_norm or
1546
+ all_words_match # Include the new word-based matching
1547
+ )
1548
+ # substring_match = (
1549
+ # heading_norm in combined_line_norm or
1550
+ # combined_line_norm in heading_norm
1551
+ # )
1552
+
+                 if (substring_match and existsfull and not collecting and
+                         len(combined_line_norm) > 0):
+                     # (an extra 'headertoContinue1 or headertoContinue2' guard
+                     # and font-based header_spans filtering are disabled here)
+                     if stringtowrite.startswith('To'):
+                         collecting = True
+                         Alltexttobebilled += ' ' + combined_line_norm
+
+                         # collected_lines.append(line_text)  # the header text itself is not collected here
+                         valid_spans = [span for span in spans if span.get("bbox")]
+
+                         if valid_spans:
+                             x0s = [span["bbox"][0] for span in valid_spans]
+                             x1s = [span["bbox"][2] for span in valid_spans]
+                             y0s = [span["bbox"][1] for span in valid_spans]
+                             y1s = [span["bbox"][3] for span in valid_spans]
+
+                             header_bbox = [min(x0s), min(y0s), max(x1s), max(y1s)]
+
+                             # Grow the running per-page bounding box to cover this line
+                             if page_num in current_bbox:
+                                 cb = current_bbox[page_num]
+                                 current_bbox[page_num] = [
+                                     min(cb[0], header_bbox[0]),
+                                     min(cb[1], header_bbox[1]),
+                                     max(cb[2], header_bbox[2]),
+                                     max(cb[3], header_bbox[3])
+                                 ]
+                             else:
+                                 current_bbox[page_num] = header_bbox
+                             last_y1s[page_num] = header_bbox[3]
+                             x0, y0, x1, y1 = header_bbox
+
+                             zoom = 200
+                             left = int(x0)
+                             top = int(y0)
+                             zoom_str = f"{zoom},{left},{top}"
+                             pageNumberFound = page_num + 1
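+                             # zoom_str follows the PDF open-parameter convention
+                             # "zoom,left,top", so a viewer URL ending in e.g.
+                             # "#page=12&zoom=200,72,540" opens page 12 at 200%
+                             # scrolled to that coordinate.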
+
+                             # Build and URL-encode the viewer query parameters
+                             params = {
+                                 'pdfLink': pdf_path,
+                                 'keyword': heading_to_search,  # heading text (must be a string for quote())
+                             }
+                             encoded_params = {key: urllib.parse.quote(value, safe='') for key, value in params.items()}
+                             encoded_link = '&'.join([f"{key}={value}" for key, value in encoded_params.items()])
+
+                             # Final URL with page and zoom fragments
+                             final_url = f"{baselink}{encoded_link}#page={pageNumberFound}&zoom={zoom_str}"
+
+                             # Timestamp for the annotation, e.g. "25/12/2024 03:07:42 PM"
+                             now = datetime.now()
+                             formatted_time = now.strftime("%d/%m/%Y %I:%M:%S %p")
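+                             # Each entry ends up shaped roughly like:
+                             # {"NBSLink": "200,72,540", "Subject": "...", "Page": "12",
+                             #  "Code": "To be billed", "BodyText": [...],
+                             #  "head above 1": "...", ...}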
+
+                             # Create the data entry only if the subject is unique
+                             if heading_to_search not in processed_subjects:
+                                 data_entry = {
+                                     "NBSLink": zoom_str,
+                                     "Subject": heading_to_search,
+                                     "Page": str(pageNumberFound),
+                                     "Author": "ADR",
+                                     "Creation Date": formatted_time,
+                                     "Layer": "Initial",
+                                     "Code": stringtowrite,
+                                     "BodyText": collected_lines,
+                                     "MC Connection": 'Go to ' + paths[0].strip().split()[0] + '/' + heading_to_search.strip().split()[0] + ' in ' + filename
+                                 }
+
+                                 # Dynamically add hierarchy paths (use idx, not i,
+                                 # to avoid clobbering the while-loop index)
+                                 for idx, path_text in enumerate(paths[:-1]):
+                                     data_entry[f"head above {idx+1}"] = path_text
+
+                                 # Append to the list and mark this subject as processed
+                                 data_list_JSON.append(data_entry)
+                                 processed_subjects.add(heading_to_search)
+                             else:
+                                 print(f"Skipping duplicate data entry for Subject: {heading_to_search}")
+
+                             # Serialize the running list (the dump just before the
+                             # final return is the authoritative one)
+                             json_output = json.dumps(data_list_JSON, indent=4)
+
+                     i += 1
+                     continue
+                 else:
+                     if (substring_match and not collecting and
+                             len(combined_line_norm) > 0):
+
+                         # Word match percentage between heading and line
+                         word_match_percent = words_match_ratio(heading_norm, combined_line_norm) * 100
+
+                         # Require every heading word to appear in the line
+                         # (the threshold is 100%, not the 70% once used)
+                         meets_word_threshold = word_match_percent >= 100
+
+                         if (meets_word_threshold or same_start_word(heading_to_search, combined_line_norm)) and stringtowrite.startswith('To'):
+                             collecting = True
+                             Alltexttobebilled += ' ' + combined_line_norm
+
+                             collected_lines.append(line_text)
+                             valid_spans = [span for span in spans if span.get("bbox")]
+
+                             if valid_spans:
+                                 x0s = [span["bbox"][0] for span in valid_spans]
+                                 x1s = [span["bbox"][2] for span in valid_spans]
+                                 y0s = [span["bbox"][1] for span in valid_spans]
+                                 y1s = [span["bbox"][3] for span in valid_spans]
+
+                                 header_bbox = [min(x0s), min(y0s), max(x1s), max(y1s)]
+
+                                 # Grow the running per-page bounding box, as above
+                                 if page_num in current_bbox:
+                                     cb = current_bbox[page_num]
+                                     current_bbox[page_num] = [
+                                         min(cb[0], header_bbox[0]),
+                                         min(cb[1], header_bbox[1]),
+                                         max(cb[2], header_bbox[2]),
+                                         max(cb[3], header_bbox[3])
+                                     ]
+                                 else:
+                                     current_bbox[page_num] = header_bbox
+
+                                 last_y1s[page_num] = header_bbox[3]
+                                 x0, y0, x1, y1 = header_bbox
+                                 zoom = 200
+                                 left = int(x0)
+                                 top = int(y0)
+                                 zoom_str = f"{zoom},{left},{top}"
+                                 pageNumberFound = page_num + 1
+
+                                 # Build and URL-encode the viewer query parameters
+                                 params = {
+                                     'pdfLink': pdf_path,
+                                     'keyword': heading_to_search,
+                                 }
+                                 encoded_params = {key: urllib.parse.quote(value, safe='') for key, value in params.items()}
+                                 encoded_link = '&'.join([f"{key}={value}" for key, value in encoded_params.items()])
+
+                                 # Final URL with page and zoom fragments
+                                 final_url = f"{baselink}{encoded_link}#page={pageNumberFound}&zoom={zoom_str}"
+
+                                 # Timestamp for the annotation
+                                 now = datetime.now()
+                                 formatted_time = now.strftime("%d/%m/%Y %I:%M:%S %p")
+
+                                 # Create the data entry only if the subject is unique
+                                 if heading_to_search not in processed_subjects:
+                                     data_entry = {
+                                         "NBSLink": zoom_str,
+                                         "Subject": heading_to_search,
+                                         "Page": str(pageNumberFound),
+                                         "Author": "ADR",
+                                         "Creation Date": formatted_time,
+                                         "Layer": "Initial",
+                                         "Code": stringtowrite,
+                                         "BodyText": collected_lines,
+                                         "MC Connection": 'Go to ' + paths[0].strip().split()[0] + '/' + heading_to_search.strip().split()[0] + ' in ' + filename
+                                     }
+
+                                     # Dynamically add hierarchy paths (idx, not i)
+                                     for idx, path_text in enumerate(paths[:-1]):
+                                         data_entry[f"head above {idx+1}"] = path_text
+
+                                     # Append to the list and mark this subject as processed
+                                     data_list_JSON.append(data_entry)
+                                     processed_subjects.add(heading_to_search)
+                                 else:
+                                     print(f"Skipping duplicate data entry for Subject: {heading_to_search}")
+                                 json_output = json.dumps(data_list_JSON, indent=4)
+
+                         # Advance by two: the next physical line was already
+                         # consumed as part of combined_line_norm
+                         i += 2
+                         continue
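+                 # Collecting phase: once a heading has matched, every following
+                 # line is treated as body text until the next real header.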
+                 if collecting:
+                     norm_line = normalize_text(line_text)
+
+                     # Local helpers (currently unused in this branch;
+                     # SequenceMatcher comes from difflib)
+                     def normalize(text):
+                         if isinstance(text, list):
+                             text = " ".join(text)
+                         return " ".join(text.lower().split())
+
+                     def is_similar(a, b, threshold=0.75):
+                         return SequenceMatcher(None, a, b).ratio() >= threshold
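+                     # e.g. is_similar("general fittings", "general fitting")
+                     # is True: the SequenceMatcher ratio is ~0.97 >= 0.75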
+
+                     # URLs are never headers
+                     if url_pattern.match(norm_line):
+                         line_is_header = False
+                     else:
+                         line_is_header = any(is_header(span, most_common_font_size, most_common_color, most_common_font, allheaders_LLM) for span in spans)
+
+                     if line_is_header:
+                         header_font_size = max(span["size"] for span in spans)
+                         # Font-size comparison is disabled; any header-styled
+                         # line longer than two characters counts
+                         is_probably_real_header = len(line_text.strip()) > 2
+
+                         # A new real header that is not the one we matched ends
+                         # the current section
+                         if (norm_line != matched_header_line_norm and
+                                 norm_line != heading_norm and
+                                 is_probably_real_header):
+                             if line_text not in heading_norm:
+                                 collecting = False
+                                 done = True
+                                 headertoContinue1 = False
+                                 headertoContinue2 = False
+                                 for page_num, bbox in current_bbox.items():
+                                     bbox[3] = last_y1s.get(page_num, bbox[3])
+                                     page_highlights[page_num] = bbox
+                                     can_highlight = False
+                                     if [page_num, bbox] not in highlighted:
+                                         highlighted.append([page_num, bbox])
+                                         can_highlight = True
+                                     if can_highlight:
+                                         highlight_boxes(docHighlights, page_highlights, stringtowrite)
+
+                                 break_collecting = True
+                                 break
+
+                     if break_collecting:
+                         break
+
+                     # Accumulate this body line and extend the per-page bbox
+                     collected_lines.append(line_text)
+
+                     valid_spans = [span for span in spans if span.get("bbox")]
+                     if valid_spans:
+                         x0s = [span["bbox"][0] for span in valid_spans]
+                         x1s = [span["bbox"][2] for span in valid_spans]
+                         y0s = [span["bbox"][1] for span in valid_spans]
+                         y1s = [span["bbox"][3] for span in valid_spans]
+
+                         line_bbox = [min(x0s), min(y0s), max(x1s), max(y1s)]
+
+                         if page_num in current_bbox:
+                             cb = current_bbox[page_num]
+                             current_bbox[page_num] = [
+                                 min(cb[0], line_bbox[0]),
+                                 min(cb[1], line_bbox[1]),
+                                 max(cb[2], line_bbox[2]),
+                                 max(cb[3], line_bbox[3])
+                             ]
+                         else:
+                             current_bbox[page_num] = line_bbox
+
+                         last_y1s[page_num] = line_bbox[3]
+                 i += 1
+
+     # If the document ended while still collecting, flush the last section
+     if not done:
+         for page_num, bbox in current_bbox.items():
+             bbox[3] = last_y1s.get(page_num, bbox[3])
+             page_highlights[page_num] = bbox
+         # Keyword-based skipping is disabled here as well
+         stringtowrite = 'To be billed'
+
+         highlight_boxes(docHighlights, page_highlights, stringtowrite)
+
+     print("Current working directory:", os.getcwd())
+
+     docHighlights.save("highlighted_output.pdf")
+     # Backfill the last entry's body if it ended up empty (drop the first
+     # collected line, which is the heading itself)
+     if data_list_JSON and not data_list_JSON[-1]["BodyText"] and collected_lines:
+         data_list_JSON[-1]["BodyText"] = collected_lines[1:]
+     # Final cleanup before returning: drop a body's first line when it merely
+     # repeats the Subject
+     for entry in data_list_JSON:
+         if isinstance(entry.get("BodyText"), list) and len(entry["BodyText"]) > 0:
+             first_line = normalize_text(entry["BodyText"][0])
+             subject = normalize_text(entry["Subject"])
+
+             if subject in first_line or first_line in subject:
+                 entry["BodyText"] = entry["BodyText"][1:]
+
+     json_output = json.dumps(data_list_JSON, indent=4)
+     return json_output, identified_headers
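+
+ # The function above (presumably testFunction, called below) returns a pair:
+ # the JSON string of per-heading entries and the identified header records
+ # that the Excel writer turns into rows.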
+
+
+ def identify_headers_and_save_excel(pdf_path, model, LLM_prompt, LLMpromptHierarchy):
+     try:
+         jsons, result = testFunction(pdf_path, model, LLM_prompt, LLMpromptHierarchy)
+
+         if not result:
+             df = pd.DataFrame([{
+                 "text": None,
+                 "page": None,
+                 "suggested_level": None,
+                 "confidence": None,
+                 "body": None,
+                 "System Message": "No headers were identified by the LLM."
+             }])
+
+         else:
+             df = pd.DataFrame(result)
+
+             # Convert the JSON string to a list if needed
+             if isinstance(jsons, str):
+                 jsons = json.loads(jsons)
+
+             subject_body_map = {}
+
+             # ✅ jsons is a flat list of dicts
+             for obj in jsons:
+                 if not isinstance(obj, dict):
+                     continue
+
+                 subject = obj.get("Subject")
+                 body = obj.get("BodyText", [])
+
+                 if subject:
+                     subject_body_map[subject.strip()] = " ".join(body)
+
+             # ✅ Map each header's body text onto the dataframe rows
+             df["body"] = df["text"].map(subject_body_map).fillna("")
+
+         # ✅ Save once at the end (requires openpyxl)
+         output_path = os.path.abspath("header_analysis_output.xlsx")
+         df.to_excel(output_path, index=False, engine="openpyxl")
+
+         print("--- Processed DataFrame ---")
+         print(df)
+
+         return output_path
+
+     except Exception as e:
+         logger.error(f"Critical error in processing: {str(e)}")
+         return None
+
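+ # Example usage (a sketch — the model object and the two prompt strings are
+ # built elsewhere in this file, so the names below are placeholders):
+ #
+ #     xlsx = identify_headers_and_save_excel(
+ #         "input.pdf", model, LLM_prompt, LLMpromptHierarchy)
+ #     if xlsx:
+ #         print("Header analysis written to", xlsx)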