Upload Find_Hyperlinking_text.py

Find_Hyperlinking_text.py (ADDED, +392 -0)
@@ -0,0 +1,392 @@
import fitz  # PyMuPDF
from io import BytesIO
import re
import requests
import pandas as pd
from collections import Counter
import urllib.parse
import tempfile
from fpdf import FPDF


baselink = 'https://marthee-nbslink.hf.space/view-pdf?'
class PDF(FPDF):
    def header(self):
        self.set_font("Arial", "B", 12)
        self.cell(0, 10, "NBS Document Links", ln=True, align="C")
        self.ln(5)  # Space after header


def save_df_to_pdf(df):
    pdf = PDF()
    pdf.set_auto_page_break(auto=True, margin=15)

    # Set equal margins
    margin = 15
    pdf.set_left_margin(margin)
    pdf.set_right_margin(margin)

    pdf.add_page()
    pdf.set_font("Arial", size=10)

    # Set column widths and calculate total table width
    col_width = 50
    num_cols = 4
    table_width = col_width * num_cols

    # Get page width and calculate the x position that centers the table
    page_width = pdf.w
    start_x = (page_width - table_width) / 2  # Centering the table

    pdf.set_x(start_x)  # Move to calculated start position

    # Table headers
    pdf.set_fill_color(200, 200, 200)  # Light gray background
    pdf.set_font("Arial", "B", 10)
    headers = ["NBS Link", "NBS", "Head Above 1", "Head Above 2"]

    # Draw table headers
    for header in headers:
        pdf.cell(col_width, 8, header, border=1, fill=True, align="C")
    pdf.ln()

    pdf.set_font("Arial", size=9)

    # Add rows
    for _, row in df.iterrows():
        x_start = start_x  # Ensure every row starts at the same position
        y_start = pdf.get_y()

        # Calculate max height needed for this row
        text_lines = {col: pdf.multi_cell(col_width, 5, row[col], border=0, align="L", split_only=True)
                      for col in ["NBS", "head above 1", "head above 2"]}
        max_lines = max(len(lines) for lines in text_lines.values())
        max_height = max_lines * 5

        pdf.set_x(x_start)  # Ensure correct alignment for each row

        # Clickable link cell (keeps same height as others)
        pdf.cell(col_width, max_height, "Click Here", border=1, link=row["NBSLink"], align="C")

        # Move to next column
        pdf.set_xy(x_start + col_width, y_start)

        # Draw each cell manually, ensuring equal height
        for i, col_name in enumerate(["NBS", "head above 1", "head above 2"]):
            x_col = x_start + col_width * (i + 1)
            y_col = y_start
            pdf.multi_cell(col_width, 5, row[col_name], border=0, align="L")  # Draw text
            pdf.rect(x_col, y_col, col_width, max_height)  # Draw border
            pdf.set_xy(x_col + col_width, y_start)  # Move to next column

        # Move to the next row
        pdf.ln(max_height)

    # Save PDF to memory instead of a file
    # pdf_output = BytesIO()
    # pdf_output = 'output.pdf'
    pdf_output = pdf.output(dest="S").encode("latin1")  # Returns the PDF as a byte string

    return pdf_output
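
# A minimal sketch of driving save_df_to_pdf directly. The sample row below is
# hypothetical; the column names match the DataFrame built in
# annotate_text_from_pdf further down.
#
#     demo_df = pd.DataFrame([{
#         "NBSLink": "https://example.com/doc.pdf#page=3&zoom=200,56,140",
#         "NBS": "F10 Brick/block walling",
#         "head above 1": "Masonry",
#         "head above 2": "F10 Structural units",
#     }])
#     with open("links_table.pdf", "wb") as fh:
#         fh.write(save_df_to_pdf(demo_df))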


def normalize_text(text):
    """Lowercase, remove extra spaces, and strip special characters."""
    text = text.lower().strip()
    text = re.sub(r'\s+', ' ', text)  # Normalize multiple spaces
    return re.sub(r'[^\w\s]', '', text)  # Remove punctuation


def get_repeated_texts(pdf_document, threshold=0.9):
    """
    Identify text that appears on most pages (typically running headers and footers).

    :param pdf_document: The opened PDF document.
    :param threshold: The fraction of pages a line must appear on to be considered
        "repeated" (e.g. 0.9 keeps lines occurring on at least 90% of pages).
    """
    text_counts = Counter()
    total_pages = pdf_document.page_count

    for page_num in range(total_pages):
        page = pdf_document.load_page(page_num)
        page_text = page.get_text("text")
        normalized_lines = {normalize_text(line) for line in page_text.splitlines() if line.strip()}

        text_counts.update(normalized_lines)

    # Find texts that appear in at least `threshold * total_pages` pages
    min_occurrence = max(1, int(threshold * total_pages))
    repeated_texts = {text for text, count in text_counts.items() if count >= min_occurrence}
    return repeated_texts


def split_links(links_string):
    """Split a comma-separated string of links into a list of trimmed links."""
    return [link.strip() for link in links_string.split(',')]
|
| 125 |
+
"""
|
| 126 |
+
Annotates text under a specific heading in a PDF, highlights it,
|
| 127 |
+
and constructs zoom coordinates for the first occurrence of the heading.
|
| 128 |
+
Args:
|
| 129 |
+
pdfshareablelinks (list): List of shareable links to PDFs.
|
| 130 |
+
heading_to_search (str): The heading to search for in the PDF.
|
| 131 |
+
Returns:
|
| 132 |
+
Tuple: Annotated PDF bytes, count of heading occurrences, and zoom string.
|
| 133 |
+
"""
|
| 134 |
+
print("Input links:", pdfshareablelinks)
|
| 135 |
+
print(LISTheading_to_search)
|
| 136 |
+
|
| 137 |
+
link = pdfshareablelinks[0]
|
| 138 |
+
pdf_content = None
|
| 139 |
+
headings_TOC = []
|
| 140 |
+
# Modify Dropbox shareable link for direct download
|
| 141 |
+
if link and ('http' in link or 'dropbox' in link):
|
| 142 |
+
if 'dl=0' in link:
|
| 143 |
+
link = link.replace('dl=0', 'dl=1')
|
| 144 |
+
|
| 145 |
+
# Download the PDF content from the shareable link
|
| 146 |
+
response = requests.get(link)
|
| 147 |
+
pdf_content = BytesIO(response.content) # Store the content in memory
|
| 148 |
+
if pdf_content is None:
|
| 149 |
+
raise ValueError("No valid PDF content found.")
|
| 150 |
+
|
| 151 |
+
# Open the PDF using PyMuPDF
|
| 152 |
+
pdf_document = fitz.open(stream=pdf_content, filetype="pdf")
|
| 153 |
+
repeated_texts = get_repeated_texts(pdf_document)
|
| 154 |
+
df = pd.DataFrame(columns=["NBSLink","NBS", 'head above 1', "head above 2"])
|
| 155 |
+
dictionaryNBS={}
|
| 156 |
+
for NBSindex, heading_to_search in enumerate(LISTheading_to_search):
|
| 157 |
+
if NBSindex == len(LISTheading_to_search) - 1:
|
| 158 |
+
flagAllNBSvisited = True
|
| 159 |
+
all_text = []
|
| 160 |
+
current_line = ""
|
| 161 |
+
collecting_text = False
|
| 162 |
+
f10_count = 0
|
| 163 |
+
current_y = None
|
| 164 |
+
highlight_rect = None
|
| 165 |
+
zoom_str = None
|
| 166 |
+
toc_flag = False
|
| 167 |
+
span_font_goal = None
|
| 168 |
+
span_size_goal = None
|
| 169 |
+
pageNumberFound = None
|
| 170 |
+
groupheadings = []
|
| 171 |
+
merged_groupheadings = []
|
| 172 |
+
collectheader2 = False
|
| 173 |
+
header2 = ''
|
| 174 |
+
header2_first_span_size = 0
|
| 175 |
+
previous_header = ''
|
| 176 |
+
next_span_text = ''
|
| 177 |
+
current_line_span_size = 0
|
| 178 |
+
flagAllNBSvisited = False
|
| 179 |
+
|
| 180 |
+
text = ''
|
| 181 |
+
heading_to_searchNBS = heading_to_search
|
| 182 |
+
heading_words = heading_to_search.split() # Split heading into words
|
| 183 |
+
first_word = heading_words[0] # First word to search for
|
| 184 |
+
remaining_words = heading_words[1:] # Remaining words to verify
|
| 185 |
+
print(heading_words)
|
| 186 |
+
heading_to_search = heading_to_search.replace(" ", "")
|
| 187 |
+
|
| 188 |
+
# Process each page in the PDF
|
| 189 |
+
for page_num in range(pdf_document.page_count):
|
| 190 |
+
page = pdf_document.load_page(page_num)
|
| 191 |
+
# Get page dimensions
|
| 192 |
+
page_height = page.rect.height
|
| 193 |
+
header_threshold = page_height * 0.1 # Top 10% of the page height
|
| 194 |
+
footer_threshold = page_height * 0.9 # Bottom 10% of the page height
|
| 195 |
+
|
| 196 |
+
# Extract text in dictionary format
|
| 197 |
+
text_dict = page.get_text("dict")
|
| 198 |
+
|
| 199 |
+
# Collect header y-coordinates to detect header area
|
| 200 |
+
header_threshold = 0 # Header area: top 10% of the page height
|
| 201 |
+
current_line_text = ""
|
| 202 |
+
previous_y = None
|
| 203 |
+
# Process text blocks
|
| 204 |
+
for block in text_dict['blocks']:
|
| 205 |
+
for line_index, line in enumerate(block.get('lines', [])):
|
| 206 |
+
spans = line.get('spans', [])
|
| 207 |
+
if spans and any(span['text'].strip() for span in spans):
|
| 208 |
+
for i, span in enumerate(spans):
|
| 209 |
+
span_text = span['text'].strip()
|
| 210 |
+
highlight_rect = span['bbox']
|
| 211 |
+
span_y = span['bbox'][1]
|
| 212 |
+
span_font = span['font']
|
| 213 |
+
span_size = span['size']
|
| 214 |
+
|
| 215 |
+
if previous_y is None:
|
| 216 |
+
previous_y = span_y # Initialize on first span
|
| 217 |
+
|
| 218 |
+
# If same Y coordinate as previous, append to the current line
|
| 219 |
+
if abs(span_y - previous_y) < 5: # Allow a small margin for OCR variations
|
| 220 |
+
current_line_text += " " + span_text
|
| 221 |
+
current_line_text = normalize_text(current_line_text)
|
| 222 |
+
current_line_span_size = span_size
|
| 223 |
+
else:
|
| 224 |
+
# Store the complete line and reset for the new line
|
| 225 |
+
if current_line_text.strip():
|
| 226 |
+
all_text.append(current_line_text.strip())
|
| 227 |
+
|
| 228 |
+
current_line_text = span_text # Start a new line
|
| 229 |
+
previous_y = span_y # Update the reference Y
|
| 230 |
+
text = span_text
|
| 231 |
+
if collecting_text and span_font == span_font_goal and span_size == span_size_goal and span_text[0].isdigit():
|
| 232 |
+
print(f"Ending collection at heading: {span_text}")
|
| 233 |
+
print("merged_groupheadings:", merged_groupheadings)
|
| 234 |
+
print('groupheadingss',groupheadings)
|
| 235 |
+
collecting_text = False
|
| 236 |
+
continue
|
| 237 |
+
if collecting_text:
|
| 238 |
+
annot = page.add_highlight_annot(highlight_rect)
|
| 239 |
+
annot.update()
|
| 240 |
+
|
| 241 |
+
if 'Content' in span_text:
|
| 242 |
+
toc_flag = True
|
| 243 |
+
TOC_start = span_text
|
| 244 |
+
print('content', TOC_start, span_size)
|
| 245 |
+
|
| 246 |
+
if toc_flag:
|
| 247 |
+
if 'Content' not in span_text:
|
| 248 |
+
if current_y is None:
|
| 249 |
+
current_y = span_y
|
| 250 |
+
current_size = span_size # Initialize the reference span size
|
| 251 |
+
# Check if the current span size deviates significantly
|
| 252 |
+
if abs(span_size - current_size) > 1: # Threshold for size difference
|
| 253 |
+
toc_flag = False
|
| 254 |
+
|
| 255 |
+
if abs(current_y - span_y) < 5: # Allowing more flexibility for multi-line headings
|
| 256 |
+
current_line += " " + span_text # Keep accumulating text
|
| 257 |
+
else:
|
| 258 |
+
if current_line.strip(): # Only process non-empty lines
|
| 259 |
+
print('current_line',current_line)
|
| 260 |
+
pattern = r"^([A-Za-z0-9\s\/\-,]+)(?=\.+)"
|
| 261 |
+
match = re.match(pattern, current_line.strip())
|
| 262 |
+
|
| 263 |
+
if match:
|
| 264 |
+
groupheadings.append(match.group(1).strip())
|
| 265 |
+
# else:
|
| 266 |
+
# toc_flag = False
|
| 267 |
+
|
| 268 |
+
current_line = span_text
|
| 269 |
+
current_y = span_y
|
| 270 |
+
current_size = span_size # Update reference span size
|
| 271 |
+
# print('outofcurrent')
|
| 272 |
+
if len(groupheadings) > 0:
|
| 273 |
+
pattern = re.compile(r"^[A-Za-z]\d{2} ") # Match headings starting with letter + 2 digits
|
| 274 |
+
merged_groupheadings = []
|
| 275 |
+
current_item = None # Start as None to avoid an initial blank entry
|
| 276 |
+
|
| 277 |
+
for item in groupheadings:
|
| 278 |
+
if pattern.match(item): # If item starts with correct pattern, it's a new heading
|
| 279 |
+
if current_item: # Append only if current_item is not empty
|
| 280 |
+
merged_groupheadings.append(current_item.strip())
|
| 281 |
+
current_item = item # Start new heading
|
| 282 |
+
else:
|
| 283 |
+
if current_item:
|
| 284 |
+
current_item += " " + item # Merge with previous heading
|
| 285 |
+
|
| 286 |
+
# Append last merged item after loop
|
| 287 |
+
if current_item:
|
| 288 |
+
merged_groupheadings.append(current_item.strip())
|
| 289 |
+
if span_text == first_word:
|
| 290 |
+
print('First word found:', span_text)
|
| 291 |
+
# Check if it's not the last span in the current line
|
| 292 |
+
print(i + 1, len(spans))
|
| 293 |
+
if i + 1 < len(spans):
|
| 294 |
+
next_span_text = (spans[i + 1]['text'].strip())
|
| 295 |
+
# Check if the next span's text is in the heading list
|
| 296 |
+
if next_span_text.replace(" ", "") in heading_to_search.replace(" ", ""):
|
| 297 |
+
text = (span_text + ' ' + next_span_text)
|
| 298 |
+
# After processing the current line, check if there's a next line
|
| 299 |
+
if first_word == span_text:
|
| 300 |
+
if line_index + 1 < len(block.get('lines', [])):
|
| 301 |
+
next_line = block['lines'][line_index + 1]
|
| 302 |
+
# You can process the spans of the next line here
|
| 303 |
+
for next_span in next_line.get('spans', []):
|
| 304 |
+
next_span_text = next_span['text'].strip()
|
| 305 |
+
text = span_text + ' ' + next_span_text
|
| 306 |
+
if len(merged_groupheadings) > 0:
|
| 307 |
+
if re.match(r"[A-Za-z]\d{2}", span_text) and span_size > 10:
|
| 308 |
+
previous_header = span_text # Store last detected header
|
| 309 |
+
print('previous_header', span_text)
|
| 310 |
+
groupmainheadingFromArray = [item for item in merged_groupheadings if previous_header in item]
|
| 311 |
+
|
| 312 |
+
if previous_header:
|
| 313 |
+
if not collectheader2:
|
| 314 |
+
if header2_first_span_size == 0:
|
| 315 |
+
spanSizeHeader = 10
|
| 316 |
+
else:
|
| 317 |
+
spanSizeHeader = header2_first_span_size
|
| 318 |
+
|
| 319 |
+
for item in groupmainheadingFromArray:
|
| 320 |
+
if not any(normalize_text(current_line_text) in normalize_text(item) for item in groupmainheadingFromArray):
|
| 321 |
+
if span_size >= spanSizeHeader:
|
| 322 |
+
if not re.match(r"^\d{2}", current_line_text) and current_line_text not in repeated_texts and "Bold" in span["font"] :
|
| 323 |
+
if len(header2) > 0:
|
| 324 |
+
header2_first_span_size = span_size
|
| 325 |
+
header2 = current_line_text
|
| 326 |
+
print('header2', header2, span_size, spanSizeHeader)
|
| 327 |
+
|
| 328 |
+
trimmed_text = text.replace(" ", "")
|
| 329 |
+
if len(text) > 0:
|
| 330 |
+
if text.split()[0] in heading_words:
|
| 331 |
+
if len(trimmed_text) > 0 and (heading_to_search.replace(" ", "") in trimmed_text):
|
| 332 |
+
print(trimmed_text, heading_to_search)
|
| 333 |
+
f10_count += 1
|
| 334 |
+
# Start collecting text under the second occurrence of the heading
|
| 335 |
+
if f10_count == 1:
|
| 336 |
+
collecting_text = True
|
| 337 |
+
print(f"Starting collection under heading: {text}, {span_font}, {span_size}")
|
| 338 |
+
collectheader2 = True
|
| 339 |
+
NBS_heading = heading_to_searchNBS
|
| 340 |
+
x0, y0, x1, y1 = highlight_rect
|
| 341 |
+
|
| 342 |
+
span_font_goal = span_font # Capture the font at the first heading match
|
| 343 |
+
span_size_goal = span_size # Capture the size at the first heading match
|
| 344 |
+
zoom = 200
|
| 345 |
+
left = int(x0)
|
| 346 |
+
top = int(y0)
|
| 347 |
+
zoom_str = f"{zoom},{left},{top}"
|
| 348 |
+
pageNumberFound = page_num + 1
|
| 349 |
+
dictionaryNBS[heading_to_searchNBS] = [pageNumberFound, zoom_str]
|
| 350 |
+
|
| 351 |
+
annot = page.add_highlight_annot(highlight_rect)
|
| 352 |
+
annot.update()
|
| 353 |
+
groupmainheadingFromArray = [item for item in merged_groupheadings if previous_header in item]
|
| 354 |
+
|
| 355 |
+
# Build the query parameters
|
| 356 |
+
params = {
|
| 357 |
+
'pdfLink': link, # Your PDF link
|
| 358 |
+
'keyword': NBS_heading, # Your keyword (could be a string or list)
|
| 359 |
+
}
|
| 360 |
+
|
| 361 |
+
# URL encode each parameter
|
| 362 |
+
encoded_params = {key: urllib.parse.quote(value, safe='') for key, value in params.items()}
|
| 363 |
+
|
| 364 |
+
# Construct the final encoded link
|
| 365 |
+
encoded_link = '&'.join([f"{key}={value}" for key, value in encoded_params.items()])
|
| 366 |
+
|
| 367 |
+
# Correctly construct the final URL with page and zoom
|
| 368 |
+
final_url = f"{baselink}{encoded_link}#page={str(pageNumberFound)}&zoom={zoom_str}"
|
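                                            # Hypothetical example of the resulting deep link (assuming
                                            # pageNumberFound=12 and zoom_str="200,56,140"):
                                            #   https://marthee-nbslink.hf.space/view-pdf?pdfLink=...&keyword=...#page=12&zoom=200,56,140
                                            # The fragment follows the common PDF open-parameters form
                                            # "page=N&zoom=scale,left,top", so a compatible viewer opens
                                            # scrolled and zoomed to the highlighted heading.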

                                            # Optionally, add the URL to the DataFrame
                                            if len(groupmainheadingFromArray) > 0:
                                                df = pd.concat([df, pd.DataFrame([{
                                                    "NBSLink": final_url,
                                                    "NBS": NBS_heading,
                                                    "head above 1": header2,
                                                    "head above 2": groupmainheadingFromArray[0]
                                                }])], ignore_index=True)

                                            print("Final URL:", final_url)

                                        if collecting_text:
                                            annot = page.add_highlight_annot(highlight_rect)
                                            annot.update()
        if current_line.strip():
            all_text.append(current_line.strip())  # Append the trailing line
    print(df)
    print(dictionaryNBS)
    xx = save_df_to_pdf(df)
    outputpdfFitz = fitz.open('pdf', xx)
    pdf_bytes = BytesIO()
    pdf_document.save(pdf_bytes)
    return pdf_bytes.getvalue(), pdf_document, df, outputpdfFitz
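
# A minimal usage sketch, assuming a Dropbox share link and NBS-style headings;
# the link and heading values below are hypothetical placeholders, and the
# Space's /view-pdf endpoint is assumed to resolve the generated deep links.
if __name__ == "__main__":
    demo_links = split_links("https://www.dropbox.com/s/abc123/spec.pdf?dl=0")
    demo_headings = ["F10 Brick/block walling", "F30 Accessories for masonry"]

    annotated_bytes, doc, links_df, links_table = annotate_text_from_pdf(demo_links, demo_headings)

    # Save both outputs to disk for inspection.
    with open("annotated.pdf", "wb") as fh:
        fh.write(annotated_bytes)
    links_table.save("links_table.pdf")
    print(links_df)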