Marthee committed on
Commit
80d514c
·
verified ·
1 Parent(s): d19d78e

Rename pdftotext.py to Find_Hyperlinking_text.py

Browse files
Files changed (2) hide show
  1. Find_Hyperlinking_text.py +282 -0
  2. pdftotext.py +0 -236
Find_Hyperlinking_text.py ADDED
@@ -0,0 +1,282 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import fitz # PyMuPDF
2
+ from io import BytesIO
3
+ import re
4
+ import requests
5
+ import pandas as pd
6
+ from collections import Counter
7
+ import fitz # PyMuPDF
8
+ import re
9
+
10
def normalize_text(text):
    """Return *text* lowercased, with runs of whitespace collapsed to single
    spaces and all punctuation removed (word characters and spaces kept)."""
    collapsed = re.sub(r'\s+', ' ', text.lower().strip())
    # Drop everything that is neither a word character nor whitespace.
    return re.sub(r'[^\w\s]', '', collapsed)
15
def get_repeated_texts(pdf_document, threshold=0.9):
    """Return the set of normalized text lines that occur on at least
    ``threshold`` of the document's pages (boilerplate such as running
    headers/footers).

    :param pdf_document: an opened PyMuPDF document.
    :param threshold: fraction of pages a line must appear on to be
        considered "repeated".
    """
    total_pages = pdf_document.page_count
    occurrences = Counter()

    for index in range(total_pages):
        raw_text = pdf_document.load_page(index).get_text("text")
        # A set per page so each distinct line counts at most once per page.
        page_lines = {normalize_text(ln) for ln in raw_text.splitlines() if ln.strip()}
        occurrences.update(page_lines)

    # A line qualifies when it appears on at least threshold * total_pages pages.
    needed = max(1, int(threshold * total_pages))
    return {line for line, seen in occurrences.items() if seen >= needed}
35
+
36
+
37
def split_links(links_string):
    """Break a comma-separated string of links into a list of trimmed links."""
    pieces = links_string.split(',')
    return list(map(str.strip, pieces))
40
def annotate_text_from_pdf(pdfshareablelinks, LISTheading_to_search):
    """
    Annotates text under a specific heading in a PDF, highlights it,
    and constructs zoom coordinates for the first occurrence of the heading.

    Args:
        pdfshareablelinks (list): List of shareable links to PDFs (only the
            first link is used — see below).
        LISTheading_to_search (list): Headings to search for in the PDF.

    Returns:
        tuple: (annotated PDF bytes, page number of the last heading found
        or None, zoom string "zoom,left,top" or None).
        NOTE(review): the original docstring claimed the second element was a
        "count of heading occurrences"; the code actually returns the page
        number of the last processed heading.
    """
    # NOTE(review): indentation below was reconstructed from a diff rendering
    # that stripped leading whitespace — confirm nesting against the original.
    print("Input links:", pdfshareablelinks)
    print(LISTheading_to_search)

    # Only the first shareable link is ever downloaded/annotated.
    link = pdfshareablelinks[0]
    pdf_content = None
    headings_TOC = []  # NOTE(review): never used afterwards
    # Modify Dropbox shareable link for direct download
    if link and ('http' in link or 'dropbox' in link):
        if 'dl=0' in link:
            link = link.replace('dl=0', 'dl=1')

        # Download the PDF content from the shareable link
        response = requests.get(link)
        pdf_content = BytesIO(response.content)  # Store the content in memory
    if pdf_content is None:
        raise ValueError("No valid PDF content found.")

    # Open the PDF using PyMuPDF
    pdf_document = fitz.open(stream=pdf_content, filetype="pdf")
    # Lines appearing on ~90% of pages (running headers/footers) — used to
    # avoid picking boilerplate as a section header.
    repeated_texts = get_repeated_texts(pdf_document)
    df = pd.DataFrame(columns=["NBS Link","NBS", 'head above 1', "head above 2"])
    dictionaryNBS={}  # heading -> [page number, zoom string]
    for NBSindex, heading_to_search in enumerate(LISTheading_to_search):
        if NBSindex == len(LISTheading_to_search) - 1:
            flagAllNBSvisited = True
        # Per-heading state, reset on every iteration.
        all_text = []
        current_line = ""
        collecting_text = False
        f10_count = 0  # number of times the heading has been matched so far
        current_y = None
        highlight_rect = None
        zoom_str = None
        toc_flag = False  # True while scanning the table-of-contents area
        span_font_goal = None
        span_size_goal = None
        pageNumberFound = None
        groupheadings = []
        merged_groupheadings = []
        collectheader2 = False
        header2 = ''
        header2_first_span_size = 0
        previous_header = ''
        next_span_text = ''
        current_line_span_size = 0
        # NOTE(review): this unconditionally undoes the `flagAllNBSvisited =
        # True` assignment made just above for the last heading — likely a bug
        # (and the flag is never read afterwards).
        flagAllNBSvisited = False

        text = ''
        heading_to_searchNBS = heading_to_search  # keep original spelling for reporting
        heading_words = heading_to_search.split()  # Split heading into words
        first_word = heading_words[0]  # First word to search for
        remaining_words = heading_words[1:]  # Remaining words to verify
        print(heading_words)
        heading_to_search = heading_to_search.replace(" ", "")

        # Process each page in the PDF
        for page_num in range(pdf_document.page_count):
            page = pdf_document.load_page(page_num)
            # Get page dimensions
            page_height = page.rect.height
            header_threshold = page_height * 0.1  # Top 10% of the page height
            footer_threshold = page_height * 0.9  # Bottom 10% of the page height

            # Extract text in dictionary format
            text_dict = page.get_text("dict")

            # Collect header y-coordinates to detect header area
            # NOTE(review): this overwrites the header_threshold computed above,
            # disabling header filtering — confirm which value is intended.
            header_threshold = 0  # Header area: top 10% of the page height
            current_line_text = ""
            previous_y = None
            # Process text blocks
            for block in text_dict['blocks']:
                for line_index, line in enumerate(block.get('lines', [])):
                    spans = line.get('spans', [])
                    if spans and any(span['text'].strip() for span in spans):
                        for i, span in enumerate(spans):
                            span_text = span['text'].strip()
                            highlight_rect = span['bbox']
                            span_y = span['bbox'][1]
                            span_font = span['font']
                            span_size = span['size']

                            if previous_y is None:
                                previous_y = span_y  # Initialize on first span

                            # If same Y coordinate as previous, append to the current line
                            if abs(span_y - previous_y) < 5:  # Allow a small margin for OCR variations
                                current_line_text += " " + span_text
                                current_line_text = normalize_text(current_line_text)
                                current_line_span_size = span_size
                            else:
                                # Store the complete line and reset for the new line
                                if current_line_text.strip():
                                    all_text.append(current_line_text.strip())

                                current_line_text = span_text  # Start a new line
                                previous_y = span_y  # Update the reference Y
                            text = span_text
                            # Stop collecting when a new heading-like span (same
                            # font/size as the matched heading, starting with a
                            # digit) is reached.
                            if collecting_text and span_font == span_font_goal and span_size == span_size_goal and span_text[0].isdigit():
                                print(f"Ending collection at heading: {span_text}")
                                print("merged_groupheadings:", merged_groupheadings)
                                collecting_text = False
                                continue
                            if collecting_text:
                                annot = page.add_highlight_annot(highlight_rect)
                                annot.update()

                            # Entering the table of contents ("Contents" page).
                            if 'Content' in span_text:
                                toc_flag = True
                                TOC_start = span_text
                                print('content', TOC_start, span_size)

                            if toc_flag:
                                if 'Content' not in span_text:
                                    if current_y is None:
                                        current_y = span_y
                                        current_size = span_size  # Initialize the reference span size
                                    # Check if the current span size deviates significantly
                                    if abs(span_size - current_size) > 1:  # Threshold for size difference
                                        toc_flag = False

                                    if abs(current_y - span_y) < 5:  # Allowing more flexibility for multi-line headings
                                        current_line += " " + span_text  # Keep accumulating text
                                    else:
                                        if current_line.strip():  # Only process non-empty lines
                                            # Capture TOC entry text up to the dot leaders.
                                            pattern = r"^([A-Za-z0-9\s\/\-,]+)(?=\.+)"
                                            match = re.match(pattern, current_line.strip())

                                            if match:
                                                groupheadings.append(match.group(1).strip())
                                        current_line = span_text
                                        current_y = span_y
                                        current_size = span_size  # Update reference span size
                            # Merge wrapped TOC entries into full headings: a new
                            # heading starts with letter + two digits (e.g. "F10 ").
                            if len(groupheadings) > 0:
                                pattern = re.compile(r"^[A-Za-z]\d{2} ")  # Match headings starting with letter + 2 digits
                                merged_groupheadings = []
                                current_item = None  # Start as None to avoid an initial blank entry

                                for item in groupheadings:
                                    if pattern.match(item):  # If item starts with correct pattern, it's a new heading
                                        if current_item:  # Append only if current_item is not empty
                                            merged_groupheadings.append(current_item.strip())
                                        current_item = item  # Start new heading
                                    else:
                                        if current_item:
                                            current_item += " " + item  # Merge with previous heading

                                # Append last merged item after loop
                                if current_item:
                                    merged_groupheadings.append(current_item.strip())
                            if span_text == first_word:
                                print('First word found:', span_text)
                                # Check if it's not the last span in the current line
                                print(i + 1, len(spans))
                                if i + 1 < len(spans):
                                    next_span_text = (spans[i + 1]['text'].strip())
                                    # Check if the next span's text is in the heading list
                                    if next_span_text.replace(" ", "") in heading_to_search.replace(" ", ""):
                                        text = (span_text + ' ' + next_span_text)
                            # After processing the current line, check if there's a next line
                            if first_word == span_text:
                                if line_index + 1 < len(block.get('lines', [])):
                                    next_line = block['lines'][line_index + 1]
                                    # You can process the spans of the next line here
                                    for next_span in next_line.get('spans', []):
                                        next_span_text = next_span['text'].strip()
                                        text = span_text + ' ' + next_span_text
                            # Track the most recent large "X99"-style section header.
                            if len(merged_groupheadings) > 0:
                                if re.match(r"[A-Za-z]\d{2}", span_text) and span_size > 10:
                                    previous_header = span_text  # Store last detected header
                                    print('previous_header', span_text)
                            groupmainheadingFromArray = [item for item in merged_groupheadings if previous_header in item]

                            if previous_header:
                                if not collectheader2:
                                    if header2_first_span_size == 0:
                                        spanSizeHeader = 10
                                    else:
                                        spanSizeHeader = header2_first_span_size

                                    # NOTE(review): the loop variable `item` is
                                    # shadowed by the inner comprehension and the
                                    # loop body does not otherwise use it, so this
                                    # repeats identical work per element — likely a bug.
                                    for item in groupmainheadingFromArray:
                                        if not any(normalize_text(current_line_text) in normalize_text(item) for item in groupmainheadingFromArray):
                                            if span_size >= spanSizeHeader:
                                                # Bold, non-numeric, non-boilerplate line
                                                # is taken as the intermediate header.
                                                if not re.match(r"^\d{2}", current_line_text) and current_line_text not in repeated_texts and "Bold" in span["font"] :
                                                    if len(header2) > 0:
                                                        header2_first_span_size = span_size
                                                    header2 = current_line_text
                                                    print('header2', header2, span_size, spanSizeHeader)

                            trimmed_text = text.replace(" ", "")
                            if len(text) > 0:
                                if text.split()[0] in heading_words:
                                    if len(trimmed_text) > 0 and (heading_to_search.replace(" ", "") in trimmed_text):
                                        print(trimmed_text, heading_to_search)
                                        f10_count += 1
                                        # NOTE(review): comment says "second occurrence"
                                        # but the condition triggers on the FIRST match.
                                        # Start collecting text under the second occurrence of the heading
                                        if f10_count == 1:
                                            collecting_text = True
                                            print(f"Starting collection under heading: {text}, {span_font}, {span_size}")
                                            collectheader2 = True
                                            NBS_heading = heading_to_searchNBS
                                            x0, y0, x1, y1 = highlight_rect

                                            span_font_goal = span_font  # Capture the font at the first heading match
                                            span_size_goal = span_size  # Capture the size at the first heading match
                                            zoom = 200
                                            left = int(x0)
                                            top = int(y0)
                                            zoom_str = f"{zoom},{left},{top}"
                                            pageNumberFound = page_num + 1
                                            dictionaryNBS[heading_to_searchNBS] = [pageNumberFound, zoom_str]

                                            annot = page.add_highlight_annot(highlight_rect)
                                            annot.update()
                                            groupmainheadingFromArray = [item for item in merged_groupheadings if previous_header in item]

                                            if len(groupmainheadingFromArray) > 0:
                                                df = pd.concat([df, pd.DataFrame([{"NBS": NBS_heading, 'head above 1': header2, "head above 2": groupmainheadingFromArray[0]}])], ignore_index=True)
                            # Highlight the text
                            if collecting_text:
                                annot = page.add_highlight_annot(highlight_rect)
                                annot.update()
        # NOTE(review): all_text is a LIST, so `+= str` extends it one character
        # at a time — almost certainly meant `all_text.append(...)`.
        if current_line.strip():
            all_text += current_line.strip() + '\n'  # Append the current line
    print(df)
    print(dictionaryNBS)
    # Save the annotated PDF to bytes
    pdf_bytes = BytesIO()
    pdf_document.save(pdf_bytes)
    pdf_document.close()

    return pdf_bytes.getvalue(), pageNumberFound, zoom_str
pdftotext.py DELETED
@@ -1,236 +0,0 @@
1
- import fitz # PyMuPDF
2
- from io import BytesIO
3
- import re
4
- import requests
5
-
6
- def split_links(links_string):
7
- # Remove any extra whitespace around each link after splitting
8
- links_array = [link.strip() for link in links_string.split(',')]
9
- return links_array
10
-
11
- def texts_from_pdf(pdfshareablelinks, heading_to_search):
12
- print('intexts',pdfshareablelinks)
13
-
14
-
15
- pdfshareablelinks=split_links(pdfshareablelinks)
16
-
17
- # Case 1: If it's a shareable link
18
- for link in pdfshareablelinks:
19
- pdf_content = None
20
-
21
- if link and ('http' in link or 'dropbox' in link):
22
- # Modify Dropbox link for direct download
23
- if 'dl=0' in link:
24
- link = link.replace('dl=0', 'dl=1')
25
-
26
- # Download the PDF content from the shareable link
27
- response = requests.get(link)
28
- pdf_content = BytesIO(response.content) # Store the content in memory
29
- print('Downloaded from shareable link.')
30
- # elif dbpdfpath:
31
- # dbxTeam = tsadropboxretrieval.ADR_Access_DropboxTeam('user')
32
- # print('Dropbox team access initialized.')
33
- # md, res = dbxTeam.files_download(path=dbpdfpath)
34
- # pdf_content = BytesIO(res.content) # Store the content in memory
35
- # print('Downloaded from Dropbox path.')
36
-
37
- # Check if the PDF content is available
38
- if pdf_content is None:
39
- raise ValueError("No valid PDF content found.")
40
-
41
- # Open the PDF using fitz (PyMuPDF) directly from memory
42
- pdf_document = fitz.open(stream=pdf_content, filetype="pdf")
43
- print('PDF opened in memory.')
44
-
45
- all_text = "" # Initialize a string to store all text
46
- current_line = "" # To build the current line
47
- collecting_text = False # Track whether we're currently collecting text under the heading
48
- f10_count = 0 # Counter for F10 headings
49
- current_y = None # To track the y-coordinate
50
-
51
- # Define a regex pattern to match headings
52
- heading_pattern = re.compile(r"[A-Za-z]\d{2}") # Heading pattern (letter followed by two numbers)
53
-
54
- # Loop through each page in the PDF
55
- for page_num in range(pdf_document.page_count):
56
- page = pdf_document.load_page(page_num)
57
-
58
- # Get text as dictionary to extract lines
59
- text_dict = page.get_text("dict")
60
-
61
- # Collect header y-coordinates to determine header area
62
- header_y_values = []
63
-
64
- # First pass to collect y-coordinates for detecting header area
65
- for block in text_dict['blocks']:
66
- if 'lines' in block: # Check if 'lines' key exists
67
- for line in block['lines']:
68
- for span in line['spans']:
69
- header_y_values.append(span['bbox'][1]) # Collect top y-coordinates of spans
70
-
71
- # Determine a threshold for the header area (e.g., top 20% of the page height)
72
- header_threshold = min(header_y_values) + (page.rect.height * 0.1) # Adding 10% for a buffer
73
- # print(f"Header threshold for page {page_num + 1}: {header_threshold}")
74
-
75
- # Iterate over blocks, lines, and spans to extract lines of text
76
- for block in text_dict['blocks']:
77
- if 'lines' in block: # Check if 'lines' key exists
78
- for line in block['lines']:
79
- for span in line['spans']:
80
- span_text = span['text'].strip()
81
- span_y = span['bbox'][1] # Get the top y-coordinate of the span
82
-
83
- # Check if it's a heading based on the format
84
- if heading_pattern.match(span_text):
85
- if heading_to_search in span_text:
86
- f10_count += 1 # Increment the F10 counter
87
-
88
- # Start collecting text under the second occurrence of F10
89
- if f10_count == 2:
90
- collecting_text = True # Start collecting text
91
- print(f"Starting collection under heading: {span_text}")
92
-
93
- # Stop collecting text if we reach a new heading
94
- if collecting_text:
95
- # If we encounter a new heading, we stop the collection
96
- if heading_pattern.match(span_text) and span_text != heading_to_search:
97
- print(f"Ending collection at heading: {span_text}")
98
- collecting_text = False # Stop collecting
99
-
100
- return all_text.strip() # Return collected text
101
-
102
- # If we're collecting text, add it to the output
103
- if collecting_text:
104
- # Exclude spans that fall within the header area
105
- if span_y < header_threshold:
106
- continue # Skip spans in the header area
107
-
108
- if current_y is None:
109
- current_y = span_y # Initialize the first y-coordinate
110
-
111
- # Check if the current span belongs to the same line (based on y-coordinate)
112
- if abs(current_y - span_y) < 2: # Threshold to determine if it's the same line
113
- current_line += " " + span_text # Add span text to the current line
114
- else:
115
- # If it's a new line, append the current line to all_text
116
- all_text += current_line.strip() + '\n' # Add line to all_text with a newline
117
- current_line = span_text # Start the new line with the current span
118
- current_y = span_y # Update the y-coordinate for the new line
119
-
120
- # Append the current line if we hit a new line at the end of the page
121
- if current_line:
122
- all_text += current_line.strip() + '\n'
123
- current_line = "" # Reset for the next line
124
-
125
- # print(f"\nCollected Text:\n{all_text.strip()}")
126
- return all_text.strip() if f10_count > 1 else "Heading not found"
127
-
128
-
129
- def apiFiltering(apitext):
130
-
131
- filtered_items = []
132
-
133
- for item in apitext:
134
- project_template_details = item.get('projecttemplatedetails', [])
135
- for detail in project_template_details:
136
- filtered_items.append({
137
- "id": detail.get('id'),
138
- "projecttemplateid": detail.get('projecttemplateid'),
139
- "bqcode": detail.get('bqcodelibrary', {}).get('bqcode')
140
- })
141
- return filtered_items
142
- # import fitz
143
-
144
- # import tsadropboxretrieval
145
- # from io import BytesIO
146
- # import requests
147
- # def texts_from_pdf(pdfshareablelink):
148
- # print('intexts')
149
-
150
- # pdf_content = None
151
-
152
- # # Case 1: If it's a shareable link
153
- # if pdfshareablelink and ('http' in pdfshareablelink or 'dropbox' in pdfshareablelink):
154
- # # Modify Dropbox link for direct download
155
- # if 'dl=0' in pdfshareablelink:
156
- # pdfshareablelink = pdfshareablelink.replace('dl=0', 'dl=1')
157
-
158
- # # Download the PDF content from the shareable link
159
- # response = requests.get(pdfshareablelink)
160
- # pdf_content = BytesIO(response.content) # Store the content in memory
161
- # print('Downloaded from shareable link.')
162
-
163
- # # Case 2: If it's a Dropbox path, use the Dropbox API to download
164
- # elif dbpdfpath:
165
- # dbxTeam = tsadropboxretrieval.ADR_Access_DropboxTeam('user')
166
- # print('Dropbox team access initialized.')
167
- # md, res = dbxTeam.files_download(path=dbpdfpath)
168
- # pdf_content = BytesIO(res.content) # Store the content in memory
169
- # print('Downloaded from Dropbox path.')
170
-
171
- # # Check if the PDF content is available
172
- # if pdf_content is None:
173
- # raise ValueError("No valid PDF content found.")
174
-
175
- # # Open the PDF using fitz (PyMuPDF) directly from memory
176
- # pdf_document = fitz.open(stream=pdf_content, filetype="pdf")
177
- # print('PDF opened in memory.')
178
-
179
- # all_text = "" # Initialize a string to store all text
180
- # current_line = "" # To build the current line
181
- # current_y = None # Track the y-coordinate of the current line
182
-
183
- # # Loop through each page in the PDF
184
- # for page_num in range(pdf_document.page_count):
185
- # page = pdf_document.load_page(page_num)
186
-
187
- # # Get text as dictionary to extract lines
188
- # text_dict = page.get_text("dict")
189
-
190
- # # Iterate over blocks, lines, and spans to extract lines of text
191
- # for block in text_dict['blocks']:
192
- # if 'lines' in block: # Check if 'lines' key exists
193
- # for line in block['lines']:
194
- # for span in line['spans']:
195
- # span_text = span['text'].strip()
196
- # span_y = span['bbox'][1] # Y-coordinate of the span (bbox[1] is the top y-coordinate)
197
-
198
- # # Check if the current span belongs to the same line (based on y-coordinate)
199
- # if current_y is None:
200
- # current_y = span_y # Initialize the first y-coordinate
201
-
202
- # if abs(current_y - span_y) < 2: # Threshold to determine if it's the same line
203
- # # If the y-coordinate is close enough, add to the current line
204
- # current_line += " " + span_text
205
- # else:
206
- # # If it's a new line, append the current line and reset
207
- # all_text += current_line.strip() + '\n' # Add line to all_text with a newline
208
- # current_line = span_text # Start the new line with the current span
209
- # current_y = span_y # Update the y-coordinate for the new line
210
-
211
- # # Append the last line of the page (if there's any)
212
- # if current_line:
213
- # all_text += current_line.strip() + '\n'
214
- # current_line = "" # Reset after each page
215
- # # all_text = all_text.replace('\n', ' ')
216
- # # return all_lines
217
- # print(all_text)
218
- # return all_text
219
- # # print('intexts')
220
- # # dbxTeam= tsadropboxretrieval.ADR_Access_DropboxTeam('user')
221
- # # print('dbdone')
222
- # # md, res =dbxTeam.files_download(path=dbpdfpath)
223
- # # print('downloaded')
224
- # # dataDoc = res.content
225
- # # print('l')
226
- # # pdf_document = fitz.open('pdf',dataDoc)
227
- # # print('k')
228
- # # alltexts=''
229
- # # for page_num in range(pdf_document.page_count):
230
- # # page = pdf_document[page_num]
231
- # # text_instances = page.get_text()
232
- # # alltexts+=text_instances
233
-
234
- # # # alltexts = alltexts.replace('\n', ' ')
235
- # # return alltexts
236
-