File size: 39,760 Bytes
0a9f9c2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
import os
import io
from pathlib import Path
from typing import List, Dict, Any, Optional, Tuple, Union
import hashlib
import time
import streamlit as st
from config import Config

class BulletproofDocumentProcessor:
    """
    Bulletproof PDF processor designed for maximum compatibility and reliability.
    
    This processor implements a multi-strategy extraction approach with intelligent
    fallbacks, avoiding complex dependencies while ensuring robust text extraction
    from diverse PDF formats commonly found in HR documentation.
    
    Architecture:
    - Primary: Native text extraction using minimal libraries
    - Secondary: Byte-level pattern matching for encoded content
    - Tertiary: Manual content stream parsing for complex PDFs
    - Fallback: User-guided content input for problematic files
    """
    
    def __init__(self):
        """Set up configuration, the embedding backend, and extraction counters."""
        self.config = Config()
        self.embedding_model = self._initialize_embedding_engine()
        # Counters maintained by _record_success / _record_failure.
        self.extraction_stats = dict(
            attempts=0,
            successes=0,
            method_effectiveness={},
        )
        
    def _initialize_embedding_engine(self):
        """
        Initialize the sentence-embedding engine, degrading gracefully on failure.

        Returns:
            A SentenceTransformer instance pinned to CPU, or None when the
            library or model cannot be loaded (the app then continues with
            reduced functionality instead of crashing).
        """
        try:
            # Fix: install the warning filter BEFORE constructing the model,
            # so benign torch.classes warnings emitted during model load are
            # suppressed too. (Previously the filter was added only after the
            # model had already been built, missing load-time warnings.)
            import warnings
            warnings.filterwarnings("ignore", message=".*torch.classes.*")

            from sentence_transformers import SentenceTransformer

            # Lightweight model on CPU: least likely to trigger torch/CUDA issues.
            return SentenceTransformer('all-MiniLM-L6-v2', device='cpu')

        except Exception as embedding_error:
            st.warning(f"Embedding model initialization issue: {str(embedding_error)}")
            st.info("πŸ“Œ System will continue with basic functionality. Some features may be limited.")
            return None
    
    def extract_text_from_pdf(self, pdf_file) -> Optional[str]:
        """
        Extract text from a PDF by escalating through extraction strategies.

        Each strategy is tried in order of reliability; the first one whose
        output passes the quality gate wins. Progress is reported through
        Streamlit messages as each strategy runs.

        Args:
            pdf_file: PDF file object or path.

        Returns:
            Post-processed extracted text, or None when every strategy fails
            (in which case failure guidance is rendered to the user).
        """
        self.extraction_stats['attempts'] += 1

        # Ordered from most to least reliable.
        strategies = (
            ('PyPDF2_Enhanced', self._extract_pypdf2_enhanced),
            ('ByteLevel_Analysis', self._extract_byte_level),
            ('Pattern_Matching', self._extract_pattern_based),
            ('Manual_Parsing', self._extract_manual_streams),
        )

        for name, method in strategies:
            try:
                st.info(f"πŸ”„ Executing {name} extraction...")

                # Each attempt gets a fresh view of the file.
                self._reset_file_pointer(pdf_file)

                text = self._execute_with_timeout(method, pdf_file, timeout_seconds=30)

                if not self._validate_extraction_quality(text):
                    st.warning(f"⚠️ {name} extracted insufficient content")
                    continue

                self._record_success(name)
                st.success(f"βœ… {name} extraction successful!")
                return self._post_process_extracted_text(text)

            except Exception as err:
                st.warning(f"⚠️ {name} failed: {str(err)}")
                self._record_failure(name, str(err))

        # Nothing worked: show diagnostics and a manual-input fallback.
        self._handle_extraction_failure(pdf_file)
        return None
    
    def _extract_pypdf2_enhanced(self, pdf_file) -> str:
        """
        Enhanced PyPDF2 extraction with robust error handling and encoding management.

        Tries several PdfReader configurations (strict/lenient, with and
        without an empty password) until one opens the file, then extracts
        text page by page, tolerating individual page failures.

        Args:
            pdf_file: PDF file object or path.

        Returns:
            Extracted text with "--- Page N ---" separators between pages.

        Raises:
            Exception: when PyPDF2 is missing, no reader configuration works,
                or no page yields readable content.
        """
        try:
            import PyPDF2
            
            # Prepare PDF reader with enhanced configuration
            pdf_data = self._read_pdf_data(pdf_file)
            
            # Create reader with multiple fallback configurations
            # (tried in order; the first that opens the file wins).
            reader_configs = [
                {'strict': False, 'password': None},
                {'strict': True, 'password': None},
                {'strict': False, 'password': ''}  # Some PDFs have empty passwords
            ]
            
            pdf_reader = None
            for config in reader_configs:
                try:
                    pdf_reader = PyPDF2.PdfReader(
                        io.BytesIO(pdf_data), 
                        strict=config['strict']
                    )
                    # Only attempt decryption when this config supplies a
                    # password (None means "don't try to decrypt").
                    if pdf_reader.is_encrypted and config['password'] is not None:
                        pdf_reader.decrypt(config['password'])
                    break
                except Exception:
                    continue
            
            if not pdf_reader:
                raise Exception("Could not initialize PDF reader with any configuration")
            
            # Extract text with page-level error handling
            text_fragments = []
            successful_pages = 0
            
            for page_index, page in enumerate(pdf_reader.pages):
                try:
                    # Multi-method text extraction per page
                    page_text = self._extract_page_text_robust(page, page_index)
                    
                    # Pages with 10 or fewer non-whitespace chars are treated
                    # as empty and skipped.
                    if page_text and len(page_text.strip()) > 10:
                        text_fragments.append(f"\n--- Page {page_index + 1} ---\n{page_text}")
                        successful_pages += 1
                        
                except Exception as page_error:
                    # Log page error but continue with other pages
                    st.warning(f"Page {page_index + 1} extraction failed: {str(page_error)}")
                    continue
            
            if successful_pages == 0:
                raise Exception("No pages yielded readable content")
            
            return '\n'.join(text_fragments)
            
        except ImportError:
            raise Exception("PyPDF2 library not available")
        except Exception as e:
            raise Exception(f"PyPDF2 extraction failed: {str(e)}")
    
    def _extract_page_text_robust(self, page, page_index: int) -> str:
        """
        Robust page-level text extraction with multiple fallback methods.
        
        This method implements several text extraction approaches for individual
        pages, ensuring maximum content recovery from diverse PDF structures.
        """
        # Primary extraction method
        try:
            text = page.extract_text()
            if text and len(text.strip()) > 10:
                return text
        except Exception:
            pass
        
        # Secondary extraction: access text objects directly
        try:
            if hasattr(page, 'get_contents') and page.get_contents():
                content_stream = page.get_contents()
                if hasattr(content_stream, 'get_data'):
                    stream_data = content_stream.get_data()
                    decoded_stream = stream_data.decode('latin-1', errors='ignore')
                    
                    # Extract text from stream using safe pattern matching
                    text = self._extract_from_content_stream(decoded_stream)
                    if text and len(text.strip()) > 10:
                        return text
        except Exception:
            pass
        
        # Tertiary extraction: character mapping approach
        try:
            return self._extract_via_character_mapping(page)
        except Exception:
            pass
        
        return ""
    
    def _extract_byte_level(self, pdf_file) -> str:
        """
        Byte-level PDF analysis for extracting text from structurally complex files.
        
        This method performs low-level byte analysis to identify and extract
        text content from PDFs that resist standard parsing methods.
        """
        pdf_data = self._read_pdf_data(pdf_file)
        
        # Multi-encoding text extraction strategy
        text_candidates = []
        
        # Strategy 1: Latin-1 decoding with pattern extraction
        try:
            decoded_content = pdf_data.decode('latin-1', errors='ignore')
            latin_text = self._extract_text_patterns(decoded_content)
            if latin_text:
                text_candidates.append(('latin-1', latin_text))
        except Exception:
            pass
        
        # Strategy 2: UTF-8 decoding with lenient error handling
        try:
            decoded_content = pdf_data.decode('utf-8', errors='ignore')
            utf8_text = self._extract_text_patterns(decoded_content)
            if utf8_text:
                text_candidates.append(('utf-8', utf8_text))
        except Exception:
            pass
        
        # Strategy 3: Windows-1252 encoding (common in office documents)
        try:
            decoded_content = pdf_data.decode('cp1252', errors='ignore')
            cp1252_text = self._extract_text_patterns(decoded_content)
            if cp1252_text:
                text_candidates.append(('cp1252', cp1252_text))
        except Exception:
            pass
        
        # Select best candidate based on content quality metrics
        if text_candidates:
            best_candidate = max(
                text_candidates, 
                key=lambda x: self._calculate_text_quality_score(x[1])
            )
            return best_candidate[1]
        
        raise Exception("Byte-level extraction found no readable content")
    
    def _extract_text_patterns(self, decoded_content: str) -> str:
        """
        Extract text using safe pattern matching without complex regex.

        Scans for parenthesized literals -- the form PDF uses for text-show
        operands such as ``(Hello) Tj`` -- with a manual depth-tracking scan
        so nested parentheses stay balanced, then keeps only fragments that
        pass the meaningfulness heuristic.

        Args:
            decoded_content: PDF bytes decoded to str (lossy decoding is fine).

        Returns:
            Space-joined meaningful fragments, or "" when none are found.
        """
        text_fragments = []
        
        # Extract content between parentheses (common PDF text marker)
        content_length = len(decoded_content)
        i = 0
        
        # The `- 1` bound means a '(' at the very last index is skipped;
        # harmless, since it could not enclose any content anyway.
        while i < content_length - 1:
            if decoded_content[i] == '(':
                # Found potential text start
                j = i + 1
                parenthesis_depth = 1
                extracted_fragment = ""
                
                # Extract until matching closing parenthesis
                while j < content_length and parenthesis_depth > 0:
                    char = decoded_content[j]
                    
                    if char == '(':
                        parenthesis_depth += 1
                    elif char == ')':
                        parenthesis_depth -= 1
                    
                    if parenthesis_depth > 0:
                        # Handle escape sequences
                        if char == '\\' and j + 1 < content_length:
                            next_char = decoded_content[j + 1]
                            if next_char in 'ntr\\()':
                                escape_map = {'n': '\n', 't': '\t', 'r': '\r', '\\': '\\', '(': '(', ')': ')'}
                                extracted_fragment += escape_map.get(next_char, next_char)
                                j += 2
                            else:
                                # Unknown escape: the backslash itself is
                                # dropped and the next character kept verbatim.
                                extracted_fragment += next_char
                                j += 2
                        else:
                            extracted_fragment += char
                            j += 1
                    else:
                        j += 1
                
                # Process extracted fragment
                cleaned_fragment = self._clean_text_fragment(extracted_fragment)
                if self._is_meaningful_text(cleaned_fragment):
                    text_fragments.append(cleaned_fragment)
                
                # Resume scanning just past the closing parenthesis.
                i = j
            else:
                i += 1
        
        return ' '.join(text_fragments) if text_fragments else ""
    
    def _extract_pattern_based(self, pdf_file) -> str:
        """
        Structure-aware extraction: run several pattern extractors and keep
        the highest-scoring result.

        Raises:
            Exception: when no extractor clears the 0.3 quality threshold.
        """
        content = self._read_pdf_data(pdf_file).decode('latin-1', errors='ignore')

        # Candidate extractors, each targeting a different PDF text encoding.
        extractors = (
            self._extract_bt_et_blocks,      # BT/ET text objects
            self._extract_tj_operations,     # Tj text-show operations
            self._extract_font_encoded_text, # font-encoded content
            self._extract_stream_objects,    # direct stream objects
        )

        best_text = ""
        best_score = 0
        for extractor in extractors:
            try:
                candidate = extractor(content)
                score = self._calculate_text_quality_score(candidate)
                if score > best_score:
                    best_text, best_score = candidate, score
            except Exception as pattern_error:
                st.warning(f"Pattern extraction method failed: {str(pattern_error)}")

        if best_score > 0.3:  # Minimum quality threshold
            return best_text

        raise Exception("Pattern-based extraction found no high-quality content")
    
    def _extract_bt_et_blocks(self, content: str) -> str:
        """Extract text from BT/ET (Begin Text/End Text) blocks."""

        def marker_positions(primary, fallback):
            # Mirrors the original scan order exactly: at each step prefer
            # the newline-terminated marker anywhere ahead of the cursor,
            # falling back to the space-terminated form only when the
            # newline form is exhausted.
            positions = []
            cursor = 0
            while True:
                hit = content.find(primary, cursor)
                if hit == -1:
                    hit = content.find(fallback, cursor)
                if hit == -1:
                    break
                positions.append(hit)
                cursor = hit + 1
            return positions

        bt_positions = marker_positions('BT\n', 'BT ')
        et_positions = marker_positions('ET\n', 'ET ')

        text_blocks = []
        for start in bt_positions:
            # Pair each BT with the first ET that follows it.
            end = next((pos for pos in et_positions if pos > start), None)
            if end is not None:
                block_text = self._extract_text_from_block(content[start:end])
                if block_text:
                    text_blocks.append(block_text)

        return ' '.join(text_blocks)
    
    def _extract_manual_streams(self, pdf_file) -> str:
        """
        Manual PDF stream parsing for maximum compatibility.

        Scans the raw bytes for ``stream``/``endstream`` pairs, attempts
        decompression of each payload, and mines the result for text.

        Args:
            pdf_file: PDF file object or path.

        Returns:
            Space-joined text mined from all streams.

        Raises:
            Exception: when the combined text is 50 characters or fewer.
        """
        pdf_data = self._read_pdf_data(pdf_file)
        
        # Identify and extract content streams.
        # NOTE(review): b'stream\r' is a prefix of b'stream\r\n', so the same
        # stream body may be scanned and collected twice across marker passes
        # -- confirm whether duplicated text matters downstream. Also,
        # endstream_markers is defined but never used (the search below uses
        # the bare b'endstream' literal).
        stream_markers = [b'stream\n', b'stream\r\n', b'stream\r']
        endstream_markers = [b'endstream', b'\nendstream', b'\rendstream']
        
        extracted_streams = []
        
        for stream_marker in stream_markers:
            start_pos = 0
            while True:
                stream_start = pdf_data.find(stream_marker, start_pos)
                if stream_start == -1:
                    break
                
                # Find corresponding endstream
                content_start = stream_start + len(stream_marker)
                stream_end = pdf_data.find(b'endstream', content_start)
                
                if stream_end != -1:
                    stream_content = pdf_data[content_start:stream_end]
                    
                    # Attempt to decompress if needed
                    decompressed_content = self._attempt_decompression(stream_content)
                    
                    # Extract text from stream
                    stream_text = self._extract_text_from_stream(decompressed_content)
                    if stream_text:
                        extracted_streams.append(stream_text)
                
                # Advance past this stream (or past the marker when no
                # endstream was found, to avoid an infinite loop).
                start_pos = stream_end + 1 if stream_end != -1 else stream_start + 1
        
        combined_text = ' '.join(extracted_streams)
        if len(combined_text.strip()) > 50:
            return combined_text
        
        raise Exception("Manual stream parsing found insufficient content")
    
    def _attempt_decompression(self, stream_content: bytes) -> bytes:
        """Attempt to decompress PDF stream content if compressed."""
        try:
            import zlib
            return zlib.decompress(stream_content)
        except:
            try:
                import gzip
                return gzip.decompress(stream_content)
            except:
                return stream_content  # Return as-is if decompression fails
    
    def _extract_text_from_stream(self, stream_content: bytes) -> str:
        """Extract text content from decompressed PDF stream."""
        try:
            decoded_stream = stream_content.decode('latin-1', errors='ignore')
            return self._extract_text_patterns(decoded_stream)
        except:
            return ""
    
    # Utility methods for robust extraction
    
    def _read_pdf_data(self, pdf_file) -> bytes:
        """Safely read PDF data from various input types."""
        if hasattr(pdf_file, 'read'):
            pdf_file.seek(0)
            data = pdf_file.read()
            pdf_file.seek(0)
            return data
        else:
            with open(pdf_file, 'rb') as f:
                return f.read()
    
    def _reset_file_pointer(self, pdf_file) -> None:
        """Reset file pointer if the file object supports it."""
        if hasattr(pdf_file, 'seek'):
            pdf_file.seek(0)
    
    def _clean_text_fragment(self, fragment: str) -> str:
        """Clean individual text fragments for better readability."""
        if not fragment:
            return ""
        
        # Remove non-printable characters
        printable_chars = []
        for char in fragment:
            if 32 <= ord(char) <= 126 or char in '\n\r\t':
                printable_chars.append(char)
            elif ord(char) > 126:  # Allow extended characters
                printable_chars.append(char)
            else:
                printable_chars.append(' ')
        
        cleaned = ''.join(printable_chars)
        
        # Normalize whitespace
        words = cleaned.split()
        return ' '.join(words) if words else ""
    
    def _is_meaningful_text(self, text: str) -> bool:
        """Determine if extracted text contains meaningful content."""
        if not text or len(text.strip()) < 3:
            return False
        
        # Check for reasonable character distribution
        alphanumeric_count = sum(1 for c in text if c.isalnum())
        total_chars = len(text.replace(' ', ''))
        
        if total_chars == 0:
            return False
        
        alphanumeric_ratio = alphanumeric_count / total_chars
        return alphanumeric_ratio > 0.3  # At least 30% alphanumeric
    
    def _calculate_text_quality_score(self, text: str) -> float:
        """Calculate quality score for extracted text."""
        if not text:
            return 0.0
        
        # Factors contributing to quality score
        length_score = min(len(text) / 1000, 1.0)  # Longer text generally better
        word_count = len(text.split())
        word_score = min(word_count / 100, 1.0)  # More words generally better
        
        # Check for common HR terms (bonus points)
        hr_terms = ['policy', 'employee', 'company', 'benefit', 'leave', 'work', 'staff']
        hr_term_count = sum(1 for term in hr_terms if term.lower() in text.lower())
        hr_bonus = min(hr_term_count * 0.1, 0.3)
        
        # Penalty for excessive repetition
        unique_words = len(set(text.lower().split()))
        repetition_penalty = max(0, (word_count - unique_words * 2) / word_count) if word_count > 0 else 0
        
        quality_score = (length_score * 0.3 + word_score * 0.4 + hr_bonus) * (1 - repetition_penalty)
        return min(quality_score, 1.0)
    
    def _validate_extraction_quality(self, text: str) -> bool:
        """Validate that extracted text meets minimum quality standards."""
        if not text or len(text.strip()) < 100:
            return False
        
        quality_score = self._calculate_text_quality_score(text)
        return quality_score > 0.3
    
    def _post_process_extracted_text(self, text: str) -> str:
        """Post-process extracted text for optimal readability."""
        if not text:
            return ""
        
        # Normalize line breaks and spacing
        lines = text.split('\n')
        processed_lines = []
        
        for line in lines:
            line = line.strip()
            if line and not line.startswith('---'):  # Remove page markers
                processed_lines.append(line)
        
        # Join lines with appropriate spacing
        result = '\n'.join(processed_lines)
        
        # Final cleanup
        while '\n\n\n' in result:
            result = result.replace('\n\n\n', '\n\n')
        
        return result.strip()
    
    def _execute_with_timeout(self, func, *args, timeout_seconds: int = 30):
        """Execute function with timeout protection."""
        # Simplified timeout implementation for basic protection
        start_time = time.time()
        try:
            result = func(*args)
            elapsed = time.time() - start_time
            if elapsed > timeout_seconds:
                st.warning(f"Operation took {elapsed:.1f}s (longer than expected)")
            return result
        except Exception as e:
            elapsed = time.time() - start_time
            if elapsed > timeout_seconds:
                raise Exception(f"Operation timed out after {elapsed:.1f}s")
            raise e
    
    def _record_success(self, method: str):
        """Record successful extraction for analytics."""
        self.extraction_stats['successes'] += 1
        if method not in self.extraction_stats['method_effectiveness']:
            self.extraction_stats['method_effectiveness'][method] = {'success': 0, 'total': 0}
        self.extraction_stats['method_effectiveness'][method]['success'] += 1
        self.extraction_stats['method_effectiveness'][method]['total'] += 1
    
    def _record_failure(self, method: str, error: str):
        """Record failed extraction for analytics."""
        if method not in self.extraction_stats['method_effectiveness']:
            self.extraction_stats['method_effectiveness'][method] = {'success': 0, 'total': 0}
        self.extraction_stats['method_effectiveness'][method]['total'] += 1
    
    def _handle_extraction_failure(self, pdf_file):
        """Render diagnostics and remediation advice after every strategy failed."""
        st.error("❌ All extraction methods failed. Comprehensive PDF analysis:")

        # Structural analysis drives both the report and the advice.
        findings = self._analyze_pdf_structure(pdf_file)

        left, right = st.columns(2)

        with left:
            st.markdown("**πŸ“Š PDF Analysis Results:**")
            for label, detail in findings.items():
                st.write(f"β€’ **{label}:** {detail}")

        with right:
            st.markdown("**πŸ› οΈ Recommended Solutions:**")
            for tip in self._generate_specific_solutions(findings):
                st.write(f"β€’ {tip}")

        # Last resort: let the user supply the document text manually.
        self._offer_manual_input_option()
    
    def _analyze_pdf_structure(self, pdf_file) -> Dict[str, str]:
        """Analyze PDF structure to provide specific guidance."""
        analysis = {}
        
        try:
            pdf_data = self._read_pdf_data(pdf_file)
            
            # Basic file analysis
            analysis['File Size'] = f"{len(pdf_data) / 1024:.1f} KB"
            analysis['PDF Version'] = self._detect_pdf_version(pdf_data)
            analysis['Encryption'] = 'Yes' if b'/Encrypt' in pdf_data else 'No'
            analysis['Images Present'] = 'Yes' if b'/Image' in pdf_data else 'No'
            analysis['Fonts Present'] = 'Yes' if b'/Font' in pdf_data else 'No'
            analysis['Text Objects'] = str(pdf_data.count(b'BT'))
            
            # Content type detection
            if pdf_data.count(b'BT') == 0 and b'/Image' in pdf_data:
                analysis['Content Type'] = 'Likely scanned/image-based'
            elif pdf_data.count(b'BT') > 0:
                analysis['Content Type'] = 'Text-based'
            else:
                analysis['Content Type'] = 'Unknown/Complex'
                
        except Exception as e:
            analysis['Analysis Error'] = str(e)
        
        return analysis
    
    def _detect_pdf_version(self, pdf_data: bytes) -> str:
        """Detect PDF version from header."""
        try:
            header = pdf_data[:20].decode('ascii', errors='ignore')
            if '%PDF-' in header:
                version_start = header.find('%PDF-') + 5
                version = header[version_start:version_start + 3]
                return version
        except:
            pass
        return 'Unknown'
    
    def _generate_specific_solutions(self, analysis: Dict[str, str]) -> List[str]:
        """Generate specific solutions based on PDF analysis."""
        solutions = []
        
        content_type = analysis.get('Content Type', '')
        encryption = analysis.get('Encryption', '')
        
        if 'scanned' in content_type.lower() or 'image' in content_type.lower():
            solutions.extend([
                "PDF appears to be scanned - use OCR software to convert to text",
                "Try Adobe Acrobat's 'Recognize Text' feature",
                "Consider re-creating document from original source"
            ])
        
        if encryption == 'Yes':
            solutions.append("Remove password protection before uploading")
        
        if analysis.get('Text Objects', '0') == '0':
            solutions.extend([
                "No text objects found - likely image-based content",
                "Export from original application (Word, Google Docs) as PDF"
            ])
        
        # Universal solutions
        solutions.extend([
            "Try 'Print to PDF' from any PDF viewer",
            "Use online PDF converter to optimize format",
            "Contact IT support for complex document conversion"
        ])
        
        return solutions
    
    def _offer_manual_input_option(self):
        """Render a manual paste-in fallback.

        Returns the stripped pasted text when the user submits more than
        100 characters, otherwise ``None``.
        """
        with st.expander("πŸ–ŠοΈ Manual Text Input (Last Resort)", expanded=False):
            st.markdown("""
            If automatic extraction fails, you can manually input key policy content:
            """)

            pasted = st.text_area(
                "Paste policy text here:",
                height=200,
                placeholder="Copy and paste the key content from your PDF here..."
            )

            if st.button("πŸ“ Process Manual Input") and pasted:
                cleaned = pasted.strip()
                if len(cleaned) > 100:
                    st.success("βœ… Manual input received! Processing...")
                    return cleaned
                st.warning("Please provide more substantial content (at least 100 characters)")

        return None
    
    # Required interface methods for compatibility
    
    def create_intelligent_chunks(self, text: str, metadata: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Create optimized text chunks for vector storage.

        Sentences are packed greedily up to ``config.CHUNK_SIZE``
        characters; a ``config.CHUNK_OVERLAP``-word tail of each chunk is
        carried into the next so context survives chunk boundaries.
        Chunks shorter than 100 characters are dropped as noise.
        """
        if not text or len(text.strip()) < 50:
            return []

        max_len = self.config.CHUNK_SIZE
        overlap_words = self.config.CHUNK_OVERLAP
        chunks: List[Dict[str, Any]] = []

        def emit(body: str) -> None:
            # Only keep chunks with enough substance to be useful.
            body = body.strip()
            if body and len(body) >= 100:
                chunks.append({
                    'content': body,
                    'metadata': {
                        **metadata,
                        'chunk_type': 'intelligent_semantic',
                        'chunk_index': len(chunks),
                        'extraction_method': 'bulletproof_processor'
                    }
                })

        buffer = ""
        for sentence in self._split_into_sentences_robust(text):
            candidate = f"{buffer} {sentence}".strip() if buffer else sentence

            if len(candidate) <= max_len:
                buffer = candidate
                continue

            # Chunk is full: flush it and seed the next one.
            emit(buffer)
            if overlap_words > 0 and buffer:
                tail = buffer.split()[-overlap_words:]
                buffer = " ".join(tail) + " " + sentence
            else:
                buffer = sentence

        # Flush whatever remains after the last sentence.
        emit(buffer)
        return chunks
    
    def _split_into_sentences_robust(self, text: str) -> List[str]:
        """Robust sentence splitting optimized for HR documents."""
        sentences = []
        current_sentence = ""
        
        # Enhanced sentence boundary detection
        sentence_endings = '.!?'
        abbreviations = {'Mr.', 'Mrs.', 'Dr.', 'Inc.', 'Corp.', 'Ltd.', 'Co.', 'etc.', 'vs.'}
        
        i = 0
        while i < len(text):
            char = text[i]
            current_sentence += char
            
            if char in sentence_endings:
                # Check if this is a real sentence ending
                is_sentence_end = True
                
                # Check for abbreviations
                words_before = current_sentence.strip().split()
                if words_before:
                    last_word = words_before[-1]
                    if last_word in abbreviations:
                        is_sentence_end = False
                
                # Check if followed by lowercase (likely abbreviation)
                if i + 1 < len(text) and text[i + 1].islower():
                    is_sentence_end = False
                
                if is_sentence_end and len(current_sentence.strip()) > 10:
                    sentences.append(current_sentence.strip())
                    current_sentence = ""
                elif char == '\n' and current_sentence.strip():
                    # Force sentence break on newlines
                    sentences.append(current_sentence.strip())
                    current_sentence = ""
            
            i += 1
        
        # Add final sentence
        if current_sentence.strip() and len(current_sentence.strip()) > 10:
            sentences.append(current_sentence.strip())
        
        return sentences
    
    def generate_embeddings(self, chunks: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Generate embeddings with robust error handling.

        Each chunk gains an ``embedding`` key; chunks whose encoding
        fails are kept with ``embedding=None`` and an ``embedding_error``
        so the pipeline never drops content.
        """
        if not chunks or not self.embedding_model:
            st.warning("⚠️ Embedding generation unavailable. Documents will be stored without embeddings.")
            return chunks

        results: List[Dict[str, Any]] = []
        progress_bar = st.progress(0)
        status_text = st.empty()
        total = len(chunks)

        for index, chunk in enumerate(chunks, start=1):
            try:
                progress_bar.progress(index / total)
                status_text.text(f"Generating embeddings... {index}/{total}")

                vector = self.embedding_model.encode(
                    chunk['content'],
                    normalize_embeddings=True,
                    show_progress_bar=False
                ).tolist()

                results.append({
                    **chunk,
                    'embedding': vector,
                    'embedding_model': 'all-MiniLM-L6-v2',
                    'processed_at': time.time()
                })

            except Exception as e:
                # Keep the chunk usable even when embedding fails.
                st.warning(f"Embedding generation failed for chunk {index - 1}: {str(e)}")
                results.append({
                    **chunk,
                    'embedding': None,
                    'embedding_error': str(e),
                    'processed_at': time.time()
                })

        progress_bar.empty()
        status_text.empty()
        return results
    
    def calculate_document_hash(self, pdf_file) -> str:
        """Return the SHA-256 hex digest of the PDF's raw bytes, used to
        de-duplicate previously processed documents."""
        return hashlib.sha256(self._read_pdf_data(pdf_file)).hexdigest()
    
    def process_document(self, pdf_file, filename: str) -> Optional[Dict[str, Any]]:
        """Complete document processing pipeline with comprehensive error handling.

        Pipeline stages: hash for de-duplication, bulletproof text
        extraction, intelligent chunking, then embedding generation.

        Args:
            pdf_file: Uploaded PDF file object.
            filename: Original name of the uploaded file.

        Returns:
            A processed-document dict (chunks, metadata, counters) or
            ``None`` when any stage fails.
        """
        try:
            # Calculate document hash for de-duplication
            doc_hash = self.calculate_document_hash(pdf_file)

            # BUGFIX: these status messages previously printed the literal
            # "(unknown)" instead of interpolating the real filename.
            st.info(f"πŸ“„ Processing {filename} with bulletproof extraction...")
            text_content = self.extract_text_from_pdf(pdf_file)

            if not text_content:
                st.error("❌ Could not extract readable content from PDF")
                return None

            # Comprehensive metadata attached to every chunk downstream.
            metadata = {
                'source': filename,
                'document_hash': doc_hash,
                'processed_at': time.time(),
                'content_length': len(text_content),
                'document_type': 'hr_policy',
                'extraction_stats': self.extraction_stats,
                'processor_version': 'bulletproof_v1.0'
            }

            st.info("🧩 Creating intelligent text chunks...")
            chunks = self.create_intelligent_chunks(text_content, metadata)

            if not chunks:
                st.error("❌ Failed to create meaningful chunks from document")
                return None

            st.info("🧠 Generating semantic embeddings...")
            enhanced_chunks = self.generate_embeddings(chunks)

            # Final document package consumed by the vector store layer.
            processed_doc = {
                'filename': filename,
                'document_hash': doc_hash,
                'metadata': metadata,
                'chunks': enhanced_chunks,
                'chunk_count': len(enhanced_chunks),
                'total_tokens': sum(len(chunk['content'].split()) for chunk in enhanced_chunks),
                'processing_time': time.time() - metadata['processed_at']
            }

            st.success(f"βœ… Successfully processed {filename} into {len(enhanced_chunks)} chunks")
            return processed_doc

        except Exception as e:
            st.error(f"❌ Document processing failed: {str(e)}")
            return None
    
    def validate_pdf_file(self, pdf_file) -> bool:
        """Comprehensive PDF validation with helpful feedback.

        Checks MIME type, size limits, and the ``%PDF`` magic bytes.
        Magic-byte read failures only warn and still return ``True`` so
        downstream extraction can attempt a best-effort parse.
        """
        try:
            # Reject anything the uploader did not flag as a PDF.
            if hasattr(pdf_file, 'type') and pdf_file.type != 'application/pdf':
                st.error("❌ Please upload a valid PDF file")
                return False

            # Guard against oversized or suspiciously tiny uploads.
            if hasattr(pdf_file, 'size'):
                if pdf_file.size > self.config.MAX_FILE_SIZE:
                    size_mb = self.config.MAX_FILE_SIZE / (1024*1024)
                    st.error(f"❌ File size exceeds {size_mb:.1f}MB limit")
                    return False
                if pdf_file.size < 100:
                    st.error("❌ File appears to be too small or corrupted")
                    return False

            # Magic-byte check; a read failure here is non-fatal.
            try:
                raw = self._read_pdf_data(pdf_file)
                if not raw.startswith(b'%PDF'):
                    st.error("❌ Invalid PDF file format")
                    return False
                st.success("βœ… PDF file validation passed")
                return True
            except Exception as validation_error:
                st.warning(f"⚠️ PDF validation warning: {str(validation_error)}")
                return True  # Allow processing to continue

        except Exception as e:
            st.error(f"❌ File validation failed: {str(e)}")
            return False

# Replace the previous DocumentProcessor with our bulletproof version
# Module-level alias: existing call sites that reference ``DocumentProcessor``
# transparently pick up the bulletproof implementation without changes.
DocumentProcessor = BulletproofDocumentProcessor