import pdfplumber
import os
# Source PDF and extracted-sentence output paths (Windows-style, relative to CWD).
INPUT_PDF = r"Data\English_CORE2000.pdf"
OUTPUT_TXT = r"Data\source_text.txt"
def extract_sentences_visually(input_pdf=INPUT_PDF, output_txt=OUTPUT_TXT):
    """Extract sample sentences from a PDF and write them to a text file.

    On each page, locates the 'Sample Sentence' column header, crops the
    region below/right of it, and keeps every extracted line longer than
    10 characters. Writes one sentence per line to *output_txt*.

    Parameters (backward compatible: defaults reproduce the old behavior):
        input_pdf: path of the PDF to read.
        output_txt: path of the text file to write.
    """
    all_sentences = []
    print(f"Opening {input_pdf}...")
    with pdfplumber.open(input_pdf) as pdf:
        for page in pdf.pages:
            words = page.extract_words()
            header_x = None
            header_bottom = None
            # Find the two-word 'Sample Sentence' header to anchor the crop.
            for j, word in enumerate(words):
                if word['text'] == 'Sample' and j + 1 < len(words):
                    if words[j + 1]['text'] == 'Sentence':
                        header_x = word['x0']
                        header_bottom = word['bottom']
                        break
            if header_x is None:
                continue  # page has no sample-sentence section
            # Crop everything below the header, with a 5pt margin on each edge.
            crop_box = (header_x - 5, header_bottom + 5, page.width, page.height)
            try:
                text_block = page.crop(crop_box).extract_text()
            except ValueError:
                # pdfplumber raises when the crop box falls outside the page
                # bounds; skip such pages rather than abort the whole run.
                continue
            if not text_block:
                continue
            for line in text_block.split('\n'):
                clean_line = line.strip()
                # Fragments of <= 10 chars are layout noise, not sentences.
                if len(clean_line) > 10:
                    all_sentences.append(clean_line)
    if not all_sentences:
        print("ERROR: Still found nothing.")
    else:
        with open(output_txt, "w", encoding="utf-8") as f:
            f.write("\n".join(all_sentences))
        print(f"Success! Extracted {len(all_sentences)} sentences to {output_txt}")
def clean_source_text(file_path):
    """Clean a sentence file in place.

    Strips the 'EnglishClass101.com' watermark from every line, drops blank
    lines, and removes lines with fewer than 4 words, then rewrites
    *file_path* and prints a summary.

    Parameters:
        file_path: path of the text file to clean (rewritten in place).
    """
    if not os.path.exists(file_path):
        print(f"Error: {file_path} not found.")
        return
    with open(file_path, 'r', encoding='utf-8') as f:
        lines = f.readlines()
    filtered_lines = []
    removed_count = 0
    for line in lines:
        stripped = line.replace('EnglishClass101.com', '').strip()
        if not stripped:
            # Blank (or watermark-only) lines are dropped silently and
            # intentionally not counted in removed_count.
            continue
        # BUGFIX: threshold is < 4 words, so 1-word lines are removed too;
        # the old summary text claimed only "2-3 words" were dropped.
        if len(stripped.split()) < 4:
            removed_count += 1
            continue
        filtered_lines.append(stripped)
    with open(file_path, 'w', encoding='utf-8') as f:
        f.write('\n'.join(filtered_lines))
    print("-" * 30)
    print("CLEANING COMPLETE")
    print(f"Original count: {len(lines)}")
    print(f"Removed: {removed_count} sentences (fewer than 4 words)")
    print(f"Remaining: {len(filtered_lines)}")
    print("-" * 30)
def analyze_lengths(file_path):
    """Print a word-count histogram for the sentences in *file_path*.

    Buckets non-blank lines by word count (2, 3, 4, 5, and >5 words) and
    prints the totals. Lines of 0-1 words count toward the total but fall
    into no bucket, matching the original report.
    """
    if not os.path.exists(file_path):
        print(f"Error: {file_path} not found.")
        return
    counts = {2: 0, 3: 0, 4: 0, 5: 0, "over_5": 0}
    total_sentences = 0
    with open(file_path, 'r', encoding='utf-8') as f:
        for raw in f:
            sentence = raw.strip()
            if not sentence:
                continue
            total_sentences += 1
            n_words = len(sentence.split())
            # Direct bucket for 2-5 words; everything longer goes to over_5.
            if n_words in (2, 3, 4, 5):
                counts[n_words] += 1
            elif n_words > 5:
                counts["over_5"] += 1
    print("ANALYSIS RESULT")
    print(f"Total Sentences: {total_sentences}")
    print(f"2 words: {counts[2]}")
    print(f"3 words: {counts[3]}")
    print(f"4 words: {counts[4]}")
    print(f"5 words: {counts[5]}")
    print(f"> 5 words: {counts['over_5']}")
    print("-" * 30)
# Pipeline: extract raw sentences from the PDF, clean the output file in
# place, then report the distribution of sentence lengths.
if __name__ == "__main__":
    extract_sentences_visually()
    clean_source_text(OUTPUT_TXT)
    analyze_lengths(OUTPUT_TXT)