Spaces:
Sleeping
Sleeping
File size: 4,793 Bytes
04ac6c3 72d2612 04ac6c3 da5250c cf6c640 72d2612 cf6c640 04ac6c3 72d2612 04ac6c3 72d2612 04ac6c3 72d2612 04ac6c3 72d2612 04ac6c3 72d2612 04ac6c3 72d2612 04ac6c3 72d2612 04ac6c3 72d2612 04ac6c3 72d2612 04ac6c3 72d2612 04ac6c3 b389215 72d2612 b389215 72d2612 b389215 04ac6c3 72d2612 04ac6c3 61c5b48 30476c0 61c5b48 30476c0 61c5b48 30476c0 61c5b48 30476c0 da5250c 30476c0 da5250c 30476c0 72d2612 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 |
from pptx import Presentation
from docx import Document
import pdfplumber
from reportlab.lib.pagesizes import letter
from pathlib import Path
import re
import pandas as pd
import pdfplumber
from concurrent.futures import ThreadPoolExecutor
from reportlab.platypus import SimpleDocTemplate, Paragraph
from reportlab.lib.styles import getSampleStyleSheet
def extract_text(file_path: Path, file_type: str) -> str:
    """Extract plain text from a document.

    Args:
        file_path: Path to the input file.
        file_type: One of "txt", "docx", "xlsx", "pptx", "pdf".

    Returns:
        The extracted text; an empty string for unrecognized file types.
    """
    if file_type == "txt":
        with open(file_path, "r", encoding="utf-8") as f:
            return f.read()
    if file_type == "docx":
        doc = Document(file_path)
        # Skip whitespace-only paragraphs so the output has no blank lines.
        return "\n".join(para.text for para in doc.paragraphs if para.text.strip())
    if file_type == "xlsx":
        df = pd.read_excel(file_path, engine="openpyxl")
        return df.to_string(index=False, header=False).strip()
    if file_type == "pptx":
        prs = Presentation(file_path)
        text_parts = []
        for slide in prs.slides:
            for shape in slide.shapes:
                if shape.has_text_frame:
                    text_parts.append(shape.text_frame.text.strip())
                elif shape.has_table:
                    for row in shape.table.rows:
                        for cell in row.cells:
                            if cell.text.strip():
                                text_parts.append(cell.text.strip())
        return "\n".join(text_parts)
    if file_type == "pdf":
        # NOTE: pdfplumber (pdfminer-based) objects are not thread-safe, so
        # pages of a single open PDF must not be extracted concurrently from
        # a ThreadPoolExecutor. Sequential extraction yields the same text
        # in the same page order.
        with pdfplumber.open(file_path) as pdf:
            text_parts = [page.extract_text_simple() or "" for page in pdf.pages]
        return "\n".join(part for part in text_parts if part).strip()
    # Unknown file type: nothing we know how to parse.
    return ""
def save_file(text: str, file_type: str, output_path: Path):
    """Write *text* to *output_path* in the requested format.

    Args:
        text: Content to persist; newline-separated lines.
        file_type: One of "docx", "xlsx", "pptx", "pdf"; anything else
            falls back to plain UTF-8 text output.
        output_path: Destination path for the generated file.
    """
    if file_type == "docx":
        doc = Document()
        doc.add_paragraph(text)
        doc.save(output_path)
    elif file_type == "xlsx":
        # One row per line of text, in a single "Content" column.
        df = pd.DataFrame(text.split("\n"), columns=["Content"])
        df.to_excel(output_path, index=False, engine="xlsxwriter")
    elif file_type == "pptx":
        prs = Presentation()
        slide_layout = prs.slide_layouts[1]
        text_lines = text.split('\n')
        # 25 lines per slide keeps the body placeholder from overflowing.
        chunks = [text_lines[i:i + 25] for i in range(0, len(text_lines), 25)]
        for chunk in chunks:
            slide = prs.slides.add_slide(slide_layout)
            content = slide.shapes.placeholders[1]
            text_frame = content.text_frame
            text_frame.clear()
            text_frame.text = "\n".join(chunk)
        prs.save(output_path)
    elif file_type == "pdf":
        from xml.sax.saxutils import escape  # local: only needed for PDF output

        doc = SimpleDocTemplate(str(output_path), pagesize=letter)
        styles = getSampleStyleSheet()
        # reportlab's Paragraph parses its text as intra-paragraph XML
        # markup; raw '<', '>' or '&' in the content would raise at build
        # time, so every line must be escaped first.
        flowables = [
            Paragraph(escape(line), styles["Normal"])
            for line in text.split("\n")
            if line.strip()
        ]
        doc.build(flowables)
    else:
        with open(output_path, "w", encoding="utf-8") as f:
            f.write(text)
def verify_summary(summary: str, original: str) -> str:
    """Simplified verification using word matching.

    Splits *summary* on '.' and keeps each sentence in which at least 30%
    of its words (counting every word, but only crediting matches for
    words of 3+ letters found in *original*) appear in the source text.
    Falls back to the first 500 characters of the summary when no
    sentence survives the filter.
    """
    haystack = original.lower()
    kept = []
    for raw in summary.split('.'):
        candidate = raw.strip()
        if not candidate:
            continue
        words = candidate.lower().split()
        # Credit only significant (3+ letter) words found in the original.
        hits = sum(1 for w in words if len(w) >= 3 and w in haystack)
        if words and hits / len(words) >= 0.3:
            kept.append(candidate)
    return '. '.join(kept) if kept else summary[:500]
def ensure_complete_sentences(text: str) -> str:
    """Trim *text* so that it ends on a complete sentence.

    Collapses runs of whitespace, keeps only sentences terminated by
    '.', '!' or '?', and truncates at the last terminator if the result
    would otherwise end mid-sentence. Returns "" for empty or non-string
    input; on any unexpected error the input is returned unchanged.
    """
    if not text or not isinstance(text, str):
        return ""
    try:
        normalized = ' '.join(text.split())
        pieces = re.split(r'(?<=[.!?])\s+', normalized)
        complete = []
        for piece in pieces:
            trimmed = piece.strip()
            if trimmed and piece[-1] in {'.', '!', '?'}:
                complete.append(trimmed)
        result = ' '.join(complete)
        if result.endswith(('.', '!', '?')):
            return result
        # Final safety check: cut back to the last terminator, if any.
        cut = max(result.rfind(ch) for ch in '.!?')
        if cut > 0:
            return result[:cut + 1]
        return result + '.' if result else ""
    except Exception:
        # Defensive: formatting must never break the caller.
        return text