Update app.py
app.py CHANGED
@@ -11,16 +11,16 @@ from PIL import Image
 
 logging.basicConfig(level=logging.ERROR)
 device = -1  # CPU-only
-print("⚠️ CPU-only.
+print("⚠️ CPU-only. Using faster models!")
 
-# Load summarizer
+# Load faster summarizer
 try:
-    summarizer = pipeline("summarization", model="
+    summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6", device=device, torch_dtype=torch.float32)
 except Exception as e:
     print(f"❌ Summarizer model loading failed: {str(e)}")
     exit(1)
 
-# Load
+# Load QA model
 try:
     qa_pipeline = pipeline("question-answering", model="deepset/roberta-base-squad2", device=device)
 except Exception as e:
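The new code pins `device = -1`, which keeps both `pipeline(...)` calls on CPU. If the Space were later upgraded to GPU hardware, the usual transformers idiom is to pick the device at startup; a minimal sketch, assuming `torch` is importable (the `torch_dtype=torch.float32` argument in the diff suggests it already is):

    import torch
    from transformers import pipeline

    # Assumption: use GPU 0 when CUDA is available, otherwise CPU (-1),
    # following the integer device convention the pipeline API accepts.
    device = 0 if torch.cuda.is_available() else -1
    summarizer = pipeline(
        "summarization",
        model="sshleifer/distilbart-cnn-12-6",
        device=device,
    )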
@@ -62,8 +62,8 @@ def summarize_file(file_bytes):
     if not text.strip():
         return "❌ No text found", None
 
-    text = text[:
-    chunks = [text[i:i+
+    text = text[:200000]  # reduced slightly for speed
+    chunks = [text[i:i+1000] for i in range(0, len(text), 1000)][:5]  # smaller + more chunks
     summaries = []
 
     for i, chunk in enumerate(chunks):
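The rewritten chunking line slices the cleaned text into fixed 1,000-character windows and keeps at most the first five, so no more than 5,000 characters actually reach the summarizer even though `text` itself is capped at 200,000. A small self-contained illustration of the slicing:

    text = "x" * 2500  # toy stand-in for extracted PDF text

    # Non-overlapping 1000-character windows; range() steps by the window size,
    # and [:5] caps how many chunks are summarized.
    chunks = [text[i:i + 1000] for i in range(0, len(text), 1000)][:5]

    print([len(c) for c in chunks])  # [1000, 1000, 500]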
@@ -79,7 +79,7 @@ def summarize_file(file_bytes):
             chunk_result['status'] = 'skipped'
         else:
             try:
-                summary = summarizer(chunk, max_length=
+                summary = summarizer(chunk, max_length=50, min_length=10, do_sample=False)[0]['summary_text']
                 summaries.append(f"### Chunk {i+1}\n{summary}")
                 chunk_result['status'] = 'summarized'
             except Exception as e:
@@ -92,21 +92,29 @@ def summarize_file(file_bytes):
     formatted_chunks = "\n\n---\n\n".join(summaries)
     final_summary = f"""**Characters Processed**: {len(text)}
 **Total Time**: {time.time()-start:.2f} seconds
-
 ## 🔹 Summary by Chunks
-
 {formatted_chunks}
 """
     image = visualize_chunk_status(chunk_info)
     return final_summary, image
 
+def find_relevant_passages(text, question, num_passages=3):
+    passages = re.split(r'(?<=[.?!])\s+', text)
+    scored = []
+    for passage in passages:
+        score = sum(1 for word in question.lower().split() if word in passage.lower())
+        scored.append((score, passage))
+    scored.sort(reverse=True)
+    best_passages = " ".join([p for _, p in scored[:num_passages]])
+    return best_passages
+
 def answer_question(file_bytes, question):
     try:
         doc = fitz.open(stream=file_bytes, filetype="pdf")
         text = "".join(page.get_text("text") for page in doc)
         text = re.sub(r"\s+", " ", text).strip()
         text = "".join(c for c in text if ord(c) < 128)
-        context = text[:
+        context = text[:200000]
     except Exception as e:
         return f"❌ Text extraction failed: {str(e)}"
 
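The added `find_relevant_passages` is a plain keyword-overlap ranker: it splits the text on sentence-ending punctuation, counts how many words of the lowercased question occur in each sentence, and joins the top `num_passages` hits. A quick check with toy input:

    text = ("Paris is the capital of France. "
            "Berlin is the capital of Germany. "
            "The Seine flows through Paris.")
    # Both "capital" sentences outscore the third and are returned first.
    print(find_relevant_passages(text, "What is the capital of France?", num_passages=2))

Note that the matching is substring-based (`word in passage.lower()`), so short question words such as "is" also hit inside longer words like "Paris"; a tokenized comparison would be stricter, but as a cheap pre-filter for the QA context this heuristic is often enough.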
@@ -114,7 +122,8 @@ def answer_question(file_bytes, question):
         return "⚠️ Please enter a valid question."
 
     try:
-
+        relevant_context = find_relevant_passages(context, question)
+        result = qa_pipeline(question=question, context=relevant_context)
         return f"**Answer**: {result['answer']}\n\n**Score**: {result['score']:.2f}"
     except Exception as e:
         return f"❌ QA failed: {str(e)}"
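For reference, the extractive QA pipeline takes `question` and `context` keyword arguments and returns a dict with `answer`, `score`, and the `start`/`end` character offsets of the answer span inside the context; a minimal sketch:

    result = qa_pipeline(
        question="Which model answers questions?",
        context="The app loads deepset/roberta-base-squad2 to answer questions about the PDF.",
    )
    # result is a dict like {'score': ..., 'start': ..., 'end': ..., 'answer': ...}
    print(result["answer"], f"{result['score']:.2f}")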
@@ -128,7 +137,7 @@ summarizer_ui = gr.Interface(
         gr.Image(label="📊 Visual Process Flow", type="pil")
     ],
     title="📄 AI-Powered PDF Summarizer",
-    description="Summarizes long PDFs and visualizes chunk-level processing
+    description="Summarizes long PDFs and visualizes chunk-level processing."
 )
 
 # Q&A UI
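The trailing `# Q&A UI` comment indicates a second `gr.Interface` follows in the unchanged part of the file. A hypothetical sketch of how the two interfaces could be served together (the `qa_ui` definition and tab names here are assumptions, not taken from the diff):

    # Assumption: a Q&A interface mirroring summarizer_ui's binary-file input.
    qa_ui = gr.Interface(
        fn=answer_question,
        inputs=[gr.File(label="Upload PDF", type="binary"), gr.Textbox(label="Question")],
        outputs=gr.Markdown(label="Answer"),
        title="PDF Q&A",
    )

    demo = gr.TabbedInterface([summarizer_ui, qa_ui], ["Summarize", "Ask"])
    demo.launch()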