theerasin committed on
Commit 6289bae · verified · 1 Parent(s): 83dc3e4

Update app.py

Files changed (1)
  1. app.py +31 -19
app.py CHANGED
@@ -1,6 +1,5 @@
  import streamlit as st
- from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForQuestionAnswering
- from sentence_transformers import SentenceTransformer
+ from transformers import pipeline, AutoTokenizer, AutoModelForQuestionAnswering
  from pydantic import BaseModel, Field
  from typing import List
  from datetime import datetime
@@ -8,20 +7,24 @@ import PyPDF2
  from fpdf import FPDF
  from docx import Document
  import io
- import numpy as np
  from langchain_text_splitters import RecursiveCharacterTextSplitter
  from langchain_community.vectorstores import FAISS
  from langchain_core.documents import Document as LCDocument
  import time

- # === Load summarization model ===
+ # === Summarization model ===
  summarizer = pipeline("summarization", model="facebook/bart-large-cnn")

- # === Load QA pipeline ===
- qa_pipeline = pipeline("question-answering", model="deepset/roberta-base-squad2")
+ # === QA model ===
+ qa_tokenizer = AutoTokenizer.from_pretrained("deepset/roberta-base-squad2")
+ qa_model = AutoModelForQuestionAnswering.from_pretrained("deepset/roberta-base-squad2")
+ qa_pipeline = pipeline("question-answering", model=qa_model, tokenizer=qa_tokenizer)

- # === Load BGE-small embedding model ===
+ # === Embedding model ===
+ from sentence_transformers import SentenceTransformer
+ from langchain.embeddings import HuggingFaceEmbeddings
  embedding_model = SentenceTransformer("BAAI/bge-small-en-v1.5")
+ embedding_function = HuggingFaceEmbeddings(model=embedding_model)

  # === Data models ===
  class KeyPoint(BaseModel):
@@ -39,9 +42,20 @@ def extract_text_from_pdf(pdf_file):
  return "".join(page.extract_text() for page in pdf_reader.pages)

  def analyze_text_structured(text):
- result = summarizer(text, max_length=200, min_length=50, do_sample=False)[0]["summary_text"]
- key_points = [KeyPoint(point=line.strip()) for line in result.split(". ") if line.strip()]
- return DocumentAnalysis(summary=Summary(summary=result), key_points=key_points)
+ splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
+ chunks = splitter.split_text(text)
+
+ summaries = []
+ for chunk in chunks:
+ try:
+ result = summarizer(chunk, max_length=200, min_length=50, do_sample=False)
+ summaries.append(result[0]["summary_text"])
+ except Exception:
+ summaries.append("")
+
+ full_summary = " ".join(summaries)
+ key_points = [KeyPoint(point=line.strip()) for line in full_summary.split(". ") if line.strip()]
+ return DocumentAnalysis(summary=Summary(summary=full_summary), key_points=key_points)

  def json_to_text(analysis):
  text_output = "=== Summary ===\n" + f"{analysis.summary.summary}\n\n"
@@ -74,9 +88,9 @@ def create_word_report(analysis):
  return docx_bytes.getvalue()

  # === Streamlit UI ===
- st.set_page_config(page_title="Chat With PDF (BART + BGE + RoBERTa)", page_icon="📄")
+ st.set_page_config(page_title="Chat With PDF (BART + BGE)", page_icon="📄")
  st.title("📄 Chat With PDF")
- st.caption("Summarize and Chat with Documents using facebook/bart-large-cnn + BGE-small + RoBERTa QA")
+ st.caption("Summarize and Chat with Documents using facebook/bart-large-cnn + BGE-small Embeddings + RoBERTa QA")

  for key in ["current_file", "pdf_summary", "analysis_time", "pdf_report", "word_report", "vectorstore", "messages"]:
  if key not in st.session_state:
@@ -98,12 +112,10 @@ if uploaded_file is not None:
  analysis = analyze_text_structured(text)
  st.session_state.pdf_summary = analysis

- text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
- chunks = text_splitter.split_text(text)
+ splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
+ chunks = splitter.split_text(text)
  docs = [LCDocument(page_content=chunk) for chunk in chunks]
-
- vectors = embedding_model.encode([doc.page_content for doc in docs])
- st.session_state.vectorstore = FAISS.from_embeddings(docs, vectors)
+ st.session_state.vectorstore = FAISS.from_documents(docs, embedding_function)

  st.session_state.pdf_report = create_pdf_report(analysis)
  st.session_state.word_report = create_word_report(analysis)
@@ -143,12 +155,12 @@ if st.session_state.vectorstore is not None:
  with st.spinner("Searching..."):
  docs = st.session_state.vectorstore.similarity_search(prompt, k=3)
  context = "\n".join([doc.page_content for doc in docs])
- answer = qa_pipeline(question=prompt, context=context)["answer"]
+ answer = qa_pipeline({"question": prompt, "context": context})["answer"]
  st.markdown(answer)
  st.session_state.messages.append({"role": "assistant", "content": answer})

  if st.session_state.analysis_time is not None:
  st.markdown(
- f'<div style="text-align:center; margin-top:2rem; color:gray;">Analysis Time: {st.session_state.analysis_time:.1f}s | Embedding: BGE Small v1.5</div>',
+ f'<div style="text-align:center; margin-top:2rem; color:gray;">Analysis Time: {st.session_state.analysis_time:.1f}s | Embedding: BGE-small v1.5 | QA: RoBERTa-SQuAD2</div>',
  unsafe_allow_html=True
  )
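
Note on the embedding wiring introduced in this commit: FAISS.from_documents expects a LangChain embedding object, and the community HuggingFaceEmbeddings wrapper is normally constructed from a model name string (it loads the SentenceTransformer itself) rather than from an already-instantiated model as the `model=embedding_model` call above does. A minimal sketch of that pattern, assuming the langchain_community wrapper and the same BGE-small checkpoint; this is an illustration, not part of the commit:

# Sketch only: build the FAISS index the way FAISS.from_documents expects,
# letting the LangChain wrapper load "BAAI/bge-small-en-v1.5" internally.
from langchain_community.embeddings import HuggingFaceEmbeddings  # assumed wrapper, not the commit's import path
from langchain_community.vectorstores import FAISS
from langchain_core.documents import Document as LCDocument
from langchain_text_splitters import RecursiveCharacterTextSplitter

embedding_function = HuggingFaceEmbeddings(model_name="BAAI/bge-small-en-v1.5")

def build_vectorstore(text: str) -> FAISS:
    # Same chunking parameters as app.py
    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
    docs = [LCDocument(page_content=chunk) for chunk in splitter.split_text(text)]
    # from_documents embeds every chunk with embedding_function and indexes the vectors
    return FAISS.from_documents(docs, embedding_function)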