theerasin committed
Commit 1c2fbbd · verified · 1 Parent(s): 3201029

Update app.py

Files changed (1)
  1. app.py +14 -31
app.py CHANGED
@@ -1,5 +1,6 @@
 import streamlit as st
-from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
+from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForQuestionAnswering
+from sentence_transformers import SentenceTransformer
 from pydantic import BaseModel, Field
 from typing import List
 from datetime import datetime
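Note on this hunk: after the refactor, the `Auto*` classes on the new import line are never referenced (both models are loaded through `pipeline(...)` below), so the import could likely be trimmed. A minimal sketch, assuming nothing else in app.py uses them:

```python
# Hypothetical trimmed imports: only pipeline() is called after the refactor.
from transformers import pipeline
from sentence_transformers import SentenceTransformer
```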
@@ -7,35 +8,21 @@ import PyPDF2
 from fpdf import FPDF
 from docx import Document
 import io
+import numpy as np
 from langchain_text_splitters import RecursiveCharacterTextSplitter
 from langchain_community.vectorstores import FAISS
 from langchain_core.documents import Document as LCDocument
-from langchain_core.embeddings import Embeddings
-from sentence_transformers import SentenceTransformer
 import time
 
 # === Load summarization model ===
-tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large-cnn")
-model = AutoModelForSeq2SeqLM.from_pretrained("facebook/bart-large-cnn")
+summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
 
 # === Load QA pipeline ===
-qa_pipeline = pipeline("question-answering", model="facebook/bart-large-cnn", tokenizer=tokenizer)
+qa_pipeline = pipeline("question-answering", model="deepset/roberta-base-squad2")
 
-# === Load SentenceTransformer embedding model ===
+# === Load BGE-small embedding model ===
 embedding_model = SentenceTransformer("BAAI/bge-small-en-v1.5")
 
-class CustomSentenceTransformer(Embeddings):
-    def __init__(self, model):
-        self.model = model
-
-    def embed_documents(self, texts):
-        return self.model.encode(texts, show_progress_bar=False).tolist()
-
-    def embed_query(self, text):
-        return self.model.encode(text, show_progress_bar=False).tolist()
-
-embedding_function = CustomSentenceTransformer(embedding_model)
-
 # === Data models ===
 class KeyPoint(BaseModel):
     point: str = Field(description="A key point extracted from the document.")
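Note on this hunk: swapping the QA model to deepset/roberta-base-squad2 makes sense, since facebook/bart-large-cnn is a summarization model and was an odd fit for extractive QA. One caveat: a Streamlit script re-executes top to bottom on every interaction, so loading all three models at module level reloads the weights each time. `st.cache_resource` is the usual remedy; a minimal sketch (the `load_models` helper is illustrative, not part of this commit):

```python
# Sketch: cache the heavy model objects across Streamlit reruns.
# Model names are taken from the commit; the helper name is hypothetical.
import streamlit as st
from transformers import pipeline
from sentence_transformers import SentenceTransformer

@st.cache_resource
def load_models():
    summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
    qa = pipeline("question-answering", model="deepset/roberta-base-squad2")
    embedder = SentenceTransformer("BAAI/bge-small-en-v1.5")
    return summarizer, qa, embedder

summarizer, qa_pipeline, embedding_model = load_models()
```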
@@ -52,14 +39,9 @@ def extract_text_from_pdf(pdf_file):
     return "".join(page.extract_text() for page in pdf_reader.pages)
 
 def analyze_text_structured(text):
-    inputs = tokenizer([text], max_length=1024, truncation=True, return_tensors="pt")
-    summary_ids = model.generate(
-        inputs["input_ids"], num_beams=4, length_penalty=2.0,
-        max_length=200, min_length=50, early_stopping=True
-    )
-    summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
-    key_points = [KeyPoint(point=line.strip()) for line in summary.split(". ") if line.strip()]
-    return DocumentAnalysis(summary=Summary(summary=summary), key_points=key_points)
+    result = summarizer(text, max_length=200, min_length=50, do_sample=False)[0]["summary_text"]
+    key_points = [KeyPoint(point=line.strip()) for line in result.split(". ") if line.strip()]
+    return DocumentAnalysis(summary=Summary(summary=result), key_points=key_points)
 
 def json_to_text(analysis):
     text_output = "=== Summary ===\n" + f"{analysis.summary.summary}\n\n"
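Note on this hunk: the deleted code truncated input to BART's 1024-token window explicitly; the new pipeline call does not, so a long PDF may raise or be silently cut off. Passing `truncation=True` to the pipeline call may be enough, or the text can be summarized in chunks. A hedged sketch of the chunked approach (`chunk_chars` and the joining strategy are illustrative choices, not part of the commit):

```python
# Sketch: summarize long documents piecewise to stay under BART's
# 1024-token encoder limit, then join the partial summaries.
def summarize_long(text, chunk_chars=3000):
    chunks = [text[i:i + chunk_chars] for i in range(0, len(text), chunk_chars)]
    parts = [
        summarizer(c, max_length=200, min_length=50, do_sample=False)[0]["summary_text"]
        for c in chunks
    ]
    return " ".join(parts)
```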
@@ -92,9 +74,9 @@ def create_word_report(analysis):
     return docx_bytes.getvalue()
 
 # === Streamlit UI ===
-st.set_page_config(page_title="Chat With PDF (BART + BGE)", page_icon="📄")
+st.set_page_config(page_title="Chat With PDF (BART + BGE + RoBERTa)", page_icon="📄")
 st.title("📄 Chat With PDF")
-st.caption("Summarize and Chat with Documents using facebook/bart-large-cnn + BGE Small Embeddings")
+st.caption("Summarize and Chat with Documents using facebook/bart-large-cnn + BGE-small + RoBERTa QA")
 
 for key in ["current_file", "pdf_summary", "analysis_time", "pdf_report", "word_report", "vectorstore", "messages"]:
     if key not in st.session_state:
@@ -120,7 +102,8 @@ if uploaded_file is not None:
     chunks = text_splitter.split_text(text)
     docs = [LCDocument(page_content=chunk) for chunk in chunks]
 
-    st.session_state.vectorstore = FAISS.from_documents(docs, embedding_function)
+    vectors = embedding_model.encode([doc.page_content for doc in docs])
+    st.session_state.vectorstore = FAISS.from_embeddings(docs, vectors)
 
     st.session_state.pdf_report = create_pdf_report(analysis)
     st.session_state.word_report = create_word_report(analysis)
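Note on this hunk: as far as I can tell from langchain_community, `FAISS.from_embeddings` expects an iterable of `(text, vector)` pairs plus an `Embeddings` object (which `similarity_search` later uses to embed the query), so `FAISS.from_embeddings(docs, vectors)` is unlikely to work as written. A sketch of one way to wire it up, reusing the wrapper idea this commit deleted (the `BGEEmbeddings` class name is hypothetical):

```python
# Sketch, assuming FAISS.from_embeddings(text_embeddings, embedding, ...)
# where text_embeddings is an iterable of (text, vector) pairs and
# embedding provides embed_query() for search-time encoding.
from langchain_core.embeddings import Embeddings

class BGEEmbeddings(Embeddings):
    def __init__(self, model):
        self.model = model

    def embed_documents(self, texts):
        return self.model.encode(texts, show_progress_bar=False).tolist()

    def embed_query(self, text):
        return self.model.encode(text, show_progress_bar=False).tolist()

texts = [doc.page_content for doc in docs]
pairs = list(zip(texts, embedding_model.encode(texts).tolist()))
st.session_state.vectorstore = FAISS.from_embeddings(pairs, BGEEmbeddings(embedding_model))
```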
@@ -160,7 +143,7 @@ if st.session_state.vectorstore is not None:
     with st.spinner("Searching..."):
         docs = st.session_state.vectorstore.similarity_search(prompt, k=3)
         context = "\n".join([doc.page_content for doc in docs])
-        answer = qa_pipeline({"question": prompt, "context": context})["answer"]
+        answer = qa_pipeline(question=prompt, context=context)["answer"]
        st.markdown(answer)
        st.session_state.messages.append({"role": "assistant", "content": answer})
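Note on this hunk: the keyword-argument call is the documented form for the question-answering pipeline. The result dict also carries a confidence score, which can be used to avoid asserting an answer the retrieved context doesn't support; a small sketch (the 0.1 cutoff is an arbitrary illustration, not part of the commit):

```python
# Sketch: the QA pipeline returns {"answer", "score", "start", "end"};
# thresholding on score is an illustrative guard against weak matches.
result = qa_pipeline(question=prompt, context=context)
if result["score"] < 0.1:  # arbitrary cutoff for demonstration
    answer = "I couldn't find a confident answer in the document."
else:
    answer = result["answer"]
```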
 
 