umerfarooq29 committed
Commit 915ecb5 · verified · 1 Parent(s): e520e33

Update app.py

Files changed (1)
  1. app.py +31 -87
app.py CHANGED
@@ -1,100 +1,46 @@
-import numpy as np
-import streamlit as st
-from transformers import BartTokenizer, BartForConditionalGeneration
-from sklearn.feature_extraction.text import TfidfVectorizer
-from sklearn.cluster import KMeans
-from nltk.tokenize import sent_tokenize
-import nltk
-import PyPDF2
-from io import BytesIO
-
-# --------------------------- DOWNLOAD NLTK DATA ---------------------------
-@st.cache_resource
-def download_nltk_data():
-    try:
-        nltk.data.find('tokenizers/punkt')
-    except LookupError:
-        nltk.download('punkt', quiet=True)
-        nltk.download('punkt_tab', quiet=True)
-
-download_nltk_data()
-
-# --------------------------- LOAD MODEL ---------------------------
-@st.cache_resource
-def load_model():
-    tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn')
-    model = BartForConditionalGeneration.from_pretrained('facebook/bart-large-cnn')
-    return tokenizer, model
-
-tokenizer, model = load_model()
-
-# --------------------------- HELPER FUNCTIONS ---------------------------
-
-def extract_text_from_pdf(file) -> str:
-    """Extract text from uploaded PDF file."""
-    pdf_reader = PyPDF2.PdfReader(BytesIO(file.read()))
-    text = ""
-    for page in pdf_reader.pages:
-        text += page.extract_text() or ""
-    return text.strip()
-
-def summarize(text, max_length=150, min_length=50):
-    """Summarize a given text using BART."""
-    inputs = tokenizer([text], max_length=1024, truncation=True, return_tensors='pt')
-    summary_ids = model.generate(
+def summarize_large_text(text, max_length=150, min_length=50):
+    """Summarize long text by splitting into chunks and combining summaries."""
+    chunks = chunk_text(text, max_words=800)
+    summaries = []
+    for chunk in chunks:
+        inputs = tokenizer([chunk], max_length=1024, truncation=True, return_tensors='pt')
+        summary_ids = model.generate(
+            inputs['input_ids'],
+            num_beams=4,
+            max_length=max_length,
+            min_length=min_length,
+            early_stopping=True
+        )
+        chunk_summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
+        summaries.append(chunk_summary)
+
+    # Combine all chunk summaries and re-summarize them
+    combined_summary_text = " ".join(summaries)
+    inputs = tokenizer([combined_summary_text], max_length=1024, truncation=True, return_tensors='pt')
+    final_ids = model.generate(
         inputs['input_ids'],
         num_beams=4,
-        max_length=max_length,
-        min_length=min_length,
+        max_length=200,
+        min_length=80,
         early_stopping=True
     )
-    return tokenizer.decode(summary_ids[0], skip_special_tokens=True)
+    final_summary = tokenizer.decode(final_ids[0], skip_special_tokens=True)
+    return final_summary
 
-def cluster_documents(documents, n_clusters=3):
-    """Cluster similar documents using TF-IDF + KMeans."""
-    vectorizer = TfidfVectorizer(stop_words='english')
-    X = vectorizer.fit_transform(documents)
-    kmeans = KMeans(n_clusters=n_clusters, random_state=42).fit(X)
-    return kmeans.labels_
-
-def chunk_text(text, max_words=1000):
-    """Split long text into smaller chunks for summarization."""
-    if len(text.split()) <= max_words:
-        return [text]
-
-    sentences = sent_tokenize(text)
-    chunks, current_chunk, current_word_count = [], [], 0
-
-    for sentence in sentences:
-        sentence_words = len(sentence.split())
-        if current_word_count + sentence_words <= max_words:
-            current_chunk.append(sentence)
-            current_word_count += sentence_words
-        else:
-            chunks.append(" ".join(current_chunk))
-            current_chunk = [sentence]
-            current_word_count = sentence_words
-
-    if current_chunk:
-        chunks.append(" ".join(current_chunk))
-
-    return chunks
 
 def multi_document_summarize(documents):
-    """Summarize multiple related documents using clustering + BART."""
+    """Summarize multiple related documents using clustering + improved BART summarization."""
     results = {
         'individual_summaries': [],
         'cluster_summaries': [],
         'final_summary': None
     }
 
-
-    # 1️⃣ Individual summaries
+    # 1️⃣ Individual summaries (using improved function)
     for doc in documents:
-        chunks = chunk_text(doc)
-        doc_summary = " ".join([summarize(chunk) for chunk in chunks])
+        doc_summary = summarize_large_text(doc)
         results['individual_summaries'].append(doc_summary)
-
+
     # 2️⃣ Clustering (if >1 doc)
     if len(documents) > 1:
         n_clusters = min(3, len(documents))
@@ -103,8 +49,7 @@ def multi_document_summarize(documents):
         for cluster_id in np.unique(clusters):
             cluster_docs = [doc for doc, c in zip(documents, clusters) if c == cluster_id]
             combined_text = " ".join(cluster_docs)
-            chunks = chunk_text(combined_text)
-            cluster_summary = " ".join([summarize(chunk) for chunk in chunks])
+            cluster_summary = summarize_large_text(combined_text)
             results['cluster_summaries'].append({
                 'doc_indices': [i for i, c in enumerate(clusters) if c == cluster_id],
                 'summary': cluster_summary
@@ -112,13 +57,12 @@ def multi_document_summarize(documents):
 
         # 3️⃣ Final overall summary
         all_summaries = results['individual_summaries'] + [cs['summary'] for cs in results['cluster_summaries']]
-        results['final_summary'] = summarize(" ".join(all_summaries), max_length=200, min_length=100)
+        results['final_summary'] = summarize_large_text(" ".join(all_summaries))
     else:
         results['final_summary'] = results['individual_summaries'][0]
 
     return results
-    #download
-    st.download_button("Download Summary", results['final_summary'], file_name="summary.txt")
+
 
 
 # --------------------------- STREAMLIT UI ---------------------------
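
After this change, the new app.py shown here begins at `def summarize_large_text`, yet that function still calls `chunk_text`, `tokenizer`, and `model`, and `multi_document_summarize` still uses `np.unique` and `cluster_documents`, all of which are deleted above. Whether the committed file re-adds these elsewhere is not visible in this diff; for reference, a minimal sketch of the supporting definitions, reconstructed from the removed lines, looks like this:

# Sketch of the supporting definitions the new code still references,
# reconstructed from the lines this commit removes. Whether app.py
# re-adds them elsewhere is not visible in this diff.
import numpy as np
import streamlit as st
from transformers import BartTokenizer, BartForConditionalGeneration
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
from nltk.tokenize import sent_tokenize
import nltk
import PyPDF2
from io import BytesIO

@st.cache_resource
def download_nltk_data():
    # Fetch the punkt sentence tokenizer once per session.
    try:
        nltk.data.find('tokenizers/punkt')
    except LookupError:
        nltk.download('punkt', quiet=True)
        nltk.download('punkt_tab', quiet=True)

download_nltk_data()

@st.cache_resource
def load_model():
    # BART fine-tuned on CNN/DailyMail, as in the removed code.
    tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn')
    model = BartForConditionalGeneration.from_pretrained('facebook/bart-large-cnn')
    return tokenizer, model

tokenizer, model = load_model()

def extract_text_from_pdf(file) -> str:
    """Extract text from an uploaded PDF file."""
    pdf_reader = PyPDF2.PdfReader(BytesIO(file.read()))
    return "".join(page.extract_text() or "" for page in pdf_reader.pages).strip()

def chunk_text(text, max_words=1000):
    """Split long text into sentence-aligned chunks of at most max_words words."""
    if len(text.split()) <= max_words:
        return [text]
    sentences = sent_tokenize(text)
    chunks, current_chunk, current_word_count = [], [], 0
    for sentence in sentences:
        sentence_words = len(sentence.split())
        if current_word_count + sentence_words <= max_words:
            current_chunk.append(sentence)
            current_word_count += sentence_words
        else:
            chunks.append(" ".join(current_chunk))
            current_chunk, current_word_count = [sentence], sentence_words
    if current_chunk:
        chunks.append(" ".join(current_chunk))
    return chunks

def cluster_documents(documents, n_clusters=3):
    """Cluster similar documents using TF-IDF + KMeans."""
    X = TfidfVectorizer(stop_words='english').fit_transform(documents)
    return KMeans(n_clusters=n_clusters, random_state=42).fit(X).labels_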
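
The removed `st.download_button` call sat after `return results` inside `multi_document_summarize` and was therefore unreachable; a reachable download would belong in the STREAMLIT UI section that this diff truncates. A hypothetical sketch of that wiring, assuming the UI collects uploaded PDFs and that `extract_text_from_pdf` remains available:

# Hypothetical UI wiring; the actual STREAMLIT UI section is not shown in this diff.
uploaded_files = st.file_uploader("Upload PDF files", type="pdf", accept_multiple_files=True)
if uploaded_files:
    documents = [extract_text_from_pdf(f) for f in uploaded_files]
    results = multi_document_summarize(documents)
    st.subheader("Final Summary")
    st.write(results['final_summary'])
    # The download button from the removed code, placed where it is reachable:
    st.download_button("Download Summary", results['final_summary'], file_name="summary.txt")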