jaimin committed on
Commit
9c30e19
·
1 Parent(s): eaf5733

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +132 -0
app.py ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #importing the necessary library
2
+ import re
3
+ import nltk
4
+ import spacy
5
+ import math
6
+ from nltk.tokenize import sent_tokenize
7
+ nltk.download('punkt')
8
+ from transformers import pipeline
9
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
10
+ import gradio as gr
11
+
12
# Defining a function to read in the text file
def read_in_text(url):
    """Read the full contents of a local text file.

    Parameters
    ----------
    url : str
        Path to the text file (despite the name, this is a local file
        path, not a URL -- kept for backward compatibility with callers).

    Returns
    -------
    str
        The file's entire contents as a single string.
    """
    # Specify UTF-8 explicitly: the default text encoding is
    # platform-dependent (e.g. cp1252 on Windows), which can raise
    # UnicodeDecodeError for perfectly valid UTF-8 articles.
    with open(url, 'r', encoding="utf-8") as file:
        article = file.read()
    return article
17
+
18
def clean_text(url):
    """Normalize raw article text before summarization.

    Parameters
    ----------
    url : str
        The raw article text (despite the name, this is the text itself,
        not a URL -- parameter name kept for backward compatibility).

    Returns
    -------
    str
        ASCII-only text with newlines/tabs converted to spaces and runs
        of spaces collapsed to one, trimmed at both ends.
    """
    text = url
    # Drop any non-ASCII characters (accented letters, CJK text, etc.).
    text = text.encode("ascii", errors="ignore").decode(
        "ascii"
    )  # remove non-ascii, Chinese characters

    # Replace newlines and tabs with spaces.  NOTE: the original code also
    # substituted r"\n\n" afterwards, but that pattern could never match
    # once single newlines were already replaced -- the dead substitution
    # (and a redundant leading strip(" ")) has been removed.
    text = re.sub(r"[\n\t]", " ", text)
    text = re.sub(
        " +", " ", text
    ).strip()  # get rid of multiple spaces and replace with a single
    return text
32
# Initializing the model pipeline.
# NOTE(review): mid-file import kept exactly as uploaded; moving it to the
# top-of-file import block would be a separate cleanup.
from transformers import BartTokenizer, BartForConditionalGeneration

# DistilBART fine-tuned for CNN/DailyMail summarization.  Loaded once at
# module import time, so the first run downloads the weights.
model = BartForConditionalGeneration.from_pretrained("sshleifer/distilbart-cnn-12-6")
tokenizer = BartTokenizer.from_pretrained("sshleifer/distilbart-cnn-12-6")
# spaCy English pipeline -- used below only for sentence segmentation.
nlp = spacy.load("en_core_web_sm")
38
+
39
#Defining a function to get the summary of the article
def final_summary(file):
    """Produce a bullet-point summary of an article.

    The text is repeatedly summarized (outer ``while`` loop) until the
    number of resulting bullet points drops below 10.  Each pass splits
    the text into roughly equal sentence chunks, summarizes each chunk
    with DistilBART, and joins the partial summaries back into a new,
    shorter text for the next pass.

    Parameters
    ----------
    file : str
        The raw article text (passed straight to ``clean_text``; despite
        the name, this is the text itself, not a file handle).

    Returns
    -------
    str
        Newline-separated bullet points, each prefixed with ``"* "``.

    NOTE(review): if a pass never yields fewer than 10 bullet points the
    outer loop will not terminate -- TODO confirm intended behavior for
    very long articles.
    """
    #reading in the text and tokenizing it into sentence
    text = clean_text(file)
    # Sentinel so the outer loop runs at least once.
    bullet_points = 10

    while (bullet_points >= 10):

        # Split the current text into individual sentences with spaCy.
        chunks = []
        sentences = nlp(text)
        for sentence in sentences.sents:
            chunks.append(str(sentence))

        output = []
        sentences_remaining = len(chunks)
        i = 0

        #looping through the sentences in an equal batch based on their length and summarizing them
        while sentences_remaining > 0:
            # Aim for ~10 sentences per chunk, balanced across the batch.
            chunks_remaining = math.ceil(sentences_remaining / 10.0)
            next_chunk_size = math.ceil(sentences_remaining / chunks_remaining)
            # NOTE(review): sentences are joined with "" (no separator),
            # so adjacent sentences are concatenated without a space.
            sentence = "".join(chunks[i:i+next_chunk_size])

            i += next_chunk_size
            sentences_remaining -= next_chunk_size

            inputs = tokenizer(sentence, return_tensors="pt", padding="longest")
            #inputs = inputs.to(DEVICE)
            original_input_length = len(inputs["input_ids"][0])

            # checking if the length of the input batch is less than 150
            # NOTE(review): comment says 150 but the code checks < 100 --
            # confirm which threshold is intended.
            if original_input_length < 100:
                # Chunk is short enough to keep verbatim: pass its
                # sentences straight through without summarizing.
                split_sentences = nlp(sentence)
                for split_sentence in split_sentences.sents:
                    output.append(str(split_sentence).rstrip("."))


            # checking if the length of the input batch is greater than 1024
            # (1024 tokens is BART's maximum input length).
            elif original_input_length > 1024:
                sent = sent_tokenize(sentence)
                length_sent = len(sent)

                j = 0
                sent_remaining = math.ceil(length_sent / 2)

                # going through the batch that is greater than 1024 and dividing them
                # NOTE(review): halves are joined with "" and the second
                # half may still exceed 1024 tokens -- no re-check here.
                while length_sent > 0:
                    halved_sentence = "".join(sent[j:j+sent_remaining])
                    halved_inputs = tokenizer(halved_sentence, return_tensors="pt")
                    #halved_inputs = halved_inputs.to(DEVICE)
                    halved_summary_ids = model.generate(halved_inputs["input_ids"])
                    j += sent_remaining
                    length_sent -= sent_remaining

                    # checking if the length of the output summary is less than the original text
                    if len(halved_summary_ids[0]) < len(halved_inputs["input_ids"][0]):
                        halved_summary = tokenizer.batch_decode(halved_summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
                        output.append(halved_summary)

            else:
                # 100 <= length <= 1024: summarize the chunk directly.
                summary_ids = model.generate(inputs["input_ids"])

                # Keep the summary only if it actually shortened the text.
                if len(summary_ids[0]) < original_input_length:
                    summary = tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
                    output.append(summary)

        # Split each partial summary on " . " into candidate bullet lines
        # and strip stray " ." fragments.
        final_output = []
        for paragraphs in output:
            lines = paragraphs.split(" . ")
            for line in lines:
                final_output.append(line.replace(" .", "").strip())
        # Rejoin for the next summarization pass (if still >= 10 bullets).
        text = ".".join(final_output)
        bullet_points = len(final_output)


    # Format the surviving lines as bullet points.
    for i in range(len(final_output)):
        final_output[i] = "* " + final_output[i] + "."

    # final sentences are incoherent, so we will join them by bullet separator
    summary_bullet = "\n".join(final_output)

    return summary_bullet
121
+
122
+
123
+
124
#creating an interface for the headline generator using gradio
# NOTE(review): gr.inputs.Textbox / gr.outputs.Textbox is the legacy
# pre-3.x Gradio component API; on modern Gradio use gr.Textbox for both.
# Kept as-is to match the Gradio version this Space was built against.
demo = gr.Interface(final_summary, inputs=[gr.inputs.Textbox(label="Drop your article here", optional=False)],
                    title = "ARTICLE SUMMARIZER",
                    outputs=[gr.outputs.Textbox(label="Summary")],
                    theme= "darkhuggingface")

#launching the app
# debug=True surfaces tracebacks in the UI/console while the app runs.
if __name__ == "__main__":
    demo.launch(debug=True)