import nltk
import ntlk_utils  # NLTK resources are downloaded in a separate file
from nltk.corpus import wordnet as wn
from nltk.tokenize import sent_tokenize
from nltk.corpus import stopwords
from time import sleep
from flashtext import KeywordProcessor
from pprint import pprint
import random
import pke
import traceback
import json
import requests
import string
import re
import itertools
import streamlit as st
from transformers import T5ForConditionalGeneration,T5Tokenizer
from transformers import pipeline
import torch
import numpy as np
def set_seed(seed: int):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
set_seed(42)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
summary_model = T5ForConditionalGeneration.from_pretrained('t5-base').to(device)
summary_tokenizer = T5Tokenizer.from_pretrained('t5-base')
question_model = T5ForConditionalGeneration.from_pretrained('ramsrigouthamg/t5_squad_v1').to(device)
question_tokenizer = T5Tokenizer.from_pretrained('ramsrigouthamg/t5_squad_v1')
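# Optional caching sketch (assumes Streamlit >= 1.18, where st.cache_resource is available):
# loading the T5 checkpoints inside a cached helper avoids re-initializing them on every
# Streamlit rerun. The helper below is illustrative only and is not called elsewhere.
@st.cache_resource
def load_t5(checkpoint: str):
    # Load a seq2seq checkpoint and its tokenizer once per process and reuse them across reruns.
    model = T5ForConditionalGeneration.from_pretrained(checkpoint).to(device)
    tokenizer = T5Tokenizer.from_pretrained(checkpoint)
    return model, tokenizer
# Example usage: summary_model, summary_tokenizer = load_t5('t5-base')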
#summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
st.header("Question Creation")
st.subheader("Enter the text and click on Generate Question. Questions will be created automatically.")
text = st.text_area("Input the text to get questions",placeholder="Enter the text", height=200)
button = st.button("Generate Question")
def postprocesstext(content):
    # Capitalize each sentence of the generated text and stitch it back together.
    final = ""
    for sent in sent_tokenize(content):
        sent = sent.capitalize()
        final = final + " " + sent
    return final
def summarizer(text, model, tokenizer):
    # Summarize the input text with T5 (using the "summarize:" task prefix) and clean up the output.
    text = text.strip().replace("\n", " ")
    text = "summarize: " + text
    print(text)
    max_len = 512
    encoding = tokenizer.encode_plus(text, max_length=max_len, padding=False, truncation=True, return_tensors="pt").to(device)
    input_ids, attention_mask = encoding["input_ids"], encoding["attention_mask"]
    outs = model.generate(input_ids=input_ids,
                          attention_mask=attention_mask,
                          early_stopping=True,
                          num_beams=3,
                          num_return_sequences=1,
                          no_repeat_ngram_size=2,
                          min_length=75,
                          max_length=300)
    dec = [tokenizer.decode(ids, skip_special_tokens=True) for ids in outs]
    summary = dec[0]
    summary = postprocesstext(summary)
    summary = summary.strip()
    print("done from summarizer")
    return summary
def get_nouns_multipartite(content):
    # Extract candidate keyphrases (proper nouns and nouns) with MultipartiteRank.
    out = []
    try:
        extractor = pke.unsupervised.MultipartiteRank()
        extractor.load_document(input=content, language='en')
        # Keep only proper nouns and nouns as candidates; punctuation and stopwords are excluded.
        pos = {'PROPN', 'NOUN'}
        stoplist = list(string.punctuation)
        stoplist += ['-lrb-', '-rrb-', '-lcb-', '-rcb-', '-lsb-', '-rsb-']
        stoplist += stopwords.words('english')
        # extractor.candidate_selection(pos=pos, stoplist=stoplist)
        extractor.candidate_selection(pos=pos)
        # Build the Multipartite graph and rank candidates using a random walk;
        # alpha controls the weight adjustment mechanism (see TopicRank for the
        # threshold/method parameters).
        extractor.candidate_weighting(alpha=1.1,
                                      threshold=0.75,
                                      method='average')
        keyphrases = extractor.get_n_best(n=15)
        for val in keyphrases:
            out.append(val[0])
    except Exception:
        out = []
        traceback.print_exc()
    return out
def get_keywords(originaltext, summarytext):
    # Keep only the keyphrases from the original text that also appear in the summary.
    keywords = get_nouns_multipartite(originaltext)
    print("keywords unsummarized: ", keywords)
    keyword_processor = KeywordProcessor()
    for keyword in keywords:
        keyword_processor.add_keyword(keyword)
    keywords_found = keyword_processor.extract_keywords(summarytext)
    keywords_found = list(set(keywords_found))
    print("keywords_found in summarized: ", keywords_found)
    important_keywords = []
    for keyword in keywords:
        if keyword in keywords_found:
            important_keywords.append(keyword)
    return important_keywords[:4]
def get_question(context, answer, model, tokenizer):
    # Generate a question for the given answer span using the SQuAD-finetuned T5 model.
    text = "context: {} answer: {}".format(context, answer)
    encoding = tokenizer.encode_plus(text, max_length=384, padding=False, truncation=True, return_tensors="pt").to(device)
    input_ids, attention_mask = encoding["input_ids"], encoding["attention_mask"]
    outs = model.generate(input_ids=input_ids,
                          attention_mask=attention_mask,
                          early_stopping=True,
                          num_beams=5,
                          num_return_sequences=1,
                          no_repeat_ngram_size=2,
                          max_length=72)
    dec = [tokenizer.decode(ids, skip_special_tokens=True) for ids in outs]
    question = dec[0].replace("question:", "")
    question = question.strip()
    return question
if text and button:
    summarized_text = summarizer(text, summary_model, summary_tokenizer)
    sleep(0.5)
    print("summary: ", summarized_text)
    #summarized_text = summarizer(text, max_length=130, min_length=30, do_sample=False)
    # summarized_text = "Musk tweeted that his electric vehicle-making company tesla will not accept payments in bitcoin because of environmental concerns. He also said that the company was working with developers of dogecoin to improve system transaction efficiency. The world's largest cryptocurrency hit a two-month low, while doge coin rallied by about 20 percent. Musk has in recent months often tweeted in support of crypto, but rarely for bitcoin."
    imp_keywords = get_keywords(text, summarized_text)
    # Show one question per important keyword, followed by the expected answer.
    for answer in imp_keywords:
        ques = get_question(summarized_text, answer, question_model, question_tokenizer)
        st.write(ques)
        st.write(answer.capitalize())
        st.write("\n")