Spaces:
Runtime error
Runtime error
Update app.py
Browse files
New T5 use case to summarize a paragraph of text.
app.py
CHANGED
|
@@ -230,26 +230,52 @@
|
|
| 230 |
# which provides the model with some form of input and then teaches it to generate some form of target text. This makes it possible to apply
|
| 231 |
# the same model, loss function, hyperparameters, and other settings to all of our varied sets of responsibilities.
|
| 232 |
|
| 233 |
-
from transformers import AutoModelWithLMHead, AutoTokenizer
|
| 234 |
-
import gradio as grad
|
| 235 |
|
| 236 |
-
text2text_tkn = AutoTokenizer.from_pretrained("mrm8488/t5-base-finetuned-question-generation-ap")
|
| 237 |
-
mdl = AutoModelWithLMHead.from_pretrained("mrm8488/t5-base-finetuned-question-generation-ap")
|
| 238 |
|
| 239 |
-
def text2text(context,answer):
|
| 240 |
-
|
| 241 |
-
|
| 242 |
-
|
| 243 |
-
|
| 244 |
-
|
| 245 |
-
|
| 246 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 247 |
|
| 248 |
-
|
| 249 |
-
|
| 250 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 251 |
|
| 252 |
-
grad.
|
|
|
|
| 253 |
|
|
|
|
| 254 |
|
| 255 |
|
|
|
|
| 230 |
# which provides the model with some form of input and then teaches it to generate some form of target text. This makes it possible to apply
|
| 231 |
# the same model, loss function, hyperparameters, and other settings to all of our varied sets of responsibilities.
|
| 232 |
|
| 233 |
+
# from transformers import AutoModelWithLMHead, AutoTokenizer
|
| 234 |
+
# import gradio as grad
|
| 235 |
|
| 236 |
+
# text2text_tkn = AutoTokenizer.from_pretrained("mrm8488/t5-base-finetuned-question-generation-ap")
|
| 237 |
+
# mdl = AutoModelWithLMHead.from_pretrained("mrm8488/t5-base-finetuned-question-generation-ap")
|
| 238 |
|
| 239 |
+
# def text2text(context,answer):
|
| 240 |
+
# input_text = "answer: %s context: %s </s>" % (answer, context)
|
| 241 |
+
# features = text2text_tkn ([input_text], return_tensors='pt')
|
| 242 |
+
# output = mdl.generate(input_ids=features['input_ids'],
|
| 243 |
+
# attention_mask=features['attention_mask'],
|
| 244 |
+
# max_length=64)
|
| 245 |
+
# response=text2text_tkn.decode(output[0])
|
| 246 |
+
# return response
|
| 247 |
+
|
| 248 |
+
# context=grad.Textbox(lines=10, label="English", placeholder="Context")
|
| 249 |
+
# ans=grad.Textbox(lines=1, label="Answer")
|
| 250 |
+
# out=grad.Textbox(lines=1, label="Generated Question")
|
| 251 |
+
|
| 252 |
+
# grad.Interface(text2text, inputs=[context,ans], outputs=out).launch()
|
| 253 |
|
| 254 |
+
#-----------------------------------------------------------------------------------
|
| 255 |
+
# 11. Text-to-Text Generation using the T5 model - second use case summarizes a paragraph of text.
|
| 256 |
+
|
| 257 |
+
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import gradio as grad

# T5-small checkpoint fine-tuned on the WikiHow dataset for abstractive
# summarization; hoisted so tokenizer and model are guaranteed to match.
_CHECKPOINT = "deep-learning-analytics/wikihow-t5-small"

# AutoModelWithLMHead is deprecated in transformers; for an encoder-decoder
# model such as T5 the documented replacement is AutoModelForSeq2SeqLM.
text2text_tkn = AutoTokenizer.from_pretrained(_CHECKPOINT)
mdl = AutoModelForSeq2SeqLM.from_pretrained(_CHECKPOINT)
|
| 262 |
+
|
| 263 |
+
def text2text_summary(para):
    """Summarize a paragraph with the WikiHow-finetuned T5 model.

    Strips surrounding whitespace and removes newline characters, encodes
    the text, generates with beam search, and returns the decoded summary.
    """
    # NOTE(review): replace("\n", "") joins words across line breaks with no
    # separating space — presumably fine for single-paragraph textbox input;
    # confirm before feeding multi-line documents.
    cleaned = para.strip().replace("\n", "")
    input_ids = text2text_tkn.encode(cleaned, return_tensors="pt")
    generated = mdl.generate(
        input_ids,
        max_length=250,           # cap on generated summary length (tokens)
        num_beams=5,              # beam search width
        repetition_penalty=2.5,   # discourage repeated phrases
        early_stopping=True,      # stop when all beams have finished
    )
    return text2text_tkn.decode(generated[0], skip_special_tokens=True)
|
| 275 |
|
| 276 |
+
# Gradio front end: one paragraph in, one summary line out.
paragraph_box = grad.Textbox(lines=10, label="Paragraph", placeholder="Copy paragraph")
summary_box = grad.Textbox(lines=1, label="Summary")

grad.Interface(text2text_summary, inputs=paragraph_box, outputs=summary_box).launch()
|
| 280 |
|
| 281 |
|