# Author: Malikov Dmitry Romanovich (Маликов Дмитрий Романович)
# Add application
# Commit: 5fe52a8
import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
# Hugging Face Hub identifiers for the two fine-tuned question-generation checkpoints.
large_model_name = "DmitryMalikov/t5-base-question-gen"
small_model_name = "DmitryMalikov/t5-small-question-gen"

# Map UI-facing labels to checkpoint names, then load tokenizers and models
# once at startup so request handling never re-downloads weights.
_checkpoints = {
    "Base T5": large_model_name,
    "Small T5": small_model_name,
}
tokenizers = {
    label: AutoTokenizer.from_pretrained(name) for label, name in _checkpoints.items()
}
models = {
    label: AutoModelForSeq2SeqLM.from_pretrained(name) for label, name in _checkpoints.items()
}

# Prefer GPU when one is present; move every loaded model onto that device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
for m in models.values():
    m.to(device)
def generate_question(context, model_choice):
    """Generate a question that can be answered from *context*.

    Args:
        context: Free-form text to base the question on.
        model_choice: UI label of the model to use ("Base T5" or "Small T5").
            Raises KeyError for an unknown label (same as original lookup).

    Returns:
        The generated question as a string; an empty string for blank input.
    """
    input_text = context.strip()
    # Guard: without any context the model would just hallucinate a question.
    if not input_text:
        return ""
    tokenizer = tokenizers[model_choice]
    model = models[model_choice]
    inputs = tokenizer(
        input_text, return_tensors="pt", truncation=True, max_length=256
    ).to(device)
    # Inference only: disable autograd bookkeeping to cut memory and latency.
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_length=64,
            num_beams=4,
            early_stopping=True,
        )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
# Gradio UI: a multi-line context box plus a model selector, yielding one question.
_context_box = gr.Textbox(
    label="Context",
    lines=5,
    placeholder="Enter text context for question...",
)
_model_dropdown = gr.Dropdown(
    choices=["Base T5", "Small T5"],
    label="Choose model",
    value="Small T5",
)
iface = gr.Interface(
    fn=generate_question,
    inputs=[_context_box, _model_dropdown],
    outputs=gr.Textbox(label="Generated question"),
    title="Question generation based on context",
    description="Enter text and receive question that can be answered with given context.",
)
if __name__ == "__main__":
    # Launch the Gradio web server only when executed as a script,
    # not when this module is imported.
    iface.launch()