File size: 1,612 Bytes
5fe52a8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM


# Hugging Face Hub ids of the two fine-tuned T5 question-generation checkpoints.
large_model_name = "DmitryMalikov/t5-base-question-gen"
small_model_name = "DmitryMalikov/t5-small-question-gen"

# UI label -> checkpoint id; drives both registries so labels stay in sync.
_checkpoints = {
    "Base T5": large_model_name,
    "Small T5": small_model_name,
}
tokenizers = {
    label: AutoTokenizer.from_pretrained(repo) for label, repo in _checkpoints.items()
}
models = {
    label: AutoModelForSeq2SeqLM.from_pretrained(repo) for label, repo in _checkpoints.items()
}

# Prefer the GPU when one is visible; move every model there up front.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
for m in models.values():
    m.to(device)


def generate_question(context, model_choice):
    """Generate one question that can be answered from *context*.

    Args:
        context: Free-form text to base the question on.
        model_choice: Key into the module-level ``tokenizers``/``models``
            registries ("Base T5" or "Small T5").

    Returns:
        The decoded question string; "" when the context is blank.
    """
    tokenizer = tokenizers[model_choice]
    model = models[model_choice]

    input_text = context.strip()
    if not input_text:
        # Nothing to condition on — skip the expensive generate call.
        return ""

    inputs = tokenizer(
        input_text, return_tensors="pt", truncation=True, max_length=256
    ).to(device)

    # Inference only: disabling autograd avoids building a computation
    # graph and cuts memory use during beam search.
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_length=64,
            num_beams=4,
            early_stopping=True,
        )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)


# Web UI: a multi-line context box and a model selector in, one text box out.
_context_box = gr.Textbox(
    label="Context", lines=5, placeholder="Enter text context for question..."
)
_model_picker = gr.Dropdown(
    choices=["Base T5", "Small T5"], label="Choose model", value="Small T5"
)

iface = gr.Interface(
    fn=generate_question,
    inputs=[_context_box, _model_picker],
    outputs=gr.Textbox(label="Generated question"),
    title="Question generation based on context",
    description="Enter text and receive question that can be answered with given context.",
)

if __name__ == "__main__":
    iface.launch()