File size: 637 Bytes
2914ce8
5c45864
2914ce8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
import gradio
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model_name = 't5-small'

# Tokenizer from the base checkpoint; weights from the locally fine-tuned model.
tokenizer = AutoTokenizer.from_pretrained(model_name)

finetuned_model = AutoModelForSeq2SeqLM.from_pretrained("finetuned_model_2_epoch")


def generate_answer(question, context):
    """Run the fine-tuned seq2seq model on one (question, context) pair.

    Args:
        question: User question text entered in the UI.
        context: Table/schema description text entered in the UI.

    Returns:
        The decoded model generation (special tokens stripped).
    """
    prompt = f"""Tables:
{context}

Question:
{question}

Answer:
"""
    inputs = tokenizer(prompt, return_tensors='pt')
    output = tokenizer.decode(
        finetuned_model.generate(
            inputs["input_ids"],
            max_new_tokens=200,
        )[0],
        skip_special_tokens=True,
    )
    print(f'MODEL GENERATION - ZERO SHOT:\n{output}')
    return output


# BUG FIX: the original interpolated the gradio.Textbox component OBJECTS
# (their repr) into the prompt once at import time and never launched a UI,
# so no user input ever reached the model. Wire the components into an
# Interface instead, so the prompt is built per-request from real text.
demo = gradio.Interface(
    fn=generate_answer,
    inputs=[
        gradio.Textbox(label="question"),
        gradio.Textbox(label="context"),
    ],
    outputs=gradio.Textbox(label="answer"),
)

if __name__ == "__main__":
    demo.launch()