import gradio
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
|
|
"""Gradio demo: answer table-based questions with a fine-tuned T5 model."""

model_name = 't5-small'

# The tokenizer comes from the base checkpoint; the weights come from the
# locally fine-tuned directory (assumed present next to this script —
# TODO confirm the path "finetuned_model_2_epoch" at deploy time).
tokenizer = AutoTokenizer.from_pretrained(model_name)
finetuned_model = AutoModelForSeq2SeqLM.from_pretrained("finetuned_model_2_epoch")


def _build_prompt(question, context):
    """Return the prompt string fed to the model for one request."""
    return f"""Tables:
{context}

Question:
{question}

Answer:
"""


def answer(question, context):
    """Generate an answer for *question* given the table schema *context*.

    Both arguments are plain strings supplied by the Gradio UI.
    Returns the decoded model generation (special tokens stripped).
    """
    inputs = tokenizer(_build_prompt(question, context), return_tensors='pt')
    output_ids = finetuned_model.generate(
        inputs["input_ids"],
        max_new_tokens=200,
    )[0]
    return tokenizer.decode(output_ids, skip_special_tokens=True)


# BUG FIX: the original interpolated gradio.Textbox *component objects*
# directly into the prompt at import time — the model saw the widgets'
# repr, never any user input — and no Interface was ever built, so the
# UI components were dead. Wire the components to the model properly.
demo = gradio.Interface(
    fn=answer,
    inputs=[
        gradio.Textbox(label="question"),
        gradio.Textbox(label="context"),
    ],
    outputs=gradio.Textbox(label="answer"),
)

if __name__ == "__main__":
    demo.launch()