from functools import lru_cache

import gradio as gr
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
# Base checkpoint used for the tokenizer; the weights come from the local
# fine-tuned directory below.
MODEL_NAME = "t5-small"
FINETUNED_MODEL_PATH = "finetuned_model_2_epoch"


@lru_cache(maxsize=1)
def _load_model_and_tokenizer():
    """Load and cache the tokenizer and the fine-tuned seq2seq model.

    Loading a transformers checkpoint is expensive (disk, and possibly a
    network download for the tokenizer); caching with ``lru_cache`` makes
    the load happen once per process instead of once per Gradio request.

    Returns:
        tuple: ``(tokenizer, model)``.
    """
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
    model = AutoModelForSeq2SeqLM.from_pretrained(FINETUNED_MODEL_PATH)
    return tokenizer, model


def get_output(question, context):
    """Answer *question* about the tables described in *context*.

    Builds a "Tables / Question / Answer" prompt, runs the fine-tuned
    seq2seq model, and decodes the generated token ids back to text.

    Args:
        question: Natural-language question (str).
        context: Table description/schema text inserted into the prompt (str).

    Returns:
        str: The model's decoded answer, special tokens stripped.
    """
    tokenizer, finetuned_model = _load_model_and_tokenizer()
    prompt = f"""Tables:
{context}
Question:
{question}
Answer:
"""
    inputs = tokenizer(prompt, return_tensors='pt')
    # generate() returns a batch; [0] takes the single generated sequence.
    generated_ids = finetuned_model.generate(
        inputs["input_ids"],
        max_new_tokens=200,
    )[0]
    return tokenizer.decode(generated_ids, skip_special_tokens=True)
# Launch the Gradio UI only when run as a script, so importing this module
# (e.g. from tests or another app) does not start a web server.
if __name__ == "__main__":
    # Two free-text inputs: (question, tables context) -> one text answer.
    interface = gr.Interface(fn=get_output, inputs=["text", "text"], outputs=["text"])
    interface.launch()