Daniton's picture
Update app.py
e7a587f
raw
history blame contribute delete
697 Bytes
# importing libraries
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
# Load the FLAN-T5 tokenizer and seq2seq model once at module import time,
# so every Gradio request reuses the same weights (the checkpoint is
# downloaded from the Hugging Face hub on first run).
tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-base")
model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-base")
# define a predict function to generate text
def generate_text(prompt):
    """Generate a text completion for *prompt* with the FLAN-T5 model.

    Args:
        prompt: The input text to condition generation on.

    Returns:
        The generated text, decoded with special tokens stripped.
    """
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    # do_sample=True is required for top_p/top_k to have any effect;
    # without it generate() falls back to greedy decoding and silently
    # ignores both sampling parameters (the original code had this bug).
    generated_ids = model.generate(
        input_ids,
        max_length=100,
        do_sample=True,
        top_p=0.9,
        top_k=40,
    )
    return tokenizer.decode(generated_ids[0], skip_special_tokens=True)
# Wire the generator into a simple Gradio UI: one text box in,
# generated text out, then start the web server.
demo = gr.Interface(fn=generate_text, inputs="text", outputs="text")
demo.launch()