File size: 697 Bytes
abdc8c5
57c9ca2
 
 
 
 
 
 
 
 
 
e7a587f
57c9ca2
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
# importing libraries
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# load the pretrained model
# Downloads the FLAN-T5 base checkpoint (or reads it from the local
# Hugging Face cache) at import time — the first run blocks on the download.
tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-base")
model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-base")

# define a predict function to generate text
def generate_text(prompt):
    """Generate a text continuation for ``prompt`` with FLAN-T5.

    Args:
        prompt: Input text the model conditions its generation on.

    Returns:
        The generated string, decoded with special tokens removed.
    """
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    # do_sample=True is required for top_p / top_k to take effect;
    # without it generate() falls back to greedy decoding and silently
    # ignores both sampling parameters.
    generated_text = model.generate(
        input_ids,
        max_length=100,
        do_sample=True,
        top_p=0.9,
        top_k=40,
    )
    text = tokenizer.decode(generated_text[0], skip_special_tokens=True)
    return text

# Wire the generator into a minimal Gradio web UI:
# one text box in, the generated text out.
demo = gr.Interface(fn=generate_text, inputs="text", outputs="text")
demo.launch()