# story / app.py — Gradio demo: GPT-2 text generation
# (Hugging Face Space metadata: author "Writo", commit ebe514a "Update app.py")
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
# Load the model
# Tokenizer and causal-LM weights are loaded once at import time so that
# every generation request reuses the same in-memory model instead of
# re-downloading / re-initializing per call.
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
def generate_text(text_input, max_length, temperature, top_k):
    """Generate a GPT-2 continuation of *text_input*.

    Parameters
    ----------
    text_input : str
        The prompt to continue.
    max_length : int | float
        Total token budget (prompt + continuation); Gradio sliders may
        deliver floats, so it is coerced to int.
    temperature : float
        Sampling temperature; clamped to a small positive value because
        0.0 (allowed by the UI slider) would cause a division-by-zero
        during sampling.
    top_k : int | float
        Top-k sampling cutoff; coerced to int.

    Returns
    -------
    str
        The decoded text, including the original prompt.
    """
    # Generation arguments must be ints; sliders are not guaranteed to pass them.
    max_length = int(max_length)
    top_k = int(top_k)
    # temperature == 0 with do_sample=True raises at runtime; clamp instead.
    temperature = max(float(temperature), 1e-4)
    input_ids = tokenizer.encode(text_input, return_tensors='pt')
    output = model.generate(
        input_ids,
        max_length=max_length,
        temperature=temperature,
        top_k=top_k,
        do_sample=True,
        # GPT-2 defines no pad token; reuse EOS to avoid a warning on every call.
        pad_token_id=tokenizer.eos_token_id,
    )
    return tokenizer.decode(output[0], skip_special_tokens=True)
# Build the Gradio UI: a prompt box plus three sampling controls,
# wired straight into generate_text.
prompt_box = gr.Textbox(lines=2, placeholder="Enter a prompt...")
length_slider = gr.Slider(minimum=1, maximum=500, step=1, value=100, label='Max Length')
temp_slider = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, value=0.8, label='Temperature')
topk_slider = gr.Slider(minimum=100, maximum=1000, step=50, value=100, label='Top K')

iface = gr.Interface(
    fn=generate_text,
    inputs=[prompt_box, length_slider, temp_slider, topk_slider],
    outputs='text',
    title="GPT-2 Text Generator",
)
# Run the interface.
# Guarded so that importing this module (e.g. for tests) does not
# start a web server; running the script directly still launches it.
if __name__ == "__main__":
    iface.launch()