import gradio as gr
import torch
from transformers import GPT2Tokenizer, GPT2LMHeadModel
# Load the pretrained GPT-2 tokenizer and language model once at startup
# (downloads weights on first run, then serves from the local cache).
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
model = GPT2LMHeadModel.from_pretrained("gpt2")
# Inference-only service: switch off dropout so sampled generations are not
# degraded by train-mode stochasticity.
model.eval()
def generate_text(prompt: str) -> str:
    """Generate a sampled text continuation of *prompt* with GPT-2.

    Args:
        prompt: Seed text to continue.

    Returns:
        The decoded generation (prompt included), special tokens stripped.
    """
    # Send the inputs to whichever device the model actually lives on instead
    # of hard-coding 'cuda' — the original crashed on CPU-only hosts and
    # device-mismatched whenever the model was not explicitly moved to CUDA.
    device = next(model.parameters()).device
    input_ids = tokenizer.encode(prompt, return_tensors="pt").to(device)
    # Generation needs no gradients; inference_mode skips autograd bookkeeping.
    with torch.inference_mode():
        generated = model.generate(
            input_ids,
            max_length=1024,  # total cap: prompt + continuation tokens
            temperature=0.7,
            top_k=50,
            top_p=0.9,
            do_sample=True,
            num_return_sequences=1,
            # GPT-2 defines no pad token; reuse EOS to silence the per-call
            # "Setting pad_token_id to eos_token_id" warning.
            pad_token_id=tokenizer.eos_token_id,
        )
    return tokenizer.decode(generated.squeeze(), skip_special_tokens=True)
# `input_type`/`output_type` are not gr.Interface parameters — the correct
# keyword arguments are `inputs`/`outputs`; the original raised a TypeError
# before the app could start.
iface = gr.Interface(
    fn=generate_text,
    inputs="text",
    outputs="text",
    title="GPT-2 Text Generator",
)
iface.launch()