import gradio as gr
from transformers import GPT2LMHeadModel, GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
# add the EOS token as PAD token to avoid warnings
model = GPT2LMHeadModel.from_pretrained("gpt2", pad_token_id=tokenizer.eos_token_id)

def predict(input_text):
    # encode the context the generation is conditioned on
    input_ids = tokenizer.encode(input_text, return_tensors="pt")
    # generate text until the output length (which includes the context length) reaches 50
    greedy_output = model.generate(input_ids, max_length=50)
    # return (rather than print) the decoded text so Gradio can display it
    return tokenizer.decode(greedy_output[0], skip_special_tokens=True)

gr.Interface(
    predict,
    inputs=gr.Textbox(label="Text"),
    outputs=gr.Textbox(label="Generated text"),
    title="GPT-2 Text Generation",
).launch()
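
# Side note (a sketch, not part of the original app): greedy decoding tends to
# repeat itself on longer outputs. `model.generate` also supports sampling via
# its standard do_sample/top_k/top_p arguments; a variant predict function
# (the name `predict_sampled` is hypothetical) could look like this:
def predict_sampled(input_text):
    input_ids = tokenizer.encode(input_text, return_tensors="pt")
    sampled_output = model.generate(
        input_ids,
        max_length=50,
        do_sample=True,  # sample from the model's distribution instead of taking the argmax
        top_k=50,        # restrict sampling to the 50 most likely next tokens
        top_p=0.95,      # nucleus sampling: keep the smallest token set covering 95% probability
    )
    return tokenizer.decode(sampled_output[0], skip_special_tokens=True)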