"""Generate a sampled text continuation with GPT-2 via Hugging Face Transformers."""
import gradio as gr  # NOTE(review): unused in this chunk; kept in case a web demo elsewhere relies on it — confirm before removing

from transformers import AutoTokenizer, AutoModelForCausalLM

# NOTE: the original script also built a `pipeline("text-generation", model="gpt2")`
# that was never used — it loaded GPT-2 a second time for nothing, so it was removed.


def generate_text(prompt, max_length=100, temperature=0.8, top_k=100):
    """Return *prompt* extended by GPT-2 to at most *max_length* tokens.

    Args:
        prompt: Seed text to continue.
        max_length: Total output length cap, in tokens (prompt included).
        temperature: Softmax temperature for sampling.
        top_k: Sample only from the top-k most likely next tokens.

    Returns:
        The decoded generation (prompt + continuation) as a plain string.
        Sampling is stochastic (``do_sample=True``), so repeated calls differ.
    """
    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    model = AutoModelForCausalLM.from_pretrained("gpt2")

    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    output = model.generate(
        input_ids,
        max_length=max_length,
        temperature=temperature,
        top_k=top_k,
        do_sample=True,
        # GPT-2 has no pad token; reusing EOS silences the generate() warning.
        pad_token_id=tokenizer.eos_token_id,
    )
    # Bug fix: the keyword is `skip_special_tokens` (plural). The original
    # `skip_special_token=True` was silently swallowed by **kwargs, so
    # special tokens were never actually stripped from the output.
    return tokenizer.decode(output[0], skip_special_tokens=True)


if __name__ == "__main__":
    # Typo fixed in the prompt: "One upon a time" -> "Once upon a time".
    print(generate_text("Once upon a time there was a tree"))