"""Minimal Gradio demo: generate a short GPT-2 continuation of the input text."""
import gradio as gr
from transformers import AutoTokenizer, TFAutoModelForCausalLM

# Load the tokenizer and model ONCE at startup. The original re-ran
# from_pretrained() inside every request, which re-reads the weights and
# rebuilds the TF graph per call — several seconds of avoidable latency.
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = TFAutoModelForCausalLM.from_pretrained("gpt2")
# GPT-2 defines no pad token; reuse EOS so generate() can pad without warnings.
model.config.pad_token_id = model.config.eos_token_id


def get_response(text):
    """Return *text* followed by up to 20 sampled GPT-2 continuation tokens.

    Args:
        text: Prompt string entered in the Gradio textbox.

    Returns:
        The decoded sequence (prompt + continuation) as a single string.
    """
    inputs = tokenizer([text], return_tensors="tf")
    generated = model.generate(
        **inputs,
        do_sample=True,
        seed=(42, 0),       # TF stateless-sampling seed -> reproducible output
        max_new_tokens=20,
        temperature=0.1,    # very low temperature: near-greedy sampling
    )
    return tokenizer.decode(generated[0])


iface = gr.Interface(fn=get_response, inputs="text", outputs="text")

if __name__ == "__main__":
    # Guarded so importing this module (e.g. for tests) does not start a server.
    iface.launch()