# Hugging Face Space app (page header showed status: "Sleeping").
import os

import gradio as gr
from huggingface_hub import InferenceClient

# Read the Hugging Face API token from the environment (set HF_TOKEN in the
# host/Space settings). May be None; the client then falls back to
# unauthenticated access.
token = os.getenv("HF_TOKEN")

# Shared client for the hosted "gpt2" model via the HF Inference API.
client = InferenceClient(model="gpt2", token=token)
def generate_text(prompt: str) -> str:
    """Generate a text completion for *prompt* with the module-level client.

    Args:
        prompt: The text to be completed by the model.

    Returns:
        The generated continuation as plain text (at most 140 new tokens).
    """
    # text_generation returns the completion directly as a string, so no
    # response unpacking is needed.
    return client.text_generation(prompt, max_new_tokens=140)
# Build the Gradio UI: a two-line prompt textbox in, generated text out.
iface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(lines=2, placeholder="Enter your prompt here..."),
    outputs="text",
    title="GPT-2 Text Generator",
    description="This Gradio app generates text using the GPT-2 model. Enter a prompt and see how GPT-2 completes it.",
)

# Launch the web server (blocking call; on Spaces this runs at import time).
iface.launch()