Spaces:
Sleeping
Sleeping
File size: 819 Bytes
import gradio as gr
from huggingface_hub import InferenceClient
# Initializing the Hugging Face InferenceClient
import os
# Read the Hugging Face API token from the environment; os.getenv returns
# None when HF_TOKEN is unset, in which case the client falls back to
# unauthenticated (rate-limited) requests.
token = os.getenv("HF_TOKEN")
# Inference API client pinned to the public "gpt2" model.
client = InferenceClient(model="gpt2", token=token)
def generate_text(prompt):
    """Return a GPT-2 completion for *prompt*.

    Delegates to the module-level ``client`` (Hugging Face Inference API),
    capping the completion at 140 new tokens. The API returns plain text,
    so the result is passed through unchanged.
    """
    return client.text_generation(prompt, max_new_tokens=140)
# Assemble the Gradio front-end: one multi-line prompt box in,
# the generated completion out as plain text.
iface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(lines=2, placeholder="Enter your prompt here..."),
    outputs="text",
    title="GPT-2 Text Generator",
    description="This Gradio app generates text using the GPT-2 model. Enter a prompt and see how GPT-2 completes it.",
)

# Start the local web server hosting the app.
iface.launch()