# Hugging Face Space: GPT-2 text-generation demo (Gradio UI)
import gradio as gr
from transformers import pipeline

# Load the text-generation model once at import time; `pipe` is shared by
# every request handled by the Gradio app below.  Loading here (not per
# request) avoids re-downloading/re-initializing the model on each call.
pipe = pipeline("text-generation", model="gpt2")  # Replace with your Hugging Face model ID
def generate_response(prompt, max_length, temperature):
    """Generate text from ``prompt`` using the module-level ``pipe``.

    Parameters
    ----------
    prompt : str
        Seed text for the model.
    max_length : int | float
        Total token budget (prompt + continuation).  Gradio sliders can
        deliver floats, so the value is coerced to ``int`` before use —
        ``transformers`` requires an integer token count.
    temperature : float
        Sampling temperature; higher values produce more random output.

    Returns
    -------
    str
        The generated text (includes the original prompt).
    """
    output = pipe(
        prompt,
        max_length=int(max_length),        # slider may yield a float; generation needs an int
        temperature=float(temperature),
        do_sample=True,
        # GPT-2 defines no pad token; reusing EOS silences the
        # "Setting pad_token_id" warning during generation.
        pad_token_id=pipe.tokenizer.eos_token_id,
    )
    return output[0]["generated_text"]
# --- Gradio interface -------------------------------------------------
# NOTE: component-creation order inside these context managers defines the
# on-screen layout, so statement order here is significant.
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 📝 AI Text Generator")
    gr.Markdown("Enter your prompt and generate creative text using a Hugging Face model.")
    with gr.Row():
        # Left column: inputs and controls.
        with gr.Column(scale=2):
            prompt = gr.Textbox(
                label="Prompt",
                placeholder="Type your prompt here...",
                lines=4
            )
            # Generation controls; these values are passed positionally to
            # generate_response(prompt, max_length, temperature).
            max_length = gr.Slider(20, 500, value=200, step=10, label="Max Length")
            temperature = gr.Slider(0.1, 1.5, value=0.7, step=0.1, label="Temperature")
            generate_btn = gr.Button("Generate")
        # Right column: model output (wider than the input column).
        with gr.Column(scale=3):
            output = gr.Textbox(label="Generated Output", lines=10)
    # Wire the button to the generation function.
    generate_btn.click(
        fn=generate_response,
        inputs=[prompt, max_length, temperature],
        outputs=[output]
    )
# Start the Gradio server (blocks until shut down).
demo.launch()