# app.py — SmolLM text-generation demo
# Update app.py from anycoder (author: Erik, commit d5986d2, verified)
import gradio as gr
from huggingface_hub import InferenceClient
# Shared Hugging Face Inference API client used by generate_text().
# No explicit token is passed; presumably ambient HF credentials apply — TODO confirm.
client = InferenceClient()
def generate_text(prompt):
    """Generate a completion for *prompt* via the HF Inference API.

    Parameters
    ----------
    prompt : str
        User-supplied prompt text from the Gradio textbox.

    Returns
    -------
    str
        The generated text, or a human-readable error message — Gradio
        callbacks should not raise, so failures are reported as strings.
    """
    # Guard against empty/whitespace-only input so we don't waste an API call.
    if not prompt or not prompt.strip():
        return "Please enter a prompt."
    try:
        # Remote call; sampling is enabled, so outputs vary between runs.
        return client.text_generation(
            prompt,
            model="HuggingFaceTB/SmolLM2-1.7B-Instruct",
            max_new_tokens=200,
            do_sample=True,
            temperature=0.7,
        )
    except Exception as e:
        # Broad catch is deliberate: surface any backend failure in the UI.
        return f"Error generating text: {str(e)}"
# Build the Gradio interface.
# NOTE(review): theme= is a gr.Blocks() argument, not a demo.launch() one,
# and launch() has no footer_links parameter — both were moved/removed here;
# the footer's model link is preserved as a Markdown line instead.
with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue")) as demo:
    gr.Markdown("# SmolLM Text Generation 🚀")
    gr.Markdown("Built with [anycoder](https://huggingface.co/spaces/akhaliq/anycoder)")
    gr.Markdown("[Model: SmolLM2-1.7B-Instruct](https://huggingface.co/HuggingFaceTB/SmolLM2-1.7B-Instruct)")

    with gr.Row():
        with gr.Column():
            input_text = gr.Textbox(
                label="Input Prompt",
                placeholder="Enter your prompt here...",
                lines=5,
            )
            generate_btn = gr.Button("Generate", variant="primary")
        with gr.Column():
            output_text = gr.Textbox(
                label="Generated Text",
                interactive=False,
                lines=10,
            )

    # Clickable example prompts.
    examples = [
        ["Explain quantum computing in simple terms"],
        ["Write a short story about a robot learning to love"],
        ["How do I make a perfect omelette?"],
    ]
    gr.Examples(
        examples=examples,
        inputs=input_text,
        outputs=output_text,
        fn=generate_text,
        # cache_examples=True would run the remote model for every example
        # at startup; keep caching off so launch is fast and offline-safe.
        cache_examples=False,
    )

    # NOTE(review): api_visibility is not a Button.click() kwarg (it would
    # raise TypeError at startup); removed.
    generate_btn.click(
        fn=generate_text,
        inputs=input_text,
        outputs=output_text,
    )

# Launch the app; the theme is configured on gr.Blocks above.
demo.launch()