import io

import gradio as gr
from huggingface_hub import InferenceClient
from PIL import Image

# Client for Hugging Face's hosted (free-tier) inference API.
# NOTE(review): this checkpoint is a ControlNet *conditioning* model; confirm
# the hosted endpoint actually serves the image-to-image task for it, or swap
# in a full img2img pipeline checkpoint.
client = InferenceClient("lllyasviel/sd-controlnet-scribble")


def generate_image(sketch, prompt):
    """Turn a user's sketch into a photo-styled image via the inference API.

    Parameters
    ----------
    sketch : PIL.Image.Image | dict | None
        Value from the Gradio Sketchpad. Gradio >= 4 delivers a dict with
        'background', 'layers' and 'composite' keys; older versions deliver
        the PIL image directly. ``None`` means nothing was drawn.
    prompt : str
        The user's description of the drawing; appended to a fixed
        photographic style prefix to guide generation.

    Returns
    -------
    PIL.Image.Image | None
        The generated image, or ``None`` when no usable sketch was provided.
    """
    if sketch is None:
        return None

    # Bug fix: the Sketchpad value may be an editor dict, not an image.
    # Unwrap the flattened drawing ('composite', falling back to
    # 'background') so the API receives an actual image object.
    if isinstance(sketch, dict):
        sketch = sketch.get("composite") or sketch.get("background")
        if sketch is None:
            return None

    # Fixed style prefix steers the model toward a realistic photo look.
    full_prompt = f"Professional photography, high quality, realistic, {prompt}"

    # Generate and return the image (PIL image from the inference client).
    return client.image_to_image(sketch, prompt=full_prompt)


# Build a stylish UI
with gr.Blocks() as demo:
    gr.Markdown("# 🎨 Sketch-to-Photo AI")
    gr.Markdown("Draw something simple on the left, type what it is, and watch it turn into a photo!")

    with gr.Row():
        with gr.Column():
            # The Sketchpad input
            input_sketch = gr.Sketchpad(type="pil", label="Draw Here")
            prompt_input = gr.Textbox(
                placeholder="What did you draw? (e.g., 'A sunset over mountains')",
                label="Description",
            )
            btn = gr.Button("Magic Generate ✨")

        with gr.Column():
            output_image = gr.Image(label="AI Result")

    btn.click(
        fn=generate_image,
        inputs=[input_sketch, prompt_input],
        outputs=output_image,
        show_progress=True,
    )

if __name__ == "__main__":
    demo.launch()