# Text-to-image demo UI: scheduler choice, sampling-steps slider, default (light) theme.
import gradio as gr
import numpy as np
from PIL import Image
import time


# Per-scheduler brightness divisor used by the toy "simulation" in generate_image.
_SCHEDULER_DIVISORS = {
    "Euler a": 100,
    "DPM++ 2M Karras": 50,
    "Euler": 20,
}


def generate_image(text, scheduler, sampling_steps):
    """Produce a placeholder image for *text* and return the saved file path.

    A real deployment would invoke a text-to-image model here; this demo
    just draws random noise and scales its brightness so the chosen
    scheduler and step count visibly change the output.
    """
    noise = np.random.default_rng().random(size=(600, 600, 3))

    # Simulate scheduler/step influence: brighten the noise, clipped to [0, 1].
    divisor = _SCHEDULER_DIVISORS.get(scheduler)
    if divisor is not None:
        noise = np.clip(noise * (1 + sampling_steps / divisor), 0, 1)

    # Millisecond timestamp keeps successive outputs from overwriting each other.
    filename = f"image_{int(time.time() * 1000)}.webp"
    Image.fromarray((noise * 255).astype(np.uint8)).save(filename, format="WEBP")
    return filename


# Build the interface: inputs on the first row, action button and preview on the second.
with gr.Blocks() as demo:
    with gr.Row():
        text_input = gr.Textbox(label="Input Text")
        scheduler_dropdown = gr.Dropdown(
            choices=["Euler a", "DPM++ 2M Karras", "Euler"],
            value="Euler a",
            label="Scheduler",
        )
        sampling_steps_slider = gr.Slider(1, 100, value=50, step=1, label="Sampling Steps")
    with gr.Row():
        generate_button = gr.Button("Generate Image")
        output_image = gr.Image(label="Generated Image")

    # Wire the button to the generator callback.
    generate_button.click(
        fn=generate_image,
        inputs=[text_input, scheduler_dropdown, sampling_steps_slider],
        outputs=output_image,
    )
if __name__ == "__main__":
    # BUG FIX: the previous code first called
    # gr.load("models/stabilityai/stable-diffusion-3.5-large").launch(),
    # which spun up an unrelated remotely-loaded interface and blocked the
    # main thread, so the `demo` Blocks app built above never launched.
    # Launch the local demo directly; show_error surfaces callback
    # exceptions in the browser UI instead of failing silently.
    demo.launch(show_error=True)