import os

import gradio as gr
import torch
from diffusers import StableDiffusionPipeline
| |
|
| | |
# Path to a local Stable Diffusion 1.5 checkpoint (single .safetensors file).
# Overridable via SD_MODEL_PATH so the script is not tied to one machine's
# filesystem layout; the default preserves the original location.
model_path = os.environ.get(
    "SD_MODEL_PATH",
    "/Users/arthurdufour/Documents/ComfyUI/models/checkpoints/v1-5-pruned-emaonly.safetensors",
)

# Load the pipeline straight from the checkpoint file (no diffusers repo layout).
# float32 is used here — presumably for numeric stability on MPS; confirm before
# switching to float16 to halve memory.
pipeline = StableDiffusionPipeline.from_single_file(model_path, torch_dtype=torch.float32)

# Prefer the Apple-silicon GPU backend (Metal Performance Shaders) when available.
device = "mps" if torch.backends.mps.is_available() else "cpu"
pipeline.to(device)
| |
|
def generate_image(positive_prompt, negative_prompt, steps, seed):
    """Run one text-to-image generation and return the resulting PIL image.

    Args:
        positive_prompt: Text describing what should appear in the image.
        negative_prompt: Text describing what to keep out of the image.
        steps: Number of denoising steps (cast to int).
        seed: RNG seed (cast to int) so results are reproducible.

    Returns:
        The first generated PIL image (512x512).

    Raises:
        gr.Error: If the pipeline fails; Gradio shows the message in the UI.
    """
    # Only the MPS backend has a cache to flush; calling torch.mps.empty_cache()
    # on a CPU-only machine raises instead of being a no-op.
    if device == "mps":
        torch.mps.empty_cache()

    # Dedicated generator instead of torch.manual_seed(), so the process-wide
    # RNG state is not clobbered as a side effect.
    generator = torch.Generator().manual_seed(int(seed))

    try:
        # StableDiffusionPipeline.__call__ accepts negative_prompt directly;
        # introspecting __code__.co_varnames was fragile and unnecessary.
        result = pipeline(
            prompt=positive_prompt,
            negative_prompt=negative_prompt,
            num_inference_steps=int(steps),
            width=512,
            height=512,
            generator=generator,
        )
    except Exception as e:
        # Returning an error string would break the gr.Image output component;
        # gr.Error surfaces the message as a proper error toast in the UI.
        raise gr.Error(f"Erreur : {e}") from e

    return result.images[0]
| |
|
| | |
# Build the web UI: two prompt boxes, sampling controls, an image output,
# and a button wired to generate_image.
with gr.Blocks() as demo:
    gr.Markdown("## Génération d'images Stable Diffusion (MPS)")

    # Prompt inputs, side by side.
    with gr.Row():
        positive_box = gr.Textbox(label="Prompt Positif", value="a horse")
        negative_box = gr.Textbox(label="Prompt Négatif", value="text, watermark")

    # Sampling controls, side by side.
    with gr.Row():
        steps_control = gr.Slider(1, 50, 20, step=1, label="Nombre de Steps")
        seed_control = gr.Number(value=580029479038533, label="Seed")

    result_display = gr.Image(label="Image Générée")

    run_button = gr.Button("Générer")
    run_button.click(
        generate_image,
        inputs=[positive_box, negative_box, steps_control, seed_control],
        outputs=result_display,
    )
| |
|
| | |
# Start the Gradio server only when executed as a script, not on import.
if __name__ == "__main__":
    demo.launch()
| |
|