| | import tkinter as tk |
| | from tkinter import scrolledtext, messagebox |
| | import torch |
| | from diffusers import DiffusionPipeline |
| | import subprocess |
| | import sys |
| |
|
| | |
def install_packages():
    """Run pip in the current interpreter to install the runtime deps.

    Installs ``transformers``, ``accelerate`` and ``safetensors``, then
    reports the outcome to the user via a message box. Note: this blocks
    the Tk event loop until pip finishes.
    """
    pip_cmd = [sys.executable, "-m", "pip", "install",
               "transformers", "accelerate", "safetensors"]
    try:
        subprocess.check_call(pip_cmd)
    except subprocess.CalledProcessError:
        messagebox.showerror("Error", "Failed to install packages.")
    else:
        messagebox.showinfo("Success", "Packages installed successfully!")
|
| | |
def get_device():
    """Pick the torch device string for inference.

    Returns:
        str: ``"cuda"`` when any CUDA device is visible, else ``"cpu"``.

    Side effects:
        Prints which device was selected.
    """
    if torch.cuda.is_available():
        device_name = torch.cuda.get_device_name(0).lower()
        # Bug fix: the original if/else both returned "cuda" — the branch
        # only selects the log message, so the return is hoisted out.
        if "mx" in device_name:
            print(f"Using NVIDIA MX GPU: {device_name}")
        else:
            print(f"Using CUDA-enabled GPU: {device_name}")
        return "cuda"
    # NOTE(review): message mentions an iGPU, but torch gets plain "cpu" —
    # there is no separate integrated-GPU backend selected here.
    print("No CUDA device detected. Trying to run with iGPU (integrated GPU).")
    return "cpu"
| |
|
| | |
# Select the compute device once at import time ("cuda" or "cpu").
device = get_device()

# NOTE(review): this loads the SDXL *base* checkpoint a first time. The same
# checkpoint is loaded again below as `base`, doubling download/RAM cost —
# consider reusing one pipeline object; verify before changing, since `pipe`
# is compiled/offloaded differently from `base`.
pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", use_safetensors=True, variant="fp16")
# Compile the UNet for faster repeated inference (first call pays compile cost).
pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
# Stream submodules between CPU and GPU to reduce peak VRAM usage.
pipe.enable_model_cpu_offload()

# Second load of the SDXL base checkpoint (see NOTE above); this copy is
# moved wholesale onto `device` rather than offloaded.
base = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", variant="fp16", use_safetensors=True
)
base.to(device)

# Refiner stage; shares the text encoder and VAE with `base` to save memory.
# NOTE(review): fp16 variant weights are also used when `device` is "cpu" —
# confirm fp16 inference is supported on the CPU path.
refiner = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0",
    text_encoder_2=base.text_encoder_2,
    vae=base.vae,
    variant="fp16",
    use_safetensors=True,
)
refiner.to(device)

# Denoising schedule split: the base model runs the first 80% of the 40
# steps, the refiner finishes the remaining 20% (see run_inference).
n_steps = 40
high_noise_frac = 0.8
| |
|
def run_inference():
    """Read the prompts from the GUI, run the SDXL base+refiner pipelines,
    save the refined image, and report progress in the output box.

    Uses module globals: ``pipe``, ``base``, ``refiner``, ``n_steps``,
    ``high_noise_frac`` and the three ScrolledText widgets. Runs on the Tk
    main thread, so the window freezes for the duration of inference.
    """
    main_prompt = main_prompt_text.get("1.0", tk.END).strip()
    # Bug fix: the widget was previously read twice (once for the emptiness
    # test, once for the value); read it once and map "" -> None.
    negative_raw = negative_prompt_text.get("1.0", tk.END).strip()
    negative_prompt = negative_raw if negative_raw else None

    result = pipe(main_prompt, negative_prompt=negative_prompt)
    output_text.delete("1.0", tk.END)
    output_text.insert(tk.END, str(result))

    # Two-stage SDXL: base produces latents for the first `high_noise_frac`
    # of the schedule, the refiner completes the remainder.
    image = base(
        prompt=main_prompt,
        num_inference_steps=n_steps,
        denoising_end=high_noise_frac,
        output_type="latent",
    ).images
    image = refiner(
        prompt=main_prompt,
        num_inference_steps=n_steps,
        denoising_start=high_noise_frac,
        image=image,
    ).images[0]

    # Bug fix: the refined image was computed and then discarded; persist it
    # so the user actually receives the result.
    image.save("output.png")
    output_text.insert(tk.END, "Image generated successfully.")
| |
|
| | |
# --- GUI construction ------------------------------------------------------
# Widgets are stacked top-to-bottom in .pack() call order, so statement
# order here is load-bearing.
root = tk.Tk()
root.title("Inference GUI")

# Main prompt entry (read by run_inference).
tk.Label(root, text="Main Prompt:").pack()
main_prompt_text = scrolledtext.ScrolledText(root, wrap=tk.WORD, width=50, height=10)
main_prompt_text.pack()

# Optional negative prompt; run_inference treats an empty box as "none".
tk.Label(root, text="Negative Prompt (optional):").pack()
negative_prompt_text = scrolledtext.ScrolledText(root, wrap=tk.WORD, width=50, height=5)
negative_prompt_text.pack()

# Runs inference synchronously on the Tk main thread (UI blocks meanwhile).
run_button = tk.Button(root, text="Run Inference", command=run_inference)
run_button.pack()

# One-click installer for the pip dependencies (also blocks the UI).
install_button = tk.Button(root, text="Install Packages", command=install_packages)
install_button.pack()

# Status / result text area written by run_inference.
output_text = scrolledtext.ScrolledText(root, wrap=tk.WORD, width=50, height=10)
output_text.pack()

# Hand control to Tk; blocks until the window is closed.
root.mainloop()
| |
|