|
|
|
|
|
from diffusers import StableDiffusionPipeline |
|
|
import torch |
|
|
import gradio as gr |
|
|
from huggingface_hub import hf_hub_download |
|
|
import os |
|
|
|
|
|
|
|
|
# Hugging Face Hub repository that hosts the model weights.
MODEL_REPO_ID = "uhralk/Indigo_Furry_mix"


# Single-file .safetensors checkpoint inside that repository.
CHECKPOINT_FILENAME = "indigo_Furrymix_v120_hybrid_fin_fp16.safetensors"




# Prefer GPU inference with half precision when CUDA is available;
# otherwise fall back to CPU with full float32 precision.
device = "cuda" if torch.cuda.is_available() else "cpu"


dtype = torch.float16 if device == "cuda" else torch.float32
|
|
|
|
|
|
|
|
|
|
|
# Fetch the checkpoint from the Hugging Face Hub before building the
# pipeline; abort the whole script with a non-zero status if the file
# cannot be downloaded.
try:
    print(f"Downloading checkpoint: {CHECKPOINT_FILENAME} from {MODEL_REPO_ID}")
    checkpoint_local_path = hf_hub_download(repo_id=MODEL_REPO_ID, filename=CHECKPOINT_FILENAME)
    print(f"Checkpoint downloaded to: {checkpoint_local_path}")
except Exception as e:
    print(f"Failed to download checkpoint: {e}")
    # `exit()` is injected by the `site` module and is not guaranteed to
    # exist (e.g. under `python -S` or embedded interpreters); raising
    # SystemExit with code 1 is the reliable way to stop and signal failure.
    raise SystemExit(1)
|
|
|
|
|
|
|
|
# Build the Stable Diffusion pipeline directly from the single
# .safetensors checkpoint. If loading on the preferred device fails
# (e.g. CUDA OOM or a half-precision issue), fall back to CPU/float32
# and keep the module-level `device`/`dtype` in sync so downstream code
# (the torch.Generator in generate_image) targets the same device as
# the pipeline instead of crashing on a CPU pipe with a CUDA generator.
try:
    print(f"Attempting to load pipeline from single file: {checkpoint_local_path} on {device}")
    pipe = StableDiffusionPipeline.from_single_file(
        checkpoint_local_path,
        torch_dtype=dtype,
        use_safetensors=True,
    )
    pipe.to(device)
    print(f"Model loaded successfully from single file on {device}")
except Exception as e:
    print(f"Error loading model directly from single file on {device}: {e}")
    print("Attempting to load on CPU with float32 as a fallback (may be very slow)...")
    try:
        pipe = StableDiffusionPipeline.from_single_file(
            checkpoint_local_path,
            torch_dtype=torch.float32,
            use_safetensors=True,
        )
        pipe.to("cpu")
        # Record the actual runtime device/dtype after the fallback.
        device = "cpu"
        dtype = torch.float32
        print("Model forced loaded on CPU.")
    except Exception as e_cpu:
        print(f"Critical error: Failed to load model even from single file on CPU: {e_cpu}")
        # Use SystemExit rather than the site-provided exit() helper so the
        # script fails with a proper non-zero exit status in any environment.
        raise SystemExit(1)
|
|
|
|
|
|
|
|
def generate_image(prompt, negative_prompt, num_inference_steps, guidance_scale, seed):
    """Run the loaded pipeline on a text prompt.

    Args:
        prompt: The positive text prompt; must be non-empty.
        negative_prompt: Optional text describing what to avoid.
        num_inference_steps: Number of denoising steps (coerced to int).
        guidance_scale: Classifier-free guidance strength (coerced to float).
        seed: RNG seed; -1 means "no fixed seed" (non-deterministic output).

    Returns:
        A (PIL.Image, None) tuple on success, or (None, error_message)
        when the prompt is missing or generation fails.
    """
    # Reject empty *and* whitespace-only prompts up front.
    if not prompt or not prompt.strip():
        return None, "Please enter a prompt."

    # gr.Number may deliver the seed as a float; torch.Generator.manual_seed
    # requires an int, so normalize before seeding.
    seed = int(seed)
    generator = torch.Generator(device=device).manual_seed(seed) if seed != -1 else None

    try:
        image = pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            num_inference_steps=int(num_inference_steps),
            guidance_scale=float(guidance_scale),
            generator=generator,
        ).images[0]
        return image, None
    except Exception as e:
        return None, f"Error during image generation: {e}"
|
|
|
|
|
|
|
|
# Gradio UI wiring. Note: every example row must supply one value per input
# component (5 here) — gr.Interface raises at startup if the row lengths do
# not match the inputs list, so the example rows include steps, guidance
# scale, and seed in addition to the two prompts.
iface = gr.Interface(
    fn=generate_image,
    inputs=[
        gr.Textbox(label="Prompt", placeholder="A furry creature with indigo fur, in a magical forest, whimsical art, highly detailed"),
        gr.Textbox(label="Negative Prompt (optional)", placeholder="blurry, ugly, deformed, low quality, bad anatomy"),
        gr.Slider(minimum=10, maximum=100, value=25, step=1, label="Inference Steps"),
        gr.Slider(minimum=1.0, maximum=20.0, value=7.5, step=0.5, label="Guidance Scale"),
        gr.Number(label="Seed (-1 for random)", value=-1, precision=0),
    ],
    outputs=[
        gr.Image(type="pil", label="Generated Image"),
        gr.Textbox(label="Status/Error"),
    ],
    title="Indigo Furry Mix - Text-to-Image Generator",
    description="Generate images of furry characters with the Indigo Furry Mix model.",
    examples=[
        ["An indigo furry warrior, wielding a glowing sword, intricate armor, epic fantasy art", "blurry, low quality", 25, 7.5, -1],
        ["A cute indigo furry wizard casting a spell, magical effects, cartoon style", "disfigured, ugly", 25, 7.5, -1],
    ],
)
|
|
|
|
|
# Launch the Gradio web server only when run as a script (not on import).
if __name__ == "__main__":


    iface.launch()