# app.py
from diffusers import StableDiffusionPipeline
import torch
import gradio as gr
from huggingface_hub import hf_hub_download # Keep this import
import os
# --- Model configuration ---
# Hugging Face Hub repository and the specific merged checkpoint file to load.
MODEL_REPO_ID = "uhralk/Indigo_Furry_mix"
CHECKPOINT_FILENAME = "indigo_Furrymix_v120_hybrid_fin_fp16.safetensors"

# Pick the compute device; half precision is only used when a GPU is available.
if torch.cuda.is_available():
    device, dtype = "cuda", torch.float16
else:
    device, dtype = "cpu", torch.float32
# --- NEW LOADING LOGIC ---
# 1. Download the specific .safetensors file to the Hugging Face cache.
try:
    print(f"Downloading checkpoint: {CHECKPOINT_FILENAME} from {MODEL_REPO_ID}")
    checkpoint_local_path = hf_hub_download(repo_id=MODEL_REPO_ID, filename=CHECKPOINT_FILENAME)
    print(f"Checkpoint downloaded to: {checkpoint_local_path}")
except Exception as e:
    print(f"Failed to download checkpoint: {e}")
    # Critical failure if we can't even download the file.
    raise SystemExit(1)

# 2. Load the pipeline from the downloaded single file.
try:
    print(f"Attempting to load pipeline from single file: {checkpoint_local_path} on {device}")
    pipe = StableDiffusionPipeline.from_single_file(
        checkpoint_local_path,  # local path to the merged checkpoint
        torch_dtype=dtype,
        use_safetensors=True,
        # For a fully merged SD-1.5-style checkpoint, missing inference
        # components (VAE, text encoder, tokenizer, scheduler) are usually
        # inferred or fall back to Stable Diffusion 1.5 defaults. If loading
        # complains about missing components, pass them explicitly here.
    )
    pipe.to(device)
    print(f"Model loaded successfully from single file on {device}")
except Exception as e:
    print(f"Error loading model directly from single file on {device}: {e}")
    print("Attempting to load on CPU with float32 as a fallback (may be very slow)...")
    try:
        pipe = StableDiffusionPipeline.from_single_file(
            checkpoint_local_path,  # same local file path
            torch_dtype=torch.float32,  # half precision is unreliable on CPU
            use_safetensors=True,
        )
        pipe.to("cpu")
        # BUGFIX: keep the module-level device/dtype in sync with the fallback.
        # Without this, generate_image() would still construct a CUDA
        # torch.Generator and crash even though the pipe lives on the CPU.
        device = "cpu"
        dtype = torch.float32
        print("Model forced loaded on CPU.")
    except Exception as e_cpu:
        print(f"Critical error: Failed to load model even from single file on CPU: {e_cpu}")
        # Exit if the model cannot be loaded at all.
        raise SystemExit(1)
# 3. Define the Gradio inference function
def generate_image(prompt, negative_prompt, num_inference_steps, guidance_scale, seed):
    """Run one text-to-image generation; return (PIL image, error message).

    Exactly one of the two returned values is None: the image on success,
    the error string on failure (including an empty prompt).
    """
    if not prompt:
        return None, "Please enter a prompt."
    # A seed of -1 means "random": leave the generator unset so the
    # pipeline draws its own seed.
    if seed == -1:
        generator = None
    else:
        generator = torch.Generator(device=device).manual_seed(seed)
    try:
        result = pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            num_inference_steps=int(num_inference_steps),
            guidance_scale=float(guidance_scale),
            generator=generator,
        )
        return result.images[0], None
    except Exception as e:
        return None, f"Error during image generation: {e}"
# 4. Create the Gradio Interface.
# BUGFIX: each row in `examples` must supply one value per input component
# (5 here); the original rows had only 2 values, which Gradio rejects when
# building the Examples dataset. Rows are padded with the widget defaults.
iface = gr.Interface(
    fn=generate_image,
    inputs=[
        gr.Textbox(label="Prompt", placeholder="A furry creature with indigo fur, in a magical forest, whimsical art, highly detailed"),
        gr.Textbox(label="Negative Prompt (optional)", placeholder="blurry, ugly, deformed, low quality, bad anatomy"),
        gr.Slider(minimum=10, maximum=100, value=25, step=1, label="Inference Steps"),
        gr.Slider(minimum=1.0, maximum=20.0, value=7.5, step=0.5, label="Guidance Scale"),
        gr.Number(label="Seed (-1 for random)", value=-1, precision=0),
    ],
    outputs=[
        gr.Image(type="pil", label="Generated Image"),
        gr.Textbox(label="Status/Error"),
    ],
    title="Indigo Furry Mix - Text-to-Image Generator",
    description="Generate images of furry characters with the Indigo Furry Mix model.",
    examples=[
        ["An indigo furry warrior, wielding a glowing sword, intricate armor, epic fantasy art", "blurry, low quality", 25, 7.5, -1],
        ["A cute indigo furry wizard casting a spell, magical effects, cartoon style", "disfigured, ugly", 25, 7.5, -1],
    ],
)
# Launch the Gradio app only when this file is executed directly
# (not when imported as a module).
if __name__ == "__main__":
    iface.launch()