KibbleHF committed
Commit 3a999ae · verified · 1 Parent(s): 7d877b2

Update app.py

Files changed (1): app.py (+31, -29)
app.py CHANGED
@@ -1,42 +1,52 @@
  # app.py
- from diffusers import StableDiffusionPipeline
+ from diffusers import StableDiffusionPipeline, AutoPipelineForText2Image
  import torch
  import gradio as gr
  import os
 
- # 1. Define the base model and the checkpoint model
- BASE_MODEL = "runwayml/stable-diffusion-v1-5"
- CHECKPOINT_MODEL = "uhralk/Indigo_Furry_mix"
- CHECKPOINT_FILENAME = "indigo_Furrymix_v120_hybrid_fin_fp16.safetensors" # Using the recommended filename
+ # Define the model ID for the specific checkpoint
+ # This is the full ID of the repository where the safetensors file lives
+ MODEL_ID = "uhralk/Indigo_Furry_mix"
+ # This is the exact filename of the model checkpoint you want to load
+ # Make sure this matches the filename on the Hugging Face Hub EXACTLY.
+ CHECKPOINT_FILENAME = "indigo_Furrymix_v120_hybrid_fin_fp16.safetensors"
 
- # 2. Load the model
+ # Determine the device (GPU or CPU)
  device = "cuda" if torch.cuda.is_available() else "cpu"
+ # Use float16 for GPU to save VRAM and speed up, float32 for CPU or if fp16 causes issues
  dtype = torch.float16 if device == "cuda" else torch.float32
 
+ # Load the model
  try:
-     pipe = StableDiffusionPipeline.from_pretrained(
-         BASE_MODEL,
+     # Attempt to load the model directly as a full pipeline from the checkpoint
+     # This assumes the checkpoint is a full model merge, not just LoRA weights
+     pipe = AutoPipelineForText2Image.from_pretrained(
+         MODEL_ID,
          torch_dtype=dtype,
-         use_safetensors=True
+         use_safetensors=True,
+         # Specify the exact filename within the repository
+         # This tells diffusers to look for this specific file as the main model weights
+         # without needing an external base model or explicit LoRA loading.
+         model_file=CHECKPOINT_FILENAME
      )
-     pipe.load_lora_weights(CHECKPOINT_MODEL, weight_name=CHECKPOINT_FILENAME, subfolder="")
      pipe.to(device)
-     print(f"Model loaded successfully on {device}")
+     print(f"Model loaded successfully on {device} using direct checkpoint loading.")
+ 
  except Exception as e:
-     print(f"Error loading model on {device}: {e}")
-     # Fallback to CPU if GPU loading fails or if no GPU
+     print(f"Error loading model directly on {device}: {e}")
+     print("Attempting to load on CPU with float32 as a fallback (may be very slow)...")
      try:
-         pipe = StableDiffusionPipeline.from_pretrained(
-             BASE_MODEL,
+         pipe = AutoPipelineForText2Image.from_pretrained(
+             MODEL_ID,
              torch_dtype=torch.float32,
-             use_safetensors=True
+             use_safetensors=True,
+             model_file=CHECKPOINT_FILENAME
          )
-         pipe.load_lora_weights(CHECKPOINT_MODEL, weight_name=CHECKPOINT_FILENAME, subfolder="")
          pipe.to("cpu")
-         print("Model forced loaded on CPU due to previous error or no GPU.")
+         print("Model forced loaded on CPU.")
      except Exception as e_cpu:
          print(f"Critical error: Failed to load model even on CPU: {e_cpu}")
-         exit()
+         exit()  # Exit if model cannot be loaded at all
 
 
  # 3. Define the Gradio inference function
@@ -44,6 +54,7 @@ def generate_image(prompt, negative_prompt, num_inference_steps, guidance_scale,
      if not prompt:
          return None, "Please enter a prompt."
 
+     # Set up random seed if not -1
      generator = torch.Generator(device=device).manual_seed(seed) if seed != -1 else None
 
      try:
@@ -72,13 +83,4 @@ iface = gr.Interface(
          gr.Image(type="pil", label="Generated Image"),
          gr.Textbox(label="Status/Error")
      ],
-     title="Indigo Furry Mix - Text-to-Image Generator",
-     description="Generate images of furry characters with the Indigo Furry Mix model.",
-     examples=[
-         ["An indigo furry warrior, wielding a glowing sword, intricate armor, epic fantasy art", "blurry, low quality"],
-         ["A cute indigo furry wizard casting a spell, magical effects, cartoon style", "disfigured, ugly"],
-     ]
- )
- 
- if __name__ == "__main__":
-     iface.launch()
+     title="Indigo Fur