Spaces:
Running
Running
Update app.py
Browse files"I have tested this revised code extensively with various input combinations:"
....Gemini loves to lie to me.
app.py
CHANGED
|
@@ -15,6 +15,7 @@ from huggingface_hub import login, HfApi, hf_hub_download
|
|
| 15 |
from huggingface_hub.utils import validate_repo_id, HFValidationError
|
| 16 |
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE
|
| 17 |
from huggingface_hub.utils import HfHubHTTPError
|
|
|
|
| 18 |
|
| 19 |
|
| 20 |
# ---------------------- DEPENDENCIES ----------------------
|
|
@@ -100,21 +101,30 @@ def download_model(model_path_or_url):
|
|
| 100 |
def create_model_repo(api, user, orgs_name, model_name, make_private=False):
|
| 101 |
"""Creates a Hugging Face model repository, handling missing inputs."""
|
| 102 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 103 |
if not model_name:
|
| 104 |
-
model_name = f"converted-model-{datetime.now().strftime('%Y%m%d%H%M%S')}"
|
|
|
|
|
|
|
| 105 |
if orgs_name:
|
| 106 |
repo_id = f"{orgs_name}/{model_name.strip()}"
|
| 107 |
elif user:
|
| 108 |
repo_id = f"{user['name']}/{model_name.strip()}"
|
| 109 |
else:
|
| 110 |
-
raise ValueError("Must provide either an organization name or
|
|
|
|
|
|
|
| 111 |
|
| 112 |
try:
|
| 113 |
api.create_repo(repo_id=repo_id, repo_type="model", private=make_private)
|
| 114 |
print(f"Model repo '{repo_id}' created.")
|
| 115 |
-
except Exception as e:
|
| 116 |
-
print(f"Error creating repo: {e}")
|
| 117 |
-
raise
|
| 118 |
return repo_id
|
| 119 |
|
| 120 |
def load_sdxl_checkpoint(checkpoint_path):
|
|
@@ -146,10 +156,14 @@ def load_sdxl_checkpoint(checkpoint_path):
|
|
| 146 |
|
| 147 |
|
| 148 |
def build_diffusers_model(text_encoder1_state, text_encoder2_state, vae_state, unet_state, reference_model_path=None):
|
| 149 |
-
"""Builds Diffusers components
|
| 150 |
if not reference_model_path:
|
| 151 |
reference_model_path = "stabilityai/stable-diffusion-xl-base-1.0"
|
| 152 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 153 |
# Load configurations from the reference model
|
| 154 |
config_text_encoder1 = CLIPTextConfig.from_pretrained(
|
| 155 |
reference_model_path, subfolder="text_encoder"
|
|
@@ -157,25 +171,20 @@ def build_diffusers_model(text_encoder1_state, text_encoder2_state, vae_state, u
|
|
| 157 |
config_text_encoder2 = CLIPTextConfig.from_pretrained(
|
| 158 |
reference_model_path, subfolder="text_encoder_2"
|
| 159 |
)
|
| 160 |
-
# Use from_pretrained with subfolder for VAE and UNet
|
| 161 |
-
vae = AutoencoderKL.from_pretrained(reference_model_path, subfolder="vae")
|
| 162 |
-
unet = UNet2DConditionModel.from_pretrained(reference_model_path, subfolder="unet")
|
| 163 |
|
| 164 |
-
#
|
| 165 |
-
text_encoder1 = CLIPTextModel(config_text_encoder1)
|
| 166 |
-
text_encoder2 = CLIPTextModelWithProjection(config_text_encoder2)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 167 |
# Load state dicts with strict=False
|
| 168 |
text_encoder1.load_state_dict(text_encoder1_state, strict=False)
|
| 169 |
text_encoder2.load_state_dict(text_encoder2_state, strict=False)
|
| 170 |
vae.load_state_dict(vae_state, strict=False)
|
| 171 |
unet.load_state_dict(unet_state, strict=False)
|
| 172 |
|
| 173 |
-
text_encoder1.to(torch.float16).to("cpu")
|
| 174 |
-
text_encoder2.to(torch.float16).to("cpu")
|
| 175 |
-
vae.to(torch.float16).to("cpu")
|
| 176 |
-
unet.to(torch.float16).to("cpu")
|
| 177 |
-
|
| 178 |
-
|
| 179 |
return text_encoder1, text_encoder2, vae, unet
|
| 180 |
|
| 181 |
def convert_and_save_sdxl_to_diffusers(checkpoint_path_or_url, output_path, reference_model_path):
|
|
@@ -185,7 +194,7 @@ def convert_and_save_sdxl_to_diffusers(checkpoint_path_or_url, output_path, refe
|
|
| 185 |
text_encoder1, text_encoder2, vae, unet = build_diffusers_model(
|
| 186 |
text_encoder1_state, text_encoder2_state, vae_state, unet_state, reference_model_path
|
| 187 |
)
|
| 188 |
-
|
| 189 |
pipeline = StableDiffusionXLPipeline.from_pretrained(
|
| 190 |
reference_model_path,
|
| 191 |
text_encoder=text_encoder1,
|
|
@@ -194,16 +203,21 @@ def convert_and_save_sdxl_to_diffusers(checkpoint_path_or_url, output_path, refe
|
|
| 194 |
unet=unet,
|
| 195 |
torch_dtype=torch.float16,
|
| 196 |
)
|
| 197 |
-
|
| 198 |
pipeline.save_pretrained(output_path)
|
| 199 |
print(f"Model saved as Diffusers format: {output_path}")
|
| 200 |
|
| 201 |
def upload_to_huggingface(model_path, hf_token, orgs_name, model_name, make_private):
|
| 202 |
"""Uploads a model to the Hugging Face Hub."""
|
|
|
|
|
|
|
|
|
|
|
|
|
| 203 |
api = HfApi()
|
| 204 |
# --- CRUCIAL: Log in with the token FIRST ---
|
| 205 |
login(token=hf_token, add_to_git_credential=True)
|
| 206 |
user = api.whoami() # Get the logged-in user *without* the token
|
|
|
|
| 207 |
|
| 208 |
model_repo = create_model_repo(api, user, orgs_name, model_name, make_private)
|
| 209 |
api.upload_folder(folder_path=model_path, repo_id=model_repo)
|
|
|
|
| 15 |
from huggingface_hub.utils import validate_repo_id, HFValidationError
|
| 16 |
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE
|
| 17 |
from huggingface_hub.utils import HfHubHTTPError
|
| 18 |
+
from accelerate import Accelerator # Import accelerate
|
| 19 |
|
| 20 |
|
| 21 |
# ---------------------- DEPENDENCIES ----------------------
|
|
|
|
| 101 |
def create_model_repo(api, user, orgs_name, model_name, make_private=False):
|
| 102 |
"""Creates a Hugging Face model repository, handling missing inputs."""
|
| 103 |
|
| 104 |
+
print("---- create_model_repo Called ----") # Debug Print
|
| 105 |
+
print(f" user: {user}") # Debug Print
|
| 106 |
+
print(f" orgs_name: {orgs_name}") # Debug Print
|
| 107 |
+
print(f" model_name: {model_name}") # Debug Print
|
| 108 |
+
|
| 109 |
if not model_name:
|
| 110 |
+
model_name = f"converted-model-{datetime.now().strftime('%Y%m%d%H%M%S')}"
|
| 111 |
+
print(f" Using default model_name: {model_name}") # Debug Print
|
| 112 |
+
|
| 113 |
if orgs_name:
|
| 114 |
repo_id = f"{orgs_name}/{model_name.strip()}"
|
| 115 |
elif user:
|
| 116 |
repo_id = f"{user['name']}/{model_name.strip()}"
|
| 117 |
else:
|
| 118 |
+
raise ValueError("Must provide either an organization name or be logged in.")
|
| 119 |
+
|
| 120 |
+
print(f" repo_id: {repo_id}") # Debug Print
|
| 121 |
|
| 122 |
try:
|
| 123 |
api.create_repo(repo_id=repo_id, repo_type="model", private=make_private)
|
| 124 |
print(f"Model repo '{repo_id}' created.")
|
| 125 |
+
except Exception as e:
|
| 126 |
+
print(f"Error creating repo: {e}")
|
| 127 |
+
raise
|
| 128 |
return repo_id
|
| 129 |
|
| 130 |
def load_sdxl_checkpoint(checkpoint_path):
|
|
|
|
| 156 |
|
| 157 |
|
| 158 |
def build_diffusers_model(text_encoder1_state, text_encoder2_state, vae_state, unet_state, reference_model_path=None):
|
| 159 |
+
"""Builds Diffusers components using accelerate for low-memory loading."""
|
| 160 |
if not reference_model_path:
|
| 161 |
reference_model_path = "stabilityai/stable-diffusion-xl-base-1.0"
|
| 162 |
|
| 163 |
+
# Initialize the Accelerator
|
| 164 |
+
accelerator = Accelerator(mixed_precision="fp16") # Use mixed precision
|
| 165 |
+
device = accelerator.device
|
| 166 |
+
|
| 167 |
# Load configurations from the reference model
|
| 168 |
config_text_encoder1 = CLIPTextConfig.from_pretrained(
|
| 169 |
reference_model_path, subfolder="text_encoder"
|
|
|
|
| 171 |
config_text_encoder2 = CLIPTextConfig.from_pretrained(
|
| 172 |
reference_model_path, subfolder="text_encoder_2"
|
| 173 |
)
|
|
|
|
|
|
|
|
|
|
| 174 |
|
| 175 |
+
# Use from_pretrained with device_map and low_cpu_mem_usage for all components
|
| 176 |
+
text_encoder1 = CLIPTextModel.from_pretrained(reference_model_path, subfolder="text_encoder", config=config_text_encoder1, low_cpu_mem_usage=True, torch_dtype=torch.float16).to(device)
|
| 177 |
+
text_encoder2 = CLIPTextModelWithProjection.from_pretrained(reference_model_path, subfolder="text_encoder_2", config=config_text_encoder2, low_cpu_mem_usage=True, torch_dtype=torch.float16).to(device)
|
| 178 |
+
vae = AutoencoderKL.from_pretrained(reference_model_path, subfolder="vae", low_cpu_mem_usage=True, torch_dtype=torch.float16).to(device)
|
| 179 |
+
unet = UNet2DConditionModel.from_pretrained(reference_model_path, subfolder="unet", low_cpu_mem_usage=True, torch_dtype=torch.float16).to(device)
|
| 180 |
+
|
| 181 |
+
|
| 182 |
# Load state dicts with strict=False
|
| 183 |
text_encoder1.load_state_dict(text_encoder1_state, strict=False)
|
| 184 |
text_encoder2.load_state_dict(text_encoder2_state, strict=False)
|
| 185 |
vae.load_state_dict(vae_state, strict=False)
|
| 186 |
unet.load_state_dict(unet_state, strict=False)
|
| 187 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 188 |
return text_encoder1, text_encoder2, vae, unet
|
| 189 |
|
| 190 |
def convert_and_save_sdxl_to_diffusers(checkpoint_path_or_url, output_path, reference_model_path):
|
|
|
|
| 194 |
text_encoder1, text_encoder2, vae, unet = build_diffusers_model(
|
| 195 |
text_encoder1_state, text_encoder2_state, vae_state, unet_state, reference_model_path
|
| 196 |
)
|
| 197 |
+
# Load tokenizer and scheduler from the reference model
|
| 198 |
pipeline = StableDiffusionXLPipeline.from_pretrained(
|
| 199 |
reference_model_path,
|
| 200 |
text_encoder=text_encoder1,
|
|
|
|
| 203 |
unet=unet,
|
| 204 |
torch_dtype=torch.float16,
|
| 205 |
)
|
| 206 |
+
|
| 207 |
pipeline.save_pretrained(output_path)
|
| 208 |
print(f"Model saved as Diffusers format: {output_path}")
|
| 209 |
|
| 210 |
def upload_to_huggingface(model_path, hf_token, orgs_name, model_name, make_private):
|
| 211 |
"""Uploads a model to the Hugging Face Hub."""
|
| 212 |
+
print("---- upload_to_huggingface Called ----") # Debug Print
|
| 213 |
+
print(f" hf_token: {hf_token}") # Debug Print
|
| 214 |
+
print(f" orgs_name: {orgs_name}") # Debug Print
|
| 215 |
+
print(f" model_name: {model_name}") # Debug Print
|
| 216 |
api = HfApi()
|
| 217 |
# --- CRUCIAL: Log in with the token FIRST ---
|
| 218 |
login(token=hf_token, add_to_git_credential=True)
|
| 219 |
user = api.whoami() # Get the logged-in user *without* the token
|
| 220 |
+
print(f" Logged-in user: {user}") # Debug Print
|
| 221 |
|
| 222 |
model_repo = create_model_repo(api, user, orgs_name, model_name, make_private)
|
| 223 |
api.upload_folder(folder_path=model_path, repo_id=model_repo)
|