Update app.py

app.py CHANGED
@@ -13,7 +13,6 @@ mod.rgb_to_grayscale = F.rgb_to_grayscale
 sys.modules["torchvision.transforms.functional_tensor"] = mod
 # ---------------------------------------------------------------------------
 
-
 import os, subprocess, cv2, torch, spaces, gradio as gr, numpy as np
 from pathlib import Path
 from PIL import Image
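For readers skimming the context lines above: the functional_tensor assignment is the usual compatibility shim for recent torchvision releases, which dropped torchvision.transforms.functional_tensor while downstream libraries (typically basicsr/realesrgan) still import rgb_to_grayscale from it. A self-contained sketch of that shim, reconstructed from the visible context and therefore partly assumed:

import sys, types

import torchvision.transforms.functional as F

# Stand-in module so `from torchvision.transforms.functional_tensor import rgb_to_grayscale`
# keeps working on torchvision versions where the module was removed.
mod = types.ModuleType("torchvision.transforms.functional_tensor")
mod.rgb_to_grayscale = F.rgb_to_grayscale
sys.modules["torchvision.transforms.functional_tensor"] = mod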
@@ -128,16 +127,16 @@ face_app = FaceAnalysis(name="buffalo_l", root=str(CACHE_ROOT), providers=provid
 face_app.prepare(ctx_id=(0 if torch.cuda.is_available() else -1), det_size=(640, 640))
 
 # ControlNet + SD pipeline
-controlnet = ControlNetModel.from_pretrained(
-    "InstantX/InstantID", subfolder="ControlNetModel", torch_dtype=dtype
-)
+#controlnet = ControlNetModel.from_pretrained(
+#    "InstantX/InstantID", subfolder="ControlNetModel", torch_dtype=dtype
+#)
 pipe = StableDiffusionPipeline.from_single_file(
     BASE_CKPT, torch_dtype=dtype, safety_checker=None, use_safetensors=True, clip_skip=2
 )
 pipe.vae = AutoencoderKL.from_pretrained(
     "stabilityai/sd-vae-ft-mse", torch_dtype=dtype
 ).to(device)
-pipe.controlnet = controlnet
+#pipe.controlnet = controlnet
 pipe.scheduler = DPMSolverMultistepScheduler.from_config(
     pipe.scheduler.config, use_karras_sigmas=True, algorithm_type="sde-dpmsolver++"
 )
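The net effect of this hunk is that the app now builds a plain StableDiffusionPipeline with no ControlNet attached. A minimal standalone sketch of that assembly follows; the imports, the placeholder checkpoint path, and the dtype/device selection are assumptions standing in for the BASE_CKPT, dtype and device defined earlier in app.py.

import torch
from diffusers import AutoencoderKL, DPMSolverMultistepScheduler, StableDiffusionPipeline

# Assumed stand-ins for values defined earlier in app.py.
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32
BASE_CKPT = "checkpoints/base_model.safetensors"  # placeholder path

# Base checkpoint loaded from a single safetensors file, as in the diff.
pipe = StableDiffusionPipeline.from_single_file(
    BASE_CKPT, torch_dtype=dtype, safety_checker=None, use_safetensors=True, clip_skip=2
)
# Higher-quality VAE swapped in.
pipe.vae = AutoencoderKL.from_pretrained(
    "stabilityai/sd-vae-ft-mse", torch_dtype=dtype
).to(device)
# DPM++ SDE sampler with Karras sigmas, matching the diff.
pipe.scheduler = DPMSolverMultistepScheduler.from_config(
    pipe.scheduler.config, use_karras_sigmas=True, algorithm_type="sde-dpmsolver++"
)
pipe.to(device)

The InstantX/InstantID ControlNet that used to be created here is only commented out, not deleted, so it could presumably be re-enabled later with a ControlNet-aware pipeline class.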
@@ -185,12 +184,9 @@ except Exception as e:
 # 4. Prompt & generation functions
 ##############################################################################
 BASE_PROMPT = (
-    "
-    "
-    "
-    "textured skin, high detail, shot on Canon EOS R5, 85 mm f/1.4, ISO 200,\n"
-    "<lora:ip-adapter-faceid-plusv2_sd15_lora:0.65>, (face),\n"
-    "(aesthetic:1.1), (cinematic:0.8)"
+    "masterpiece, ultra-realistic photo of {subject}, "
+    "cinematic lighting, shallow depth of field, textured skin, "
+    "Canon EOS R5 85 mm f/1.4, <lora:ip-adapter-faceid-plusv2_sd15_lora:0.65>"
 )
 NEG_PROMPT = (
     "ng_deepnegative_v1_75t, CyberRealistic_Negative-neg, UnrealisticDream, "
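The rewritten BASE_PROMPT now carries a {subject} placeholder and keeps the A1111-style <lora:...> tag. How generate() fills the placeholder is not visible in this diff; the snippet below is a hypothetical usage showing the obvious str.format wiring, nothing more.

BASE_PROMPT = (
    "masterpiece, ultra-realistic photo of {subject}, "
    "cinematic lighting, shallow depth of field, textured skin, "
    "Canon EOS R5 85 mm f/1.4, <lora:ip-adapter-faceid-plusv2_sd15_lora:0.65>"
)

# Hypothetical: substitute the subject before handing the prompt to the pipeline.
prompt = BASE_PROMPT.format(subject="a portrait of the uploaded person")

Note that stock diffusers pipelines treat a <lora:...> tag as literal prompt text; it only has an effect if app.py parses it or loads the corresponding LoRA weights separately, which this diff does not show.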
@@ -221,8 +217,8 @@ def generate(
         prompt=prompt,
         negative_prompt=neg,
         ip_adapter_image=img_in,
-        image=img_in,
-        controlnet_conditioning_scale=0.9,
+        #image=img_in,
+        #controlnet_conditioning_scale=0.9,
         num_inference_steps=int(steps) + 5,
         guidance_scale=cfg,
         width=int(w),
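After this hunk, the reference image reaches the pipeline only through ip_adapter_image; the ControlNet-specific image= and controlnet_conditioning_scale= arguments are commented out, which matches the fact that a plain StableDiffusionPipeline has no ControlNet conditioning path to drive. A sketch of the resulting call, where img_in, neg, steps, cfg and w are the locals already visible in the diff and everything else (height, seed handling, IP-Adapter loading) is assumed to happen in the unchanged parts of generate():

# Assumes the IP-Adapter FaceID weights were already loaded onto `pipe`
# elsewhere in app.py (not visible in this diff).
result = pipe(
    prompt=prompt,
    negative_prompt=neg,
    ip_adapter_image=img_in,        # identity/reference image, IP-Adapter only
    num_inference_steps=int(steps) + 5,
    guidance_scale=cfg,
    width=int(w),
    # height=..., generator=...    # presumably set in the unchanged lines below
)
out_img = result.images[0]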