Nad54 committed on
Commit
9cb49df
·
verified ·
1 Parent(s): 0da1dec

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +53 -60
app.py CHANGED
@@ -1,12 +1,12 @@
1
- # app.py — InstantID SDXL + (optionnel) IP-Adapter Style (2D total)
2
- # Hugging Face Space – prêt à déployer
3
 
4
- # 0) Environnement AVANT TOUT IMPORT
5
  import os, sys
6
- os.environ["OMP_NUM_THREADS"] = "4" # safe pour libgomp
7
  os.environ.setdefault("HF_HUB_ENABLE_HF_TRANSFER", "1")
8
 
9
- # rendre importable ./instantid (où se trouve pipeline_stable_diffusion_xl_instantid_full.py)
10
  sys.path.insert(0, os.path.abspath("./instantid"))
11
 
12
  # 1) Imports
@@ -16,22 +16,27 @@ from PIL import Image, ImageOps, ImageDraw
16
  from huggingface_hub import hf_hub_download
17
  from diffusers.models import ControlNetModel
18
 
 
 
 
19
  DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
20
  DTYPE = torch.float16 if DEVICE == "cuda" else torch.float32
21
 
22
- # 2) Chemins & Hub
23
- ASSETS_REPO = "InstantX/InstantID" # poids InstantID: IdentityNet + ip-adapter instantid
24
  CHECKPOINTS_DIR = "./checkpoints"
25
  CN_LOCAL_DIR = os.path.join(CHECKPOINTS_DIR, "ControlNetModel")
26
  IP_ADAPTER_LOCAL = os.path.join(CHECKPOINTS_DIR, "ip-adapter.bin")
27
 
28
- # IP-Adapter Style (SDXL) — pour forcer le rendu 2D
29
  IP_STYLE_REPO = "h94/IP-Adapter"
30
  IP_STYLE_SUBFOLDER = "sdxl_models"
31
  IP_STYLE_WEIGHT = "ip-adapter_sdxl.bin"
32
- IP_STYLE_LOCAL = os.path.join(CHECKPOINTS_DIR, "ip-adapter_sdxl.bin")
33
 
34
- # 3) Téléchargements sûrs (détecte fichiers vides)
 
 
 
 
35
  def safe_download(repo, filename, local_dir, min_bytes, label, subfolder=None):
36
  os.makedirs(local_dir, exist_ok=True)
37
  local_path = os.path.join(local_dir, os.path.basename(filename))
@@ -60,16 +65,11 @@ def ensure_assets_or_download():
60
  # IdentityNet (ControlNet) + ip-adapter (InstantID)
61
  safe_download(ASSETS_REPO, "ControlNetModel/config.json", CHECKPOINTS_DIR, 1_000, "IdentityNet config")
62
  safe_download(ASSETS_REPO, "ControlNetModel/diffusion_pytorch_model.safetensors", CHECKPOINTS_DIR, 100_000_000, "IdentityNet weights")
63
- safe_download(ASSETS_REPO, "ip-adapter.bin", CHECKPOINTS_DIR, 100_000_000, "IP-Adapter InstantID")
64
- # IP-Adapter Style (SDXL)
65
- p = safe_download(IP_STYLE_REPO, IP_STYLE_WEIGHT, CHECKPOINTS_DIR, 20_000_000, "IP-Adapter Style (SDXL)", subfolder=IP_STYLE_SUBFOLDER)
66
- try:
67
- if not os.path.exists(IP_STYLE_LOCAL):
68
- import shutil; shutil.copy2(p, IP_STYLE_LOCAL)
69
- except Exception as e:
70
- print(f"ℹ️ Copie locale IP-Adapter Style ignorée: {e}")
71
 
72
- # 4) Import dynamique de la pipeline InstantID SDXL (officielle)
73
  def import_pipeline_or_fail():
74
  candidates = [
75
  "./instantid/pipeline_stable_diffusion_xl_instantid_full.py",
@@ -77,14 +77,13 @@ def import_pipeline_or_fail():
77
  ]
78
  pipeline_file = next((p for p in candidates if os.path.exists(p)), None)
79
  if pipeline_file is None:
80
- raise RuntimeError("❌ Fichier pipeline manquant.\nPlace `pipeline_stable_diffusion_xl_instantid_full.py` dans ./instantid/")
81
  if os.path.getsize(pipeline_file) < 1024:
82
- raise RuntimeError("❌ Fichier pipeline trop petit (vide ?). Colle la version SDXL officielle.")
83
 
84
  spec = importlib.util.spec_from_file_location("instantid_pipeline", pipeline_file)
85
  mod = importlib.util.module_from_spec(spec)
86
  spec.loader.exec_module(mod)
87
- # Chercher la classe SDXL officielle
88
  for name, obj in vars(mod).items():
89
  if isinstance(obj, type) and "InstantID" in name and hasattr(obj, "from_pretrained"):
90
  print(f"✅ Pipeline trouvée : {name}")
@@ -92,7 +91,7 @@ def import_pipeline_or_fail():
92
  avail = [n for n, o in vars(mod).items() if isinstance(o, type)]
93
  raise RuntimeError("❌ Aucune classe pipeline InstantID trouvée. Classes dispo: " + ", ".join(avail))
94
 
95
- # 5) draw_kps local (remplace la dépendance draw_kps du repo)
96
  def draw_kps_local(img_pil, kps):
97
  w, h = img_pil.size
98
  out = Image.new("RGB", (w, h), "white")
@@ -102,30 +101,30 @@ def draw_kps_local(img_pil, kps):
102
  d.ellipse((x - r, y - r, x + r, y + r), fill="black")
103
  return out
104
 
105
- # 6) Chargement pipeline (ControlNet = objet unique)
106
  load_logs = []
107
  HAS_STYLE_ADAPTER = False
 
108
  try:
 
109
  SDXLInstantID = import_pipeline_or_fail()
110
  ensure_assets_or_download()
111
 
112
- BASE_MODEL = "stabilityai/stable-diffusion-xl-base-1.0"
113
- load_logs.append(f"Chargement base: {BASE_MODEL}")
114
-
115
  controlnet_identitynet = ControlNetModel.from_pretrained(CN_LOCAL_DIR, torch_dtype=DTYPE)
116
 
117
  pipe = SDXLInstantID.from_pretrained(
118
  BASE_MODEL,
119
- controlnet=controlnet_identitynet, # objet unique
120
  torch_dtype=DTYPE,
121
  safety_checker=None,
122
  feature_extractor=None,
123
  ).to(DEVICE)
124
 
125
- # Charger l’IP-Adapter InstantID (identité)
126
  pipe.load_ip_adapter_instantid(IP_ADAPTER_LOCAL)
127
 
128
- # Charger (optionnel) un IP-Adapter Style SDXL, nommé "style"
129
  try:
130
  pipe.load_ip_adapter(
131
  IP_STYLE_REPO,
@@ -137,8 +136,8 @@ try:
137
  HAS_STYLE_ADAPTER = True
138
  except Exception as e:
139
  load_logs.append(f"ℹ️ IP-Adapter Style non chargé: {e}")
140
- HAS_STYLE_ADAPTER = False
141
 
 
142
  if DEVICE == "cuda":
143
  if hasattr(pipe, "image_proj_model"): pipe.image_proj_model.to("cuda")
144
  if hasattr(pipe, "unet"): pipe.unet.to("cuda")
@@ -151,8 +150,7 @@ except Exception:
151
  if pipe is None:
152
  raise RuntimeError("Échec de chargement du pipeline.\n" + "\n".join(load_logs))
153
 
154
- # 7) InsightFace (robuste : antelopev2 → buffalo_l)
155
- from insightface.app import FaceAnalysis
156
  def load_face_analyser():
157
  errors = []
158
  for name in ("antelopev2", "buffalo_l"):
@@ -168,7 +166,6 @@ def load_face_analyser():
168
 
169
  fa = load_face_analyser()
170
 
171
- # — util pour extraire embedding visage + landmarks (kps) depuis la photo
172
  def extract_face_embed_and_kps(pil_img):
173
  import numpy as np, cv2
174
  img_cv2 = cv2.cvtColor(np.array(pil_img.convert("RGB")), cv2.COLOR_RGB2BGR)
@@ -176,15 +173,11 @@ def extract_face_embed_and_kps(pil_img):
176
  if not faces:
177
  raise ValueError("Aucun visage détecté dans la photo.")
178
  face = faces[-1]
179
- face_emb = face["embedding"] # <— Embedding InsightFace attendu par la pipeline SDXL officielle
180
  kps_img = draw_kps_local(pil_img, face["kps"])
181
- # Convertir en torch tensor si besoin (la pipeline accepte souvent ndarray directement)
182
- if isinstance(face_emb, (list, tuple)):
183
- import numpy as np
184
- face_emb = np.array(face_emb)
185
  return face_emb, kps_img
186
 
187
- # 8) Génération (Option A : on passe l’embedding InsightFace -> image_embeds)
188
  def generate(face_image, style_image, prompt, negative_prompt,
189
  identity_strength, adapter_strength, style_strength,
190
  steps, cfg, width, height, seed):
@@ -194,15 +187,15 @@ def generate(face_image, style_image, prompt, negative_prompt,
194
 
195
  gen = None if seed is None or int(seed) < 0 else torch.Generator(device=DEVICE).manual_seed(int(seed))
196
 
197
- # Préparer visage carré (512) pour détection consistante
198
  face = ImageOps.exif_transpose(face_image).convert("RGB")
199
  ms = min(face.size); x = (face.width - ms) // 2; y = (face.height - ms) // 2
200
  face_sq = face.crop((x, y, x + ms, y + ms)).resize((512, 512), Image.Resampling.LANCZOS)
201
 
202
- # Embedding InsightFace + landmarks (kps)
203
  face_emb, kps_img = extract_face_embed_and_kps(face_sq)
204
 
205
- # Régler l’échelle des IP-Adapters (identité & style)
206
  try:
207
  if HAS_STYLE_ADAPTER and style_image is not None:
208
  pipe.set_ip_adapter_scale({"instantid": float(adapter_strength), "style": float(style_strength)})
@@ -211,7 +204,7 @@ def generate(face_image, style_image, prompt, negative_prompt,
211
  except Exception as e:
212
  print(f"ℹ️ set_ip_adapter_scale ignoré: {e}")
213
 
214
- # Compat multi-ControlNet
215
  cn = getattr(pipe, "controlnet", None)
216
  if isinstance(cn, (list, tuple)):
217
  n_cn = len(cn)
@@ -223,12 +216,12 @@ def generate(face_image, style_image, prompt, negative_prompt,
223
  scale_val = float(identity_strength)
224
  scale_arg = [scale_val] * n_cn if n_cn > 1 else ([scale_val] if isinstance(cn, (list, tuple)) else scale_val)
225
 
226
- # Préparer kwargs (NOTE: image_embeds = face_emb)
227
  gen_kwargs = dict(
228
- prompt=prompt.strip(),
229
  negative_prompt=(negative_prompt or "").strip(),
230
- image=image_arg, # IdentityNet (landmarks)
231
- image_embeds=face_emb, # <— embedding InsightFace
232
  controlnet_conditioning_scale=scale_arg,
233
  num_inference_steps=int(steps),
234
  guidance_scale=float(cfg),
@@ -237,7 +230,7 @@ def generate(face_image, style_image, prompt, negative_prompt,
237
  generator=gen,
238
  )
239
 
240
- # Fournir l’image de style à l’IP-Adapter Style si dispo
241
  if HAS_STYLE_ADAPTER and style_image is not None:
242
  try:
243
  gen_kwargs["ip_adapter_image"] = ImageOps.exif_transpose(style_image).convert("RGB")
@@ -252,31 +245,31 @@ def generate(face_image, style_image, prompt, negative_prompt,
252
  except Exception:
253
  return None, "Erreur:\n" + traceback.format_exc(), "\n".join(load_logs)
254
 
255
- # 9) UI
256
  EX_PROMPT = (
257
- "one piece style, Eiichiro Oda style, anime portrait, upper body, pirate outfit, straw hat, "
258
- "clean lineart, cel shading, vibrant colors, expressive eyes, symmetrical face, looking at camera, "
259
- "dynamic composition, simple background"
260
  )
261
  EX_NEG = (
262
  "realistic, photo, photorealistic, skin pores, complex lighting, "
263
  "low quality, worst quality, lowres, blurry, noisy, watermark, text, logo, jpeg artifacts, "
264
- "bad anatomy, distorted eyes, deformed, multiple faces, nsfw"
265
  )
266
 
267
  with gr.Blocks(css="footer{display:none !important}") as demo:
268
- gr.Markdown("# 🏴‍☠️ One Piece – InstantID SDXL + IP-Adapter Style (2D total) — Option A officielle")
269
 
270
  with gr.Row():
271
  with gr.Column():
272
  face_image = gr.Image(type="pil", label="Photo visage (obligatoire)", height=260)
273
- style_image = gr.Image(type="pil", label="Image de style (IP-Adapter) — optionnel", height=260)
274
- prompt = gr.Textbox(label="Prompt", value=EX_PROMPT, lines=3)
275
- negative= gr.Textbox(label="Negative Prompt", value=EX_NEG, lines=3)
 
276
 
277
  with gr.Row():
278
  identity_strength = gr.Slider(0.2, 1.5, 0.95, 0.05, label="Fidélité visage (IdentityNet)")
279
- adapter_strength = gr.Slider(0.1, 1.5, 0.85, 0.05, label="Détails anime (InstantID IP-Adapter)")
280
 
281
  style_strength = gr.Slider(0.1, 1.5, 0.95, 0.05, label="Force style (IP-Adapter Style)")
282
 
@@ -304,6 +297,6 @@ with gr.Blocks(css="footer{display:none !important}") as demo:
304
  outputs=[out_image, err_box, log_box],
305
  )
306
 
307
- demo.queue()
308
  if __name__ == "__main__":
309
  demo.launch(server_name="0.0.0.0", server_port=7860, ssr_mode=False)
 
1
+ # app.py — InstantID SDXL (officiel) + IP-Adapter Style (optionnel, rendu 2D)
2
+ # Hugging Face Space ready
3
 
4
+ # 0) Environnement AVANT imports
5
  import os, sys
6
+ os.environ["OMP_NUM_THREADS"] = "4"
7
  os.environ.setdefault("HF_HUB_ENABLE_HF_TRANSFER", "1")
8
 
9
+ # rendre importable ./instantid (pipeline officielle à placer ici)
10
  sys.path.insert(0, os.path.abspath("./instantid"))
11
 
12
  # 1) Imports
 
16
  from huggingface_hub import hf_hub_download
17
  from diffusers.models import ControlNetModel
18
 
19
+ # InsightFace
20
+ from insightface.app import FaceAnalysis
21
+
22
  DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
23
  DTYPE = torch.float16 if DEVICE == "cuda" else torch.float32
24
 
25
+ # 2) Chemins & Hub (poids InstantID officiels + IP-Adapter Style SDXL)
26
+ ASSETS_REPO = "InstantX/InstantID"
27
  CHECKPOINTS_DIR = "./checkpoints"
28
  CN_LOCAL_DIR = os.path.join(CHECKPOINTS_DIR, "ControlNetModel")
29
  IP_ADAPTER_LOCAL = os.path.join(CHECKPOINTS_DIR, "ip-adapter.bin")
30
 
 
31
  IP_STYLE_REPO = "h94/IP-Adapter"
32
  IP_STYLE_SUBFOLDER = "sdxl_models"
33
  IP_STYLE_WEIGHT = "ip-adapter_sdxl.bin"
 
34
 
35
+ # Modèle de base : SDXL (tu peux remplacer par un checkpoint stylé anime/one-piece-like)
36
+ BASE_MODEL = "stabilityai/stable-diffusion-xl-base-1.0"
37
+ # Exemple alternatif (anime) : "wangqixun/YamerMIX_v8"
38
+
39
+ # 3) Téléchargements sûrs
40
  def safe_download(repo, filename, local_dir, min_bytes, label, subfolder=None):
41
  os.makedirs(local_dir, exist_ok=True)
42
  local_path = os.path.join(local_dir, os.path.basename(filename))
 
65
  # IdentityNet (ControlNet) + ip-adapter (InstantID)
66
  safe_download(ASSETS_REPO, "ControlNetModel/config.json", CHECKPOINTS_DIR, 1_000, "IdentityNet config")
67
  safe_download(ASSETS_REPO, "ControlNetModel/diffusion_pytorch_model.safetensors", CHECKPOINTS_DIR, 100_000_000, "IdentityNet weights")
68
+ safe_download(ASSETS_REPO, "ip-adapter.bin", CHECKPOINTS_DIR, 100_000_000, "IP-Adapter (InstantID)")
69
+ # IP-Adapter Style (SDXL) — optionnel
70
+ safe_download(IP_STYLE_REPO, IP_STYLE_WEIGHT, CHECKPOINTS_DIR, 20_000_000, "IP-Adapter Style (SDXL)", subfolder=IP_STYLE_SUBFOLDER)
 
 
 
 
 
71
 
72
+ # 4) Import dynamique de la pipeline SDXL officielle
73
  def import_pipeline_or_fail():
74
  candidates = [
75
  "./instantid/pipeline_stable_diffusion_xl_instantid_full.py",
 
77
  ]
78
  pipeline_file = next((p for p in candidates if os.path.exists(p)), None)
79
  if pipeline_file is None:
80
+ raise RuntimeError("❌ Pipeline manquante. Place `pipeline_stable_diffusion_xl_instantid_full.py` dans ./instantid/")
81
  if os.path.getsize(pipeline_file) < 1024:
82
+ raise RuntimeError("❌ Pipeline trop petite (vide ?). Utilise la version SDXL officielle.")
83
 
84
  spec = importlib.util.spec_from_file_location("instantid_pipeline", pipeline_file)
85
  mod = importlib.util.module_from_spec(spec)
86
  spec.loader.exec_module(mod)
 
87
  for name, obj in vars(mod).items():
88
  if isinstance(obj, type) and "InstantID" in name and hasattr(obj, "from_pretrained"):
89
  print(f"✅ Pipeline trouvée : {name}")
 
91
  avail = [n for n, o in vars(mod).items() if isinstance(o, type)]
92
  raise RuntimeError("❌ Aucune classe pipeline InstantID trouvée. Classes dispo: " + ", ".join(avail))
93
 
94
+ # 5) util dessin landmarks (kps)
95
  def draw_kps_local(img_pil, kps):
96
  w, h = img_pil.size
97
  out = Image.new("RGB", (w, h), "white")
 
101
  d.ellipse((x - r, y - r, x + r, y + r), fill="black")
102
  return out
103
 
104
+ # 6) Chargement pipeline
105
  load_logs = []
106
  HAS_STYLE_ADAPTER = False
107
+
108
  try:
109
+ # a) pipeline
110
  SDXLInstantID = import_pipeline_or_fail()
111
  ensure_assets_or_download()
112
 
113
+ # b) IdentityNet (ControlNet)
 
 
114
  controlnet_identitynet = ControlNetModel.from_pretrained(CN_LOCAL_DIR, torch_dtype=DTYPE)
115
 
116
  pipe = SDXLInstantID.from_pretrained(
117
  BASE_MODEL,
118
+ controlnet=controlnet_identitynet, # objet unique
119
  torch_dtype=DTYPE,
120
  safety_checker=None,
121
  feature_extractor=None,
122
  ).to(DEVICE)
123
 
124
+ # c) IP-Adapter InstantID (identité)
125
  pipe.load_ip_adapter_instantid(IP_ADAPTER_LOCAL)
126
 
127
+ # d) IP-Adapter Style SDXL (optionnel), nommé "style"
128
  try:
129
  pipe.load_ip_adapter(
130
  IP_STYLE_REPO,
 
136
  HAS_STYLE_ADAPTER = True
137
  except Exception as e:
138
  load_logs.append(f"ℹ️ IP-Adapter Style non chargé: {e}")
 
139
 
140
+ # e) devices
141
  if DEVICE == "cuda":
142
  if hasattr(pipe, "image_proj_model"): pipe.image_proj_model.to("cuda")
143
  if hasattr(pipe, "unet"): pipe.unet.to("cuda")
 
150
  if pipe is None:
151
  raise RuntimeError("Échec de chargement du pipeline.\n" + "\n".join(load_logs))
152
 
153
+ # 7) InsightFace (comme le Space officiel : CPUExecutionProvider)
 
154
  def load_face_analyser():
155
  errors = []
156
  for name in ("antelopev2", "buffalo_l"):
 
166
 
167
  fa = load_face_analyser()
168
 
 
169
  def extract_face_embed_and_kps(pil_img):
170
  import numpy as np, cv2
171
  img_cv2 = cv2.cvtColor(np.array(pil_img.convert("RGB")), cv2.COLOR_RGB2BGR)
 
173
  if not faces:
174
  raise ValueError("Aucun visage détecté dans la photo.")
175
  face = faces[-1]
176
+ face_emb = face["embedding"] # attendu par la pipeline officielle
177
  kps_img = draw_kps_local(pil_img, face["kps"])
 
 
 
 
178
  return face_emb, kps_img
179
 
180
+ # 8) Génération
181
  def generate(face_image, style_image, prompt, negative_prompt,
182
  identity_strength, adapter_strength, style_strength,
183
  steps, cfg, width, height, seed):
 
187
 
188
  gen = None if seed is None or int(seed) < 0 else torch.Generator(device=DEVICE).manual_seed(int(seed))
189
 
190
+ # visage carré 512 pour détection stable
191
  face = ImageOps.exif_transpose(face_image).convert("RGB")
192
  ms = min(face.size); x = (face.width - ms) // 2; y = (face.height - ms) // 2
193
  face_sq = face.crop((x, y, x + ms, y + ms)).resize((512, 512), Image.Resampling.LANCZOS)
194
 
195
+ # InsightFace : embedding + landmarks
196
  face_emb, kps_img = extract_face_embed_and_kps(face_sq)
197
 
198
+ # IP-Adapter scales
199
  try:
200
  if HAS_STYLE_ADAPTER and style_image is not None:
201
  pipe.set_ip_adapter_scale({"instantid": float(adapter_strength), "style": float(style_strength)})
 
204
  except Exception as e:
205
  print(f"ℹ️ set_ip_adapter_scale ignoré: {e}")
206
 
207
+ # compat multi-ControlNet (même si on n’en a qu’un)
208
  cn = getattr(pipe, "controlnet", None)
209
  if isinstance(cn, (list, tuple)):
210
  n_cn = len(cn)
 
216
  scale_val = float(identity_strength)
217
  scale_arg = [scale_val] * n_cn if n_cn > 1 else ([scale_val] if isinstance(cn, (list, tuple)) else scale_val)
218
 
219
+ # kwargs d’inférence
220
  gen_kwargs = dict(
221
+ prompt=(prompt or "").strip(),
222
  negative_prompt=(negative_prompt or "").strip(),
223
+ image=image_arg, # IdentityNet (landmarks)
224
+ image_embeds=face_emb, # embedding InsightFace
225
  controlnet_conditioning_scale=scale_arg,
226
  num_inference_steps=int(steps),
227
  guidance_scale=float(cfg),
 
230
  generator=gen,
231
  )
232
 
233
+ # passer l’image de style à l’IP-Adapter Style (si dispo + fournie)
234
  if HAS_STYLE_ADAPTER and style_image is not None:
235
  try:
236
  gen_kwargs["ip_adapter_image"] = ImageOps.exif_transpose(style_image).convert("RGB")
 
245
  except Exception:
246
  return None, "Erreur:\n" + traceback.format_exc(), "\n".join(load_logs)
247
 
248
+ # 9) UI (One Piece-friendly par défaut, mais neutre)
249
  EX_PROMPT = (
250
+ "one piece style, Eiichiro Oda style, anime portrait, upper body, pirate outfit, "
251
+ "clean lineart, cel shading, vibrant colors, expressive eyes, dynamic composition, simple background"
 
252
  )
253
  EX_NEG = (
254
  "realistic, photo, photorealistic, skin pores, complex lighting, "
255
  "low quality, worst quality, lowres, blurry, noisy, watermark, text, logo, jpeg artifacts, "
256
+ "bad anatomy, deformed, multiple faces, nsfw"
257
  )
258
 
259
  with gr.Blocks(css="footer{display:none !important}") as demo:
260
+ gr.Markdown("# 🏴‍☠️ InstantID SDXL + IP-Adapter Style (2D) — visage perso One Piece")
261
 
262
  with gr.Row():
263
  with gr.Column():
264
  face_image = gr.Image(type="pil", label="Photo visage (obligatoire)", height=260)
265
+ style_image = gr.Image(type="pil", label="Image de style (optionnel)", height=260,
266
+ info="Poster/planche One Piece, ou visuel manga servant de style global")
267
+ prompt = gr.Textbox(label="Prompt", value=EX_PROMPT, lines=3)
268
+ negative = gr.Textbox(label="Negative Prompt", value=EX_NEG, lines=3)
269
 
270
  with gr.Row():
271
  identity_strength = gr.Slider(0.2, 1.5, 0.95, 0.05, label="Fidélité visage (IdentityNet)")
272
+ adapter_strength = gr.Slider(0.1, 1.5, 0.85, 0.05, label="Détails anime (InstantID)")
273
 
274
  style_strength = gr.Slider(0.1, 1.5, 0.95, 0.05, label="Force style (IP-Adapter Style)")
275
 
 
297
  outputs=[out_image, err_box, log_box],
298
  )
299
 
300
+ demo.queue(api_open=False)
301
  if __name__ == "__main__":
302
  demo.launch(server_name="0.0.0.0", server_port=7860, ssr_mode=False)