primerz committed
Commit f786a41 · verified · 1 Parent(s): 0fdeccf

Upload 6 files
Files changed (4):
  1. app.py +86 -93
  2. config.py +3 -19
  3. generator.py +238 -814
  4. models.py +101 -352
app.py CHANGED
@@ -1,15 +1,14 @@
  """
- Pixagram AI Pixel Art Generator - Gradio Interface (FIXED)
  """
  import spaces
  import gradio as gr
  import os
- import gc
- import torch

- from config import PRESETS, DEFAULT_PARAMS, TRIGGER_WORD, LORA_CHOICES
  from generator import RetroArtConverter


  # Initialize converter
  print("Initializing RetroArt Converter...")
  converter = RetroArtConverter()
@@ -32,7 +31,7 @@ def apply_preset(preset_name):
      )


- @spaces.GPU(duration=60)
  def process_image(
      image,
      prompt,
@@ -41,7 +40,6 @@ def process_image(
      guidance_scale,
      depth_control_scale,
      identity_control_scale,
-     lora_choice,
      lora_scale,
      identity_preservation,
      strength,
@@ -55,11 +53,6 @@ def process_image(
          return None, None

      try:
-         # ADDED: Clear GPU cache before generation
-         if torch.cuda.is_available():
-             torch.cuda.empty_cache()
-             gc.collect()
-
          # Generate retro art
          result = converter.generate_retro_art(
              input_image=image,
@@ -69,7 +62,6 @@ def process_image(
              guidance_scale=guidance_scale,
              depth_control_scale=depth_control_scale,
              identity_control_scale=identity_control_scale,
-             lora_choice=lora_choice,
              lora_scale=lora_scale,
              identity_preservation=identity_preservation,
              strength=strength,
@@ -97,31 +89,12 @@ def process_image(

          caption_text = "\n".join(captions) if captions else None

-         # ADDED: Clear cache after generation
-         if torch.cuda.is_available():
-             torch.cuda.empty_cache()
-             gc.collect()
-
          return result, caption_text

-     except torch.cuda.OutOfMemoryError as e:
-         # ADDED: Better OOM error handling
-         print(f"[ERROR] GPU Out of Memory: {e}")
-         if torch.cuda.is_available():
-             torch.cuda.empty_cache()
-             gc.collect()
-         raise gr.Error("GPU ran out of memory. Try: 1) Using a smaller image, 2) Reducing inference steps, or 3) Waiting and trying again.")
-
      except Exception as e:
          print(f"Error: {e}")
          import traceback
          traceback.print_exc()
-
-         # ADDED: Cleanup on error
-         if torch.cuda.is_available():
-             torch.cuda.empty_cache()
-             gc.collect()
-
          raise gr.Error(f"Generation failed: {str(e)}")
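The deleted branches above repeat the same cleanup idiom three times. A minimal consolidated sketch of that pattern (the `free_gpu` name is hypothetical, not part of this codebase):

```python
import gc
import torch

def free_gpu():
    """Release cached CUDA memory; mirrors the repeated cleanup in the old code."""
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    gc.collect()
```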
 
@@ -131,29 +104,10 @@ def get_model_status():
      if converter.models_loaded:
          status_text = "**[OK] Loaded Models:**\n"
          status_text += f"- Custom Checkpoint (Horizon): {'[OK] Loaded' if converter.models_loaded['custom_checkpoint'] else '[OK] Using SDXL base'}\n"
-
-         # Updated LORA status
-         lora_status = 'Disabled'
-         if converter.models_loaded['lora']:
-             loaded_count = sum(1 for loaded in converter.loaded_loras.values() if loaded)
-             if loaded_count > 0:
-                 lora_status = f"[OK] Loaded {loaded_count}/3"
-             else:
-                 lora_status = "[ERROR] All failed"
-         status_text += f"- LORAs (Retro, VGA, ...): {lora_status}\n"
-
-         status_text += f"- InstantID: {'[OK] Loaded' if converter.models_loaded['instantid'] else ' Disabled'}\n"
-
-         # Show depth detector type
-         depth_type = converter.models_loaded.get('depth_type', 'unknown')
-         depth_loaded = converter.models_loaded.get('depth_detector', False)
-         if depth_loaded and depth_type:
-             status_text += f"- Depth Detector: [OK] {depth_type.upper()} Loaded\n"
-         else:
-             status_text += f"- Depth Detector: Fallback (grayscale)\n"
-
-         status_text += f"- MediapipeFace: {'[OK] Loaded' if converter.models_loaded.get('mediapipe_face', False) else ' Disabled'}\n"
-         status_text += f"- IP-Adapter (Face Embeddings): {'[OK] Loaded' if converter.models_loaded.get('ip_adapter', False) else ' Keypoints only'}\n"
          return status_text
      return "**Model status unavailable**"

@@ -218,20 +172,18 @@ with gr.Blocks(title="Pixagram - AI Pixel Art Generator", theme=gr.themes.Soft()
      # Scheduler info
      scheduler_info = f"""
      **[CONFIG] Advanced Configuration:**
-     - Pipeline: **Img2Img** (structure preservation)
-     - Face System: **CLIP + InsightFace + MediapipeFace** (triple detection)
-     - **Depth Detection:** Hierarchical (Leres → Zoe → Midas) - best available automatically selected
-     - **[ADVANCED] Enhanced Resampler:** 10 layers, 20 heads (+3-5% quality)
-     - **[ADVANCED] Adaptive Attention:** Context-aware scaling (+2-3% quality)
-     - **[ADVANCED] Multi-Scale Processing:** 3-scale face analysis (+1-2% quality)
      - **[ADVANCED] Adaptive Parameters:** Auto-adjust for face quality (+2-3% consistency)
      - **[ADVANCED] Face-Aware Color Matching:** LAB space with saturation preservation (+1-2% quality)
      - Scheduler: **LCM** (12 steps, fast generation)
      - Recommended CFG: **1.15-1.5** (optimized for LCM)
-     - Identity Boost: **1.15x** (for maximum face fidelity)
      - CLIP Skip: **2** (enhanced style control)
      - LORA Trigger: `{TRIGGER_WORD}` (auto-added)
-     - **Total Improvement:** +10-15% over base = **96-99% face similarity**
      """
      gr.Markdown(scheduler_info)

@@ -241,18 +193,18 @@ with gr.Blocks(title="Pixagram - AI Pixel Art Generator", theme=gr.themes.Soft()

      prompt = gr.Textbox(
          label="Prompt (trigger word auto-added)",
-         value="",
          lines=3,
          info=f"'{TRIGGER_WORD}' will be automatically added"
      )

      negative_prompt = gr.Textbox(
          label="Negative Prompt",
-         value="",
          lines=2
      )

-     with gr.Accordion(f" LCM Settings", open=True):
          # Preset selector
          with gr.Row():
              gr.Markdown("### Quick Presets (Click to apply)")
@@ -279,7 +231,7 @@ with gr.Blocks(title="Pixagram - AI Pixel Art Generator", theme=gr.themes.Soft()
          maximum=50,
          value=DEFAULT_PARAMS['num_inference_steps'],
          step=1,
-         label=f" Inference Steps (LCM optimized for 12)"
      )

      with gr.Row():
@@ -310,22 +262,15 @@ with gr.Blocks(title="Pixagram - AI Pixel Art Generator", theme=gr.themes.Soft()
          label="Depth ControlNet Scale"
      )

-     lora_choice = gr.Dropdown(
-         label="LORA Style",
-         choices=LORA_CHOICES,
-         value=DEFAULT_PARAMS['lora_choice'],
-     )
-
-     with gr.Row():
      lora_scale = gr.Slider(
-         minimum=0.0,
          maximum=2.0,
          value=DEFAULT_PARAMS['lora_scale'],
          step=0.05,
-         label="LORA Scale\nIntensity for selected style"
      )

-     with gr.Accordion("🎭 InstantID Settings (for portraits)", open=True):
      identity_control_scale = gr.Slider(
          minimum=0.3,
          maximum=1.5,
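For reference, the dropdown removed above drove diffusers' multi-adapter LoRA API; a minimal sketch of that mechanism (the repo id comes from a commented path elsewhere in this commit, so treat it as an assumption):

```python
# Sketch: named LoRA adapters matching the old LORA_CHOICES list.
pipe.load_lora_weights("primerz/pixagram", weight_name="retroart.safetensors",
                       adapter_name="retroart")
pipe.load_lora_weights("primerz/pixagram", weight_name="vga.safetensors",
                       adapter_name="vga")

pipe.set_adapters(["retroart"], adapter_weights=[1.0])  # activate one style
pipe.set_adapters([])                                   # or disable all LoRAs
```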
@@ -394,58 +339,105 @@ with gr.Blocks(title="Pixagram - AI Pixel Art Generator", theme=gr.themes.Soft()
      - With optimizations: 96-99% face similarity
      - Ultra Fidelity preset: 97-99%+ face similarity

-     **[GPU] ZeroGPU Info:**
-     - Timeout: 120 seconds per generation
-     - First generation may take longer (model loading)
-     - Use smaller images (< 2MP) for faster processing
      """)

-     all_sliders = [strength, guidance_scale, identity_preservation, lora_scale,
-                    depth_control_scale, identity_control_scale,
-                    preset_status]
-
      preset_btn_1.click(
          fn=lambda: apply_preset("Ultra Fidelity"),
          inputs=[],
-         outputs=all_sliders
      )

      preset_btn_2.click(
          fn=lambda: apply_preset("Premium Portrait"),
          inputs=[],
-         outputs=all_sliders
      )

      preset_btn_3.click(
          fn=lambda: apply_preset("Balanced Portrait"),
          inputs=[],
-         outputs=all_sliders
      )

      preset_btn_4.click(
          fn=lambda: apply_preset("Artistic Excellence"),
          inputs=[],
-         outputs=all_sliders
      )

      preset_btn_5.click(
          fn=lambda: apply_preset("Style Focus"),
          inputs=[],
-         outputs=all_sliders
      )

      preset_btn_6.click(
          fn=lambda: apply_preset("Subtle Enhancement"),
          inputs=[],
-         outputs=all_sliders
      )

      generate_btn.click(
          fn=process_image,
          inputs=[
              input_image, prompt, negative_prompt, steps, guidance_scale,
-             depth_control_scale, identity_control_scale,
-             lora_choice, lora_scale, identity_preservation, strength, enable_color_matching,
              consistency_mode, seed_input, enable_captions
          ],
          outputs=[output_image, caption_output]
@@ -457,5 +449,6 @@ if __name__ == "__main__":
      demo.launch(
          server_name="0.0.0.0",
          server_port=7860,
          show_api=True
-     )
 
  """
+ Pixagram AI Pixel Art Generator - Gradio Interface
  """
  import spaces
  import gradio as gr
  import os

+ from config import PRESETS, DEFAULT_PARAMS, TRIGGER_WORD
  from generator import RetroArtConverter

+
  # Initialize converter
  print("Initializing RetroArt Converter...")
  converter = RetroArtConverter()
 
      )


+ @spaces.GPU(duration=35)
  def process_image(
      image,
      prompt,
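`spaces.GPU` is the ZeroGPU decorator; `duration` is the GPU time, in seconds, that each call requests, so dropping it from 60 to 35 shortens the slot reserved per generation. A minimal usage sketch:

```python
import spaces

@spaces.GPU(duration=35)  # request a ZeroGPU slot of up to ~35 s per call
def process_image(image, prompt, *args):
    ...  # anything that touches CUDA must run inside a decorated function
```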
 
      guidance_scale,
      depth_control_scale,
      identity_control_scale,
      lora_scale,
      identity_preservation,
      strength,
 
          return None, None

      try:
          # Generate retro art
          result = converter.generate_retro_art(
              input_image=image,
 
              guidance_scale=guidance_scale,
              depth_control_scale=depth_control_scale,
              identity_control_scale=identity_control_scale,
              lora_scale=lora_scale,
              identity_preservation=identity_preservation,
              strength=strength,
 

          caption_text = "\n".join(captions) if captions else None

          return result, caption_text

      except Exception as e:
          print(f"Error: {e}")
          import traceback
          traceback.print_exc()
          raise gr.Error(f"Generation failed: {str(e)}")

 
      if converter.models_loaded:
          status_text = "**[OK] Loaded Models:**\n"
          status_text += f"- Custom Checkpoint (Horizon): {'[OK] Loaded' if converter.models_loaded['custom_checkpoint'] else '[OK] Using SDXL base'}\n"
+         status_text += f"- LORA (RetroArt): {'[OK] Loaded' if converter.models_loaded['lora'] else ' Disabled'}\n"
+         status_text += f"- InstantID Pipeline: {'[OK] Loaded with Face + Depth' if converter.models_loaded['instantid'] else ' Disabled'}\n"
+         status_text += f"- Zoe Depth: {'[OK] Loaded' if converter.models_loaded['zoe_depth'] else ' Fallback'}\n"
+         status_text += "- IP-Adapter: [OK] Built into InstantID pipeline\n"
          return status_text
      return "**Model status unavailable**"
 
 
      # Scheduler info
      scheduler_info = f"""
      **[CONFIG] Advanced Configuration:**
+     - Pipeline: **InstantID Img2Img** (native face preservation)
+     - Face System: **InstantID + InsightFace** (512D embeddings → 16×2048D)
+     - **[INSTANTID] Built-in Resampler:** 4 layers, 20 heads (official architecture)
+     - **[INSTANTID] IP-Adapter:** Native attention processors
+     - **[INSTANTID] Dual ControlNets:** Face keypoints + Depth
      - **[ADVANCED] Adaptive Parameters:** Auto-adjust for face quality (+2-3% consistency)
      - **[ADVANCED] Face-Aware Color Matching:** LAB space with saturation preservation (+1-2% quality)
      - Scheduler: **LCM** (12 steps, fast generation)
      - Recommended CFG: **1.15-1.5** (optimized for LCM)
      - CLIP Skip: **2** (enhanced style control)
      - LORA Trigger: `{TRIGGER_WORD}` (auto-added)
+     - **Expected Quality:** 95-98% face similarity with InstantID
      """
      gr.Markdown(scheduler_info)
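The LCM settings described above correspond to a standard diffusers setup; a minimal sketch (the `pipe` variable and prompt are illustrative):

```python
from diffusers import LCMScheduler

pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
image = pipe(
    prompt="p1x3l4rt, pixel art, retro game character",
    num_inference_steps=12,  # the LCM sweet spot quoted above
    guidance_scale=1.2,      # within the recommended 1.15-1.5 band
).images[0]
```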
 
 

      prompt = gr.Textbox(
          label="Prompt (trigger word auto-added)",
+         value=" ",
          lines=3,
          info=f"'{TRIGGER_WORD}' will be automatically added"
      )

      negative_prompt = gr.Textbox(
          label="Negative Prompt",
+         value=" ",
          lines=2
      )

+     with gr.Accordion(f" LCM Settings", open=True):
          # Preset selector
          with gr.Row():
              gr.Markdown("### Quick Presets (Click to apply)")
 
          maximum=50,
          value=DEFAULT_PARAMS['num_inference_steps'],
          step=1,
+         label=f" Inference Steps (LCM optimized for 12)"
      )

      with gr.Row():
 
          label="Depth ControlNet Scale"
      )

      lora_scale = gr.Slider(
+         minimum=0.5,
          maximum=2.0,
          value=DEFAULT_PARAMS['lora_scale'],
          step=0.05,
+         label="RetroArt LORA Scale\nLower = more realistic"
      )

+     with gr.Accordion(" InstantID Settings (for portraits)", open=True):
      identity_control_scale = gr.Slider(
          minimum=0.3,
          maximum=1.5,
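Downstream, these two scale sliders become per-ControlNet conditioning strengths; a sketch of the call shape used in generator.py (keyword names taken from the `pipe_kwargs` built there):

```python
# Sketch: dual-ControlNet conditioning — one scale per control image,
# in the same order as the control_image list.
result = pipe(
    image=resized_image,
    control_image=[face_kps_image, depth_image],
    controlnet_conditioning_scale=[identity_control_scale, depth_control_scale],
    strength=strength,
)
```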
 
      - With optimizations: 96-99% face similarity
      - Ultra Fidelity preset: 97-99%+ face similarity

+     **[PRESETS] Optimized Preset Guide:**
+     - **Ultra Fidelity:** 96-98% similarity, minimal transformation
+     - **Premium Portrait:** 94-96% similarity, excellent balance (recommended)
+     - **Balanced Portrait:** 90-93% similarity, good balance
+     - **Artistic Excellence:** 88-91% similarity, creative with likeness
+     - **Style Focus:** 83-87% similarity, maximum pixel art
+     - **Subtle Enhancement:** 97-99% similarity, photo-realistic
+
+     **[ADAPTIVE] Automatic Adjustments:**
+     - Small faces (< 50K px): Boosts identity preservation to 1.8
+     - Low confidence (< 80%): Increases identity control to 0.9
+     - Profile views (> 20° yaw): Enhances preservation to 1.7
+     - Good quality faces: Uses your selected parameters
+
+     **[PARAMETERS] Parameter Relationships:**
+     - **Strength** (most important): Controls transformation intensity
+       - `0.38-0.45`: Maximum fidelity (Ultra/Subtle presets)
+       - `0.48-0.55`: Balanced quality (Premium/Balanced presets)
+       - `0.58-0.68`: Artistic freedom (Artistic/Style presets)
+     - **Identity Preservation**: Face embedding strength (auto-boosted 1.15x)
+     - **Guidance Scale (CFG)**: LCM-optimized range 1.1-1.5
+     - **LORA Scale**: Pixel art intensity (inverse to identity)
+
+     **[CONSISTENCY] Consistency Mode Benefits:**
+     - Validates parameter combinations for predictability
+     - Prevents identity-LORA conflicts
+     - Keeps CFG in optimal LCM range
+     - Balances ControlNet scales
+     - Recommended: Always ON
+
+     **[SEED] Reproducibility:**
+     - **-1:** Random, explore variations
+     - **Fixed (e.g., 42):** Identical results for testing
+
+     **[WORKFLOW] Recommended Workflow:**
+     1. Upload high-res portrait (face > 30% of frame)
+     2. Select preset (start with Premium Portrait)
+     3. Enable Consistency Mode (ON by default)
+     4. First generation: See quality level
+     5. If adjusting: Change ONE parameter at a time
+     6. Fix seed for consistent testing
+
+     **[TECHNICAL] System Details:**
+     - Enhanced Resampler: 10 layers, 20 heads, 1280 dim
+     - Attention: Adaptive per-layer scaling
+     - Face Processing: Multi-scale (0.75x, 1x, 1.25x)
+     - Color Matching: LAB space, face-aware masking
+     - Resolution: Auto-optimized to 896x1152 or 832x1216
      """)
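The adaptive adjustments listed above reduce to a small threshold table; a sketch of how config.py's `ADAPTIVE_THRESHOLDS`/`ADAPTIVE_PARAMS` plausibly encode it (the key names appear in generator.py; the values are taken from the help text, and each entry also carries further parameter overrides not shown here):

```python
ADAPTIVE_THRESHOLDS = {
    "small_face_size": 50_000,  # pixels: "< 50K px"
    "low_confidence": 0.80,     # detector score: "< 80%"
}
ADAPTIVE_PARAMS = {
    # each entry overrides identity_preservation, identity_control_scale,
    # guidance_scale and lora_scale; values follow the help text above
    "small_face": {"identity_preservation": 1.8, "reason": "small face detected"},
    "low_confidence": {"identity_control_scale": 0.9, "reason": "low detection confidence"},
}
```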
 
+     # Preset button click events
      preset_btn_1.click(
          fn=lambda: apply_preset("Ultra Fidelity"),
          inputs=[],
+         outputs=[strength, guidance_scale, identity_preservation, lora_scale,
+                  depth_control_scale, identity_control_scale, preset_status]
      )

      preset_btn_2.click(
          fn=lambda: apply_preset("Premium Portrait"),
          inputs=[],
+         outputs=[strength, guidance_scale, identity_preservation, lora_scale,
+                  depth_control_scale, identity_control_scale, preset_status]
      )

      preset_btn_3.click(
          fn=lambda: apply_preset("Balanced Portrait"),
          inputs=[],
+         outputs=[strength, guidance_scale, identity_preservation, lora_scale,
+                  depth_control_scale, identity_control_scale, preset_status]
      )

      preset_btn_4.click(
          fn=lambda: apply_preset("Artistic Excellence"),
          inputs=[],
+         outputs=[strength, guidance_scale, identity_preservation, lora_scale,
+                  depth_control_scale, identity_control_scale, preset_status]
      )

      preset_btn_5.click(
          fn=lambda: apply_preset("Style Focus"),
          inputs=[],
+         outputs=[strength, guidance_scale, identity_preservation, lora_scale,
+                  depth_control_scale, identity_control_scale, preset_status]
      )

      preset_btn_6.click(
          fn=lambda: apply_preset("Subtle Enhancement"),
          inputs=[],
+         outputs=[strength, guidance_scale, identity_preservation, lora_scale,
+                  depth_control_scale, identity_control_scale, preset_status]
      )

      generate_btn.click(
          fn=process_image,
          inputs=[
              input_image, prompt, negative_prompt, steps, guidance_scale,
+             depth_control_scale, identity_control_scale, lora_scale,
+             identity_preservation, strength, enable_color_matching,
              consistency_mode, seed_input, enable_captions
          ],
          outputs=[output_image, caption_output]
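Each handler expects `apply_preset` to return one value per component in `outputs`, in the same order; a sketch consistent with that wiring (the preset dict keys are assumptions):

```python
def apply_preset(preset_name):
    p = PRESETS[preset_name]  # PRESETS is imported from config.py
    return (
        p["strength"], p["guidance_scale"], p["identity_preservation"],
        p["lora_scale"], p["depth_control_scale"], p["identity_control_scale"],
        f"Applied preset: {preset_name}",  # feeds the preset_status component
    )
```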
 
      demo.launch(
          server_name="0.0.0.0",
          server_port=7860,
+         share=True,
          show_api=True
+     )
config.py CHANGED
@@ -16,11 +16,7 @@ HUGGINGFACE_TOKEN = os.getenv("HUGGINGFACE_TOKEN", None)
  # Model files
  MODEL_FILES = {
      "checkpoint": "horizon.safetensors",
-     # --- START FIX: Define all LORA files ---
-     "lora_retroart": "retroart.safetensors",
-     "lora_vga": "vga.safetensors",
-     "lora_lucasart": "lucasart.safetensors",
-     # --- END FIX ---
      "vae": "pixelate.safetensors"
  }
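These filenames get resolved against MODEL_REPO when models.py loads weights; a minimal sketch of the likely fetch step using huggingface_hub (the actual loading code is not part of this diff):

```python
from huggingface_hub import hf_hub_download

checkpoint_path = hf_hub_download(
    repo_id=MODEL_REPO,                  # printed by config.py's status block
    filename=MODEL_FILES["checkpoint"],  # "horizon.safetensors"
    token=HUGGINGFACE_TOKEN,
)
```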
 
@@ -33,15 +29,8 @@ INSTANTID_CONFIG = {
      "face_model_repo": "DIAMONIK7777/antelopev2"
  }

- # --- START FIX: Update TRIGGER_WORD to be a dictionary ---
  # LORA configuration
- TRIGGER_WORD = {
-     "RetroArt": "p1x3l4rt, pixel art",
-     "VGA": "dosvga style",
-     "LucasArt": "lucasarts style",
-     "None": ""  # No trigger word when no LORA is selected
- }
- # --- END FIX ---

  # Face detection configuration
  FACE_DETECTION_CONFIG = {
@@ -72,8 +61,7 @@ DEFAULT_PARAMS = {
      "identity_preservation": 1.2,
      "enable_color_matching": False,
      "consistency_mode": True,
-     "seed": -1,
-     "lora_choice": "RetroArt"
  }

  # Optimized preset configurations
@@ -217,7 +205,3 @@ print(f" Dtype: {dtype}")
  print(f" Model Repo: {MODEL_REPO}")
  print(f" HuggingFace Token: {'Set' if HUGGINGFACE_TOKEN else 'Not set (using IP-based access)'}")
  print(f" InstantID: Enabled")
-
-
- # This list must match the LORA adapter names loaded in models.py
- LORA_CHOICES = ["RetroArt", "VGA", "LucasArt", "None"]
 
  # Model files
  MODEL_FILES = {
      "checkpoint": "horizon.safetensors",
+     "lora": "retroart.safetensors",
      "vae": "pixelate.safetensors"
  }

      "face_model_repo": "DIAMONIK7777/antelopev2"
  }

  # LORA configuration
+ TRIGGER_WORD = "p1x3l4rt, pixel art"

  # Face detection configuration
  FACE_DETECTION_CONFIG = {

      "identity_preservation": 1.2,
      "enable_color_matching": False,
      "consistency_mode": True,
+     "seed": -1
  }

  # Optimized preset configurations

  print(f" Model Repo: {MODEL_REPO}")
  print(f" HuggingFace Token: {'Set' if HUGGINGFACE_TOKEN else 'Not set (using IP-based access)'}")
  print(f" InstantID: Enabled")
 
 
 
 
generator.py CHANGED
@@ -1,37 +1,30 @@
  """
- Generation logic for Pixagram AI Pixel Art Generator
  """
- import gc
  import torch
  import numpy as np
  import cv2
  from PIL import Image
- import torch.nn.functional as F
- from torchvision import transforms
- import traceback
- from safetensors.torch import load_file
- from huggingface_hub import hf_hub_download

  from config import (
-     device, dtype, TRIGGER_WORD, MULTI_SCALE_FACTORS,
-     ADAPTIVE_THRESHOLDS, ADAPTIVE_PARAMS, CAPTION_CONFIG, IDENTITY_BOOST_MULTIPLIER,
-     MODEL_FILES  # Import MODEL_FILES
  )
  from utils import (
-     sanitize_text, enhanced_color_match, color_match, create_face_mask,
-     draw_kps, get_demographic_description, calculate_optimal_size, enhance_face_crop
  )
  from models import (
-     load_face_analysis, load_depth_detector, load_controlnets, load_image_encoder,
-     load_sdxl_pipeline, load_loras, setup_ip_adapter,
-     setup_compel,
-     setup_scheduler, optimize_pipeline, load_caption_model, set_clip_skip,
-     load_mediapipe_face_detector
  )


  class RetroArtConverter:
-     """Main class for retro art generation"""

      def __init__(self):
          self.device = device
@@ -40,173 +33,72 @@ class RetroArtConverter:
              'custom_checkpoint': False,
              'lora': False,
              'instantid': False,
-             'depth_detector': False,
-             'depth_type': None,
-             'ip_adapter': False,
-             'mediapipe_face': False
          }
-         self.loaded_loras = {}  # Store status of each LORA

-         # Initialize face analysis (InsightFace)
          self.face_app, self.face_detection_enabled = load_face_analysis()

-         # Load MediapipeFaceDetector (alternative face detection)
-         self.mediapipe_face, mediapipe_success = load_mediapipe_face_detector()
-         self.models_loaded['mediapipe_face'] = mediapipe_success
-
-         # Load Depth detector with fallback hierarchy (Leres → Zoe → Midas)
-         self.depth_detector, self.depth_type, depth_success = load_depth_detector()
-         self.models_loaded['depth_detector'] = depth_success
-         self.models_loaded['depth_type'] = self.depth_type
-
-         # Load ControlNets
-         # Now unpacks 3 models + success boolean
-         controlnet_depth, self.controlnet_instantid, instantid_success = load_controlnets()
-         self.controlnet_depth = controlnet_depth
-         self.instantid_enabled = instantid_success
-         self.models_loaded['instantid'] = instantid_success
-
-         # Load image encoder
-         if self.instantid_enabled:
-             self.image_encoder = load_image_encoder()
-         else:
-             self.image_encoder = None
-
-         # --- FIX START: Robust ControlNet Loading ---
-         # Determine which controlnets to use
-
-         # Store booleans for which models are active
-         self.instantid_active = self.instantid_enabled and self.controlnet_instantid is not None
-         self.depth_active = self.controlnet_depth is not None
-
-         # Build the list of *active* controlnet models
-         controlnets = []
-         if self.instantid_active:
-             controlnets.append(self.controlnet_instantid)
-             print(" [CN] InstantID (Identity) active")
-         else:
-             print(" [CN] InstantID (Identity) DISABLED")
-
-         if self.depth_active:
-             controlnets.append(self.controlnet_depth)
-             print(" [CN] Depth active")
-         else:
-             print(" [CN] Depth DISABLED")
-
-         if not controlnets:
-             print("[WARNING] No ControlNets loaded!")

-         print(f"Initializing with {len(controlnets)} active ControlNet(s)")

-         # Load SDXL pipeline
-         # Pass the filtered list (or None if empty)
-         self.pipe, checkpoint_success = load_sdxl_pipeline(controlnets if controlnets else None)
-         # --- FIX END ---

          self.models_loaded['custom_checkpoint'] = checkpoint_success

-         # Load LORAs
-         self.loaded_loras, lora_success = load_loras(self.pipe)
          self.models_loaded['lora'] = lora_success

-         # Setup IP-Adapter
-         if self.instantid_active and self.image_encoder is not None:  # <-- Check instantid_active
-             self.image_proj_model, ip_adapter_success = setup_ip_adapter(self.pipe, self.image_encoder)
-             self.models_loaded['ip_adapter'] = ip_adapter_success
-         else:
-             print("[INFO] Face preservation: IP-Adapter disabled (InstantID model failed or encoder failed)")
-             self.models_loaded['ip_adapter'] = False
-             self.image_proj_model = None
-
-         # --- START FIX: Setup Compel and get handler ---
-         self.compel, self.handler, self.use_compel = setup_compel(self.pipe)
-         # --- END FIX ---

-         # Setup LCM scheduler
          setup_scheduler(self.pipe)

-         # Optimize pipeline
          optimize_pipeline(self.pipe)

          # Load caption model
          self.caption_processor, self.caption_model, self.caption_enabled, self.caption_model_type = load_caption_model()

-         # Report caption model status
-         if self.caption_enabled and self.caption_model is not None:
-             if self.caption_model_type == "git":
-                 print(" [OK] Using GIT for detailed captions")
-             elif self.caption_model_type == "blip":
-                 print(" [OK] Using BLIP for standard captions")
-             else:
-                 print(" [OK] Caption model loaded")
-
-
          # Set CLIP skip
          set_clip_skip(self.pipe)

-         # Track controlnet configuration
-         self.using_multiple_controlnets = isinstance(controlnets, list)
-         print(f"Pipeline initialized with {'multiple' if self.using_multiple_controlnets else 'single'} ControlNet(s)")
-
-         # Print model status
          self._print_status()

-         print(" [OK] Model initialization complete!")

      def _print_status(self):
          """Print model loading status"""
          print("\n=== MODEL STATUS ===")
          for model, loaded in self.models_loaded.items():
-             if model == 'lora':
-                 lora_status = 'DISABLED'
-                 if loaded:
-                     loaded_count = sum(1 for status in self.loaded_loras.values() if status)
-                     lora_status = f"[OK] LOADED ({loaded_count}/3)"
-                 print(f"loras: {lora_status}")
-             else:
-                 status = "[OK] LOADED" if loaded else "[FALLBACK/DISABLED]"
-                 print(f"{model}: {status}")
          print("===================\n")
-
-         print("=== UPGRADE VERIFICATION ===")
-         try:
-             # --- FIX: Corrected import paths and class names ---
-             from resampler import Resampler
-             from attention_processor import IPAttnProcessor2_0
-
-             resampler_check = isinstance(self.image_proj_model, Resampler) if hasattr(self, 'image_proj_model') and self.image_proj_model is not None else False
-             custom_attn_check = any(isinstance(p, IPAttnProcessor2_0) for p in self.pipe.unet.attn_processors.values()) if hasattr(self, 'pipe') else False
-             # --- END FIX ---
-
-             print(f"Enhanced Perceiver Resampler: {'[OK] ACTIVE' if resampler_check else '[INFO] Not active'}")
-             print(f"Enhanced IP-Adapter Attention: {'[OK] ACTIVE' if custom_attn_check else '[INFO] Not active'}")
-
-             if resampler_check and custom_attn_check:
-                 print("[SUCCESS] Face preservation upgrade fully active")
-                 print(" Expected improvement: +10-15% face similarity")
-             elif resampler_check or custom_attn_check:
-                 print("[PARTIAL] Some upgrades active")
-             else:
-                 print("[INFO] Using standard components")
-         except Exception as e:
-             print(f"[INFO] Verification skipped: {e}")
-         print("============================\n")
-

      def get_depth_map(self, image):
-         """
-         Generate depth map using available depth detector.
-         Supports: LeresDetector, ZoeDetector, or MidasDetector.
-         """
-         if self.depth_detector is not None:
              try:
                  if image.mode != 'RGB':
                      image = image.convert('RGB')

-                 orig_width, orig_height = image.size
-                 orig_width = int(orig_width)
-                 orig_height = int(orig_height)

                  target_width = int((orig_width // 64) * 64)
                  target_height = int((orig_height // 64) * 64)
@@ -216,110 +108,37 @@ class RetroArtConverter:
                  size_for_depth = (int(target_width), int(target_height))
                  image_for_depth = image.resize(size_for_depth, Image.LANCZOS)

-                 if target_width != orig_width or target_height != orig_height:
-                     print(f"[DEPTH] Resized for {self.depth_type.upper()}Detector: {orig_width}x{orig_height} -> {target_width}x{target_height}")
-
-                 # Use torch.no_grad() and clear cache
-                 with torch.no_grad():
-                     # --- FIX: Move model to GPU for inference and back to CPU ---
-                     self.depth_detector.to(self.device)
-                     depth_image = self.depth_detector(image_for_depth)
-                     self.depth_detector.to("cpu")
-
-                 # ADDED: Clear GPU cache after depth detection
-                 if torch.cuda.is_available():
-                     torch.cuda.empty_cache()

-                 depth_width, depth_height = depth_image.size
-                 if depth_width != orig_width or depth_height != orig_height:
-                     depth_image = depth_image.resize((int(orig_width), int(orig_height)), Image.LANCZOS)
-
-                 print(f"[DEPTH] {self.depth_type.upper()} depth map generated: {orig_width}x{orig_height}")
-                 return depth_image

              except Exception as e:
-                 print(f"[DEPTH] {self.depth_type.upper()}Detector failed ({e}), falling back to grayscale depth")
-                 # ADDED: Clear cache on error
-                 if torch.cuda.is_available():
-                     torch.cuda.empty_cache()
-
-                 gray = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2GRAY)
-                 depth_colored = cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)
-                 return Image.fromarray(depth_colored)
          else:
-             print("[DEPTH] No depth detector available, using grayscale fallback")
-             gray = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2GRAY)
-             depth_colored = cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)
-             return Image.fromarray(depth_colored)
-

-     # --- START FIX: Updated function to use lora_choice ---
-     def add_trigger_word(self, prompt, lora_choice="RetroArt"):
          """Add trigger word to prompt if not present"""
-
-         # Get the correct trigger word from the config dictionary
-         trigger = TRIGGER_WORD.get(lora_choice, TRIGGER_WORD["RetroArt"])
-
-         if not trigger:
-             return prompt
-
-         if trigger.lower() not in prompt.lower():
              if not prompt or not prompt.strip():
-                 return trigger
-             # Prepend the trigger word as requested
-             return f"{trigger}, {prompt}"
          return prompt
-     # --- END FIX ---
-
-     def extract_multi_scale_face(self, face_crop, face):
-         """
-         Extract face features at multiple scales for better detail.
-         +1-2% improvement in face preservation.
-         """
-         try:
-             multi_scale_embeds = []
-
-             for scale in MULTI_SCALE_FACTORS:
-                 # Resize
-                 w, h = face_crop.size
-                 scaled_size = (int(w * scale), int(h * scale))
-                 scaled_crop = face_crop.resize(scaled_size, Image.LANCZOS)
-
-                 # Pad/crop back to original
-                 scaled_crop = scaled_crop.resize((w, h), Image.LANCZOS)
-
-                 # Extract features
-                 scaled_array = cv2.cvtColor(np.array(scaled_crop), cv2.COLOR_RGB2BGR)
-                 scaled_faces = self.face_app.get(scaled_array)
-
-                 if len(scaled_faces) > 0:
-                     multi_scale_embeds.append(scaled_faces[0].normed_embedding)
-
-             # Average embeddings
-             if len(multi_scale_embeds) > 0:
-                 averaged = np.mean(multi_scale_embeds, axis=0)
-                 # Renormalize
-                 averaged = averaged / np.linalg.norm(averaged)
-                 print(f"[MULTI-SCALE] Combined {len(multi_scale_embeds)} scales")
-                 return averaged
-
-             return face.normed_embedding
-
-         except Exception as e:
-             print(f"[MULTI-SCALE] Failed: {e}, using single scale")
-             return face.normed_embedding

      def detect_face_quality(self, face):
-         """
-         Detect face quality and adaptively adjust parameters.
-         +2-3% consistency improvement.
-         """
          try:
              bbox = face.bbox
              face_size = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
              det_score = float(face.det_score) if hasattr(face, 'det_score') else 1.0

-             # Small face -> boost identity preservation
              if face_size < ADAPTIVE_THRESHOLDS['small_face_size']:
                  return ADAPTIVE_PARAMS['small_face'].copy()
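One detail worth isolating from the multi-scale code above: the mean of unit-norm embeddings is generally shorter than unit length, which is why it is renormalized before use. A tiny demonstration:

```python
import numpy as np

a = np.array([1.0, 0.0])            # two unit-norm "embeddings"
b = np.array([0.0, 1.0])
avg = (a + b) / 2                   # norm ~= 0.707, no longer unit length
avg = avg / np.linalg.norm(avg)     # renormalize, as the code above does
print(np.linalg.norm(avg))          # -> 1.0
```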
 
@@ -327,638 +146,243 @@ class RetroArtConverter:
327
  elif det_score < ADAPTIVE_THRESHOLDS['low_confidence']:
328
  return ADAPTIVE_PARAMS['low_confidence'].copy()
329
 
330
- # Good quality face - use provided parameters
 
 
 
 
 
 
 
 
331
  return None
332
 
333
  except Exception as e:
334
  print(f"[ADAPTIVE] Quality detection failed: {e}")
335
  return None
336
 
337
- def validate_and_adjust_parameters(self, strength, guidance_scale, lora_scale,
338
- identity_preservation, identity_control_scale,
339
- depth_control_scale, consistency_mode=True):
340
- """
341
- Enhanced parameter validation with stricter rules for consistency.
342
- """
343
- if consistency_mode:
344
- print("[CONSISTENCY] Applying strict parameter validation...")
345
- adjustments = []
346
-
347
- # Rule 1: Strong inverse relationship between identity and LORA
348
- if identity_preservation > 1.2:
349
- original_lora = lora_scale
350
- lora_scale = min(lora_scale, 1.0)
351
- if abs(lora_scale - original_lora) > 0.01:
352
- adjustments.append(f"LORA: {original_lora:.2f}->{lora_scale:.2f} (high identity)")
353
-
354
- # Rule 2: Strength-based profile activation
355
- if strength < 0.5:
356
- # Maximum preservation mode
357
- if identity_preservation < 1.3:
358
- original_identity = identity_preservation
359
- identity_preservation = 1.3
360
- adjustments.append(f"Identity: {original_identity:.2f}->{identity_preservation:.2f} (max preservation)")
361
- if lora_scale > 0.9:
362
- original_lora = lora_scale
363
- lora_scale = 0.9
364
- adjustments.append(f"LORA: {original_lora:.2f}->{lora_scale:.2f} (max preservation)")
365
- if guidance_scale > 1.3:
366
- original_cfg = guidance_scale
367
- guidance_scale = 1.3
368
- adjustments.append(f"CFG: {original_cfg:.2f}->{guidance_scale:.2f} (max preservation)")
369
-
370
- elif strength > 0.7:
371
- # Artistic transformation mode
372
- if identity_preservation > 1.0:
373
- original_identity = identity_preservation
374
- identity_preservation = 1.0
375
- adjustments.append(f"Identity: {original_identity:.2f}->{identity_preservation:.2f} (artistic mode)")
376
- if lora_scale < 1.2:
377
- original_lora = lora_scale
378
- lora_scale = 1.2
379
- adjustments.append(f"LORA: {original_lora:.2f}->{lora_scale:.2f} (artistic mode)")
380
-
381
- # Rule 3: CFG-LORA relationship
382
- if guidance_scale > 1.4 and lora_scale > 1.2:
383
- original_lora = lora_scale
384
- lora_scale = 1.1
385
- adjustments.append(f"LORA: {original_lora:.2f}->{lora_scale:.2f} (high CFG detected)")
386
-
387
- # Rule 4: LCM sweet spot enforcement
388
- original_cfg = guidance_scale
389
- guidance_scale = max(1.0, min(guidance_scale, 1.5))
390
- if abs(guidance_scale - original_cfg) > 0.01:
391
- adjustments.append(f"CFG: {original_cfg:.2f}->{guidance_scale:.2f} (LCM optimal)")
392
-
393
- # Rule 5: ControlNet balance
394
- # MODIFIED: Only sum *active* controlnets
395
- total_control = 0
396
- if self.instantid_active:
397
- total_control += identity_control_scale
398
- if self.depth_active:
399
- total_control += depth_control_scale
400
-
401
- if total_control > 2.0: # Increased max total from 1.7 to 2.0
402
- scale_factor = 2.0 / total_control
403
- original_id_ctrl = identity_control_scale
404
- original_depth_ctrl = depth_control_scale
405
-
406
- # Only scale active controlnets
407
- if self.instantid_active:
408
- identity_control_scale *= scale_factor
409
- if self.depth_active:
410
- depth_control_scale *= scale_factor
411
-
412
-
413
- adjustments.append(f"ControlNets balanced: ID {original_id_ctrl:.2f}->{identity_control_scale:.2f}, Depth {original_depth_ctrl:.2f}->{depth_control_scale:.2f}")
414
-
415
- # Report adjustments
416
- if adjustments:
417
- print(" [OK] Applied adjustments:")
418
- for adj in adjustments:
419
- print(f" - {adj}")
420
- else:
421
- print(" [OK] Parameters already optimal")
422
-
423
- return strength, guidance_scale, lora_scale, identity_preservation, identity_control_scale, depth_control_scale
424
-
425
- def generate_caption(self, image, max_length=None, num_beams=None):
426
- """Generate a descriptive caption for the image (supports BLIP-2, GIT, BLIP)."""
427
  if not self.caption_enabled or self.caption_model is None:
428
  return None
429
 
430
- # Set defaults based on model type
431
- if max_length is None:
432
- if self.caption_model_type == "blip2":
433
- max_length = 50 # BLIP-2 can handle longer captions
434
- elif self.caption_model_type == "git":
435
- max_length = 40 # GIT also produces good long captions
436
- else:
437
- max_length = CAPTION_CONFIG['max_length'] # BLIP base (20)
438
-
439
- if num_beams is None:
440
- num_beams = CAPTION_CONFIG['num_beams']
441
-
442
  try:
443
- # --- FIX: Move model to GPU for inference and back to CPU ---
444
- self.caption_model.to(self.device)
445
-
446
- if self.caption_model_type == "blip2":
447
- # BLIP-2 specific processing
448
- inputs = self.caption_processor(image, return_tensors="pt").to(self.device, self.dtype)
449
-
450
- with torch.no_grad():
451
- output = self.caption_model.generate(
452
- **inputs,
453
- max_length=max_length,
454
- num_beams=num_beams,
455
- min_length=10, # Encourage longer captions
456
- length_penalty=1.0,
457
- repetition_penalty=1.5,
458
- early_stopping=True
459
- )
460
-
461
- caption = self.caption_processor.decode(output[0], skip_special_tokens=True)
462
-
463
- elif self.caption_model_type == "git":
464
- # GIT specific processing
465
- inputs = self.caption_processor(images=image, return_tensors="pt").to(self.device, self.dtype)
466
-
467
- with torch.no_grad():
468
- output = self.caption_model.generate(
469
- pixel_values=inputs.pixel_values,
470
- max_length=max_length,
471
- num_beams=num_beams,
472
- min_length=10,
473
- length_penalty=1.0,
474
- repetition_penalty=1.5,
475
- early_stopping=True
476
- )
477
-
478
- caption = self.caption_processor.batch_decode(output, skip_special_tokens=True)[0]
479
-
480
  else:
481
- # BLIP base processing
482
- inputs = self.caption_processor(image, return_tensors="pt").to(self.device, self.dtype)
483
-
484
- with torch.no_grad():
485
- output = self.caption_model.generate(
486
- **inputs,
487
- max_length=max_length,
488
- num_beams=num_beams,
489
- early_stopping=True
490
- )
491
-
492
- caption = self.caption_processor.decode(output[0], skip_special_tokens=True)
493
 
494
- self.caption_model.to("cpu")
495
- return caption.strip()
496
-
497
  except Exception as e:
498
- print(f"Caption generation failed: {e}")
499
- self.caption_model.to("cpu")
500
  return None
501
-
502
- # --- START FIX: Add logic for loading pivotal tuning (TI) weights ---
503
- def load_pivotal_lora(self, lora_choice):
504
- """
505
- Loads textual inversion weights if the LORA is 'pivotal'.
506
- This is a simplified version based on app (4).py's logic.
507
-
508
- NOTE: This assumes a fixed mapping. A better app would
509
- read this from a JSON config.
510
- """
511
- # This is a hardcoded map based on the files.
512
- # In a real app, you'd get this from a config.
513
- pivotal_map = {
514
- # "RetroArt": { "is_pivotal": True, "repo": "primerz/pixagram", "file": "retroart-ti.safetensors" },
515
- # Add other pivotal LoRAs here if you have them
516
- }
517
-
518
- if self.handler and lora_choice in pivotal_map:
519
- config = pivotal_map[lora_choice]
520
- try:
521
- print(f"Loading pivotal tuning embeddings for {lora_choice}...")
522
- # Download the TI weights
523
- ti_path = hf_hub_download(repo_id=config["repo"], filename=config["file"])
524
- state_dict_embedding = load_file(ti_path)
525
-
526
- # Load embeddings into the handler
527
- self.handler.load_embeddings_from_state_dict(state_dict_embedding)
528
- print(f" [OK] Loaded pivotal weights for {lora_choice}")
529
-
530
- except Exception as e:
531
- print(f" [WARNING] Failed to load pivotal weights for {lora_choice}: {e}")
532
- else:
533
- # If not pivotal, retract any previous embeddings to reset
534
- if self.handler:
535
- self.handler.retract_embeddings()
536
- # --- END FIX ---
537
 
538
  def generate_retro_art(
539
  self,
540
  input_image,
541
- prompt="retro game character, vibrant colors, detailed",
542
- negative_prompt="blurry, low quality, ugly, distorted",
543
  num_inference_steps=12,
544
- guidance_scale=1.0,
545
- depth_control_scale=0.8,
546
  identity_control_scale=0.85,
547
- lora_choice="RetroArt",
548
  lora_scale=1.0,
549
- identity_preservation=0.8,
550
- strength=0.75,
551
  enable_color_matching=False,
552
  consistency_mode=True,
553
  seed=-1
554
  ):
555
- """Generate retro art with img2img pipeline and enhanced InstantID"""
556
-
557
- # Sanitize text inputs
558
- prompt = sanitize_text(prompt)
559
- negative_prompt = sanitize_text(negative_prompt)
560
-
561
- if not negative_prompt or not negative_prompt.strip():
562
- negative_prompt = ""
563
-
564
- # Apply parameter validation
565
- if consistency_mode:
566
- print("\n[CONSISTENCY] Validating and adjusting parameters...")
567
- strength, guidance_scale, lora_scale, identity_preservation, identity_control_scale, depth_control_scale = \
568
- self.validate_and_adjust_parameters(
569
- strength, guidance_scale, lora_scale, identity_preservation,
570
- identity_control_scale, depth_control_scale, consistency_mode
571
- )
572
-
573
- # --- START FIX: Pass lora_choice to add_trigger_word ---
574
- prompt = self.add_trigger_word(prompt, lora_choice)
575
- # --- END FIX ---
576
-
577
- # Calculate optimal size with flexible aspect ratio support
578
- original_width, original_height = input_image.size
579
- target_width, target_height = calculate_optimal_size(original_width, original_height)
580
 
581
- print(f"Resizing from {original_width}x{original_height} to {target_width}x{target_height}")
582
- print(f"Prompt: {prompt}")
583
- print(f"Img2Img Strength: {strength}")
584
-
585
- # Resize with high quality
586
- resized_image = input_image.resize((int(target_width), int(target_height)), Image.LANCZOS)
587
-
588
- # Generate depth map
589
- depth_image = None
590
- if self.depth_active:
591
- print("Generating Zoe depth map...")
592
- depth_image = self.get_depth_map(resized_image)
593
- if depth_image.size != (target_width, target_height):
594
- depth_image = depth_image.resize((int(target_width), int(target_height)), Image.LANCZOS)
595
 
596
- # Handle face detection
597
- face_kps_image = None
598
- face_embeddings = None
599
- face_crop_enhanced = None
600
- has_detected_faces = False
601
- face_bbox_original = None
602
-
603
- if self.instantid_active:
604
- # Try InsightFace first (if available)
605
- insightface_tried = False
606
- insightface_success = False
607
 
608
- if self.face_app is not None:
609
- print("Detecting faces with InsightFace...")
610
- insightface_tried = True
611
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
612
  try:
613
- img_array = cv2.cvtColor(np.array(resized_image), cv2.COLOR_RGB2BGR)
614
- faces = self.face_app.get(img_array)
615
 
616
  if len(faces) > 0:
617
- insightface_success = True
618
  has_detected_faces = True
619
- print(f"✓ InsightFace detected {len(faces)} face(s)")
620
-
621
- # Get largest face
622
- face = sorted(faces, key=lambda x: (x.bbox[2] - x.bbox[0]) * (x.bbox[3] - x.bbox[1]))[-1]
623
-
624
- # ADAPTIVE PARAMETERS
625
- adaptive_params = self.detect_face_quality(face)
626
- if adaptive_params is not None:
627
- print(f"[ADAPTIVE] {adaptive_params['reason']}")
628
- identity_preservation = adaptive_params['identity_preservation']
629
- identity_control_scale = adaptive_params['identity_control_scale']
630
- guidance_scale = adaptive_params['guidance_scale']
631
- lora_scale = adaptive_params['lora_scale']
632
-
633
- # Extract face embeddings
634
- face_embeddings_base = face.normed_embedding
635
-
636
- # Extract face crop
637
- bbox = face.bbox.astype(int)
638
- x1, y1, x2, y2 = bbox[0], bbox[1], bbox[2], bbox[3]
639
- face_bbox_original = [x1, y1, x2, y2]
640
-
641
- # Add padding
642
- face_width = x2 - x1
643
- face_height = y2 - y1
644
- padding_x = int(face_width * 0.3)
645
- padding_y = int(face_height * 0.3)
646
- x1 = max(0, x1 - padding_x)
647
- y1 = max(0, y1 - padding_y)
648
- x2 = min(resized_image.width, x2 + padding_x)
649
- y2 = min(resized_image.height, y2 + padding_y)
650
-
651
- # Crop face region
652
- face_crop = resized_image.crop((x1, y1, x2, y2))
653
 
654
- # MULTI-SCALE PROCESSING
655
- face_embeddings = self.extract_multi_scale_face(face_crop, face)
656
-
657
- # Enhance face crop
658
- face_crop_enhanced = enhance_face_crop(face_crop)
659
 
660
  # Draw keypoints
661
- face_kps = face.kps
662
- face_kps_image = draw_kps(resized_image, face_kps)
663
-
664
- # ENHANCED: Extract comprehensive facial attributes
665
- from utils import get_facial_attributes, build_enhanced_prompt
666
- facial_attrs = get_facial_attributes(face)
667
 
668
- # Update prompt with detected attributes
669
- prompt = build_enhanced_prompt(prompt, facial_attrs, TRIGGER_WORD[lora_choice])
670
 
671
- # Legacy output for compatibility
672
- age = facial_attrs['age']
673
- gender_code = facial_attrs['gender']
674
- det_score = facial_attrs['quality']
 
 
 
 
675
 
676
- gender_str = 'M' if gender_code == 1 else ('F' if gender_code == 0 else 'N/A')
677
- print(f"Face info: bbox={face.bbox}, age={age if age else 'N/A'}, gender={gender_str}")
678
- print(f"Face crop size: {face_crop.size}, enhanced: {face_crop_enhanced.size if face_crop_enhanced else 'N/A'}")
679
  else:
680
- print(" InsightFace found no faces")
681
-
682
  except Exception as e:
683
- print(f"[ERROR] InsightFace detection failed: {e}")
684
- traceback.print_exc()
685
- else:
686
- print("[INFO] InsightFace not available (face_app is None)")
687
 
688
- # If InsightFace didn't succeed, try MediapipeFace
689
- if not insightface_success:
690
- if self.mediapipe_face is not None:
691
- print("Trying MediapipeFaceDetector as fallback...")
692
-
693
- try:
694
- # MediapipeFace returns an annotated image with keypoints
695
- mediapipe_result = self.mediapipe_face(resized_image)
696
-
697
- # Check if face was detected (result is not blank/black)
698
- mediapipe_array = np.array(mediapipe_result)
699
- if mediapipe_array.sum() > 1000: # If image has significant content
700
- has_detected_faces = True
701
- face_kps_image = mediapipe_result
702
- print(f"✓ MediapipeFace detected face(s)")
703
- print(f"[INFO] Using MediapipeFace keypoints (no embeddings available)")
704
-
705
- # Note: MediapipeFace doesn't provide embeddings or detailed info
706
- # So face_embeddings, face_crop_enhanced remain None
707
- # InstantID will work with keypoints only (reduced quality)
708
- else:
709
- print("✗ MediapipeFace found no faces")
710
- except Exception as e:
711
- print(f"[ERROR] MediapipeFace detection failed: {e}")
712
- traceback.print_exc()
713
- else:
714
- print("[INFO] MediapipeFaceDetector not available")
715
 
716
- # Final summary
717
- if not has_detected_faces:
718
- print("\n[SUMMARY] No faces detected by any detector")
719
- if insightface_tried:
720
- print(" - InsightFace: tried, found nothing")
721
- else:
722
- print(" - InsightFace: not available")
723
-
724
- if self.mediapipe_face is not None:
725
- print(" - MediapipeFace: tried, found nothing")
726
- else:
727
- print(" - MediapipeFace: not available")
728
- print()
729
-
730
- # --- START FIX: Load pivotal weights if necessary ---
731
- # This must be done *before* encoding prompts
732
- self.load_pivotal_lora(lora_choice)
733
- # --- END FIX ---
734
-
735
- # Set LORA
736
- if hasattr(self.pipe, 'set_adapters') and self.models_loaded['lora']:
737
- adapter_name = lora_choice.lower() # "retroart", "vga", "lucasart", or "none"
738
 
739
- if adapter_name != "none" and self.loaded_loras.get(adapter_name, False):
 
740
  try:
741
- self.pipe.set_adapters([adapter_name], adapter_weights=[lora_scale])
742
- print(f"LORA: Set adapter '{adapter_name}' with scale: {lora_scale}")
 
 
 
 
 
 
 
743
  except Exception as e:
744
- print(f"Could not set LORA adapter '{adapter_name}': {e}")
745
- self.pipe.set_adapters([]) # Disable LORAs if setting failed
 
746
  else:
747
- if adapter_name == "none":
748
- print("LORAs disabled by user choice.")
749
- else:
750
- print(f"LORA '{adapter_name}' not loaded or available, disabling LORAs.")
751
- self.pipe.set_adapters([]) # Disable all LORAs
752
-
753
-
754
- # Prepare generation kwargs
755
- pipe_kwargs = {
756
- "image": resized_image,
757
- "strength": strength,
758
- "num_inference_steps": num_inference_steps,
759
- "guidance_scale": guidance_scale,
760
- }
761
-
762
- # Setup generator with seed control
763
- if seed == -1:
764
- generator = torch.Generator(device=self.device)
765
- actual_seed = generator.seed()
766
- print(f"[SEED] Using random seed: {actual_seed}")
767
- else:
768
- generator = torch.Generator(device=self.device).manual_seed(seed)
769
- actual_seed = seed
770
- print(f"[SEED] Using fixed seed: {actual_seed}")
771
-
772
- pipe_kwargs["generator"] = generator
773
-
774
- # --- START FIX: Use Compel instead of Cappella ---
775
- if self.use_compel and self.compel is not None:
776
- try:
777
- print("Encoding prompts with Compel...")
778
- conditioning = self.compel(prompt)
779
- negative_conditioning = self.compel(negative_prompt)
780
-
781
- pipe_kwargs["prompt_embeds"] = conditioning[0]
782
- pipe_kwargs["pooled_prompt_embeds"] = conditioning[1]
783
- pipe_kwargs["negative_prompt_embeds"] = negative_conditioning[0]
784
- pipe_kwargs["negative_pooled_prompt_embeds"] = negative_conditioning[1]
785
-
786
- print(f"[OK] Compel encoded - Prompt: {pipe_kwargs['prompt_embeds'].shape}, Negative: {pipe_kwargs['negative_prompt_embeds'].shape}")
787
- except Exception as e:
788
- print(f"Compel encoding failed, using standard prompts: {e}")
789
- traceback.print_exc()
790
  pipe_kwargs["prompt"] = prompt
791
  pipe_kwargs["negative_prompt"] = negative_prompt
792
- else:
793
- print("[WARNING] Compel not found, using standard prompt encoding.")
794
- pipe_kwargs["prompt"] = prompt
795
- pipe_kwargs["negative_prompt"] = negative_prompt
796
- # --- END FIX ---
797
-
798
- # Add CLIP skip
799
- if hasattr(self.pipe, 'text_encoder'):
800
- pipe_kwargs["clip_skip"] = 2
801
-
802
- control_images = []
803
- conditioning_scales = []
804
- scale_debug_str = []
805
-
806
- # Helper function to ensure control image has correct dimensions
807
- def ensure_correct_size(img, target_w, target_h, name="control"):
808
- """Ensure image matches target dimensions exactly"""
809
- if img is None:
810
- return Image.new("RGB", (target_w, target_h), (0,0,0))
811
 
812
- if img.size != (target_w, target_h):
813
- print(f" [RESIZE] {name}: {img.size} -> ({target_w}, {target_h})")
814
- img = img.resize((target_w, target_h), Image.LANCZOS)
815
- return img
816
-
817
- # --- START FIX: Re-written IP-Adapter/ControlNet logic ---
818
-
819
- # 1. InstantID (Identity)
820
- if self.instantid_active:
821
- if has_detected_faces and face_kps_image is not None and face_embeddings is not None:
822
- # Case 1: Face + Embeddings found
823
-
824
- # A. Set the IP-Adapter (face) strength
825
- boosted_scale = identity_preservation * IDENTITY_BOOST_MULTIPLIER
826
- # self.pipe.set_ip_adapter_scale(boosted_scale) # This method doesn't exist on standard pipe
827
-
828
- # B. Pass the raw 512-dim face embeddings to the pipeline
829
- # pipe_kwargs["image_embeds"] = face_embeddings # This param doesn't exist on standard pipe
830
-
831
- # --- START: Manual Embedding Concatenation ---
832
- print(f"Processing InstantID face embeddings with Resampler...")
833
- with torch.no_grad():
834
- face_emb_tensor = torch.from_numpy(face_embeddings).to(device=self.device, dtype=self.dtype)
835
- face_emb_tensor = face_emb_tensor.reshape(1, -1, 512)
836
- face_proj_embeds = self.image_proj_model(face_emb_tensor)
837
 
838
- face_proj_embeds = face_proj_embeds * boosted_scale
839
- print(f" - Face embedding: {face_proj_embeds.shape}, Scale: {boosted_scale:.2f}")
840
-
841
- if 'prompt_embeds' in pipe_kwargs:
842
- original_embeds = pipe_kwargs['prompt_embeds']
843
-
844
- # Concatenate face embeddings to POSITIVE prompt
845
- combined_embeds = torch.cat([original_embeds, face_proj_embeds], dim=1)
846
- pipe_kwargs['prompt_embeds'] = combined_embeds
847
-
848
- # CRITICAL: Pad negative_prompt_embeds by the same amount
849
- if 'negative_prompt_embeds' in pipe_kwargs:
850
- negative_embeds = pipe_kwargs['negative_prompt_embeds']
851
- neg_padding = torch.zeros(
852
- (
853
- negative_embeds.shape[0], # 1
854
- face_proj_embeds.shape[1], # 16
855
- negative_embeds.shape[2], # 2048
856
- ),
857
- device=negative_embeds.device,
858
- dtype=negative_embeds.dtype
859
- )
860
- pipe_kwargs['negative_prompt_embeds'] = torch.cat([negative_embeds, neg_padding], dim=1)
861
- print(f" [OK] Negative prompt padded to match: {pipe_kwargs['negative_prompt_embeds'].shape}")
862
-
863
- print(f" [OK] Face embeddings concatenated successfully! Prompt: {combined_embeds.shape}")
864
- else:
865
- print(f" [WARNING] Can't concatenate - no prompt_embeds (use Compel)")
866
- # --- END: Manual Embedding Concatenation ---
867
-
868
- # C. Add the face keypoints (ControlNet) image
869
- face_kps_image = ensure_correct_size(face_kps_image, target_width, target_height, "InstantID")
870
- control_images.append(face_kps_image)
871
-             conditioning_scales.append(identity_control_scale)
-
-             scale_debug_str.append(f"Identity (IP): {boosted_scale:.2f}")
-             scale_debug_str.append(f"Identity (CN): {identity_control_scale:.2f}")
-             print(f"[OK] InstantID active: IP-Adapter scale set to {boosted_scale:.2f}, ControlNet scale set to {identity_control_scale:.2f}")
-
-         elif has_detected_faces:
-             # Case 2: Face detected (e.g., Mediapipe) but no embeddings available
-             print("[INSTANTID] Using keypoints only (no face embeddings for IP-Adapter).")
-
-             # A. Turn off IP-Adapter (by not concatenating embeddings)
-             # B. Pass dummy embeddings (This is handled by the padding logic below)
-
-             # C. Add face keypoints (ControlNet)
-             face_kps_image = ensure_correct_size(face_kps_image, target_width, target_height, "InstantID")
-             control_images.append(face_kps_image)
-             conditioning_scales.append(identity_control_scale)  # Use the CN scale
-
-             scale_debug_str.append("Identity (IP): 0.00")
-             scale_debug_str.append(f"Identity (CN): {identity_control_scale:.2f}")
-
          else:
-             # Case 3: No face detected at all
-             print("[INSTANTID] No face detected. Disabling face identity.")
-
-             # A. Turn off IP-Adapter (by not concatenating embeddings)
-             # B. Pass dummy embeddings (This is handled by the padding logic below)
-
-             # C. Add blank image for ControlNet (to keep list order)
-             control_images.append(Image.new("RGB", (target_width, target_height), (0, 0, 0)))
-             conditioning_scales.append(0.0)  # Set CN scale to 0

-             scale_debug_str.append("Identity (IP): 0.00")
-             scale_debug_str.append("Identity (CN): 0.00")
-
-         # --- END FIX ---
-
-         # 2. Depth
-         if self.depth_active:
-             # Ensure depth image has correct size
-             depth_image = ensure_correct_size(depth_image, target_width, target_height, "Depth")
-             control_images.append(depth_image)
-             conditioning_scales.append(depth_control_scale)
-             scale_debug_str.append(f"Depth: {depth_control_scale:.2f}")
-
-         # Final validation: ensure all control images have identical dimensions
-         if control_images:
-             expected_size = (target_width, target_height)
-             for idx, img in enumerate(control_images):
-                 if img.size != expected_size:
-                     print(f" [WARNING] Control image {idx} size mismatch: {img.size} vs expected {expected_size}")
-                     control_images[idx] = img.resize(expected_size, Image.LANCZOS)

-             pipe_kwargs["control_image"] = control_images
-             pipe_kwargs["controlnet_conditioning_scale"] = conditioning_scales
-             print(f"Active ControlNets: {len(control_images)} (all {target_width}x{target_height})")
-         else:
-             print("No active ControlNets, running standard Img2Img")
-
-         # Generate
-         print(f"Generating with LCM: Steps={num_inference_steps}, CFG={guidance_scale}, Strength={strength}")
-         print(f"Controlnet scales - {' | '.join(scale_debug_str)}")
-         result = self.pipe(**pipe_kwargs)
-
-         generated_image = result.images[0]
-
-         # Post-processing
-         if enable_color_matching and has_detected_faces:
-             print("Applying enhanced face-aware color matching...")
-             try:
-                 if face_bbox_original is not None:
-                     generated_image = enhanced_color_match(
-                         generated_image,
-                         resized_image,
-                         face_bbox=face_bbox_original
-                     )
-                     print("[OK] Enhanced color matching applied (face-aware)")
-                 else:
                      generated_image = color_match(generated_image, resized_image, mode='mkl')
                      print("[OK] Standard color matching applied")
-             except Exception as e:
-                 print(f"Color matching failed: {e}")
-         elif enable_color_matching:
-             print("Applying standard color matching...")
-             try:
-                 generated_image = color_match(generated_image, resized_image, mode='mkl')
-                 print("[OK] Standard color matching applied")
-             except Exception as e:
-                 print(f"Color matching failed: {e}")

-         return generated_image

- print("[OK] Generator class ready")
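The deleted branching above assembled control_images as a variable-length list, which is why it needed the blank-image and dummy-embedding padding. The replacement below instead keeps the MultiControlNet fixed at two slots. A short sketch of that invariant (my illustration, not code from the commit; the names mirror the diff):

    # Illustration of the two-slot invariant the new generator maintains:
    # the identity ControlNet always gets an image, but its scale is zeroed
    # when no face was detected.
    def build_control_kwargs(face_kps_image, depth_image,
                             identity_scale, depth_scale):
        if face_kps_image is not None:
            images = [face_kps_image, depth_image]
            scales = [identity_scale, depth_scale]
        else:
            # No face: feed depth twice and silence the identity slot.
            images = [depth_image, depth_image]
            scales = [0.0, depth_scale]
        return {"control_image": images,
                "controlnet_conditioning_scale": scales}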
 
  """
+ Generation logic for Pixagram AI Pixel Art Generator
+ UPDATED VERSION with InstantID pipeline integration
  """
  import torch
  import numpy as np
  import cv2
  from PIL import Image
+ import gc

  from config import (
+     device, dtype, TRIGGER_WORD,
+     ADAPTIVE_THRESHOLDS, ADAPTIVE_PARAMS, CAPTION_CONFIG
  )
  from utils import (
+     sanitize_text, enhanced_color_match, color_match,
+     get_demographic_description, calculate_optimal_size, safe_image_size
  )
  from models import (
+     load_face_analysis, load_depth_detector, load_controlnets,
+     load_sdxl_pipeline, load_lora, setup_compel,
+     setup_scheduler, optimize_pipeline, load_caption_model, set_clip_skip
  )


  class RetroArtConverter:
+     """Main class for retro art generation with InstantID"""

      def __init__(self):
          self.device = device
          self.models_loaded = {
              'custom_checkpoint': False,
              'lora': False,
              'instantid': False,
+             'zoe_depth': False
          }

+         # Load face analysis
          self.face_app, self.face_detection_enabled = load_face_analysis()

+         # Load depth detector
+         self.zoe_depth, zoe_success = load_depth_detector()
+         self.models_loaded['zoe_depth'] = zoe_success

+         # Load ControlNets AS LIST
+         controlnet_instantid, controlnet_depth = load_controlnets()
+         controlnets = [controlnet_instantid, controlnet_depth]
+         self.models_loaded['instantid'] = True

+         print("Initializing InstantID pipeline with Face + Depth ControlNets")

+         # Load SDXL pipeline with InstantID (handles IP-Adapter internally)
+         self.pipe, checkpoint_success = load_sdxl_pipeline(controlnets)
          self.models_loaded['custom_checkpoint'] = checkpoint_success

+         # Load LORA
+         lora_success = load_lora(self.pipe)
          self.models_loaded['lora'] = lora_success

+         # Setup Compel
+         self.compel, self.use_compel = setup_compel(self.pipe)

+         # Setup scheduler
          setup_scheduler(self.pipe)

+         # Optimize
          optimize_pipeline(self.pipe)

          # Load caption model
          self.caption_processor, self.caption_model, self.caption_enabled, self.caption_model_type = load_caption_model()

          # Set CLIP skip
          set_clip_skip(self.pipe)

+         # Print status
          self._print_status()

+         print(" [OK] RetroArtConverter initialized with InstantID!")

      def _print_status(self):
          """Print model loading status"""
          print("\n=== MODEL STATUS ===")
          for model, loaded in self.models_loaded.items():
+             status = "[OK] LOADED" if loaded else "[FALLBACK/DISABLED]"
+             print(f"{model}: {status}")
+         print("InstantID Pipeline: [OK] ACTIVE")
+         print("IP-Adapter: [OK] Built into pipeline")
          print("===================\n")

      def get_depth_map(self, image):
+         """Generate depth map using Zoe Depth"""
+         if self.zoe_depth is not None:
              try:
                  if image.mode != 'RGB':
                      image = image.convert('RGB')

+                 # Use safe size helper to avoid numpy.int64 issues
+                 orig_width, orig_height = safe_image_size(image)

+                 # Use multiples of 64
                  target_width = int((orig_width // 64) * 64)
                  target_height = int((orig_height // 64) * 64)

                  size_for_depth = (int(target_width), int(target_height))
                  image_for_depth = image.resize(size_for_depth, Image.LANCZOS)

+                 depth_array = self.zoe_depth(image_for_depth, detect_resolution=512, image_resolution=1024)
+                 depth_image = Image.fromarray(depth_array)

+                 if depth_image.size != image.size:
+                     depth_image = depth_image.resize(image.size, Image.LANCZOS)

+                 print(f"[DEPTH] Generated depth map: {depth_image.size}")
+                 return depth_image, depth_array
              except Exception as e:
+                 print(f"[DEPTH] Generation failed: {e}, using grayscale")
+                 return image.convert('L').convert('RGB'), None
          else:
+             print("[DEPTH] Detector not available, using grayscale")
+             return image.convert('L').convert('RGB'), None

+     def add_trigger_word(self, prompt):
          """Add trigger word to prompt if not present"""
+         if TRIGGER_WORD.lower() not in prompt.lower():
              if not prompt or not prompt.strip():
+                 return TRIGGER_WORD
+             return f"{TRIGGER_WORD}, {prompt}"
          return prompt

      def detect_face_quality(self, face):
+         """Detect face quality and adaptively adjust parameters"""
          try:
              bbox = face.bbox
              face_size = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
              det_score = float(face.det_score) if hasattr(face, 'det_score') else 1.0

+             # Small face -> boost preservation
              if face_size < ADAPTIVE_THRESHOLDS['small_face_size']:
                  return ADAPTIVE_PARAMS['small_face'].copy()

              elif det_score < ADAPTIVE_THRESHOLDS['low_confidence']:
                  return ADAPTIVE_PARAMS['low_confidence'].copy()

+             # Check for profile view
+             elif hasattr(face, 'pose') and len(face.pose) > 1:
+                 try:
+                     yaw = float(face.pose[1])
+                     if abs(yaw) > ADAPTIVE_THRESHOLDS['profile_angle']:
+                         return ADAPTIVE_PARAMS['profile_view'].copy()
+                 except (ValueError, TypeError, IndexError):
+                     pass
+
              return None

          except Exception as e:
              print(f"[ADAPTIVE] Quality detection failed: {e}")
              return None

+     def generate_caption(self, image):
+         """Generate caption for image"""
          if not self.caption_enabled or self.caption_model is None:
              return None

          try:
+             if self.caption_model_type == 'git':
+                 inputs = self.caption_processor(images=image, return_tensors="pt").to(self.device)
+                 generated_ids = self.caption_model.generate(**inputs, max_length=CAPTION_CONFIG['max_length'])
+                 caption = self.caption_processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
+             elif self.caption_model_type == 'blip':
+                 inputs = self.caption_processor(image, return_tensors="pt").to(self.device)
+                 generated_ids = self.caption_model.generate(**inputs, max_length=CAPTION_CONFIG['max_length'])
+                 caption = self.caption_processor.decode(generated_ids[0], skip_special_tokens=True)
              else:
+                 return None

+             return sanitize_text(caption)
          except Exception as e:
+             print(f"[CAPTION] Generation failed: {e}")
              return None

      def generate_retro_art(
          self,
          input_image,
+         prompt=" ",
+         negative_prompt=" ",
          num_inference_steps=12,
+         guidance_scale=1.3,
+         depth_control_scale=0.75,
          identity_control_scale=0.85,
          lora_scale=1.0,
+         identity_preservation=1.2,
+         strength=0.50,
          enable_color_matching=False,
          consistency_mode=True,
          seed=-1
      ):
+         """Generate retro art with InstantID face preservation"""

+         try:
+             # Add trigger word
+             prompt = self.add_trigger_word(prompt)
+             prompt = sanitize_text(prompt)
+             negative_prompt = sanitize_text(negative_prompt)

+             print(f"[PROMPT] {prompt}")

+             # Calculate optimal size
+             orig_width, orig_height = safe_image_size(input_image)
+             optimal_width, optimal_height = calculate_optimal_size(orig_width, orig_height)
+
+             # Resize image
+             resized_image = input_image.resize((optimal_width, optimal_height), Image.LANCZOS)
+             print(f"[SIZE] Resized to {optimal_width}x{optimal_height}")
+
+             # Generate depth map
+             depth_image, depth_array = self.get_depth_map(resized_image)
+
+             # Detect faces
+             has_detected_faces = False
+             face_kps_image = None
+             face_embeddings = None
+             face_bbox_original = None
+
+             if self.face_detection_enabled and self.face_app is not None:
                  try:
+                     image_array = cv2.cvtColor(np.array(resized_image), cv2.COLOR_RGB2BGR)
+                     faces = self.face_app.get(image_array)

                      if len(faces) > 0:
                          has_detected_faces = True
+                         face = faces[0]

+                         # Get face embeddings (512D array)
+                         face_embeddings = face.normed_embedding

                          # Draw keypoints
+                         from pipeline_stable_diffusion_xl_instantid_img2img import draw_kps
+                         face_kps_image = draw_kps(resized_image, face.kps)

+                         # Get bbox for color matching
+                         face_bbox_original = face.bbox

+                         # Adaptive parameter adjustment
+                         adaptive_params = self.detect_face_quality(face)
+                         if adaptive_params:
+                             print(f"[ADAPTIVE] {adaptive_params['reason']}")
+                             identity_preservation = adaptive_params.get('identity_preservation', identity_preservation)
+                             identity_control_scale = adaptive_params.get('identity_control_scale', identity_control_scale)
+                             guidance_scale = adaptive_params.get('guidance_scale', guidance_scale)
+                             lora_scale = adaptive_params.get('lora_scale', lora_scale)

+                         print(f"[FACE] Detected face with {face.det_score:.2f} confidence")
+                         print(f"[FACE] Embeddings shape: {face_embeddings.shape}")
                      else:
+                         print("[FACE] No faces detected")
+
                  except Exception as e:
+                     print(f"[FACE] Detection failed: {e}")

+             # Set LORA scale
+             if hasattr(self.pipe, 'set_adapters') and self.models_loaded['lora']:
+                 try:
+                     self.pipe.set_adapters(["retroart"], adapter_weights=[lora_scale])
+                     print(f"[LORA] Scale: {lora_scale}")
+                 except Exception as e:
+                     print(f"[LORA] Could not set scale: {e}")

+             # Prepare generation kwargs
+             pipe_kwargs = {
+                 "image": resized_image,
+                 "strength": strength,
+                 "num_inference_steps": num_inference_steps,
+                 "guidance_scale": guidance_scale,
+             }
+
+             # Setup generator with seed
+             if seed == -1:
+                 generator = torch.Generator(device=self.device)
+                 actual_seed = generator.seed()
+                 print(f"[SEED] Random: {actual_seed}")
+             else:
+                 generator = torch.Generator(device=self.device).manual_seed(seed)
+                 actual_seed = seed
+                 print(f"[SEED] Fixed: {actual_seed}")
+
+             pipe_kwargs["generator"] = generator

+             # Use Compel for prompt encoding
+             if self.use_compel and self.compel is not None:
                  try:
+                     conditioning = self.compel(prompt)
+                     negative_conditioning = self.compel(negative_prompt)
+
+                     pipe_kwargs["prompt_embeds"] = conditioning[0]
+                     pipe_kwargs["pooled_prompt_embeds"] = conditioning[1]
+                     pipe_kwargs["negative_prompt_embeds"] = negative_conditioning[0]
+                     pipe_kwargs["negative_pooled_prompt_embeds"] = negative_conditioning[1]
+
+                     print("[OK] Using Compel-encoded prompts")
                  except Exception as e:
+                     print(f"[COMPEL] Failed, using standard prompts: {e}")
+                     pipe_kwargs["prompt"] = prompt
+                     pipe_kwargs["negative_prompt"] = negative_prompt
              else:
                  pipe_kwargs["prompt"] = prompt
                  pipe_kwargs["negative_prompt"] = negative_prompt

+             # Configure ControlNets + IP-Adapter (SIMPLIFIED!)
+             if has_detected_faces and face_kps_image is not None:
+                 print("Using InstantID (keypoints + embeddings) + Depth ControlNets")
+
+                 # Control images: [face keypoints, depth map]
+                 pipe_kwargs["control_image"] = [face_kps_image, depth_image]
+
+                 # Conditioning scales: [identity, depth]
+                 pipe_kwargs["controlnet_conditioning_scale"] = [
+                     identity_control_scale,
+                     depth_control_scale
+                 ]
+
+                 # IP-Adapter face embeddings (SIMPLE - pipeline handles everything!)
+                 if face_embeddings is not None:
+                     print(f"Adding face embeddings for IP-Adapter...")

+                     # Just pass the embeddings - pipeline does the rest!
+                     pipe_kwargs["image_embeds"] = face_embeddings
+
+                     # Control IP-Adapter strength
+                     pipe_kwargs["ip_adapter_scale"] = identity_preservation
+
+                     print(f" - Face embeddings shape: {face_embeddings.shape}")
+                     print(f" - IP-Adapter scale: {identity_preservation}")
+                     print(f" [OK] Face embeddings configured")
+                 else:
+                     print(" [WARNING] No face embeddings - using keypoints only")
+
              else:
+                 print("No faces detected - using Depth ControlNet only")

+                 # Use depth for both ControlNet slots (identity scale = 0)
+                 pipe_kwargs["control_image"] = [depth_image, depth_image]
+                 pipe_kwargs["controlnet_conditioning_scale"] = [0.0, depth_control_scale]

+             # Generate
+             print(f"Generating: Steps={num_inference_steps}, CFG={guidance_scale}, Strength={strength}")
+             result = self.pipe(**pipe_kwargs)
+
+             generated_image = result.images[0]
+
+             # Post-processing: Color matching
+             if enable_color_matching and has_detected_faces:
+                 print("Applying enhanced face-aware color matching...")
+                 try:
+                     if face_bbox_original is not None:
+                         generated_image = enhanced_color_match(
+                             generated_image,
+                             resized_image,
+                             face_bbox=face_bbox_original
+                         )
+                         print("[OK] Enhanced color matching applied")
+                     else:
+                         generated_image = color_match(generated_image, resized_image, mode='mkl')
+                         print("[OK] Standard color matching applied")
+                 except Exception as e:
+                     print(f"[COLOR] Matching failed: {e}")
+             elif enable_color_matching:
+                 print("Applying standard color matching...")
+                 try:
                      generated_image = color_match(generated_image, resized_image, mode='mkl')
                      print("[OK] Standard color matching applied")
+                 except Exception as e:
+                     print(f"[COLOR] Matching failed: {e}")
+
+             return generated_image

+         finally:
+             # Memory cleanup
+             if torch.cuda.is_available():
+                 torch.cuda.empty_cache()
+                 gc.collect()


+ print("[OK] Generator class ready with InstantID support")
models.py CHANGED
@@ -1,32 +1,23 @@
  """
  Model loading and initialization for Pixagram AI Pixel Art Generator
- FIXED VERSION with proper IP-Adapter and BLIP-2 support
  """
  import torch
  import time
- import os
- import shutil
  from diffusers import (
-     StableDiffusionXLControlNetImg2ImgPipeline,
      ControlNetModel,
      AutoencoderKL,
      LCMScheduler
  )
- from diffusers.models.attention_processor import AttnProcessor2_0
- from transformers import (
-     CLIPVisionModelWithProjection, CLIPTokenizer,
-     CLIPTextModel, CLIPTextModelWithProjection
- )
  from insightface.app import FaceAnalysis
- from controlnet_aux import ZoeDetector, LeresDetector, MidasDetector, MediapipeFaceDetector
- from huggingface_hub import hf_hub_download, snapshot_download
-
  from compel import Compel, ReturnedEmbeddingsType
- from cog_sdxl_dataset_and_utils import TokenEmbeddingsHandler

- # Use reference implementation's attention processor
- from attention_processor import IPAttnProcessor2_0, AttnProcessor
- from resampler import Resampler

  from config import (
      device, dtype, MODEL_REPO, MODEL_FILES, HUGGINGFACE_TOKEN,
@@ -34,24 +25,26 @@ from config import (
  )


- def download_model_with_retry(repo_id, filename, max_retries=None, **kwargs):
      """Download model with retry logic and proper token handling."""
      if max_retries is None:
          max_retries = DOWNLOAD_CONFIG['max_retries']

-     # Ensure token is passed if available
-     if HUGGINGFACE_TOKEN and "token" not in kwargs:
-         kwargs["token"] = HUGGINGFACE_TOKEN
-
      for attempt in range(max_retries):
          try:
              print(f" Attempting to download {filename} (attempt {attempt + 1}/{max_retries})...")

-             return hf_hub_download(
                  repo_id=repo_id,
                  filename=filename,
                  **kwargs
              )

          except Exception as e:
              print(f" [WARNING] Download attempt {attempt + 1} failed: {e}")
@@ -67,372 +60,135 @@ def download_model_with_retry(repo_id, filename, max_retries=None, **kwargs):


  def load_face_analysis():
-     """
-     Load face analysis model with proper model downloading from HuggingFace.
-     Downloads from DIAMONIK7777/antelopev2 which has the correct model structure.
-     """
      print("Loading face analysis model...")
-
      try:
-         antelope_download = snapshot_download(repo_id="DIAMONIK7777/antelopev2", local_dir="/data/models/antelopev2")
-         # --- FIX: Load InsightFace on CPU to save VRAM ---
-         face_app = FaceAnalysis(name='antelopev2', root='/data', providers=['CPUExecutionProvider'])
-         face_app.prepare(ctx_id=0, det_size=(640, 640))
-         print(" [OK] Face analysis loaded (on CPU)")
          return face_app, True
-
      except Exception as e:
-         print(f" [ERROR] Face detection not available: {e}")
-         import traceback
-         traceback.print_exc()
          return None, False
-

  def load_depth_detector():
-     """
-     Load depth detector with fallback hierarchy: Leres → Zoe → Midas.
-     Returns (detector, detector_type, success).
-     """
-     print("Loading depth detector with fallback hierarchy...")
-
-     # Try LeresDetector first (best quality)
-     try:
-         print(" Attempting LeresDetector (highest quality)...")
-         # --- FIX: Load on CPU ---
-         leres_depth = LeresDetector.from_pretrained("lllyasviel/Annotators")
-         # leres_depth.to(device)  # Removed
-         print(" [OK] LeresDetector loaded successfully (on CPU)")
-         return leres_depth, 'leres', True
-     except Exception as e:
-         print(f" [INFO] LeresDetector not available: {e}")
-
-     # Fallback to ZoeDetector
      try:
-         print(" Attempting ZoeDetector (fallback #1)...")
-         # --- FIX: Load on CPU ---
          zoe_depth = ZoeDetector.from_pretrained("lllyasviel/Annotators")
-         # zoe_depth.to(device)  # Removed
-         print(" [OK] ZoeDetector loaded successfully (on CPU)")
-         return zoe_depth, 'zoe', True
      except Exception as e:
-         print(f" [INFO] ZoeDetector not available: {e}")
-
-     # Final fallback to MidasDetector
-     try:
-         print(" Attempting MidasDetector (fallback #2)...")
-         # --- FIX: Load on CPU ---
-         midas_depth = MidasDetector.from_pretrained("lllyasviel/Annotators")
-         # midas_depth.to(device)  # Removed
-         print(" [OK] MidasDetector loaded successfully (on CPU)")
-         return midas_depth, 'midas', True
-     except Exception as e:
-         print(f" [WARNING] MidasDetector not available: {e}")
-
-     print(" [ERROR] No depth detector available")
-     return None, None, False
-
- # --- NEW FUNCTION ---
- def load_mediapipe_face_detector():
-     """Load MediapipeFaceDetector for advanced face detection."""
-     print("Loading MediapipeFaceDetector...")
-     try:
-         face_detector = MediapipeFaceDetector()
-         print(" [OK] MediapipeFaceDetector loaded successfully")
-         return face_detector, True
-     except Exception as e:
-         print(f" [WARNING] MediapipeFaceDetector not available: {e}")
          return None, False
- # --- END NEW FUNCTION ---

  def load_controlnets():
-     """Load ControlNet models."""
-     print("Loading ControlNet Zoe Depth model...")
-     # --- FIX: Load core models on GPU ---
      controlnet_depth = ControlNetModel.from_pretrained(
-         "xinsir/controlnet-depth-sdxl-1.0",
          torch_dtype=dtype
      ).to(device)
-     print(" [OK] ControlNet Depth loaded (on GPU)")

-     print("Loading InstantID ControlNet...")
-     try:
-         # --- FIX: Load core models on GPU ---
-         controlnet_instantid = ControlNetModel.from_pretrained(
-             "InstantX/InstantID",
-             subfolder="ControlNetModel",
-             torch_dtype=dtype
-         ).to(device)
-         print(" [OK] InstantID ControlNet loaded successfully (on GPU)")
-         # Return all three models
-         return controlnet_depth, controlnet_instantid, True
-     except Exception as e:
-         print(f" [WARNING] InstantID ControlNet not available: {e}")
-         # Return models, indicating InstantID failure
-         return controlnet_depth, None, False
-
-
- def load_image_encoder():
-     """Load CLIP Image Encoder for IP-Adapter."""
-     print("Loading CLIP Image Encoder for IP-Adapter...")
-     try:
-         # --- FIX: Load core models on GPU ---
-         image_encoder = CLIPVisionModelWithProjection.from_pretrained(
-             "h94/IP-Adapter",
-             subfolder="models/image_encoder",
-             torch_dtype=dtype
-         ).to(device)
-         print(" [OK] CLIP Image Encoder loaded successfully (on GPU)")
-         return image_encoder
-     except Exception as e:
-         print(f" [ERROR] Could not load image encoder: {e}")
-         return None


  def load_sdxl_pipeline(controlnets):
-     """Load SDXL checkpoint from HuggingFace Hub."""
-     print("Loading SDXL checkpoint (horizon) with bundled VAE from HuggingFace Hub...")
-
-     # --- START FIX ---
-     # Load tokenizers and text encoders from the base model first
-     # This guarantees they exist, even if the single file doesn't have them
-     print(" Loading base tokenizers and text encoders...")
-     BASE_MODEL = "stabilityai/stable-diffusion-xl-base-1.0"
-
-     try:
-         tokenizer = CLIPTokenizer.from_pretrained(BASE_MODEL, subfolder="tokenizer")
-         tokenizer_2 = CLIPTokenizer.from_pretrained(BASE_MODEL, subfolder="tokenizer_2")
-
-         text_encoder = CLIPTextModel.from_pretrained(
-             BASE_MODEL, subfolder="text_encoder", torch_dtype=dtype
-         ).to(device)
-
-         text_encoder_2 = CLIPTextModelWithProjection.from_pretrained(
-             BASE_MODEL, subfolder="text_encoder_2", torch_dtype=dtype
-         ).to(device)
-         print(" [OK] Base text/token models loaded")
-
-     except Exception as e:
-         print(f" [ERROR] Could not load base text models: {e}")
-         print(" Pipeline will likely fail. Check HF connection/model access.")
-         # Allow it to continue, but it will likely fail below
-         tokenizer = None
-         tokenizer_2 = None
-         text_encoder = None
-         text_encoder_2 = None
-     # --- END FIX ---
-
      try:
-         model_path = download_model_with_retry(MODEL_REPO, MODEL_FILES['checkpoint'], repo_type="model")

-         # --- START FIX ---
-         # Pass the pre-loaded models to from_single_file
-         pipe = StableDiffusionXLControlNetImg2ImgPipeline.from_single_file(
              model_path,
              controlnet=controlnets,
              torch_dtype=dtype,
-             use_safetensors=True,
-
-             # Explicitly provide the models
-             tokenizer=tokenizer,
-             tokenizer_2=tokenizer_2,
-             text_encoder=text_encoder,
-             text_encoder_2=text_encoder_2,
-
-         ).to(device)  # This main pipe MUST be on device
-         # --- END FIX ---
-
-         print(" [OK] Custom checkpoint loaded successfully (VAE bundled)")
          return pipe, True

      except Exception as e:
-         print(f" [WARNING] Could not load custom checkpoint: {e}")
-         print(" Using default SDXL base model")

-         # The fallback logic is already correct
          pipe = StableDiffusionXLControlNetImg2ImgPipeline.from_pretrained(
              "stabilityai/stable-diffusion-xl-base-1.0",
              controlnet=controlnets,
              torch_dtype=dtype,
              use_safetensors=True
-         ).to(device)  # This main pipe MUST be on device
          return pipe, False

- def load_loras(pipe):
-     """Load all LORAs from HuggingFace Hub."""
-     print("Loading all LORAs from HuggingFace Hub...")
-     loaded_loras = {}
-
-     lora_files = {
-         "retroart": MODEL_FILES.get("lora_retroart"),
-         "vga": MODEL_FILES.get("lora_vga"),
-         "lucasart": MODEL_FILES.get("lora_lucasart")
-     }
-
-     for adapter_name, filename in lora_files.items():
-         if not filename:
-             print(f" [INFO] No file specified for LORA '{adapter_name}', skipping.")
-             loaded_loras[adapter_name] = False
-             continue
-
-         try:
-             lora_path = download_model_with_retry(MODEL_REPO, filename, repo_type="model")
-             pipe.load_lora_weights(lora_path, adapter_name=adapter_name)
-             print(f" [OK] LORA loaded successfully: {filename} as '{adapter_name}'")
-             loaded_loras[adapter_name] = True
-         except Exception as e:
-             print(f" [WARNING] Could not load LORA {filename}: {e}")
-             loaded_loras[adapter_name] = False
-
-     success = any(loaded_loras.values())
-     if not success:
-         print(" [WARNING] No LORAs were loaded successfully.")
-
-     return loaded_loras, success
-

- def setup_ip_adapter(pipe, image_encoder):
-     """
-     Setup IP-Adapter for InstantID face embeddings.
-     This is CRITICAL for face preservation.
-     """
-     if image_encoder is None:
-         return None, False
-
-     print("Setting up IP-Adapter for InstantID face embeddings...")
      try:
-         # Download InstantID weights
-         ip_adapter_path = download_model_with_retry(
-             "InstantX/InstantID",
-             "ip-adapter.bin",
-             repo_type="model"
-         )
-
-         # Load full state dict
-         state_dict = torch.load(ip_adapter_path, map_location="cpu")
-
-         # Extract image_proj and ip_adapter weights
-         image_proj_state_dict = {}
-         ip_adapter_state_dict = {}
-
-         for key, value in state_dict.items():
-             if key.startswith("image_proj."):
-                 image_proj_state_dict[key.replace("image_proj.", "")] = value
-             elif key.startswith("ip_adapter."):
-                 ip_adapter_state_dict[key.replace("ip_adapter.", "")] = value
-
-         # Create Resampler with CORRECT parameters
-         print("Creating Resampler (Perceiver architecture)...")
-         image_proj_model = Resampler(
-             dim=1280,
-             depth=4,
-             dim_head=64,
-             heads=20,
-             num_queries=16,
-             embedding_dim=512,  # CRITICAL: Must match InsightFace embedding size
-             output_dim=pipe.unet.config.cross_attention_dim,
-             ff_mult=4
-         )
-
-         image_proj_model.eval()
-         image_proj_model = image_proj_model.to(device, dtype=dtype)
-
-         # Load image_proj weights
-         if image_proj_state_dict:
-             try:
-                 image_proj_model.load_state_dict(image_proj_state_dict, strict=True)
-                 print(" [OK] Resampler loaded with pretrained weights")
-             except Exception as e:
-                 print(f" [WARNING] Could not load Resampler weights: {e}")
-
-         # Setup IP-Adapter attention processors
-         print("Setting up IP-Adapter attention processors...")
-         attn_procs = {}
-         num_tokens = 16
-
-         for name in pipe.unet.attn_processors.keys():
-             cross_attention_dim = None if name.endswith("attn1.processor") else pipe.unet.config.cross_attention_dim
-
-             if name.startswith("mid_block"):
-                 hidden_size = pipe.unet.config.block_out_channels[-1]
-             elif name.startswith("up_blocks"):
-                 block_id = int(name[len("up_blocks.")])
-                 hidden_size = list(reversed(pipe.unet.config.block_out_channels))[block_id]
-             elif name.startswith("down_blocks"):
-                 block_id = int(name[len("down_blocks.")])
-                 hidden_size = pipe.unet.config.block_out_channels[block_id]
-             else:
-                 hidden_size = pipe.unet.config.block_out_channels[-1]
-
-             if cross_attention_dim is None:
-                 attn_procs[name] = AttnProcessor2_0()
-             else:
-                 attn_procs[name] = IPAttnProcessor2_0(
-                     hidden_size=hidden_size,
-                     cross_attention_dim=cross_attention_dim,
-                     scale=1.0,
-                     num_tokens=num_tokens
-                 ).to(device, dtype=dtype)
-
-         # Set attention processors
-         pipe.unet.set_attn_processor(attn_procs)
-
-         # Load IP-Adapter weights
-         if ip_adapter_state_dict:
-             try:
-                 ip_layers = torch.nn.ModuleList(pipe.unet.attn_processors.values())
-                 ip_layers.load_state_dict(ip_adapter_state_dict, strict=False)
-                 print(" [OK] IP-Adapter attention weights loaded")
-             except Exception as e:
-                 print(f" [WARNING] Could not load IP-Adapter weights: {e}")
-
-         # Store image encoder
-         pipe.image_encoder = image_encoder
-
-         print(" [OK] IP-Adapter fully loaded with InstantID architecture")
-         print(f" - Resampler: 4 layers, 20 heads, 16 output tokens")
-         print(f" - Face embeddings: 512D -> 16x{pipe.unet.config.cross_attention_dim}D")
-
-         return image_proj_model, True
-
      except Exception as e:
-         print(f" [ERROR] Could not setup IP-Adapter: {e}")
-         import traceback
-         traceback.print_exc()
-         return None, False


- # --- START FIX: Remove premature token initialization ---
  def setup_compel(pipe):
-     """Setup Compel for prompt encoding."""
-     print("Setting up Compel (prompt encoder)...")
      try:
-         # 1. Initialize the handler to modify tokenizers
-         print(" Initializing TokenEmbeddingsHandler for special tokens...")
-         handler = TokenEmbeddingsHandler(
-             [pipe.text_encoder, pipe.text_encoder_2],
-             [pipe.tokenizer, pipe.tokenizer_2]
-         )
-
-         handler.initialize_new_tokens(["<s0>", "<s1>"])
-         print(" [OK] Special tokens <s0>, <s1> added to tokenizers.")
-
-         # 3. Now, initialize Compel with the *unmodified* 77-token tokenizers
          compel = Compel(
              tokenizer=[pipe.tokenizer, pipe.tokenizer_2],
              text_encoder=[pipe.text_encoder, pipe.text_encoder_2],
              returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
              requires_pooled=[False, True]
          )
-         print(" [OK] Compel loaded successfully.")
-
-         # 4. Return both compel and the handler
-         return compel, handler, True
-
      except Exception as e:
-         print(f" [WARNING] Compel setup failed: {e}")
-         import traceback
-         traceback.print_exc()
-         return None, None, False
- # --- END FIX ---


  def setup_scheduler(pipe):
@@ -444,10 +200,6 @@ def setup_scheduler(pipe):

  def optimize_pipeline(pipe):
      """Apply optimizations to pipeline."""
-
-     # --- FIX: Removed enable_model_cpu_offload() ---
-
-     # Try to enable xformers
      if device == "cuda":
          try:
              pipe.enable_xformers_memory_efficient_attention()
@@ -463,18 +215,17 @@ def load_caption_model():
      """
      print("Loading caption model...")

-     # Try GIT-Large first (good balance of quality and compatibility)
      try:
          from transformers import AutoProcessor, AutoModelForCausalLM

          print(" Attempting GIT-Large (recommended)...")
          caption_processor = AutoProcessor.from_pretrained("microsoft/git-large-coco")
-         # --- FIX: Load on CPU ---
          caption_model = AutoModelForCausalLM.from_pretrained(
              "microsoft/git-large-coco",
              torch_dtype=dtype
-         )  # .to(device) removed
-         print(" [OK] GIT-Large model loaded (produces detailed captions, on CPU)")
          return caption_processor, caption_model, True, 'git'
      except Exception as e1:
          print(f" [INFO] GIT-Large not available: {e1}")
@@ -485,16 +236,14 @@ def load_caption_model():

          print(" Attempting BLIP base (fallback)...")
          caption_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
-         # --- FIX: Load on CPU ---
          caption_model = BlipForConditionalGeneration.from_pretrained(
              "Salesforce/blip-image-captioning-base",
              torch_dtype=dtype
-         )  # .to(device) removed
-         print(" [OK] BLIP base model loaded (standard captions, on CPU)")
          return caption_processor, caption_model, True, 'blip'
      except Exception as e2:
          print(f" [WARNING] Caption models not available: {e2}")
-         print(" Caption generation will be disabled")
          return None, None, False, 'none'

@@ -504,4 +253,4 @@ def set_clip_skip(pipe):
      print(f" [OK] CLIP skip set to {CLIP_SKIP}")


- print("[OK] Model loading functions ready")
 
  """
  Model loading and initialization for Pixagram AI Pixel Art Generator
+ UPDATED VERSION with proper InstantID pipeline support
  """
  import torch
  import time
  from diffusers import (
      ControlNetModel,
      AutoencoderKL,
      LCMScheduler
  )
  from insightface.app import FaceAnalysis
+ from controlnet_aux import ZoeDetector
+ from huggingface_hub import hf_hub_download
  from compel import Compel, ReturnedEmbeddingsType

+ # Use InstantID pipeline
+ from pipeline_stable_diffusion_xl_instantid_img2img import (
+     StableDiffusionXLInstantIDImg2ImgPipeline
+ )

  from config import (
      device, dtype, MODEL_REPO, MODEL_FILES, HUGGINGFACE_TOKEN,
  )


+ def download_model_with_retry(repo_id, filename, max_retries=None):
      """Download model with retry logic and proper token handling."""
      if max_retries is None:
          max_retries = DOWNLOAD_CONFIG['max_retries']

      for attempt in range(max_retries):
          try:
              print(f" Attempting to download {filename} (attempt {attempt + 1}/{max_retries})...")

+             kwargs = {"repo_type": "model"}
+             if HUGGINGFACE_TOKEN:
+                 kwargs["token"] = HUGGINGFACE_TOKEN
+
+             path = hf_hub_download(
                  repo_id=repo_id,
                  filename=filename,
                  **kwargs
              )
+             print(f" [OK] Downloaded: {filename}")
+             return path

          except Exception as e:
              print(f" [WARNING] Download attempt {attempt + 1} failed: {e}")


  def load_face_analysis():
+     """Load face analysis model with proper error handling."""
      print("Loading face analysis model...")
      try:
+         face_app = FaceAnalysis(
+             name=FACE_DETECTION_CONFIG['model_name'],
+             root='./models/insightface',
+             providers=['CUDAExecutionProvider', 'CPUExecutionProvider']
+         )
+         face_app.prepare(
+             ctx_id=FACE_DETECTION_CONFIG['ctx_id'],
+             det_size=FACE_DETECTION_CONFIG['det_size']
+         )
+         print(" [OK] Face analysis model loaded successfully")
          return face_app, True
      except Exception as e:
+         print(f" [WARNING] Face detection not available: {e}")
          return None, False
+
+
  def load_depth_detector():
+     """Load Zoe Depth detector."""
+     print("Loading Zoe Depth detector...")
      try:
          zoe_depth = ZoeDetector.from_pretrained("lllyasviel/Annotators")
+         zoe_depth.to(device)
+         print(" [OK] Zoe Depth loaded successfully")
+         return zoe_depth, True
      except Exception as e:
+         print(f" [WARNING] Zoe Depth not available: {e}")
          return None, False
+

  def load_controlnets():
+     """
+     Load ControlNets for InstantID pipeline.
+     Returns both ControlNets (InstantID first, then Depth).
+     """
+     print("Loading InstantID ControlNet...")
+     controlnet_instantid = ControlNetModel.from_pretrained(
+         "InstantX/InstantID",
+         subfolder="ControlNetModel",
+         torch_dtype=dtype
+     ).to(device)
+     print(" [OK] InstantID ControlNet loaded")
+
+     print("Loading Zoe Depth ControlNet...")
      controlnet_depth = ControlNetModel.from_pretrained(
+         "diffusers/controlnet-zoe-depth-sdxl-1.0",
          torch_dtype=dtype
      ).to(device)
+     print(" [OK] Zoe Depth ControlNet loaded")

+     return controlnet_instantid, controlnet_depth


  def load_sdxl_pipeline(controlnets):
+     """
+     Load SDXL pipeline with InstantID support.
+     controlnets MUST be a list: [identitynet, depthnet]
+     """
+     print("Loading SDXL checkpoint with InstantID pipeline...")
      try:
+         model_path = download_model_with_retry(MODEL_REPO, MODEL_FILES['checkpoint'])

+         # Use InstantID-enabled pipeline
+         pipe = StableDiffusionXLInstantIDImg2ImgPipeline.from_single_file(
              model_path,
              controlnet=controlnets,
              torch_dtype=dtype,
+             use_safetensors=True
+         ).to(device)
+
+         # Load IP-Adapter weights for InstantID
+         print("Loading IP-Adapter for InstantID...")
+         ip_adapter_path = download_model_with_retry(
+             "InstantX/InstantID",
+             "ip-adapter.bin"
+         )
+         pipe.load_ip_adapter_instantid(ip_adapter_path)
+         pipe.set_ip_adapter_scale(0.8)  # Default scale
+
+         print(" [OK] InstantID pipeline loaded successfully")
          return pipe, True

      except Exception as e:
+         print(f" [ERROR] Could not load InstantID pipeline: {e}")
+         import traceback
+         traceback.print_exc()

+         # Fallback to standard pipeline
+         print(" Falling back to standard SDXL pipeline (no InstantID)")
+         from diffusers import StableDiffusionXLControlNetImg2ImgPipeline
          pipe = StableDiffusionXLControlNetImg2ImgPipeline.from_pretrained(
              "stabilityai/stable-diffusion-xl-base-1.0",
              controlnet=controlnets,
              torch_dtype=dtype,
              use_safetensors=True
+         ).to(device)
          return pipe, False


+ def load_lora(pipe):
+     """Load LORA from HuggingFace Hub."""
+     print("Loading LORA (retroart) from HuggingFace Hub...")
      try:
+         lora_path = download_model_with_retry(MODEL_REPO, MODEL_FILES['lora'])
+         pipe.load_lora_weights(lora_path, adapter_name="retroart")
+         print(f" [OK] LORA loaded successfully")
+         return True
      except Exception as e:
+         print(f" [WARNING] Could not load LORA: {e}")
+         return False


  def setup_compel(pipe):
+     """Setup Compel for better SDXL prompt handling."""
+     print("Setting up Compel for enhanced prompt processing...")
      try:
          compel = Compel(
              tokenizer=[pipe.tokenizer, pipe.tokenizer_2],
              text_encoder=[pipe.text_encoder, pipe.text_encoder_2],
              returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
              requires_pooled=[False, True]
          )
+         print(" [OK] Compel loaded successfully")
+         return compel, True
      except Exception as e:
+         print(f" [WARNING] Compel not available: {e}")
+         return None, False


  def setup_scheduler(pipe):

  def optimize_pipeline(pipe):
      """Apply optimizations to pipeline."""
      if device == "cuda":
          try:
              pipe.enable_xformers_memory_efficient_attention()

      """
      print("Loading caption model...")

+     # Try GIT-Large first
      try:
          from transformers import AutoProcessor, AutoModelForCausalLM

          print(" Attempting GIT-Large (recommended)...")
          caption_processor = AutoProcessor.from_pretrained("microsoft/git-large-coco")
          caption_model = AutoModelForCausalLM.from_pretrained(
              "microsoft/git-large-coco",
              torch_dtype=dtype
+         ).to(device)
+         print(" [OK] GIT-Large model loaded")
          return caption_processor, caption_model, True, 'git'
      except Exception as e1:
          print(f" [INFO] GIT-Large not available: {e1}")

          print(" Attempting BLIP base (fallback)...")
          caption_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
          caption_model = BlipForConditionalGeneration.from_pretrained(
              "Salesforce/blip-image-captioning-base",
              torch_dtype=dtype
+         ).to(device)
+         print(" [OK] BLIP base model loaded")
          return caption_processor, caption_model, True, 'blip'
      except Exception as e2:
          print(f" [WARNING] Caption models not available: {e2}")
          return None, None, False, 'none'


      print(f" [OK] CLIP skip set to {CLIP_SKIP}")


+ print("[OK] Model loading functions ready")