00Boobs00 commited on
Commit
2d4cae8
·
verified ·
1 Parent(s): 0a92f49

Update app.py from anycoder

Browse files
Files changed (1) hide show
  1. app.py +1 -568
app.py CHANGED
@@ -1,568 +1 @@
1
- import gradio as gr
2
- import torch
3
- import gc
4
- import os
5
- import sys
6
- import time
7
- import tempfile
8
- import traceback
9
- from pathlib import Path
10
- from typing import Optional, Generator
11
- from PIL import Image
12
- import logging
13
-
14
# Module-wide logger: timestamp | level | message.
logging.basicConfig(level=logging.INFO, format='%(asctime)s | %(levelname)s | %(message)s')
logger = logging.getLogger(__name__)

# Environment - Electric Jungle Setup
# On HF Spaces with persistent storage, keep the model cache on /data so
# downloads survive restarts.
# NOTE(review): original indentation was lost in the scrape — assuming both
# env vars are set only when /data exists; verify against the deployed app.
if Path("/data").exists():
    os.environ["HF_HOME"] = "/data/.huggingface"
    os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"

# Optional auth token for gated/private repos; None when unset.
HF_TOKEN = os.getenv("HF_TOKEN")
# Repo holding the .safetensors LoRA style weights.
LORA_REPO = "Playtime-AI/Wan2.2-Loras"
# Local directory where downloaded LoRA files are stored.
LORA_CACHE = Path(os.environ.get("HF_HOME", ".")) / "loras"
LORA_CACHE.mkdir(parents=True, exist_ok=True)

# ZeroGPU Detection: the `spaces` package only exists on HF ZeroGPU Spaces.
try:
    import spaces
    ZEROGPU = True
except ImportError:
    ZEROGPU = False

# Global State
# lora_cache maps style name (file stem) -> repo filename.
lora_cache: dict = {}
# Currently selected style name, or None for plain generation.
selected_lora: Optional[str] = None
# Base diffusers model used for image-to-video generation.
MODEL_ID = "Wan-AI/Wan2.2-TI2V-5B-Diffusers"
38
-
39
# === ELECTRIC PROGRESS TRACKER ===
class ElectricProgress:
    """Multi-stage progress reporter rendered as markdown for the UI.

    Stages are declared in STAGES as (key, display label, weight), where
    weights sum to 100 and drive the overall percentage.
    """

    STAGES = [
        ("init", "🔥 Igniting Core", 5),
        ("download_check", "⚡ Scanning Cache", 5),
        ("download", "🌩️ Downloading Model", 40),
        ("load_pipeline", "🚀 Pipeline Activation", 20),
        ("load_lora", "🎨 Style Fusion", 5),
        ("prepare", "✨ Frame Prep", 5),
        ("generate", "🎬 Creative Explosion", 15),
        ("export", "🎥 Video Manifest", 5),
    ]

    def __init__(self):
        self.current_stage = 0        # index into STAGES
        self.stage_progress = 0       # 0-100 within the current stage
        self.start_time = time.time()         # overall wall-clock start
        self.stage_start_time = time.time()   # current stage start
        self.messages = []            # rolling log lines shown to the user

    def _get_stage_weight(self, stage_name: str) -> int:
        """Return the declared weight for a stage key (10 if unknown)."""
        return next((w for key, _, w in self.STAGES if key == stage_name), 10)

    def _get_cumulative_progress(self) -> int:
        """Overall 0-100 progress: full weight of past stages + partial current."""
        done = sum(w for _, _, w in self.STAGES[:self.current_stage])
        if self.current_stage < len(self.STAGES):
            done += int(self.STAGES[self.current_stage][2] * self.stage_progress / 100)
        return min(done, 100)

    def start_stage(self, stage_name: str, message: str = ""):
        """Switch to the named stage, reset its timer and log its header line."""
        for idx, (key, label, _) in enumerate(self.STAGES):
            if key != stage_name:
                continue
            self.current_stage = idx
            self.stage_progress = 0
            self.stage_start_time = time.time()
            suffix = f": {message}" if message else ""
            self.messages.append(f"⚡ {label}{suffix}")
            break

    def update_stage(self, progress: int, detail: str = ""):
        """Advance progress within the current stage; optionally refresh its line."""
        self.stage_progress = min(progress, 100)
        if detail and self.messages:
            label = self.STAGES[self.current_stage][1]
            self.messages[-1] = f"⚡ {label}: {detail}"

    def complete_stage(self, message: str = ""):
        """Mark the current stage finished and stamp its duration."""
        self.stage_progress = 100
        took = time.time() - self.stage_start_time
        label = self.STAGES[self.current_stage][1]
        tail = f" - {message}" if message else ""
        self.messages[-1] = f"✅ {label} ({took:.1f}s){tail}"

    def error(self, message: str):
        """Append a failure line without touching the progress percentage."""
        self.messages.append(f"💥 {message}")

    def format_display(self) -> str:
        """Render the progress report as a markdown block for the UI."""
        overall = self._get_cumulative_progress()
        elapsed = time.time() - self.start_time

        # Only estimate remaining time once there is enough signal (>5%).
        if overall > 5:
            projected_total = elapsed / (overall / 100)
            time_str = f"⏱️ ~{int(max(0, projected_total - elapsed))}s remaining"
        else:
            time_str = "⏳ calculating..."

        width = 40
        filled = int(width * overall / 100)
        bar = "🟩" * filled + "⬜" * (width - filled)

        report = [
            f"# 🎨 Progress: {overall}%",
            f"```{bar}``` {time_str}",
            "",
            "**⚡ Creative Pipeline:**",
            *self.messages[-8:],  # keep the log short: last 8 lines only
        ]
        return "\n".join(report)
122
-
123
# === VALIDATION ===
def validate_image(image) -> tuple[bool, str, Optional[Image.Image]]:
    """Check an uploaded image and normalize it to RGB.

    Accepts a filesystem path or a PIL Image. Returns
    (ok, user-facing markdown message, normalized RGB image or None).
    """
    if image is None:
        return False, "🎨 **Upload your creative spark!**\n\nDrop an image to ignite the jungle.", None

    try:
        if isinstance(image, Image.Image):
            img = image
        elif isinstance(image, str):
            if not os.path.exists(image):
                return False, "🔍 **Image not found**\n\nTry uploading again.", None
            img = Image.open(image)
        else:
            return False, f"❌ **Invalid image type**: {type(image).__name__}", None

        # Normalize exotic modes (palette, CMYK, ...) before size checks.
        if img.mode not in ('RGB', 'RGBA', 'L'):
            img = img.convert('RGB')

        w, h = img.size
        if w < 64 or h < 64:
            return False, f"📏 **Image too small**: {w}x{h}\n\nMinimum: 64x64", None
        if w > 4096 or h > 4096:
            return False, f"📏 **Image too large**: {w}x{h}\n\nMax: 4096x4096", None

        return True, "✅ **Canvas ready!**", img.convert('RGB')
    except Exception as e:
        return False, f"💥 **Image processing failed**: {str(e)}", None
150
-
151
def validate_prompt(prompt: str) -> tuple[bool, str]:
    """Validate a motion prompt: non-blank and at most 1000 characters.

    Returns (ok, user-facing markdown message).
    """
    text = (prompt or "").strip()
    if not text:
        return False, "✍️ **Describe the motion!**\n\n'exhale gently', 'hair flows in wind', 'subtle smile'"
    # Length limit is measured on the raw prompt, whitespace included.
    if len(prompt) > 1000:
        return False, f"📜 **Prompt too long**: {len(prompt)} chars\n\nMax 1000 chars"
    return True, "✅ **Vision captured!**"
157
-
158
# === CORE CREATION ENGINE ===
def generate_with_progress(image, prompt, frames, height, width, guidance, steps, lora_scale, seed) -> Generator:
    """Run the full image-to-video pipeline, yielding UI updates as it goes.

    Yields (None, progress_markdown) snapshots while working; on success the
    final yield is (path_to_mp4, summary_markdown), on failure it yields
    (None, error_markdown) and returns.

    Args:
        image: PIL Image or file path (validated by validate_image).
        prompt: motion description text.
        frames, height, width, guidance, steps: generation settings from the UI.
        lora_scale: LoRA strength, used only when a style is selected.
        seed: RNG seed; negative means non-deterministic.
    """
    progress = ElectricProgress()

    try:
        # Stage 1: validate inputs before any heavy work.
        progress.start_stage("init", "Validating creative inputs")
        yield None, progress.format_display()

        valid, msg, processed_image = validate_image(image)
        if not valid:
            progress.error(msg)
            yield None, progress.format_display()
            return

        valid, msg = validate_prompt(prompt)
        if not valid:
            progress.error(msg)
            yield None, progress.format_display()
            return

        progress.complete_stage("Vision locked!")
        yield None, progress.format_display()

        # Stage 2: check whether the model is already in the local HF cache.
        progress.start_stage("download_check", "Scanning neural cache")
        yield None, progress.format_display()

        try:
            from huggingface_hub import try_to_load_from_cache
            cached = try_to_load_from_cache(MODEL_ID, "model_index.json")
            model_cached = cached is not None
            progress.complete_stage("Neural pathways active" if model_cached else "Fetching cosmic data")
        except Exception:
            # FIX: was a bare `except:` which also swallowed SystemExit /
            # KeyboardInterrupt; the cache scan is best-effort only.
            model_cached = False
            progress.complete_stage("Cache scan complete")
        yield None, progress.format_display()

        # Stage 3: cosmetic "download" stage — the real fetch happens inside
        # from_pretrained() below; this just keeps the progress bar moving.
        if not model_cached:
            progress.start_stage("download", "Channeling 20GB neural network")
            yield None, progress.format_display()
            progress.update_stage(25, "Syncing cosmic intelligence...")
            yield None, progress.format_display()
            time.sleep(0.8)
            progress.update_stage(75, "Neural weights downloading...")
            yield None, progress.format_display()
            time.sleep(0.8)
        else:
            progress.start_stage("download", "Using cached neural pathways")
            progress.complete_stage("Instant activation")
            yield None, progress.format_display()

        # Stage 4: build the diffusers pipeline and place it on the device.
        progress.start_stage("load_pipeline", "Awakening AI pipeline")
        yield None, progress.format_display()

        try:
            from diffusers import WanImageToVideoPipeline
            progress.update_stage(25, "Initializing creative core...")
            yield None, progress.format_display()

            pipe = WanImageToVideoPipeline.from_pretrained(
                MODEL_ID,
                torch_dtype=torch.bfloat16,
                use_safetensors=True,
                token=HF_TOKEN,
            )

            progress.update_stage(60, "Electrifying GPU pathways...")
            yield None, progress.format_display()

            if torch.cuda.is_available():
                # CPU offload + VAE slicing/tiling keep peak VRAM manageable.
                pipe.enable_model_cpu_offload()
                if hasattr(pipe, 'vae'):
                    pipe.vae.enable_slicing()
                    pipe.vae.enable_tiling()
                progress.update_stage(90, "GPU symphony engaged")
            else:
                pipe = pipe.to("cpu")
                progress.update_stage(90, "CPU mode activated")

            progress.complete_stage()
            yield None, progress.format_display()

        except Exception as e:
            progress.error(f"Pipeline failed: {str(e)[:100]}")
            yield None, progress.format_display()
            return

        # Stage 5: optionally fuse the selected LoRA style (best-effort —
        # a LoRA failure degrades to pure generation rather than aborting).
        progress.start_stage("load_lora")
        yield None, progress.format_display()

        if selected_lora and selected_lora in lora_cache:
            try:
                from huggingface_hub import hf_hub_download
                progress.update_stage(30, f"Fusing {selected_lora} style...")
                yield None, progress.format_display()

                filename = lora_cache[selected_lora]
                local_path = LORA_CACHE / Path(filename).name

                if not local_path.exists():
                    progress.update_stage(60, "Downloading style weights...")
                    yield None, progress.format_display()
                    hf_hub_download(
                        repo_id=LORA_REPO,
                        filename=filename,
                        local_dir=str(LORA_CACHE),
                        token=HF_TOKEN
                    )

                progress.update_stage(90, "Style infusion complete")
                pipe.load_lora_weights(str(local_path.parent), weight_name=local_path.name)
                progress.complete_stage(f"{selected_lora} activated")
            except Exception as e:
                progress.error(f"LoRA failed: {str(e)[:50]}")
        else:
            progress.complete_stage("Pure generation mode")
        yield None, progress.format_display()

        # Stage 6: resize the canvas (dims must be multiples of 8) and seed RNG.
        progress.start_stage("prepare", "Framing the vision")
        yield None, progress.format_display()

        w = (int(width) // 8) * 8
        h = (int(height) // 8) * 8
        img = processed_image.resize((w, h), Image.Resampling.LANCZOS)
        progress.update_stage(50, f"Canvas: {w}x{h}")
        yield None, progress.format_display()

        gen = None
        if seed >= 0:
            device = "cuda" if torch.cuda.is_available() else "cpu"
            gen = torch.Generator(device=device).manual_seed(int(seed))
            progress.update_stage(80, f"Creative seed: {int(seed)}")
        else:
            # Negative seed => leave generator None for non-deterministic output.
            progress.update_stage(80, "Chaos seed activated")
        yield None, progress.format_display()
        progress.complete_stage()
        yield None, progress.format_display()

        # Stage 7: run the diffusion loop.
        progress.start_stage("generate", f"Exploding {int(frames)} frames")
        yield None, progress.format_display()

        kwargs = {
            "prompt": prompt,
            "image": img,
            "height": h,
            "width": w,
            "num_frames": int(frames),
            "guidance_scale": float(guidance),
            "num_inference_steps": int(steps),
            "generator": gen,
        }

        if selected_lora:
            # NOTE(review): cross_attention_kwargs scaling is the SD-style API;
            # confirm the Wan pipeline honors it (set_adapters may be needed).
            kwargs["cross_attention_kwargs"] = {"scale": float(lora_scale)}

        def progress_callback(pipe, step, timestep, callback_kwargs):
            # Map denoising step to a percentage of this stage.
            pct = int((step / int(steps)) * 100)
            progress.update_stage(pct, f"Frame {step}/{int(steps)}")
            return callback_kwargs

        kwargs["callback_on_step_end"] = progress_callback
        progress.update_stage(5, "Initiating creative burst...")
        yield None, progress.format_display()

        output = pipe(**kwargs)
        frames_out = output.frames[0]
        progress.complete_stage(f"{len(frames_out)} frames materialized")
        yield None, progress.format_display()

        # Stage 8: encode frames to an mp4 in a temp file.
        progress.start_stage("export", "Manifesting video masterpiece")
        yield None, progress.format_display()

        from diffusers.utils import export_to_video
        # More frames -> slightly higher fps, capped at 16.
        fps = min(8 + (frames // 10), 16)
        progress.update_stage(50, f"Encoding symphony @ {fps}fps")
        yield None, progress.format_display()

        with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as f:
            export_to_video(frames_out, f.name, fps=fps)
            video_path = f.name

        progress.complete_stage()

        # Release pipeline memory before handing the result back.
        del pipe
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

        total_time = time.time() - progress.start_time
        final_status = f"""# 🎉 Masterpiece Complete!

**{len(frames_out)} frames** @ **{fps}fps** = **{len(frames_out)/fps:.1f}s** of pure creativity

**Canvas:** {w}x{h} | **Time:** {total_time:.1f}s | **Style:** {selected_lora or 'Pure'}

**Your electric jungle creation is ready!** ⚡🎬✨
"""
        yield video_path, final_status

    except torch.cuda.OutOfMemoryError:
        error_msg = """# 💥 GPU Overload!

**The jungle got too electric!**

**Quick fixes:**
- Canvas: 256x384
- Frames: 17
- Refresh & retry

*Heavy creativity needs space to breathe* 🌩️"""
        yield None, error_msg

    except Exception as e:
        tb = traceback.format_exc()
        logger.error(f"Creation failed: {e}")
        error_msg = f"""# 💥 Creation Interrupted

**Error:** {type(e).__name__}: {str(e)[:200]}

**Reset the jungle:**
1. New image
2. Simple prompt
3. Lower settings
4. Refresh page

```
{tb[-800:]}
```"""
        yield None, error_msg
395
-
396
# === CREATIVE WRAPPERS ===
def _drain_generation(image, prompt, frames, height, width, guidance, steps, lora_scale, seed):
    """Consume the progress generator and return only the final (video, status).

    Gradio's click handler wants a plain return value here, so we drain the
    streaming generator and keep the last pair it yielded.
    """
    result = None
    status = ""
    for video, msg in generate_with_progress(image, prompt, frames, height, width, guidance, steps, lora_scale, seed):
        result = video
        status = msg
    return result, status


# FIX: the original duplicated the drain loop verbatim in both branches;
# define it once and only vary the ZeroGPU decoration.
if ZEROGPU:
    # On ZeroGPU Spaces the heavy work must run inside a @spaces.GPU context.
    generate_video = spaces.GPU(duration=300)(_drain_generation)
else:
    generate_video = _drain_generation
414
-
415
# === ELECTRIC UI FUNCTIONS ===
def get_status() -> str:
    """Summarize the compute backend and active style for the UI header."""
    if ZEROGPU:
        backend = "⚡ ZeroGPU"
    elif torch.cuda.is_available():
        backend = "🔥 CUDA"
    else:
        backend = "💻 CPU"
    style = selected_lora or "Pure Creation"
    return f"**{backend} | Style: {style}**"
420
-
421
def refresh_loras():
    """Scan the LoRA repo and rebuild the style dropdown.

    Returns (updated Dropdown, status markdown); on any failure the dropdown
    is emptied and the error is surfaced in the status text.
    """
    global lora_cache
    try:
        from huggingface_hub import list_repo_files
        repo_files = list_repo_files(LORA_REPO, token=HF_TOKEN)
        # Map style name (file stem) -> repo filename, .safetensors only.
        lora_cache = {Path(f).stem: f for f in repo_files if f.endswith('.safetensors')}
        names = sorted(lora_cache)
        default = names[0] if names else None
        return gr.Dropdown(choices=names, value=default), f"✅ **{len(names)} styles** ready to ignite!"
    except Exception as e:
        return gr.Dropdown(choices=[], value=None), f"⚠️ **Style scan failed**: {str(e)[:80]}"
431
-
432
def select_lora(name: str) -> str:
    """Activate a known LoRA style by name, or fall back to pure generation.

    Updates the module-level `selected_lora` and returns a status message.
    """
    global selected_lora
    if not name or name not in lora_cache:
        selected_lora = None
        return "🌿 **Pure creation mode** - raw AI power"
    selected_lora = name
    return f"🎨 **{name}** electrified!\n\n*Style will pulse through your creation*"
439
-
440
# === GRAND ELECTRIC INTERFACE ===
css = """
.electric-jungle {
    background: linear-gradient(135deg, #0f0f23 0%, #1a0033 50%, #0f0f23 100%);
    font-family: 'Segoe UI', -apple-system, sans-serif;
    color: #e0e0ff;
}
.progress-box {
    font-family: 'Fira Code', monospace;
    background: linear-gradient(145deg, #1a1a3e, #16213e);
    padding: 20px;
    border-radius: 12px;
    border: 1px solid #00d4ff;
    box-shadow: 0 8px 32px rgba(0, 212, 255, 0.1);
}
.btn-electric {
    background: linear-gradient(45deg, #ff00aa, #00d4ff, #ffaa00);
    border: none;
    border-radius: 12px;
    font-weight: bold;
    transition: all 0.3s ease;
}
.btn-electric:hover {
    transform: translateY(-2px);
    box-shadow: 0 12px 40px rgba(255, 0, 170, 0.4);
}
"""

# FIX: custom CSS must be supplied to gr.Blocks(css=...); it was previously
# passed to launch(), which has no css parameter, so the styles never applied.
with gr.Blocks(title="Electric Jungle - Wan2.2 Creator", css=css) as demo:

    gr.Markdown("""
# ⚡ **Electric Jungle Creator**
### *Wan2.2 LoRA Video Generator*

**Transform static images into pulsing video art**
*First run: 3-5min download | Cached: 45-90s magic*

<div style="background: linear-gradient(90deg, #ff00aa, #00d4ff); height: 4px; border-radius: 2px; margin: 20px 0;"></div>
""")

    status = gr.Markdown(get_status())

    with gr.Tabs():
        with gr.TabItem("🎬 Create"):
            with gr.Row():
                with gr.Column(scale=1):
                    img_in = gr.Image(type="pil", label="🌿 Your Creative Spark", height=320)
                    prompt_in = gr.Textbox(
                        label="✨ Motion Vision",
                        lines=3,
                        placeholder="gentle breathing, wind in hair, subtle pulsing light, cosmic dance...",
                        max_lines=5
                    )

                    with gr.Accordion("⚙️ Creative Controls", open=False):
                        gr.Markdown("*Lower = faster lightning*")
                        with gr.Row():
                            frames_sl = gr.Slider(17, 49, 25, step=8, label="⚡ Frames")
                            h_sl = gr.Slider(256, 480, 384, step=32, label="📐 Height")
                            w_sl = gr.Slider(256, 640, 512, step=32, label="📐 Width")
                        with gr.Row():
                            cfg_sl = gr.Slider(4, 12, 7.0, step=0.5, label="🎯 Vision Lock")
                            steps_sl = gr.Slider(15, 30, 22, step=2, label="🔮 Quality")
                            lora_sl = gr.Slider(0, 1.5, 0.85, step=0.05, label="🎨 Style Pulse")
                        seed_in = gr.Number(1337, label="🌌 Seed", info="-1 = cosmic chaos")

                    gen_btn = gr.Button("🚀 IGNITE CREATION", variant="primary", size="lg")

                with gr.Column(scale=1):
                    gen_status = gr.Markdown("*Drop your spark and ignite*", elem_classes=["progress-box"])
                    vid_out = gr.Video(label="🎥 Your Electric Masterpiece", height=380)

        with gr.TabItem("🎨 Styles"):
            gr.Markdown("""
## 🎨 **Style Fusion Chamber**

*Select artistic styles that pulse through your creations*
""")
            lora_dd = gr.Dropdown(choices=[], label="⚡ Available Styles")
            with gr.Row():
                refresh_btn = gr.Button("🔄 Scan Styles")
                select_btn = gr.Button("✅ Fuse Style", variant="primary")
            lora_status = gr.Markdown("🌿 Ready for pure creation...")

        with gr.TabItem("🧠 Neural Codex"):
            gr.Markdown("""
## ⚡ **Electric Jungle Codex**

### **How to Ignite**
1. **Drop Image** - Portrait, scene, abstract art
2. **Vision Prompt** - "gentle sway", "cosmic pulse", "hair flows wildly"
3. **⚙️ Tweak** - Lower settings = faster lightning
4. **IGNITE** - Watch magic unfold

### **Pro Tips**
- **Fast**: 320x512, 17 frames, 18 steps
- **Epic**: 512x768, 33 frames, 25 steps
- **Styles**: Amplify with LoRA fusion

### **Tech Stack**
| Component | Power |
|-----------|-------|
| Wan2.2-TI2V | 5B params |
| ZeroGPU | H200 cluster |
| LoRA Styles | Infinite |
| Output | 1080p@16fps |

*Creativity has no limits in the Electric Jungle* 🌩️✨
""")

    # Event wiring: style scan/selection refreshes the header status too.
    refresh_btn.click(refresh_loras, outputs=[lora_dd, lora_status])
    select_btn.click(select_lora, [lora_dd], [lora_status]).then(get_status, outputs=[status])
    gen_btn.click(
        generate_video,
        [img_in, prompt_in, frames_sl, h_sl, w_sl, cfg_sl, steps_sl, lora_sl, seed_in],
        [vid_out, gen_status]
    )

    # Populate the style dropdown once at page load.
    demo.load(refresh_loras, outputs=[lora_dd, lora_status])

if __name__ == "__main__":
    demo.queue(max_size=3).launch(
        server_name="0.0.0.0",
        server_port=7860,
        show_error=True,
        share=False,
    )
 
1
+ \n{error_message}\n