K1Z3M1112 committed (verified)
Commit ff79d24 · Parent(s): 53e6ac1

Update app.py

Files changed (1):
  1. app.py +8 -478
app.py CHANGED
@@ -26,8 +26,6 @@ print(f"🖥️ Device: {device} | dtype: {dtype}")
 
 # Lazy import (to avoid long startup if unused)
 from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, StableDiffusionPipeline
-from diffusers import StableDiffusionInstructPix2PixPipeline, AutoPipelineForImage2Image
-from diffusers import FluxPipeline, FluxImg2ImgPipeline
 from controlnet_aux import LineartDetector, LineartAnimeDetector
 
 # Memory optimization
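The context line in the hunk above prints `device` and `dtype`, which every loader below relies on. The selection logic itself sits outside this diff; a minimal sketch of what it presumably looks like (an assumption, not code from this commit):

```python
import torch

# Assumed setup (not part of this diff): prefer CUDA with fp16,
# fall back to CPU with fp32.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dtype = torch.float16 if device.type == "cuda" else torch.float32
print(f"🖥️ Device: {device} | dtype: {dtype}")
```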
@@ -40,16 +38,13 @@ else:
 print("⚠️ Running on CPU - Image generation will be significantly slower")
 
 # ===== Model & Config =====
+# Changed from a dict to standalone variables for easier memory management
 CURRENT_CONTROLNET_PIPE = None
 CURRENT_CONTROLNET_KEY = None  # (model_name, is_anime)
 LINEART_DETECTOR = None
 LINEART_ANIME_DETECTOR = None
 CURRENT_T2I_PIPE = None
 CURRENT_T2I_MODEL = None
-CURRENT_PIX2PIX_PIPE = None
-CURRENT_PIX2PIX_MODEL = None
-CURRENT_FLUX_PIPE = None  # New: FLUX model pipeline
-CURRENT_FLUX_MODEL = None  # New: current FLUX model name
 
 def get_pipeline(model_name: str, anime_model: bool = False):
     """Get or create a ControlNet pipeline for the given model and anime flag"""
@@ -309,267 +304,6 @@ def load_t2i_model(model_name: str):
         CURRENT_T2I_MODEL = None
         raise
 
-def load_pix2pix_model():
-    """Load Instruct-Pix2Pix model for image editing"""
-    global CURRENT_PIX2PIX_PIPE, CURRENT_PIX2PIX_MODEL
-
-    if CURRENT_PIX2PIX_PIPE is not None:
-        return CURRENT_PIX2PIX_PIPE
-
-    try:
-        print("Loading Instruct-Pix2Pix model...")
-        CURRENT_PIX2PIX_PIPE = StableDiffusionInstructPix2PixPipeline.from_pretrained(
-            "timbrooks/instruct-pix2pix",
-            torch_dtype=dtype,
-            safety_checker=None,
-            requires_safety_checker=False,
-            use_safetensors=True,
-            variant="fp16" if dtype == torch.float16 else None
-        ).to(device)
-
-        # Optimizations
-        CURRENT_PIX2PIX_PIPE.enable_attention_slicing(slice_size="max")
-
-        # Use new API for VAE slicing
-        if hasattr(CURRENT_PIX2PIX_PIPE, 'vae') and hasattr(CURRENT_PIX2PIX_PIPE.vae, 'enable_slicing'):
-            CURRENT_PIX2PIX_PIPE.vae.enable_slicing()
-        else:
-            try:
-                CURRENT_PIX2PIX_PIPE.enable_vae_slicing()
-            except:
-                pass
-
-        if device.type == "cuda":
-            try:
-                CURRENT_PIX2PIX_PIPE.enable_xformers_memory_efficient_attention()
-                print("✅ xFormers enabled for Pix2Pix")
-            except:
-                pass
-            CURRENT_PIX2PIX_PIPE.enable_model_cpu_offload()
-
-        # Compile if available
-        if hasattr(torch, 'compile') and device.type == "cuda":
-            try:
-                CURRENT_PIX2PIX_PIPE.unet = torch.compile(CURRENT_PIX2PIX_PIPE.unet, mode="reduce-overhead", fullgraph=True)
-                print("✅ Pix2Pix model compiled")
-            except:
-                pass
-
-        CURRENT_PIX2PIX_MODEL = "timbrooks/instruct-pix2pix"
-        return CURRENT_PIX2PIX_PIPE
-
-    except Exception as e:
-        print(f"Error loading Instruct-Pix2Pix model: {e}")
-        print(f"⚠️ Trying to load without use_safetensors...")
-
-        # Retry without use_safetensors
-        try:
-            CURRENT_PIX2PIX_PIPE = StableDiffusionInstructPix2PixPipeline.from_pretrained(
-                "timbrooks/instruct-pix2pix",
-                torch_dtype=dtype,
-                safety_checker=None,
-                requires_safety_checker=False
-            ).to(device)
-
-            CURRENT_PIX2PIX_PIPE.enable_attention_slicing(slice_size="max")
-            if hasattr(CURRENT_PIX2PIX_PIPE, 'vae') and hasattr(CURRENT_PIX2PIX_PIPE.vae, 'enable_slicing'):
-                CURRENT_PIX2PIX_PIPE.vae.enable_slicing()
-            else:
-                try:
-                    CURRENT_PIX2PIX_PIPE.enable_vae_slicing()
-                except:
-                    pass
-
-            if device.type == "cuda":
-                try:
-                    CURRENT_PIX2PIX_PIPE.enable_xformers_memory_efficient_attention()
-                    print("✅ xFormers enabled for Pix2Pix")
-                except:
-                    pass
-                CURRENT_PIX2PIX_PIPE.enable_model_cpu_offload()
-
-            if hasattr(torch, 'compile') and device.type == "cuda":
-                try:
-                    CURRENT_PIX2PIX_PIPE.unet = torch.compile(CURRENT_PIX2PIX_PIPE.unet, mode="reduce-overhead", fullgraph=True)
-                    print("✅ Pix2Pix model compiled")
-                except:
-                    pass
-
-            CURRENT_PIX2PIX_MODEL = "timbrooks/instruct-pix2pix"
-            return CURRENT_PIX2PIX_PIPE
-
-        except Exception as retry_e:
-            print(f"❌ Error loading Instruct-Pix2Pix model (retry): {retry_e}")
-            CURRENT_PIX2PIX_PIPE = None
-            CURRENT_PIX2PIX_MODEL = None
-            raise
-
-def load_flux_model():
-    """Load FLUX.1-Kontext model for image-to-image"""
-    global CURRENT_FLUX_PIPE, CURRENT_FLUX_MODEL
-
-    model_name = "kpsss34/FLUX.1-Kontext-dev-int4"
-
-    if CURRENT_FLUX_MODEL == model_name and CURRENT_FLUX_PIPE is not None:
-        return CURRENT_FLUX_PIPE
-
-    try:
-        if CURRENT_FLUX_PIPE is not None:
-            print(f"🗑️ Unloading old FLUX model: {CURRENT_FLUX_MODEL}")
-            del CURRENT_FLUX_PIPE
-            CURRENT_FLUX_PIPE = None
-            gc.collect()
-            if torch.cuda.is_available():
-                torch.cuda.empty_cache()
-
-        print(f"📥 Loading FLUX model: {model_name}")
-
-        # Use FluxPipeline or FluxImg2ImgPipeline depending on what is available
-        # FLUX 1.0 uses a new architecture that differs from Stable Diffusion
-
-        try:
-            # Try FluxImg2ImgPipeline first (if available)
-            CURRENT_FLUX_PIPE = FluxImg2ImgPipeline.from_pretrained(
-                model_name,
-                torch_dtype=dtype,
-                safety_checker=None,
-                requires_safety_checker=False,
-                use_safetensors=True,
-                variant="fp16" if dtype == torch.float16 else None
-            ).to(device)
-            print("✅ Using FluxImg2ImgPipeline")
-        except:
-            # If FluxImg2ImgPipeline is unavailable, try FluxPipeline
-            try:
-                CURRENT_FLUX_PIPE = FluxPipeline.from_pretrained(
-                    model_name,
-                    torch_dtype=dtype,
-                    safety_checker=None,
-                    requires_safety_checker=False,
-                    use_safetensors=True,
-                    variant="fp16" if dtype == torch.float16 else None
-                ).to(device)
-                print("✅ Using FluxPipeline")
-            except Exception as flux_err:
-                # If FluxPipeline is unavailable too, use AutoPipelineForImage2Image
-                print(f"⚠️ FluxPipeline not available, trying AutoPipelineForImage2Image: {flux_err}")
-                CURRENT_FLUX_PIPE = AutoPipelineForImage2Image.from_pretrained(
-                    model_name,
-                    torch_dtype=dtype,
-                    safety_checker=None,
-                    requires_safety_checker=False,
-                    use_safetensors=True,
-                    variant="fp16" if dtype == torch.float16 else None
-                ).to(device)
-                print("✅ Using AutoPipelineForImage2Image")
-
-        # Optimizations for FLUX
-        if hasattr(CURRENT_FLUX_PIPE, 'enable_attention_slicing'):
-            CURRENT_FLUX_PIPE.enable_attention_slicing(slice_size="max")
-
-        # FLUX may not have a VAE, so check first
-        if hasattr(CURRENT_FLUX_PIPE, 'vae') and hasattr(CURRENT_FLUX_PIPE.vae, 'enable_slicing'):
-            CURRENT_FLUX_PIPE.vae.enable_slicing()
-        elif hasattr(CURRENT_FLUX_PIPE, 'enable_vae_slicing'):
-            try:
-                CURRENT_FLUX_PIPE.enable_vae_slicing()
-            except:
-                pass
-
-        if device.type == "cuda":
-            if hasattr(CURRENT_FLUX_PIPE, 'enable_xformers_memory_efficient_attention'):
-                try:
-                    CURRENT_FLUX_PIPE.enable_xformers_memory_efficient_attention()
-                    print("✅ xFormers enabled for FLUX")
-                except:
-                    pass
-
-            if hasattr(CURRENT_FLUX_PIPE, 'enable_model_cpu_offload'):
-                CURRENT_FLUX_PIPE.enable_model_cpu_offload()
-
-        # Compile if available
-        if hasattr(torch, 'compile') and device.type == "cuda" and hasattr(CURRENT_FLUX_PIPE, 'transformer'):
-            try:
-                CURRENT_FLUX_PIPE.transformer = torch.compile(CURRENT_FLUX_PIPE.transformer, mode="reduce-overhead", fullgraph=True)
-                print("✅ FLUX transformer compiled")
-            except:
-                pass
-
-        CURRENT_FLUX_MODEL = model_name
-        return CURRENT_FLUX_PIPE
-
-    except Exception as e:
-        print(f"Error loading FLUX model {model_name}: {e}")
-        print(f"⚠️ Trying to load without use_safetensors...")
-
-        # Retry without use_safetensors
-        try:
-            try:
-                CURRENT_FLUX_PIPE = FluxImg2ImgPipeline.from_pretrained(
-                    model_name,
-                    torch_dtype=dtype,
-                    safety_checker=None,
-                    requires_safety_checker=False
-                ).to(device)
-                print("✅ Using FluxImg2ImgPipeline (without safetensors)")
-            except:
-                try:
-                    CURRENT_FLUX_PIPE = FluxPipeline.from_pretrained(
-                        model_name,
-                        torch_dtype=dtype,
-                        safety_checker=None,
-                        requires_safety_checker=False
-                    ).to(device)
-                    print("✅ Using FluxPipeline (without safetensors)")
-                except Exception as flux_err:
-                    print(f"⚠️ FluxPipeline not available, trying AutoPipelineForImage2Image: {flux_err}")
-                    CURRENT_FLUX_PIPE = AutoPipelineForImage2Image.from_pretrained(
-                        model_name,
-                        torch_dtype=dtype,
-                        safety_checker=None,
-                        requires_safety_checker=False
-                    ).to(device)
-                    print("✅ Using AutoPipelineForImage2Image (without safetensors)")
-
-            # Optimizations
-            if hasattr(CURRENT_FLUX_PIPE, 'enable_attention_slicing'):
-                CURRENT_FLUX_PIPE.enable_attention_slicing(slice_size="max")
-
-            if hasattr(CURRENT_FLUX_PIPE, 'vae') and hasattr(CURRENT_FLUX_PIPE.vae, 'enable_slicing'):
-                CURRENT_FLUX_PIPE.vae.enable_slicing()
-            elif hasattr(CURRENT_FLUX_PIPE, 'enable_vae_slicing'):
-                try:
-                    CURRENT_FLUX_PIPE.enable_vae_slicing()
-                except:
-                    pass
-
-            if device.type == "cuda":
-                if hasattr(CURRENT_FLUX_PIPE, 'enable_xformers_memory_efficient_attention'):
-                    try:
-                        CURRENT_FLUX_PIPE.enable_xformers_memory_efficient_attention()
-                        print("✅ xFormers enabled for FLUX")
-                    except:
-                        pass
-
-                if hasattr(CURRENT_FLUX_PIPE, 'enable_model_cpu_offload'):
-                    CURRENT_FLUX_PIPE.enable_model_cpu_offload()
-
-            if hasattr(torch, 'compile') and device.type == "cuda" and hasattr(CURRENT_FLUX_PIPE, 'transformer'):
-                try:
-                    CURRENT_FLUX_PIPE.transformer = torch.compile(CURRENT_FLUX_PIPE.transformer, mode="reduce-overhead", fullgraph=True)
-                    print("✅ FLUX transformer compiled")
-                except:
-                    pass
-
-            CURRENT_FLUX_MODEL = model_name
-            return CURRENT_FLUX_PIPE
-
-        except Exception as retry_e:
-            print(f"❌ Error loading FLUX model (retry): {retry_e}")
-            CURRENT_FLUX_PIPE = None
-            CURRENT_FLUX_MODEL = None
-            raise
-
 # ===== Utils =====
 def is_lineart(img: Image.Image) -> bool:
     arr = np.array(img.convert("L"))
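Both removed loaders share one failure-handling shape: attempt `from_pretrained` with `use_safetensors=True` (plus the fp16 `variant` when applicable), and on any exception retry without those arguments. A condensed sketch of that retry pattern, using the Pix2Pix repo from the removed code:

```python
import torch
from diffusers import StableDiffusionInstructPix2PixPipeline

def load_with_fallback(repo_id: str, device, dtype):
    """Prefer safetensors weights; fall back to the default format on failure."""
    common = dict(torch_dtype=dtype, safety_checker=None,
                  requires_safety_checker=False)
    try:
        return StableDiffusionInstructPix2PixPipeline.from_pretrained(
            repo_id,
            use_safetensors=True,
            variant="fp16" if dtype == torch.float16 else None,
            **common,
        ).to(device)
    except Exception as err:
        print(f"⚠️ safetensors load failed ({err}); retrying with default weights")
        return StableDiffusionInstructPix2PixPipeline.from_pretrained(
            repo_id, **common,
        ).to(device)

# e.g. pipe = load_with_fallback("timbrooks/instruct-pix2pix", device, dtype)
```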
@@ -657,92 +391,11 @@ def t2i(prompt, model, seed, steps, scale, w, h):
         error_img = Image.new('RGB', (int(w), int(h)), color='red')
         return error_img
 
-def pix2pix_edit(image, instruction, seed, steps, scale, image_scale):
-    """Edit image using Instruct-Pix2Pix"""
-    try:
-        pipe = load_pix2pix_model()
-        print(f"🔄 Using Pix2Pix model: {CURRENT_PIX2PIX_MODEL}")
-
-        image = resize_image(image, max_size=768)
-        gen = torch.Generator(device=device).manual_seed(int(seed))
-
-        with torch.inference_mode():
-            result = pipe(
-                instruction,
-                image=image,
-                num_inference_steps=int(steps),
-                guidance_scale=float(scale),
-                image_guidance_scale=float(image_scale),
-                generator=gen
-            ).images[0]
-
-        if device.type == "cuda":
-            torch.cuda.empty_cache()
-
-        return result
-
-    except Exception as e:
-        print(f"❌ Error in pix2pix_edit: {e}")
-        if image:
-            error_img = Image.new('RGB', image.size, color='red')
-        else:
-            error_img = Image.new('RGB', (512, 512), color='red')
-        return error_img
-
-def flux_img2img(image, prompt, seed, steps, scale, strength):
-    """Image-to-image generation using FLUX.1-Kontext"""
-    try:
-        pipe = load_flux_model()
-        print(f"🌀 Using FLUX model: {CURRENT_FLUX_MODEL}")
-
-        # Resize image to the optimal size for FLUX (FLUX works best at 1024x1024)
-        image = resize_image(image, max_size=1024)
-
-        gen = torch.Generator(device=device).manual_seed(int(seed))
-
-        with torch.inference_mode():
-            # For FLUX we need to check the pipeline type
-            if isinstance(pipe, FluxImg2ImgPipeline) or (hasattr(pipe, '__class__') and 'Img2Img' in pipe.__class__.__name__):
-                # Image-to-image pipeline
-                result = pipe(
-                    prompt=prompt,
-                    image=image,
-                    strength=float(strength),
-                    num_inference_steps=int(steps),
-                    guidance_scale=float(scale),
-                    generator=gen
-                ).images[0]
-            else:
-                # Text-to-image pipeline (used as img2img with an image prompt)
-                # FLUX can take an image as conditioning
-                result = pipe(
-                    prompt=prompt,
-                    image=image,
-                    num_inference_steps=int(steps),
-                    guidance_scale=float(scale),
-                    generator=gen
-                ).images[0]
-
-        if device.type == "cuda":
-            torch.cuda.empty_cache()
-
-        return result
-
-    except Exception as e:
-        print(f"❌ Error in flux_img2img: {e}")
-        if image:
-            error_img = Image.new('RGB', image.size, color='red')
-        else:
-            error_img = Image.new('RGB', (1024, 1024), color='red')
-        return error_img
-
 # ===== Function to unload all models =====
 def unload_all_models():
     global CURRENT_CONTROLNET_PIPE, CURRENT_CONTROLNET_KEY
     global LINEART_DETECTOR, LINEART_ANIME_DETECTOR
     global CURRENT_T2I_PIPE, CURRENT_T2I_MODEL
-    global CURRENT_PIX2PIX_PIPE, CURRENT_PIX2PIX_MODEL
-    global CURRENT_FLUX_PIPE, CURRENT_FLUX_MODEL
 
     print("Unloading all models from memory...")
 
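For reference, the removed `pix2pix_edit` maps onto the standard InstructPix2Pix call, where `guidance_scale` pulls the result toward the text instruction and `image_guidance_scale` pulls it toward the original pixels. A standalone sketch (the model ID comes from the removed code; `input.png` is a placeholder):

```python
import torch
from PIL import Image
from diffusers import StableDiffusionInstructPix2PixPipeline

pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix",
    torch_dtype=torch.float16,
    safety_checker=None,
).to("cuda")

image = Image.open("input.png").convert("RGB")  # placeholder input
gen = torch.Generator(device="cuda").manual_seed(42)

with torch.inference_mode():
    result = pipe(
        "make it winter",          # edit instruction
        image=image,
        num_inference_steps=50,
        guidance_scale=7.5,        # adherence to the instruction
        image_guidance_scale=1.5,  # adherence to the input image
        generator=gen,
    ).images[0]
result.save("edited.png")
```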
@@ -779,24 +432,6 @@ def unload_all_models():
         pass
     CURRENT_T2I_MODEL = None
 
-    # Unload Pix2Pix model
-    try:
-        if CURRENT_PIX2PIX_PIPE is not None:
-            del CURRENT_PIX2PIX_PIPE
-            CURRENT_PIX2PIX_PIPE = None
-    except:
-        pass
-    CURRENT_PIX2PIX_MODEL = None
-
-    # Unload FLUX model
-    try:
-        if CURRENT_FLUX_PIPE is not None:
-            del CURRENT_FLUX_PIPE
-            CURRENT_FLUX_PIPE = None
-    except:
-        pass
-    CURRENT_FLUX_MODEL = None
-
     # Force garbage collection
     gc.collect()
    if torch.cuda.is_available():
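`del` on a pipeline reference does not by itself return VRAM, which is why each unload block pairs it with `gc.collect()` and `torch.cuda.empty_cache()`. A small helper one could use to confirm the teardown actually frees memory (not part of app.py; the function name is illustrative):

```python
import torch

def report_cuda_memory(tag: str) -> None:
    """Print PyTorch allocator stats so unloads can be verified."""
    if torch.cuda.is_available():
        allocated = torch.cuda.memory_allocated() / 1024**2
        reserved = torch.cuda.memory_reserved() / 1024**2
        print(f"[{tag}] allocated: {allocated:.0f} MiB | reserved: {reserved:.0f} MiB")

# Usage:
# report_cuda_memory("before"); unload_all_models(); report_cuda_memory("after")
```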
@@ -810,7 +445,7 @@ def unload_all_models():
 
 # ===== Gradio UI =====
 with gr.Blocks(title="🎨 Advanced Image Generation Suite", theme=gr.themes.Soft()) as demo:
     gr.Markdown("# 🎨 Advanced Image Generation & Editing Suite")
-    gr.Markdown("### Powered by Stable Diffusion, ControlNet, Instruct-Pix2Pix & FLUX")
+    gr.Markdown("### Powered by Stable Diffusion & ControlNet")
 
     # Add system info
     if torch.cuda.is_available():
@@ -845,7 +480,9 @@ with gr.Blocks(title="🎨 Advanced Image Generation Suite", theme=gr.themes.Sof
                 "digiplay/ChikMix_V3",
                 "digiplay/chilloutmix_NiPrunedFp16Fix",
                 "gsdf/Counterfeit-V2.5",
-                "stablediffusionapi/anything-v5"
+                "stablediffusionapi/anything-v5",
+                "digiplay/CleanLinearMix_nsfw",
+                "Laxhar/noobai-XL-1.1"  # newly added model
             ],
             value="digiplay/ChikMix_V3",
             label="Base Model"
@@ -892,7 +529,9 @@ with gr.Blocks(title="🎨 Advanced Image Generation Suite", theme=gr.themes.Sof
                 "digiplay/ChikMix_V3",
                 "digiplay/chilloutmix_NiPrunedFp16Fix",
                 "gsdf/Counterfeit-V2.5",
-                "stablediffusionapi/anything-v5"
+                "stablediffusionapi/anything-v5",
+                "digiplay/CleanLinearMix_nsfw",
+                "Laxhar/noobai-XL-1.1"  # newly added model
             ],
             value="digiplay/ChikMix_V3",
             label="Model"
@@ -913,115 +552,6 @@ with gr.Blocks(title="🎨 Advanced Image Generation Suite", theme=gr.themes.Sof
             [t2i_prompt, t2i_model, t2i_seed, t2i_steps, t2i_scale, w, h],
             t2i_out
         )
-
-    with gr.Tab("🔄 Instruct-Pix2Pix"):
-        gr.Markdown("""
-        ### Edit Images with Text Instructions
-        Upload an image and describe how you want to change it.
-        Examples: 'make it winter', 'turn day into night', 'add sunglasses', 'make it look like a painting'
-        """)
-
-        with gr.Row():
-            with gr.Column():
-                pix2pix_input = gr.Image(label="Input Image", type="pil")
-                pix2pix_instruction = gr.Textbox(
-                    label="Edit Instruction",
-                    placeholder="e.g., make it winter, turn day into night, add sunglasses...",
-                    lines=2
-                )
-
-                with gr.Row():
-                    pix2pix_seed = gr.Number(value=42, label="Seed")
-                    pix2pix_steps = gr.Slider(10, 100, 50, step=5, label="Steps")
-
-                with gr.Row():
-                    pix2pix_scale = gr.Slider(1, 20, 7.5, step=0.5, label="Text Guidance Scale")
-                    pix2pix_image_scale = gr.Slider(1, 5, 1.5, step=0.1, label="Image Guidance Scale")
-
-                pix2pix_btn = gr.Button("🔄 Edit Image", variant="primary")
-
-            with gr.Column():
-                pix2pix_output = gr.Image(label="Edited Image", type="pil")
-
-        with gr.Row():
-            gr.Examples(
-                examples=[
-                    ["make it winter", 42, 50, 7.5, 1.5],
-                    ["turn day into night", 42, 50, 7.5, 1.5],
-                    ["make it look like a painting", 42, 50, 7.5, 1.5],
-                    ["add sunglasses", 42, 50, 7.5, 1.5],
-                    ["make it cyberpunk style", 42, 50, 7.5, 1.5],
-                    ["change hair color to blue", 42, 50, 7.5, 1.5],
-                ],
-                inputs=[pix2pix_instruction, pix2pix_seed, pix2pix_steps, pix2pix_scale, pix2pix_image_scale],
-                label="Quick Examples"
-            )
-
-        pix2pix_btn.click(
-            pix2pix_edit,
-            [pix2pix_input, pix2pix_instruction, pix2pix_seed, pix2pix_steps, pix2pix_scale, pix2pix_image_scale],
-            pix2pix_output
-        )
-
-    with gr.Tab("🌀 FLUX Image-to-Image"):
-        gr.Markdown("""
-        ### Image-to-Image with FLUX.1-Kontext
-        **Model:** `kpsss34/FLUX.1-Kontext-dev-int4`
-
-        Transform your images using FLUX, a powerful image-to-image model.
-        Upload an image and provide a prompt to guide the transformation.
-
-        **Note:** FLUX 1.0 uses a different architecture than Stable Diffusion and may require more memory.
-        The int4 quantized version is used to reduce memory usage.
-
-        **Tips:**
-        - Use **strength** to control how much the input image is preserved (lower = more original, higher = more creative)
-        - FLUX works best with high-resolution images (1024x1024 recommended)
-        - The model is quantized (int4) for better performance and lower memory usage
-        """)
-
-        with gr.Row():
-            with gr.Column():
-                flux_input = gr.Image(label="Input Image", type="pil")
-                flux_prompt = gr.Textbox(
-                    label="Prompt",
-                    placeholder="e.g., a beautiful anime character, cyberpunk style, cinematic lighting...",
-                    lines=3
-                )
-
-                with gr.Row():
-                    flux_seed = gr.Number(value=42, label="Seed")
-                    flux_steps = gr.Slider(10, 100, 50, step=5, label="Steps")
-
-                with gr.Row():
-                    flux_scale = gr.Slider(1, 20, 7.5, step=0.5, label="CFG Scale")
-                    flux_strength = gr.Slider(0.1, 1.0, 0.75, step=0.05,
-                                              label="Strength (higher = more creative, lower = more original)")
-
-                flux_btn = gr.Button("🌀 Transform with FLUX", variant="primary")
-
-            with gr.Column():
-                flux_output = gr.Image(label="Transformed Image", type="pil")
-
-        with gr.Row():
-            gr.Examples(
-                examples=[
-                    ["turn into anime style", 42, 50, 7.5, 0.75],
-                    ["make it look like a painting", 42, 50, 7.5, 0.8],
-                    ["cyberpunk transformation", 42, 50, 7.5, 0.7],
-                    ["fantasy style with magic effects", 42, 50, 7.5, 0.85],
-                    ["realistic photo style", 42, 50, 7.5, 0.6],
-                    ["studio Ghibli art style", 42, 50, 7.5, 0.9],
-                ],
-                inputs=[flux_prompt, flux_seed, flux_steps, flux_scale, flux_strength],
-                label="Quick Examples"
-            )
-
-        flux_btn.click(
-            flux_img2img,
-            [flux_input, flux_prompt, flux_seed, flux_steps, flux_scale, flux_strength],
-            flux_output
-        )
 
 try:
     demo.launch(
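Each removed tab follows the same Blocks wiring that the surviving tabs use: input components, a `gr.Button`, and one `click` binding of callback, inputs, and output. A minimal runnable sketch of that pattern with a stub callback (illustrative only):

```python
import gradio as gr
from PIL import Image

def stub_edit(image: Image.Image, instruction: str) -> Image.Image:
    """Stand-in for a real callback such as the removed pix2pix_edit."""
    print(f"instruction: {instruction}")
    return image

with gr.Blocks(title="demo") as demo:
    with gr.Tab("Edit"):
        inp = gr.Image(label="Input Image", type="pil")
        txt = gr.Textbox(label="Edit Instruction", lines=2)
        btn = gr.Button("Edit", variant="primary")
        out = gr.Image(label="Edited Image", type="pil")
        # Wiring: callback, list of input components, output component.
        btn.click(stub_edit, [inp, txt], out)

demo.launch()
```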
 