AaronCIH committed · Commit 56fe60e · verified · 1 Parent(s): 8dda48f

Upload Models/RAR_infer.py with huggingface_hub

Files changed (1):
  1. Models/RAR_infer.py +240 -221
Models/RAR_infer.py CHANGED
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """
-CUDA_VISIBLE_DEVICES=0 streamlit run RAR_infer.py
+CUDA_VISIBLE_DEVICES=0 streamlit run run.py
 
 CUDA_VISIBLE_DEVICES=0 streamlit run RAR_infer.py \
     --config=./configs/infer_cfg.yaml \
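Note: `streamlit run` treats flags that appear before a bare `--` separator as its own, so if the `--config` flag above is ever swallowed by Streamlit rather than the script, the usual form is `CUDA_VISIBLE_DEVICES=0 streamlit run RAR_infer.py -- --config=./configs/infer_cfg.yaml`. The config path is also hard-coded in the `pyrallis.parse` call later in this diff, so the flag acts only as an override.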
@@ -81,6 +81,8 @@ from diffusion.model.sd35 import load_scheduler, load_vae, load_mmdit, load_text
 import torchvision.transforms.functional as F
 from torchvision.transforms import InterpolationMode
 from diffusion.model.utils import set_fp32_attention, set_grad_checkpoint
+# Interface
+import streamlit as st
 
 os.environ["TOKENIZERS_PARALLELISM"] = "false"
 
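The `import streamlit as st` added here is moved up from the interface section near the end of the file (a later hunk removes it there): the new `@st.cache_resource` decorator introduced below needs `st` bound at module import time, before `load_rar_model` is defined.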
@@ -278,242 +280,256 @@ def get_args():
     parser.add_argument("--config", type=str, help="config")
     return parser.parse_known_args()[0]
 
-#### Main
-# [0]: Config
-## ======================================================================
-# args = get_args()
-config = args = pyrallis.parse(config_class=SanaInference, config_path="./configs/infer_cfg.yaml")
-args.image_size = config.model.image_size
-if args.resolution:
-    args.image_size = args.resolution
-set_env(args.seed)
-
-if args.weight_type == "bf16":
-    weight_type = torch.bfloat16
-elif args.weight_type == "fp16":
-    weight_type = torch.float16
-elif args.weight_type == "fp32":
-    weight_type = torch.float32
-else:
-    raise KeyError(f"Unsupported Weight Type: {args.weight_type}")
-
-# only support fixed latent size currently
-latent_size = args.image_size // config.vae.vae_downsample_rate
-max_sequence_length = config.text_encoder.model_max_length
-flow_shift = config.scheduler.flow_shift
-pag_applied_layers = config.model.pag_applied_layers
-guidance_type = "classifier-free_PAG"
-assert (
-    isinstance(args.interval_guidance, list)
-    and len(args.interval_guidance) == 2
-    and args.interval_guidance[0] <= args.interval_guidance[1]
-)
-args.interval_guidance = [max(0, args.interval_guidance[0]), min(1, args.interval_guidance[1])]
-
-# tags
-match = re.search(r".*epoch_(\d+).*step_(\d+).*", args.model_path)
-epoch_name, step_name = match.groups() if match else ("unknown", "unknown")
-guidance_type = guidance_type_select(guidance_type, args.pag_scale, config.model.attn_type)
-
-# Sampler Config
-args.sampling_algo = (
-    args.sampling_algo
-    if ("flow" not in args.model_path or args.sampling_algo == "flow_dpm-solver")
-    else "flow_euler"
-)
-assert args.sampling_algo in ["flow_dpm-solver", "flow_euler"], f"Only support flow_dpm-solver and flow_euler now, but received {args.sampling_algo}."
-sample_steps_dict = {"flow_dpm-solver": 20, "flow_euler": 28}  # {"dpm-solver": 20, "sa-solver": 25, "flow_dpm-solver": 20, "flow_euler": 28}
-sample_steps = args.step if args.step != -1 else sample_steps_dict[args.sampling_algo]
-
-# output setting
-work_dir = args.work_dir
-# work_dir = os.path.join(work_dir, f"ep{epoch_name}_it{step_name}_r{args.image_size}_s{args.step}_n{args.num_rounds}_{args.flow_type}")  # ep100_r256_s4_n4_p2p
-config.work_dir = work_dir
-os.umask(0o000)
-save_root = work_dir  # $work_dirs/online/ep100_it32500_s4_n4_p2p/SOTS
-os.makedirs(work_dir, exist_ok=True)
-save_detail = args.detail
-
-# logger
-num_gpus = torch.cuda.device_count()
-logger = setup_logger('SD35M', save_root, 0)
-logger.info("##############################################################")
-logger.info('Using {} GPUS'.format(num_gpus))
-logger.info('Running with config:\n{}'.format(config))
-logger.info('Running with args:\n{}'.format(args))
-logger.info(f"Sampler {args.sampling_algo}")
-logger.info(colored(f"Save Results: {save_root}", "blue"))
-logger.info("##############################################################")
-# [1]: model define
-## ======================================================================
-weight_dtype = weight_type  # get_weight_dtype(config.model.mixed_precision)
-device = "cuda" if torch.cuda.is_available() else "cpu"
-## [1-1]: Loading VAE ...
-vae = None
-vae_dtype = get_weight_dtype(config.vae.weight_dtype)
-if not config.data.load_vae_feat:
-    if config.vae.vae_type == "SDVAE":
-        vae = load_vae(config.vae.vae_pretrained, device)
-        vae = vae.to(vae_dtype).eval()
+@st.cache_resource
+def load_rar_model():
+    # [0]: Config
+    ## ======================================================================
+    # args = get_args()
+    config = args = pyrallis.parse(config_class=SanaInference, config_path="./configs/infer_cfg.yaml")
+    args.image_size = config.model.image_size
+    if args.resolution:
+        args.image_size = args.resolution
+    set_env(args.seed)
+
+    if args.weight_type == "bf16":
+        weight_type = torch.bfloat16
+    elif args.weight_type == "fp16":
+        weight_type = torch.float16
+    elif args.weight_type == "fp32":
+        weight_type = torch.float32
     else:
-        raise KeyError(f"Only support VAE: 'SDVAE', but received {config.vae.vae_type}.")
-vae.to(vae_dtype)
-logger.info("##############################################################")
-logger.info(f"VAE type: {config.vae.vae_type}, path: {config.vae.vae_pretrained}, weight_dtype: {vae_dtype}")
-logger.info(f"VAE Params: {sum(p.numel() for p in vae.parameters())/1e6} M, dtype: {next(vae.parameters()).dtype}")
-logger.info("##############################################################")
-
-# [1-2]: Loading Tokenizer ...
-text_encoder = None
-logger.info("##############################################################")
-logger.info(f"text_encoder type: {config.text_encoder.text_encoder_name}, path: {config.text_encoder.text_encoder_pretrained}")
-if config.text_encoder.text_encoder_name == "sd35-text":
-    text_encoder = load_text_encoder(config.text_encoder.text_encoder_pretrained, device)
-logger.info("##############################################################")
-os.environ["AUTOCAST_LINEAR_ATTN"] = "true" if config.model.autocast_linear_attn else "false"
-## [1-3]: Loading IQA model ...
-logger.info("##############################################################")
-if not args.assessment_model:
-    args.assessment_model = "SDQA"
-logger.info(f"IQA type: {args.assessment_model}, config: {args.assessment_config}")
-if args.assessment_model == "SDQA":
-    from iqa import DepictQA, load_pretrained_weights
-    assert os.path.isfile(args.assessment_config)
-    ## loading cfg
-    with open(args.assessment_config, "r") as f:
-        iqa_cfg = EasyDict(yaml.safe_load(f))
-    ## Model
-    assessment = DepictQA(iqa_cfg, training=False)
-    assessment = load_pretrained_weights(iqa_cfg, assessment, logger=None)
-    assessment.eval().to(weight_dtype).to(device)
-    logger.propagate = False
-logger.info(f"IQA Params: {sum(p.numel() for p in assessment.parameters())/1e6} M, dtype: {next(assessment.parameters()).dtype}")
-logger.info("##############################################################")
-## [1-4]: Loading Connector model ...
-connector_dtype = torch.float32
-logger.info("##############################################################")
-logger.info(f"Connector type: {config.connector.model}, path: {config.connector.model_pretrained}, weight_dtype: {connector_dtype}")
-if config.connector.model == "QFormer":
-    from diffusion.model.qa_connector import QFormer
-    connector = QFormer(
-        hidden_dim = config.connector.hidden_dim,
-        layers = config.connector.layers,
-        heads = config.connector.heads
+        raise KeyError(f"Unsupported Weight Type: {args.weight_type}")
+
+    # only support fixed latent size currently
+    flow_shift = config.scheduler.flow_shift
+    pag_applied_layers = config.model.pag_applied_layers
+    guidance_type = "classifier-free_PAG"
+    assert (
+        isinstance(args.interval_guidance, list)
+        and len(args.interval_guidance) == 2
+        and args.interval_guidance[0] <= args.interval_guidance[1]
     )
-else:
-    raise KeyError(f"Unknown Connector Type: only support [QFormer], but received {config.connector.model}.")
-# if config.connector.load_from:
-#     logger.info(f"Loading Pre-trained Weight for Connector: {config.connector.load_from}")
-#     state_dict = torch.load(config.connector.load_from, map_location='cpu')
-#     missing, unexpected = connector.load_state_dict(state_dict["state_dict"], strict=False)
-#     logger.warning(f"Missing keys: {missing}")
-#     logger.warning(f"Unexpected keys: {unexpected}")
-connector = connector.eval().to(weight_dtype).to(device)
-logger.info(f"Connector Params: {sum(p.numel() for p in connector.parameters())/1e6} M, dtype: {next(connector.parameters()).dtype}")
-logger.info("##############################################################")
-# [1-5]: Loading DiT model ...
-if config.model.model == "SD35M_P2P":
-    assert args.flow_type == "p2p", f"Error: Model {config.model.model} only support 'p2p' mode."
-    from diffusion.model.sd35 import load_mmdit_p2p
-    DiT = load_mmdit_p2p(
-        config.model.model_pretrained,
-        config.model.shift,
-        False,
-        device,
-        config.model.image_size,
-        config.model.input_channel,
-    ).eval().to(device)
-elif config.model.model == "SD35M_D2C":
-    assert args.flow_type == "d2c", f"Error: Model {config.model.model} only support 'd2c' mode."
-    from diffusion.model.sd35 import load_mmdit
-    DiT = load_mmdit(
-        config.model.model_pretrained,
-        config.model.shift,
-        False,
-        device,
-        config.model.image_size,
-        config.model.input_channel,
-    ).eval().to(device)
-else:
-    raise KeyError(f"Only support Model: 'SD35M_P2P' or 'SD35M_D2C', but received {config.model.model}.")
-## Load model
-state_dict = torch.load(config.model.load_from)
-if config.model.load_from.endswith(".bin"):
-    logger.info("Loading fsdp bin checkpoint....")
-    old_state_dict = state_dict
-    state_dict = dict()
-    state_dict["state_dict"] = old_state_dict
-if "pos_embed" in state_dict["state_dict"]:
-    del state_dict["state_dict"]["pos_embed"]
-missing, unexpected = DiT.load_state_dict(state_dict["state_dict"], strict=False)
-DiT.eval().to(weight_dtype)
-dit_dtype = weight_dtype
-logger.info("##############################################################")
-logger.info("# % Model Define ..... ")
-logger.info(f"Inference with {weight_dtype}, default guidance_type: {guidance_type}, flow_shift: {flow_shift}")
-logger.info(f"{DiT.__class__.__name__}:{config.model.model}, Model Parameters: {sum(p.numel() for p in DiT.parameters()):,}")
-logger.info("Generating sample from ckpt: %s" % config.model.load_from)
-logger.warning(f"Missing keys: {missing}")
-logger.warning(f"Unexpected keys: {unexpected}")
-logger.info(f"Parameter of DiT: {sum(p.numel() for p in DiT.parameters()) / 1000000} M")
-logger.info("##############################################################")
-# [1-6]: Combination Model
-model = IQAIR(DiT, connector, assessment, device)
-logger.info("##############################################################")
-logger.info("Summary: IQAIR")
-for param in model.parameters():
-    param.requires_grad = False
-num_total_params = sum(p.numel() for p in model.parameters())
-logger.info(f"All params: {round(num_total_params/1e6, 3)}M")
-logger.info("##############################################################")
-## Load model
-if os.path.isfile(args.model_path):
-    state_dict = torch.load(args.model_path)
-    if args.model_path.endswith(".bin"):
+    args.interval_guidance = [max(0, args.interval_guidance[0]), min(1, args.interval_guidance[1])]
+
+    # tags
+    match = re.search(r".*epoch_(\d+).*step_(\d+).*", args.model_path)
+    epoch_name, step_name = match.groups() if match else ("unknown", "unknown")
+    guidance_type = guidance_type_select(guidance_type, args.pag_scale, config.model.attn_type)
+
+    # Sampler Config
+    args.sampling_algo = (
+        args.sampling_algo
+        if ("flow" not in args.model_path or args.sampling_algo == "flow_dpm-solver")
+        else "flow_euler"
+    )
+    assert args.sampling_algo in ["flow_dpm-solver", "flow_euler"], f"Only support flow_dpm-solver and flow_euler now, but received {args.sampling_algo}."
+    sample_steps_dict = {"flow_dpm-solver": 20, "flow_euler": 28}  # {"dpm-solver": 20, "sa-solver": 25, "flow_dpm-solver": 20, "flow_euler": 28}
+    sample_steps = args.step if args.step != -1 else sample_steps_dict[args.sampling_algo]
+
+    # output setting
+    work_dir = args.work_dir
+    # work_dir = os.path.join(work_dir, f"ep{epoch_name}_it{step_name}_r{args.image_size}_s{args.step}_n{args.num_rounds}_{args.flow_type}")  # ep100_r256_s4_n4_p2p
+    config.work_dir = work_dir
+    os.umask(0o000)
+    save_root = work_dir  # $work_dirs/online/ep100_it32500_s4_n4_p2p/SOTS
+    os.makedirs(work_dir, exist_ok=True)
+    save_detail = args.detail
+
+    # logger
+    num_gpus = torch.cuda.device_count()
+    logger = setup_logger('SD35M', save_root, 0)
+    logger.info("##############################################################")
+    logger.info('Using {} GPUS'.format(num_gpus))
+    logger.info('Running with config:\n{}'.format(config))
+    logger.info('Running with args:\n{}'.format(args))
+    logger.info(f"Sampler {args.sampling_algo}")
+    logger.info(colored(f"Save Results: {save_root}", "blue"))
+    logger.info("##############################################################")
+    # [1]: model define
+    ## ======================================================================
+    weight_dtype = weight_type  # get_weight_dtype(config.model.mixed_precision)
+    device = "cuda" if torch.cuda.is_available() else "cpu"
+    ## [1-1]: Loading VAE ...
+    vae = None
+    vae_dtype = get_weight_dtype(config.vae.weight_dtype)
+    if not config.data.load_vae_feat:
+        if config.vae.vae_type == "SDVAE":
+            vae = load_vae(config.vae.vae_pretrained, device)
+            vae = vae.to(vae_dtype).eval()
+        else:
+            raise KeyError(f"Only support VAE: 'SDVAE', but received {config.vae.vae_type}.")
+    vae.to(vae_dtype)
+    logger.info("##############################################################")
+    logger.info(f"VAE type: {config.vae.vae_type}, path: {config.vae.vae_pretrained}, weight_dtype: {vae_dtype}")
+    logger.info(f"VAE Params: {sum(p.numel() for p in vae.parameters())/1e6} M, dtype: {next(vae.parameters()).dtype}")
+    logger.info("##############################################################")
+
+    # [1-2]: Loading Tokenizer ...
+    text_encoder = None
+    logger.info("##############################################################")
+    logger.info(f"text_encoder type: {config.text_encoder.text_encoder_name}, path: {config.text_encoder.text_encoder_pretrained}")
+    if config.text_encoder.text_encoder_name == "sd35-text":
+        text_encoder = load_text_encoder(config.text_encoder.text_encoder_pretrained, device)
+    logger.info("##############################################################")
+    os.environ["AUTOCAST_LINEAR_ATTN"] = "true" if config.model.autocast_linear_attn else "false"
+    ## [1-3]: Loading IQA model ...
+    logger.info("##############################################################")
+    if not args.assessment_model:
+        args.assessment_model = "SDQA"
+    logger.info(f"IQA type: {args.assessment_model}, config: {args.assessment_config}")
+    if args.assessment_model == "SDQA":
+        from iqa import DepictQA, load_pretrained_weights
+        assert os.path.isfile(args.assessment_config)
+        ## loading cfg
+        with open(args.assessment_config, "r") as f:
+            iqa_cfg = EasyDict(yaml.safe_load(f))
+        ## Model
+        assessment = DepictQA(iqa_cfg, training=False)
+        assessment = load_pretrained_weights(iqa_cfg, assessment, logger=None)
+        assessment.eval().to(weight_dtype).to(device)
+        logger.propagate = False
+    logger.info(f"IQA Params: {sum(p.numel() for p in assessment.parameters())/1e6} M, dtype: {next(assessment.parameters()).dtype}")
+    logger.info("##############################################################")
+    ## [1-4]: Loading Connector model ...
+    connector_dtype = torch.float32
+    logger.info("##############################################################")
+    logger.info(f"Connector type: {config.connector.model}, path: {config.connector.model_pretrained}, weight_dtype: {connector_dtype}")
+    if config.connector.model == "QFormer":
+        from diffusion.model.qa_connector import QFormer
+        connector = QFormer(
+            hidden_dim = config.connector.hidden_dim,
+            layers = config.connector.layers,
+            heads = config.connector.heads
+        )
+    else:
+        raise KeyError(f"Unknown Connector Type: only support [QFormer], but received {config.connector.model}.")
+    # if config.connector.load_from:
+    #     logger.info(f"Loading Pre-trained Weight for Connector: {config.connector.load_from}")
+    #     state_dict = torch.load(config.connector.load_from, map_location='cpu')
+    #     missing, unexpected = connector.load_state_dict(state_dict["state_dict"], strict=False)
+    #     logger.warning(f"Missing keys: {missing}")
+    #     logger.warning(f"Unexpected keys: {unexpected}")
+    connector = connector.eval().to(weight_dtype).to(device)
+    logger.info(f"Connector Params: {sum(p.numel() for p in connector.parameters())/1e6} M, dtype: {next(connector.parameters()).dtype}")
+    logger.info("##############################################################")
+    # [1-5]: Loading DiT model ...
+    if config.model.model == "SD35M_P2P":
+        assert args.flow_type == "p2p", f"Error: Model {config.model.model} only support 'p2p' mode."
+        from diffusion.model.sd35 import load_mmdit_p2p
+        DiT = load_mmdit_p2p(
+            config.model.model_pretrained,
+            config.model.shift,
+            False,
+            device,
+            config.model.image_size,
+            config.model.input_channel,
+        ).eval().to(device)
+    elif config.model.model == "SD35M_D2C":
+        assert args.flow_type == "d2c", f"Error: Model {config.model.model} only support 'd2c' mode."
+        from diffusion.model.sd35 import load_mmdit
+        DiT = load_mmdit(
+            config.model.model_pretrained,
+            config.model.shift,
+            False,
+            device,
+            config.model.image_size,
+            config.model.input_channel,
+        ).eval().to(device)
+    else:
+        raise KeyError(f"Only support Model: 'SD35M_P2P' or 'SD35M_D2C', but received {config.model.model}.")
+    ## Load model
+    state_dict = torch.load(config.model.load_from)
+    if config.model.load_from.endswith(".bin"):
         logger.info("Loading fsdp bin checkpoint....")
         old_state_dict = state_dict
         state_dict = dict()
         state_dict["state_dict"] = old_state_dict
     if "pos_embed" in state_dict["state_dict"]:
         del state_dict["state_dict"]["pos_embed"]
-    missing, unexpected = model.load_state_dict(state_dict["state_dict"], strict=False)
-    model.eval().to(weight_dtype)
+    missing, unexpected = DiT.load_state_dict(state_dict["state_dict"], strict=False)
+    DiT.eval().to(weight_dtype)
     dit_dtype = weight_dtype
     logger.info("##############################################################")
     logger.info("# % Model Define ..... ")
     logger.info(f"Inference with {weight_dtype}, default guidance_type: {guidance_type}, flow_shift: {flow_shift}")
-    logger.info(f"{model.__class__.__name__}:{config.model.model}, Model Parameters: {sum(p.numel() for p in model.parameters()):,}")
-    logger.info("Generating sample from ckpt: %s" % args.model_path)
-    missing_ckpt = []
-    for m in missing:
-        if "llm" in m and "lora" not in m:
-            continue
-        missing_ckpt.append(m)
-    logger.warning(f"Missing keys: {missing_ckpt}")
+    logger.info(f"{DiT.__class__.__name__}:{config.model.model}, Model Parameters: {sum(p.numel() for p in DiT.parameters()):,}")
+    logger.info("Generating sample from ckpt: %s" % config.model.load_from)
+    logger.warning(f"Missing keys: {missing}")
     logger.warning(f"Unexpected keys: {unexpected}")
-    logger.info(f"Parameter of Model: {sum(p.numel() for p in model.parameters()) / 1000000} M")
+    logger.info(f"Parameter of DiT: {sum(p.numel() for p in DiT.parameters()) / 1000000} M")
     logger.info("##############################################################")
-else:
+    # [1-6]: Combination Model
+    model = IQAIR(DiT, connector, assessment, device)
     logger.info("##############################################################")
-    logger.info("Combination Model is inference from pre-trained weight!")
+    logger.info("Summary: IQAIR")
+    for param in model.parameters():
+        param.requires_grad = False
+    num_total_params = sum(p.numel() for p in model.parameters())
+    logger.info(f"All params: {round(num_total_params/1e6, 3)}M")
     logger.info("##############################################################")
+    ## Load model
+    if os.path.isfile(args.model_path):
+        state_dict = torch.load(args.model_path)
+        if args.model_path.endswith(".bin"):
+            logger.info("Loading fsdp bin checkpoint....")
+            old_state_dict = state_dict
+            state_dict = dict()
+            state_dict["state_dict"] = old_state_dict
+        if "pos_embed" in state_dict["state_dict"]:
+            del state_dict["state_dict"]["pos_embed"]
+        missing, unexpected = model.load_state_dict(state_dict["state_dict"], strict=False)
+        model.eval().to(weight_dtype)
+        dit_dtype = weight_dtype
+        logger.info("##############################################################")
+        logger.info("# % Model Define ..... ")
+        logger.info(f"Inference with {weight_dtype}, default guidance_type: {guidance_type}, flow_shift: {flow_shift}")
+        logger.info(f"{model.__class__.__name__}:{config.model.model}, Model Parameters: {sum(p.numel() for p in model.parameters()):,}")
+        logger.info("Generating sample from ckpt: %s" % args.model_path)
+        missing_ckpt = []
+        for m in missing:
+            if "llm" in m and "lora" not in m:
+                continue
+            missing_ckpt.append(m)
+        logger.warning(f"Missing keys: {missing_ckpt}")
+        logger.warning(f"Unexpected keys: {unexpected}")
+        logger.info(f"Parameter of Model: {sum(p.numel() for p in model.parameters()) / 1000000} M")
+        logger.info("##############################################################")
+    else:
+        logger.info("##############################################################")
+        logger.info("Combination Model is inference from pre-trained weight!")
+        logger.info("##############################################################")
+    return model, vae, args, config, device
 
 # [2]: Inference
-def RAR_process(img, fname):
+def RAR_process(img, fname, model, vae, args, config, device):
     lq = image_process(img)
     lq = lq.to(device)
     bs = lq.shape[0]
+    latent_size = args.image_size // config.vae.vae_downsample_rate
     latent_size_h, latent_size_w = latent_size, latent_size
 
+    save_detail = args.detail
+    save_root = config.work_dir
     save_folder = os.path.join(save_root, fname)
    os.makedirs(save_folder, exist_ok=True)
     restore_results = []
     flag = 0
     # generator
     generator = torch.Generator(device=device).manual_seed(args.seed)
-    dit_step = args.step
+    sample_steps = args.step
     num_rounds = args.num_rounds
     print("="*20)
+
+    if args.weight_type == "bf16":
+        weight_dtype = torch.bfloat16
+    elif args.weight_type == "fp16":
+        weight_dtype = torch.float16
+    elif args.weight_type == "fp32":
+        weight_dtype = torch.float32
+    else:
+        raise KeyError(f"Unsupported Weight Type: {args.weight_type}")
+    dit_dtype = weight_dtype
+    vae_dtype = get_weight_dtype(config.vae.weight_dtype)
     # start sampling
     with torch.no_grad():
        ## [a1]: VAE
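This is the core of the change: the whole module-level setup (config parsing, VAE, text encoder, IQA model, connector, DiT, and the combined IQAIR model) moves into `load_rar_model()` under Streamlit's `@st.cache_resource`. Streamlit re-executes the entire script on every widget interaction, so without the cache each click would reload all checkpoints; with it, the heavy objects are built once per server process and reused. A minimal sketch of the pattern, with a stand-in model rather than the RAR stack:

import streamlit as st
import torch

@st.cache_resource  # body runs once per process; later script reruns reuse the returned object
def load_model():
    model = torch.nn.Linear(8, 8)  # stand-in for expensive checkpoint loading
    return model.eval()

model = load_model()  # cheap on every rerun after the first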
@@ -586,7 +602,7 @@ def RAR_process(img, fname):
                 guidance_type=guidance_type,
                 cfg_scale=args.cfg_scale,
                 pag_scale=args.pag_scale,
-                pag_applied_layers=pag_applied_layers,
+                pag_applied_layers=config.model.pag_applied_layers,
                 model_type="flow",
                 model_kwargs=model_kwargs,
                 schedule="FLOW",
@@ -598,7 +614,7 @@ def RAR_process(img, fname):
                 order=2,
                 skip_type="time_uniform_flow",
                 method="multistep",
-                flow_shift=flow_shift,
+                flow_shift=config.scheduler.flow_shift,
             )
         elif args.sampling_algo == "flow_euler":
             flow_solver = FlowEuler(
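The two one-line hunks above are fallout from the same scoping change: `pag_applied_layers` and `flow_shift` used to be module-level variables and are now local to `load_rar_model()`, so inside `RAR_process` the solver arguments read `config.model.pag_applied_layers` and `config.scheduler.flow_shift` directly.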
@@ -643,12 +659,12 @@ def RAR_process(img, fname):
                 recon = vae.process_out(input_images.to(vae_dtype).to(device)).to(device)
                 recon = vae.decode(recon)
                 recon = torch.clamp((recon + 1.0) / 2.0, min=0.0, max=1.0)
-                restore_results.append(recon)
                 # save recon image
                 save_recon_path = os.path.join(save_folder, "%s_step%d_%s.png"%(fname.split(".")[0], n_round, prompts[0]))
                 save_recon = 255.0 * rearrange(recon[0], "c h w -> h w c")
                 save_recon = Image.fromarray(save_recon.type(torch.uint8).cpu().numpy())
                 save_recon.save(save_recon_path)
+                restore_results.append(save_recon)
                 return restore_results
             elif save_detail:
                 if flag == 1:
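`restore_results` now collects the `PIL.Image` that was just saved (`save_recon`) instead of the raw decoded tensor, which lets the Streamlit side pass each entry straight to `st.image(...)` and `.resize(...)`. The conversion the loop performs before saving amounts to this helper (a sketch; `tensor_to_pil` is an illustrative name, not in the file):

from einops import rearrange
from PIL import Image
import torch

def tensor_to_pil(recon: torch.Tensor) -> Image.Image:
    # recon: (B, C, H, W) with values in [0, 1]
    arr = 255.0 * rearrange(recon[0], "c h w -> h w c")
    return Image.fromarray(arr.type(torch.uint8).cpu().numpy())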
@@ -657,23 +673,17 @@ def RAR_process(img, fname):
                 recon = vae.process_out(samples.to(vae_dtype).to(device)).to(device)
                 recon = vae.decode(recon)
                 recon = torch.clamp((recon + 1.0) / 2.0, min=0.0, max=1.0)
-                restore_results.append(recon)
                 # save recon image
                 save_recon_path = os.path.join(save_folder, "%s_step%d_%s.png"%(fname.split(".")[0], n_round, prompts[0]))
                 save_recon = 255.0 * rearrange(recon[0], "c h w -> h w c")
                 save_recon = Image.fromarray(save_recon.type(torch.uint8).cpu().numpy())
                 save_recon.save(save_recon_path)
-
+                restore_results.append(save_recon)
             # prepare for next round
             input_images = samples
             torch.cuda.empty_cache()
 
 # ==========================================================================================================
-# Interface
-# sample_image = r"/home/CORP/hsiang.chen/Projects/Demo/sample_images/1/input.png"
-import streamlit as st
-import time
-
 # root_dir = r"/home/CORP/hsiang.chen/Desktop/RAR_proj/Demo/"
 # -----------------------------
 # Predefined images – replace with your own paths
@@ -687,7 +697,10 @@ PREDEFINED_IMAGES = {
     "Sample 6": {"input":"sample_images/6/input.png", "output": ["sample_images/6/s1_noise.png", "sample_images/6/s2_LL.png"]},
     "Sample 7": {"input":"sample_images/7/input.png", "output": ["sample_images/7/s1_resolution.png", "sample_images/7/s2_LL.png", "sample_images/7/s3_haze.png"]},
     "Sample 8": {"input":"sample_images/8/input.png", "output": ["sample_images/8/s1_resolution.png", "sample_images/8/s2_none.png"]},
+    "Prague 1": {"input":"sample_images/realworld/IMG_9453.jpeg", "output": None},
+    "Prague 2": {"input":"sample_images/realworld/IMG_9525.jpeg", "output": None},
 }
+# sample_image = r"/home/CORP/hsiang.chen/Projects/Demo/sample_images/1/input.png"
 
 st.set_page_config(page_title="RAR Demo", layout="wide", initial_sidebar_state='expanded')
 st.title("AIC-C: RAR Demo")
@@ -709,13 +722,18 @@ selected_name = st.sidebar.selectbox(
 # Load image based on user choice
 input_image = None
 if upload_file is not None:
-    filename = os.path.basename(upload_file)
+    filename = upload_file.name
     input_image = Image.open(upload_file)
-elif selected_name != "None":
+    input_image = input_image.resize((256, 256))
+elif "Sample" in selected_name:
     # input_image = Image.open(os.path.join(root_dir, PREDEFINED_IMAGES[selected_name][0]))
     filename = os.path.basename(PREDEFINED_IMAGES[selected_name]["input"])
     input_image = Image.open(PREDEFINED_IMAGES[selected_name]["input"])
     input_image = input_image.resize((256, 256))
+elif "Prague" in selected_name:
+    filename = os.path.basename(PREDEFINED_IMAGES[selected_name]["input"])
+    input_image = Image.open(PREDEFINED_IMAGES[selected_name]["input"])
+    input_image = input_image.resize((256, 256))
 
 # =============================
 # Image Processing Section
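`st.file_uploader` returns an `UploadedFile`, a file-like object rather than a filesystem path, so the old `os.path.basename(upload_file)` raised a `TypeError`; the replacement reads the original filename from `upload_file.name` and hands the object itself to `Image.open`. A minimal sketch of the corrected upload path:

import streamlit as st
from PIL import Image

upload_file = st.file_uploader("Upload an image")
if upload_file is not None:
    filename = upload_file.name            # original name of the uploaded file
    input_image = Image.open(upload_file)  # PIL reads the file-like object directly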
@@ -730,11 +748,12 @@ def process_image(image: Image.Image, selected_name: str, filename: str):
     For now, we simply return the original image.
     """
     output_images = [image]
-    if selected_name != "None":
+    if "Sample" in selected_name:
         for image_path in PREDEFINED_IMAGES[selected_name]["output"]:
             output_images.append(Image.open(image_path))
     else:
-        restored_images = RAR_process(image, filename)
+        model, vae, args, config, device = load_rar_model()
+        restored_images = RAR_process(image, filename, model, vae, args, config, device)
         output_images += restored_images
     # Example placeholder: return the input image unchanged
     return output_images
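With `load_rar_model()` cached, calling it inside `process_image` is effectively free after the first run: `@st.cache_resource` hands back the same `(model, vae, args, config, device)` bundle on every rerun, and `RAR_process` now receives all of its dependencies explicitly instead of reading module globals.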
@@ -761,7 +780,7 @@ with left_col:
 with right_col:
     st.subheader("Processed Output")
     if input_image:
-        results = process_image(input_image, selected_name, )  # <-------------------------------------- Modify process_image
+        results = process_image(input_image, selected_name, filename)  # <-------------------------------------- Modify process_image
         stage_idx = st.session_state.get("stage_idx", len(results)-1) if st.session_state.get("stage_idx", len(results)-1) < len(results) else len(results)-1
         st.image(results[stage_idx].resize((256,256)))