# -*- coding: UTF-8 -*-
"""
@Time    : 30/05/2025 19:24
@Author  : xiaoguangliang
@File    : unconditional_diffusion_inference.py
@Project : Faice_text2face
"""
import random

import numpy as np
import torch
import gradio as gr
import PIL.Image
import spaces
from accelerate import Accelerator
from diffusers import DDPMPipeline
from loguru import logger

from utils import timer

# Hugging Face Hub repository holding the trained unconditional DDPM weights.
model_path = 'Ngene787/Faice_unconditional_diffusion'

# Apple-silicon (MPS) backends do not support fp16 mixed precision in
# Accelerate, so fall back to the default precision there.
if torch.backends.mps.is_available():
    accelerator = Accelerator(gradient_accumulation_steps=1)
else:
    accelerator = Accelerator(mixed_precision="fp16", gradient_accumulation_steps=1)

logger.info("Loading model ...")

# Run in half precision only when a CUDA device is present; CPU inference
# stays in float32.
device = "cuda" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

pipe = DDPMPipeline.from_pretrained(
    model_path,
    torch_dtype=torch_dtype,
    low_cpu_mem_usage=True,
)
pipe = pipe.to(device)
pipe = accelerator.prepare(pipe)

# Enable memory-efficient attention
# pipe.enable_xformers_memory_efficient_attention()

# Largest value accepted as a seed (fits a signed 32-bit int).
MAX_SEED = np.iinfo(np.int32).max


@spaces.GPU(duration=65)
def inference_unconditional(seed,
                            randomize_seed=False,
                            num_inference_steps=20,
                            progress=gr.Progress(track_tqdm=True),
                            ):
    """Generate a single face image with the unconditional DDPM pipeline.

    Args:
        seed: Integer seed for the torch random generator; ignored when
            ``randomize_seed`` is true.
        randomize_seed: When true, draw a fresh seed in ``[0, MAX_SEED]``
            instead of using ``seed``.
        num_inference_steps: Number of denoising steps for the scheduler.
        progress: Gradio progress tracker; ``track_tqdm=True`` mirrors the
            pipeline's internal tqdm bar into the UI.

    Returns:
        The generated image as a numpy array (``output_type="np"``),
        the first (and only) element of the pipeline's batch.
    """
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    # CPU generator is intentional: diffusers accepts it regardless of the
    # device the pipeline runs on, keeping results reproducible per seed.
    generator = torch.Generator().manual_seed(seed)

    logger.info('Generating image ...')
    with timer("inference"):
        image = pipe(
            batch_size=1,
            generator=generator,
            num_inference_steps=num_inference_steps,
            output_type="np",
        ).images[0]

    return image