---
license: mit
tags:
- pytorch
- diffusers
- class-conditional-image-generation
- diffusion-models-class
---

# Class-conditional diffusion model for digit generation

This model is a class-conditional diffusion model that generates 28×28 single-channel digit images.

## Usage

```python
import torch
from torch import nn
from diffusers import DDPMScheduler, UNet2DModel


# Define the model
class ClassConditionalUnet(nn.Module):
    def __init__(self, num_classes=10, class_emb_size=4):
        super().__init__()
        # Map each class label to a feature vector of length class_emb_size
        self.class_emb = nn.Embedding(num_classes, class_emb_size)
        # self.model is an unconditional UNet; extra input channels are added
        # here so it can receive the conditioning information
        self.model = UNet2DModel(
            sample_size=28,                  # generated images are 28x28
            in_channels=1 + class_emb_size,  # extra input channels for the class condition
            out_channels=1,                  # single-channel grayscale digit images
            layers_per_block=2,              # number of residual layers per UNet block
            block_out_channels=(32, 64, 64),
            down_block_types=(
                "DownBlock2D",      # regular ResNet downsampling block
                "AttnDownBlock2D",  # downsampling block with spatial self-attention
                "AttnDownBlock2D",
            ),
            up_block_types=(
                "AttnUpBlock2D",    # ResNet upsampling block with spatial self-attention
                "AttnUpBlock2D",
                "UpBlock2D",        # regular ResNet upsampling block
            ),
        )

    def forward(self, x, t, class_labels):
        bs, ch, w, h = x.shape
        # Map class labels to embedding vectors
        class_cond = self.class_emb(class_labels)
        # Broadcast the embedding over the spatial dimensions
        class_cond = class_cond.view(bs, class_cond.shape[1], 1, 1).expand(bs, class_cond.shape[1], w, h)
        # Concatenate the condition channels with the noisy input
        net_input = torch.cat((class_cond, x), dim=1)
        return self.model(net_input, t).sample


ckpt = torch.load("class_cond_unet.pth", map_location="cpu")
model = ClassConditionalUnet(
    num_classes=ckpt["num_classes"],
    class_emb_size=ckpt["class_emb_size"]
)
model.load_state_dict(ckpt["model_state_dict"])
model.eval()

noise_scheduler = DDPMScheduler.from_pretrained("Dirry525/class_num_generator")
```