Spaces status: Paused

Commit: "fix bugs" — files changed: main.py (+5 −2), requirements.txt (+1 −0)

main.py (changed):

@@ -2,7 +2,8 @@ import logging
 import math
 import os
 from typing import Any, Dict, List, Optional, Tuple, Union
-from diffusers.models.controlnet import ControlNetConditioningEmbedding
+#from diffusers.models.controlnet import ControlNetConditioningEmbedding
+from diffusers.models.controlnets.controlnet import ControlNetConditioningEmbedding
 import torch
 from torch import nn
 import torch.nn.functional as F

@@ -39,7 +40,7 @@ from diffusers import UniPCMultistepScheduler
 from src.models.stage2_inpaint_unet_2d_condition import Stage2_InapintUNet2DConditionModel

 from torchvision import transforms
-from diffusers.models.controlnet import ControlNetConditioningEmbedding
+#from diffusers.models.controlnet import ControlNetConditioningEmbedding
 from transformers import CLIPImageProcessor
 from transformers import Dinov2Model
 from diffusers import AutoencoderKL, DDPMScheduler, UNet2DConditionModel,ControlNetModel,DDIMScheduler

@@ -49,6 +50,7 @@ from src.pipelines.PCDMs_pipeline import PCDMsPipeline

 import spaces
 from libs.easy_dwpose import DWposeDetector
+from libs.easy_dwpose.draw import draw_openpose
 from PIL import Image
 import cv2
 import os

@@ -1160,6 +1162,7 @@ def run_generate_frame(images, target_poses, train_steps=100, inference_steps=10
     is_app=True

     print(target_poses)
+    target_poses = [draw_openpose(pose, height=img_height, width=img_width, include_hands=True, include_face=False) for pose in target_poses]

     dwpose, rembg_session, pcdms_model, noise_scheduler, image_encoder_p, image_encoder_g, vae, unet = load_models()

(NOTE: indentation of the in-function lines in the last hunk was stripped by the diff viewer; reconstructed at 4 spaces — confirm against the repository.)

requirements.txt (changed):

@@ -5,3 +5,4 @@ accelerate
 gradio
 rembg[cpu]
 spaces
+matplotlib