Onise committed on
Commit
ef9a50a
·
verified ·
1 Parent(s): 7e954a8

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +54 -0
app.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import torch
3
+ from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
4
+ from diffusers.utils import load_image
5
+ from controlnet_aux import OpenposeDetector
6
+ import numpy as np
7
+ from PIL import Image
8
+
9
+ # Load models
10
+ controlnet = ControlNetModel.from_pretrained(
11
+ "lllyasviel/control_v11p_sd15_openpose",
12
+ torch_dtype=torch.float16
13
+ )
14
+ pipe = StableDiffusionControlNetPipeline.from_pretrained(
15
+ "runwayml/stable-diffusion-v1-5",
16
+ controlnet=controlnet,
17
+ torch_dtype=torch.float16
18
+ ).to("cuda")
19
+
20
+ pose_detector = OpenposeDetector.from_pretrained("lllyasviel/Annotators")
21
+
22
+ def transfer_pose(pose_image, appearance_image, prompt="person"):
23
+ # Extract pose from first image
24
+ pose_img = pose_detector(pose_image)
25
+
26
+ # Use appearance image as reference for style/identity
27
+ # Generate new image with pose + appearance guidance
28
+ result = pipe(
29
+ prompt=f"photo of {prompt}, high quality",
30
+ image=pose_img,
31
+ controlnet_conditioning_scale=1.0,
32
+ num_inference_steps=20,
33
+ guidance_scale=7.0
34
+ ).images[0]
35
+
36
+ return pose_img, result
37
+
38
+ # Gradio interface
39
+ demo = gr.Interface(
40
+ fn=transfer_pose,
41
+ inputs=[
42
+ gr.Image(label="Source Pose Image", type="pil"),
43
+ gr.Image(label="Target Appearance Image", type="pil"),
44
+ gr.Textbox(label="Prompt (optional)", value="person")
45
+ ],
46
+ outputs=[
47
+ gr.Image(label="Detected Pose"),
48
+ gr.Image(label="Result")
49
+ ],
50
+ title="Pose Transfer Tool",
51
+ description="Transfer pose from first image to generate a new image"
52
+ )
53
+
54
+ demo.launch()